Merge changes from topic "changelog"

* changes:
  changelog: Added RELEASE_NOTE to Commit attributes
  changelog: Added gitcommit.go file to changelog package
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..01bdb0c
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,3 @@
+module cos.googlesource.com/cos/tools
+
+go 1.14
diff --git a/src/cmd/cos_image_analyzer/internal/binary/binarydiff.go b/src/cmd/cos_image_analyzer/internal/binary/binarydiff.go
new file mode 100644
index 0000000..7f22f4c
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/binary/binarydiff.go
@@ -0,0 +1,48 @@
+package binary
+
+import (
+	"cos.googlesource.com/cos/tools/src/cmd/cos_image_analyzer/internal/utilities"
+	"fmt"
+)
+
+// Global variables
+var (
+	// Command-line path strings
+	// /etc/os-release is the file describing COS versioning
+	etcOSRelease = "/etc/os-release"
+)
+
+// BinaryDiff is a tool that finds all binary differences of two COS images
+// (COS version, rootfs, kernel command line, stateful partition, ...)
+//
+// Input:  (string) img1Path - The path to the root directory for COS image1
+//		   (string) img2Path - The path to the root directory for COS image2
+//
+// Output: (stdout) terminal output - All differences printed to the terminal
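+//
+// Illustrative usage (the mount paths here are hypothetical):
+//   err := BinaryDiff("/mnt/cos-image1", "/mnt/cos-image2")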
+func BinaryDiff(img1Path, img2Path string) error {
+	fmt.Println("================== Binary Differences ==================")
+
+	// COS Version Difference
+	fmt.Println("--------- COS Version Difference ---------")
+	verMap1, err := utilities.ReadFileToMap(img1Path+etcOSRelease, "=")
+	if err != nil {
+		return err
+	}
+	verMap2, err := utilities.ReadFileToMap(img2Path+etcOSRelease, "=")
+	if err != nil {
+		return err
+	}
+
+	// Compare Version (Major)
+	_, err = utilities.CmpMapValues(verMap1, verMap2, "VERSION")
+	if err != nil {
+		return err
+	}
+	// Compare BUILD_ID (Minor)
+	_, err = utilities.CmpMapValues(verMap1, verMap2, "BUILD_ID")
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/src/cmd/cos_image_analyzer/internal/input/cleanup_api.go b/src/cmd/cos_image_analyzer/internal/input/cleanup_api.go
new file mode 100644
index 0000000..1d3bc50
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/input/cleanup_api.go
@@ -0,0 +1,23 @@
+package input
+
+import (
+	"os"
+	"os/exec"
+)
+
+// Cleanup unmounts and removes a mounted directory and detaches its loop device
+// Input:
+//   (string) mountDir - Active mount directory ready to close
+//   (string) loopDevice - Active loop device ready to close
+// Output: nil on success, else error
+func Cleanup(mountDir, loopDevice string) error {
+	_, err := exec.Command("sudo", "umount", mountDir).Output()
+	if err != nil {
+		return err
+	}
+	_, err1 := exec.Command("sudo", "losetup", "-d", loopDevice).Output()
+	if err1 != nil {
+		return err1
+	}
+	return os.Remove(mountDir)
+}
diff --git a/src/cmd/cos_image_analyzer/internal/input/gce_api.go b/src/cmd/cos_image_analyzer/internal/input/gce_api.go
new file mode 100644
index 0000000..c147b4c
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/input/gce_api.go
@@ -0,0 +1,96 @@
+package input
+
+import (
+	"bytes"
+	"encoding/json"
+	// "fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"path/filepath"
+	"strings"
+)
+
+const timeOut = "7200"
+const imageFormat = "vmdk"
+const name = "gcr.io/compute-image-tools/gce_vm_image_export:release"
+
+type Steps struct {
+	Args [6]string `json:"args"`
+	Name string    `json:"name"`
+	Env  [1]string `json:"env"`
+}
+
+type GcePayload struct {
+	Timeout string    `json:"timeout"`
+	Steps   [1]Steps  `json:"steps"`
+	Tags    [2]string `json:"tags"`
+}
+
+// gceExport calls the Cloud Build REST API that exports a public compute
+// image to a specific GCS bucket.
+// Input:
+//   (string) projectID - project ID of the cloud project holding the image
+//   (string) bucket - name of the GCS bucket holding the COS Image
+//   (string) image - name of the source image to be exported
+// Output: nil on success, else error
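+//
+// The JSON body posted to the Cloud Build API roughly looks like (illustrative, args elided):
+//   {"timeout":"7200",
+//    "steps":[{"args":[...],"name":"gcr.io/compute-image-tools/gce_vm_image_export:release","env":["BUILD_ID=$BUILD_ID"]}],
+//    "tags":["gce-daisy","gce-daisy-image-export"]}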
+func gceExport(projectID, bucket, image string) error {
+	// API Variables
+	gceURL := "https://cloudbuild.googleapis.com/v1/projects/" + projectID + "/builds"
+	destURI := "gs://" + bucket + "/" + image + "." + imageFormat
+	args := [6]string{"-oauth=/usr/local/google/home/acueva/cos-googlesource/tools/src/cmd/cos_image_analyzer/internal/utilities/oauth.json", "-timeout=" + timeOut, "-source_image=" + image, "-client_id=api", "-format=" + imageFormat, "-destination_uri=" + destURI}
+	env := [1]string{"BUILD_ID=$BUILD_ID"}
+	tags := [2]string{"gce-daisy", "gce-daisy-image-export"}
+
+	// Build API bodies
+	steps := [1]Steps{Steps{Args: args, Name: name, Env: env}}
+	payload := &GcePayload{
+		Timeout: timeOut,
+		Steps:   steps,
+		Tags:    tags}
+
+	requestBody, err := json.Marshal(payload)
+	if err != nil {
+		return err
+	}
+	log.Println(string(requestBody))
+
+	resp, err := http.Post(gceURL, "application/json", bytes.NewBuffer(requestBody))
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+
+	log.Println(string(body))
+	return nil
+}
+
+// GetCosImage calls the Cloud Build API to export a public COS image to
+// a GCS bucket and then calls GetGcsImage() to download that image from GCS.
+// ADC is used for authorization.
+// Input:
+//   (string) cosCloudPath - The "projectID/gcs-bucket/image" path of the
+//   source image to be exported
+// Output:
+//   (string) imageDir - Path to the mounted directory of the COS Image
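+//
+// Illustrative usage (the path components here are hypothetical):
+//   imageDir, err := GetCosImage("my-project/my-bucket/my-exported-image")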
+func GetCosImage(cosCloudPath string) (string, error) {
+	splitPath := strings.Split(cosCloudPath, "/")
+	projectID, bucket, image := splitPath[0], splitPath[1], splitPath[2]
+
+	if err := gceExport(projectID, bucket, image); err != nil {
+		return "", err
+	}
+
+	gcsPath := filepath.Join(bucket, image)
+	imageDir, err := GetGcsImage(gcsPath, 1)
+	if err != nil {
+		return "", err
+	}
+
+	return imageDir, nil
+}
diff --git a/src/cmd/cos_image_analyzer/internal/input/gcs_api.go b/src/cmd/cos_image_analyzer/internal/input/gcs_api.go
new file mode 100644
index 0000000..3f657d9
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/input/gcs_api.go
@@ -0,0 +1,155 @@
+package input
+
+import (
+	"bytes"
+	"cloud.google.com/go/storage"
+	"context"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const contextTimeOut = time.Second * 50
+
+// gcsDownload calls the GCS client API to download a specified object from
+// a GCS bucket. ADC is used for authorization
+// Input:
+//   (io.Writer) w - Output destination for download info
+//   (string) bucket - Name of the GCS bucket
+//   (string) object - Name of the GCS object
+//   (string) destDir - Destination for downloaded GCS object
+// Output:
+//   (string) downloadedFile - Path to downloaded GCS object
+func gcsDownload(w io.Writer, bucket, object, destDir string) (string, error) {
+	// Call API to download GCS object into tempDir
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		return "", err
+	}
+	defer client.Close()
+
+	ctx, cancel := context.WithTimeout(ctx, contextTimeOut)
+	defer cancel()
+
+	rc, err := client.Bucket(bucket).Object(object).NewReader(ctx)
+	if err != nil {
+		return "", err
+	}
+	defer rc.Close()
+
+	data, err := ioutil.ReadAll(rc)
+	if err != nil {
+		return "", err
+	}
+
+	log.New(w, "", log.Ldate|log.Ltime|log.Lshortfile).Printf("Blob %v downloaded.", object)
+
+	downloadedFile := filepath.Join(destDir, object)
+	if err := ioutil.WriteFile(downloadedFile, data, 0666); err != nil {
+		return "", err
+	}
+	return downloadedFile, nil
+}
+
+// getPartitionStart finds the starting offset of a partition on the disk
+// Input:
+//   (string) partition - The partition number whose offset is being pulled
+//   (string) diskRaw - Name of DOS/MBR file (ex: disk.raw)
+// Output:
+//   (int) start - The start of the partition on the disk
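+//
+// Roughly equivalent to the shell pipeline (illustrative):
+//   fdisk -l disk.raw | grep disk.raw<partition>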
+func getPartitionStart(partition, diskRaw string) (int, error) {
+	// Create the fdisk and grep commands for the pipeline below
+	cmd1 := exec.Command("fdisk", "-l", diskRaw)
+	cmd2 := exec.Command("grep", "disk.raw"+partition)
+
+	reader, writer := io.Pipe()
+	var buf bytes.Buffer
+
+	cmd1.Stdout = writer
+	cmd2.Stdin = reader
+	cmd2.Stdout = &buf
+
+	cmd1.Start()
+	cmd2.Start()
+	cmd1.Wait()
+	writer.Close()
+	cmd2.Wait()
+	reader.Close()
+
+	words := strings.Fields(buf.String())
+	start, err := strconv.Atoi(words[1])
+	if err != nil {
+		return -1, err
+	}
+
+	return start, nil
+}
+
+// mountDisk finds a free loop device and mounts a DOS/MBR disk file
+// Input:
+//   (string) diskFile - Name of DOS/MBR file (ex: disk.raw)
+//   (string) mountDir - Mount destination
+// Output: nil on success, else error
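+//
+// Roughly equivalent to (illustrative; offset = 512 * start of partition 3):
+//   sudo losetup --show -fP <diskFile>
+//   sudo mount -o ro,loop,offset=<offset> <loopDevice> <mountDir>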
+func mountDisk(diskFile, mountDir string, flag int) error {
+	sectorSize := 512
+	startOfPartition, err := getPartitionStart("3", diskFile)
+	if err != nil {
+		return err
+	}
+	offset := strconv.Itoa(sectorSize * startOfPartition)
+	out, err := exec.Command("sudo", "losetup", "--show", "-fP", diskFile).Output()
+	if err != nil {
+		return err
+	}
+	_, err1 := exec.Command("sudo", "mount", "-o", "ro,loop,offset="+offset, string(out[:len(out)-1]), mountDir).Output()
+	if err1 != nil {
+		return err1
+	}
+
+	return nil
+}
+
+// GetGcsImage calls the GCS client API to download a specified object from
+// a GCS bucket and unzips its contents. ADC is used for authorization
+// Input:
+//   (string) gcsPath - GCS "bucket/object" path for COS Image (.tar.gz file)
+// Output:
+//   (string) imageDir - Path to the mounted directory of the COS Image
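+//
+// Illustrative usage (the bucket and object here are hypothetical):
+//   imageDir, err := GetGcsImage("my-bucket/cos-81-12871-119-0.tar.gz", 1)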
+func GetGcsImage(gcsPath string, flag int) (string, error) {
+	bucket := strings.Split(gcsPath, "/")[0]
+	object := strings.Split(gcsPath, "/")[1]
+
+	tempDir, err := ioutil.TempDir(".", "tempDir-"+object) // Removed at end
+	if err != nil {
+		return "", err
+	}
+
+	tarFile, err := gcsDownload(os.Stdout, bucket, object, tempDir)
+	if err != nil {
+		return "", err
+	}
+
+	imageDir := filepath.Join(tempDir, "Image-"+object)
+	if err = os.Mkdir(imageDir, 0700); err != nil {
+		return "", err
+	}
+
+	_, err1 := exec.Command("tar", "-xzf", tarFile, "-C", imageDir).Output()
+	if err1 != nil {
+		return "", err1
+	}
+
+	diskRaw := filepath.Join(imageDir, "disk.raw")
+	if err = mountDisk(diskRaw, imageDir, flag); err != nil {
+		return "", err
+	}
+
+	return imageDir, nil
+}
diff --git a/src/cmd/cos_image_analyzer/internal/input/parse_input.go b/src/cmd/cos_image_analyzer/internal/input/parse_input.go
new file mode 100644
index 0000000..9e081d1
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/input/parse_input.go
@@ -0,0 +1,94 @@
+package input
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+)
+
+// Custom usage function. See -h flag
+func printUsage() {
+	usageTemplate := `NAME
+cos_image_analyzer - finds all meaningful differences of two COS Images
+(binary, package, commit, and release notes differences)
+
+SYNOPSIS 
+%s [-local] DIRECTORY-1 DIRECTORY-2 (default true)
+	DIRECTORY 1/2 - the local directory path to the root of the COS Image
+
+%s [-gcs] GCS-PATH-1 GCS-PATH-2 
+	GCS-PATH 1/2 - GCS "bucket/object" path for the COS Image (.tar.gz file) 
+	Ex: %s -gcs my-bucket/cos-77-12371-273-0.tar.gz my-bucket/cos-81-12871-119-0.tar.gz
+
+%s [-cos-cloud]  COS-CLOUD-PATH-1 COS-CLOUD-PATH-2 
+	COS-CLOUD-PATH 1/2 - The "projectID/gcs-bucket/image" path of the source image to be exported
+	Ex: %s -cos-cloud my-project/my-bucket/my-exported-image1 my-project/my-bucket/my-exported-image2
+
+DESCRIPTION
+`
+	usage := fmt.Sprintf(usageTemplate, os.Args[0], os.Args[0], os.Args[0], os.Args[0], os.Args[0])
+	fmt.Printf("%s", usage)
+	flag.PrintDefaults()
+	fmt.Println("\nOUTPUT\n(stdout) terminal output - All differences printed to the terminal")
+}
+
+// ParseInput parses the command-line input based on its type and returns the
+// root directory paths of both images to the start of the CosImageAnalyzer
+//
+// Input:  None (reads command-line args)
+//
+// Output: (string) rootImg1 - The local filesystem path for COS image1
+//		   (string) rootImg2 - The local filesystem path for COS image2
+func ParseInput() (string, string, error) {
+	// Flag Declaration
+	flag.Usage = printUsage
+	localPtr := flag.Bool("local", true, "input is two mounted images on local filesystem")
+	gcsPtr := flag.Bool("gcs", false, "input is two objects stored on Google Cloud Storage")
+	cosCloudPtr := flag.Bool("cos-cloud", false, "input is two public COS-cloud images")
+	flag.Parse()
+
+	if flag.NFlag() > 1 {
+		printUsage()
+		return "", "", errors.New("Error: Only one flag allowed")
+	}
+
+	// Input Selection
+	if *gcsPtr {
+		if len(flag.Args()) != 2 {
+			printUsage()
+			return "", "", errors.New("Error: GCS input requires two arguments")
+		}
+		rootImg1, err := GetGcsImage(flag.Args()[0], 1)
+		if err != nil {
+			return "", "", err
+		}
+		rootImg2, err := GetGcsImage(flag.Args()[1], 2)
+		if err != nil {
+			return "", "", err
+		}
+		return rootImg1, rootImg2, nil
+	} else if *cosCloudPtr {
+		if len(flag.Args()) != 2 {
+			printUsage()
+			return "", "", errors.New("Error: COS-cloud input requires two arguments")
+		}
+		rootImg1, err := GetCosImage(flag.Args()[0])
+		if err != nil {
+			return "", "", err
+		}
+		rootImg2, err := GetCosImage(flag.Args()[1])
+		if err != nil {
+			return "", "", err
+		}
+		return rootImg1, rootImg2, nil
+	} else if *localPtr {
+		if len(flag.Args()) != 2 {
+			printUsage()
+			return "", "", errors.New("Error: Local input requires two arguments")
+		}
+		return flag.Args()[0], flag.Args()[1], nil
+	}
+	printUsage()
+	return "", "", errors.New("Error: At least one flag needs to be true")
+}
diff --git a/src/cmd/cos_image_analyzer/internal/testdata/os-release-77 b/src/cmd/cos_image_analyzer/internal/testdata/os-release-77
new file mode 100644
index 0000000..b21554c
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/testdata/os-release-77
@@ -0,0 +1,2 @@
+BUILD_ID=12371.273.0
+ID=cos
\ No newline at end of file
diff --git a/src/cmd/cos_image_analyzer/internal/testdata/os-release-81 b/src/cmd/cos_image_analyzer/internal/testdata/os-release-81
new file mode 100644
index 0000000..826d8f8
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/testdata/os-release-81
@@ -0,0 +1,3 @@
+BUILD_ID=12871.119.0
+VERSION=81
+ID=cos
diff --git a/src/cmd/cos_image_analyzer/internal/utilities/logic_helper.go b/src/cmd/cos_image_analyzer/internal/utilities/logic_helper.go
new file mode 100644
index 0000000..bfdc0ea
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/utilities/logic_helper.go
@@ -0,0 +1,9 @@
+package utilities
+
+// // Helper Function for error checking
+// func check(e error) error {
+// 	if e != nil {
+// 		return e
+// 	}
+// 	return nil
+// }
diff --git a/src/cmd/cos_image_analyzer/internal/utilities/map_helpers.go b/src/cmd/cos_image_analyzer/internal/utilities/map_helpers.go
new file mode 100644
index 0000000..06b1d39
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/utilities/map_helpers.go
@@ -0,0 +1,61 @@
+package utilities
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+)
+
+// ReadFileToMap reads a text file line by line into a map. For each line, the
+// key is the text before the separator and the value is the rest of the line.
+// Ex: Inputs:  textLine: "NAME=Container-Optimized OS", sep: "="
+//	   Outputs:  map: {"NAME":"Container-Optimized OS"}
+//
+// Input:	(string) filePath - The command-line path to the text file
+//			(string) sep - The separator string for the key and value pairs
+// Output: 	(map[string]string) mapOfFile - The map of the read-in text file
+func ReadFileToMap(filePath, sep string) (map[string]string, error) {
+	file, err := os.Open(filePath)
+	if err != nil {
+		return map[string]string{}, err
+	}
+	defer file.Close()
+
+	mapOfFile := make(map[string]string)
+	scanner := bufio.NewScanner(file) // Read file line by line to fill map
+	for scanner.Scan() {
+		// Skip lines that do not contain the separator instead of panicking
+		if parts := strings.SplitN(scanner.Text(), sep, 2); len(parts) == 2 {
+			mapOfFile[parts[0]] = parts[1]
+		}
+	}
+
+	if scanner.Err() != nil {
+		return map[string]string{}, scanner.Err()
+	}
+	return mapOfFile, nil
+}
+
+// CmpMapValues is a helper function that compares a value shared by two maps
+// Input:  (map[string]string) map1 - First map to be compared
+//		   (map[string]string) map2 - Second map to be compared
+//		   (string) key - The key of the value to be compared in both maps
+//
+// Output: (stdout) terminal - If equal, print nothing. Else print difference
+//		   (int)	result - -1 error, 0 for no difference, 1 for difference
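+//
+// Illustrative usage (verMap1 and verMap2 are hypothetical /etc/os-release maps):
+//   result, err := CmpMapValues(verMap1, verMap2, "BUILD_ID")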
+func CmpMapValues(map1, map2 map[string]string, key string) (int, error) {
+	value1, ok1 := map1[key]
+	value2, ok2 := map2[key]
+
+	if !ok1 || !ok2 { // Error Check: At least one key is not present
+		return -1, errors.New("Error: " + key + " key not found in at least one of the maps")
+	}
+
+	if value1 != value2 {
+		fmt.Println(key, "Difference")
+		fmt.Println("< ", value1)
+		fmt.Println("> ", value2)
+		return 1, nil
+	}
+	return 0, nil
+}
diff --git a/src/cmd/cos_image_analyzer/internal/utilities/map_helpers_test.go b/src/cmd/cos_image_analyzer/internal/utilities/map_helpers_test.go
new file mode 100644
index 0000000..8e43712
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/internal/utilities/map_helpers_test.go
@@ -0,0 +1,36 @@
+package utilities
+
+import (
+	"testing"
+)
+
+// test ReadFileToMap function
+func TestReadFileToMap(t *testing.T) {
+	// test normal file
+	testFile, sep := "../testdata/os-release-77", "="
+	expectedMap := map[string]string{"BUILD_ID": "12371.273.0", "ID": "cos"}
+	resultMap, _ := ReadFileToMap(testFile, sep)
+
+	// Compare result with expected
+	if resultMap["BUILD_ID"] != expectedMap["BUILD_ID"] || resultMap["ID"] != expectedMap["ID"] {
+		t.Errorf("ReadFileToMap failed, expected %v, got %v", expectedMap, resultMap)
+	}
+}
+
+// test CmpMapValues function
+func TestCmpMapValues(t *testing.T) {
+	// test data
+	testMap1 := map[string]string{"BUILD_ID": "12371.273.0", "VERSION": "77", "ID": "cos"}
+	testMap2 := map[string]string{"BUILD_ID": "12871.119.0", "VERSION": "81", "ID": "cos"}
+	testKey1, testKey2 := "ID", "VERSION"
+
+	// test a key whose values match in both maps
+	if result1, _ := CmpMapValues(testMap1, testMap2, testKey1); result1 != 0 { // Expect 0 for same values
+		t.Errorf("CmpMapValues failed, expected %v, got %v", 0, result1)
+	}
+
+	// test a key whose values differ between the maps
+	if result2, _ := CmpMapValues(testMap1, testMap2, testKey2); result2 != 1 { // Expect 1 for different values
+		t.Errorf("CmpMapValues failed, expected %v, got %v", 1, result2)
+	}
+}
diff --git a/src/cmd/cos_image_analyzer/main.go b/src/cmd/cos_image_analyzer/main.go
new file mode 100644
index 0000000..017d44a
--- /dev/null
+++ b/src/cmd/cos_image_analyzer/main.go
@@ -0,0 +1,47 @@
+// cos_image_analyzer finds all the meaningful differences of two COS Images
+// (binary, package, commit, and release notes differences)
+//
+// Input:  (string) rootImg1 - The path for COS image1
+//		   (string) rootImg2 - The path for COS image2
+//		   (int) inputFlag - 0-Local filesystem path to root directory,
+//		   1-COS cloud names, 2-GCS object names
+//
+// Output: (stdout) terminal output - All differences printed to the terminal
+package main
+
+import (
+	"cos.googlesource.com/cos/tools/src/cmd/cos_image_analyzer/internal/binary"
+	"cos.googlesource.com/cos/tools/src/cmd/cos_image_analyzer/internal/input"
+	"fmt"
+	"os"
+	"runtime"
+)
+
+func cosImageAnalyzer(img1Path, img2Path string) error {
+	err := binary.BinaryDiff(img1Path, img2Path)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func main() {
+	if runtime.GOOS != "linux" {
+		fmt.Printf("Error: This is a Linux tool, it cannot run on %s\n", runtime.GOOS)
+		os.Exit(1)
+	}
+	rootImg1, rootImg2, err := input.ParseInput()
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	err1 := cosImageAnalyzer(rootImg1, rootImg2)
+	if err1 != nil {
+		fmt.Println(err1)
+		os.Exit(1)
+	}
+	// Cleanup(rootImg1, loop1) Debating on a struct that holds this info
+	// Cleanup(rootImg2, loop2)
+
+}
diff --git a/src/pkg/changelog/changelog.go b/src/pkg/changelog/changelog.go
new file mode 100755
index 0000000..8d25802
--- /dev/null
+++ b/src/pkg/changelog/changelog.go
@@ -0,0 +1,311 @@
+// Copyright 2020 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package generates a changelog based on the commit history between
+// two build numbers. The changelog consists of two outputs - the commits
+// added to the target build that aren't present in the source build, and the
+// commits in the source build that aren't present in the target build. This
+// package uses concurrency to improve performance.
+//
+// This package uses Gitiles to request information from a Git on Borg instance.
+// To generate a changelog, the package first retrieves the manifest files for
+// the two requested builds using the provided manifest GoB instance and repository.
+// The package then parses the XML files and retrieves the committish and instance
+// URL. A request is sent on a separate thread for each repository, asking for a list
+// of commits that occurred between the source committish and the target committish.
+// Finally, the resulting git.Commit objects are converted to Commit objects, and
+// consolidated into a mapping of repositoryName -> []*Commit.
+
+package changelog
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/beevik/etree"
+	"github.com/google/martian/log"
+	"go.chromium.org/luci/auth"
+	gitilesApi "go.chromium.org/luci/common/api/gitiles"
+	gitilesProto "go.chromium.org/luci/common/proto/gitiles"
+)
+
+const (
+	manifestFileName string = "snapshot.xml"
+
+	// These constants are used for exponential increase in Gitiles request size.
+	defaultPageSize          int32 = 1000
+	pageSizeGrowthMultiplier int32 = 5
+	maxPageSize              int32 = 10000
+)
+
+type repo struct {
+	// The Git on Borg instance to query from.
+	InstanceURL string
+	// A value that points to the last commit for a build on a given repo.
+	// Acceptable values:
+	// - A commit SHA
+	// - A ref, ex. "refs/heads/branch"
+	// - A ref defined as n-th parent of R in the form "R-n".
+	//   ex. "master-2" or "deadbeef-1".
+	// Source: https://pkg.go.dev/go.chromium.org/luci/common/proto/gitiles?tab=doc#LogRequest
+	Committish string
+}
+
+type commitsResult struct {
+	RepoURL string
+	Commits []*Commit
+	Err     error
+}
+
+type additionsResult struct {
+	Additions map[string][]*Commit
+	Err       error
+}
+
+func client(authenticator *auth.Authenticator, remoteURL string) (gitilesProto.GitilesClient, error) {
+	authedClient, err := authenticator.Client()
+	if err != nil {
+		return nil, fmt.Errorf("client: failed to create authenticated HTTP client:\n%v", err)
+	}
+	cl, err := gitilesApi.NewRESTClient(authedClient, remoteURL, true)
+	if err != nil {
+		return nil, errors.New("changelog: Failed to establish client to remote url: " + remoteURL)
+	}
+	return cl, nil
+}
+
+func createClients(clients map[string]gitilesProto.GitilesClient, authenticator *auth.Authenticator, repoMap map[string]*repo) error {
+	for _, repoData := range repoMap {
+		remoteURL := repoData.InstanceURL
+		if _, ok := clients[remoteURL]; ok {
+			continue
+		}
+		client, err := client(authenticator, remoteURL)
+		if err != nil {
+			return fmt.Errorf("createClients: error creating client mapping:\n%v", err)
+		}
+		clients[remoteURL] = client
+	}
+	return nil
+}
+
+// repoMap generates a mapping of repo name to instance URL and committish.
+// This eliminates the need to track remote names and allows lookup
+// of source committish when generating changelog.
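+//
+// For example, a manifest containing (illustrative snippet):
+//   <remote fetch="https://cos.googlesource.com" name="cos"/>
+//   <project name="cos/manifest" remote="cos" revision="deadbeef"/>
+// would map "cos/manifest" to {InstanceURL: "cos.googlesource.com", Committish: "deadbeef"}.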
+func repoMap(manifest string) (map[string]*repo, error) {
+	doc := etree.NewDocument()
+	if err := doc.ReadFromString(manifest); err != nil {
+		return nil, fmt.Errorf("repoMap: error parsing manifest xml:\n%v", err)
+	}
+	root := doc.SelectElement("manifest")
+
+	// Parse each <remote fetch=X name=Y> tag in the manifest xml file.
+	// Extract the "fetch" and "name" attributes from each remote tag, and map the name to the fetch URL.
+	remoteMap := make(map[string]string)
+	for _, remote := range root.SelectElements("remote") {
+		url := strings.Replace(remote.SelectAttr("fetch").Value, "https://", "", 1)
+		remoteMap[remote.SelectAttr("name").Value] = url
+	}
+
+	// Parse each <project name=X remote=Y revision=Z> tag in the manifest xml file.
+	// Extract the "name", "remote", and "revision" attributes from each project tag.
+	// Some projects do not have a "remote" attribute.
+	// If this is the case, they should use the default remoteURL.
+	remoteMap[""] = remoteMap[root.SelectElement("default").SelectAttr("remote").Value]
+	repos := make(map[string]*repo)
+	for _, project := range root.SelectElements("project") {
+		repos[project.SelectAttr("name").Value] = &repo{
+			InstanceURL: remoteMap[project.SelectAttrValue("remote", "")],
+			Committish:  project.SelectAttr("revision").Value,
+		}
+	}
+	return repos, nil
+}
+
+// mappedManifest retrieves a Manifest file from GoB and unmarshals XML.
+func mappedManifest(client gitilesProto.GitilesClient, repo string, buildNum string) (map[string]*repo, error) {
+	request := gitilesProto.DownloadFileRequest{
+		Project:    repo,
+		Committish: "refs/tags/" + buildNum,
+		Path:       manifestFileName,
+		Format:     1,
+	}
+	response, err := client.DownloadFile(context.Background(), &request)
+	if err != nil {
+		return nil, fmt.Errorf("mappedManifest: error downloading manifest file from repo %s:\n%v",
+			repo, err)
+	}
+	mappedManifest, err := repoMap(response.Contents)
+	if err != nil {
+		return nil, fmt.Errorf("mappedManifest: error parsing manifest contents from repo %s:\n%v",
+			repo, err)
+	}
+	return mappedManifest, nil
+}
+
+// commits gets all commits that occur between committish and ancestor for a specific repo.
+func commits(client gitilesProto.GitilesClient, repo string, committish string, ancestor string, outputChan chan commitsResult) {
+	start := time.Now()
+	pageSize := defaultPageSize
+	request := gitilesProto.LogRequest{
+		Project:            repo,
+		Committish:         committish,
+		ExcludeAncestorsOf: ancestor,
+		PageSize:           pageSize,
+	}
+	response, err := client.Log(context.Background(), &request)
+	if err != nil {
+		outputChan <- commitsResult{Err: fmt.Errorf("commits: Error retrieving log for repo: %s with committish: %s and ancestor %s:\n%v",
+			repo, committish, ancestor, err)}
+		return
+	}
+	// No nextPageToken means there were fewer than <defaultPageSize> commits total.
+	// We can immediately return.
+	if response.NextPageToken == "" {
+		log.Infof("Retrieved %d commits from %s in %s\n", len(response.Log), repo, time.Since(start))
+		parsedCommits, err := ParseGitCommitLog(response.Log)
+		if err != nil {
+			outputChan <- commitsResult{Err: fmt.Errorf("commits: Error parsing log response for repo: %s with committish: %s and ancestor %s:\n%v",
+				repo, committish, ancestor, err)}
+			return
+		}
+		outputChan <- commitsResult{RepoURL: repo, Commits: parsedCommits}
+		return
+	}
+	// Retrieve remaining commits using exponential increase in pageSize.
+	allCommits := response.Log
+	for response.NextPageToken != "" {
+		if pageSize < maxPageSize {
+			pageSize *= pageSizeGrowthMultiplier
+		}
+		request := gitilesProto.LogRequest{
+			Project:            repo,
+			Committish:         committish,
+			ExcludeAncestorsOf: ancestor,
+			PageToken:          response.NextPageToken,
+			PageSize:           pageSize,
+		}
+		response, err = client.Log(context.Background(), &request)
+		if err != nil {
+			outputChan <- commitsResult{Err: fmt.Errorf("commits: Error retrieving log for repo: %s with committish: %s and ancestor %s:\n%v",
+				repo, committish, ancestor, err)}
+			return
+		}
+		allCommits = append(allCommits, response.Log...)
+	}
+	log.Infof("Retrieved %d commits from %s in %s\n", len(allCommits), repo, time.Since(start))
+	parsedCommits, err := ParseGitCommitLog(allCommits)
+	if err != nil {
+		outputChan <- commitsResult{Err: fmt.Errorf("commits: Error parsing log response for repo: %s with committish: %s and ancestor %s:\n%v",
+			repo, committish, ancestor, err)}
+		return
+	}
+	outputChan <- commitsResult{RepoURL: repo, Commits: parsedCommits}
+}
+
+// additions retrieves all commits that occurred between 2 parsed manifest files for each repo.
+// Returns a map of repo name -> list of commits.
+func additions(clients map[string]gitilesProto.GitilesClient, sourceRepos map[string]*repo, targetRepos map[string]*repo, outputChan chan additionsResult) {
+	repoCommits := make(map[string][]*Commit)
+	commitsChan := make(chan commitsResult, len(targetRepos))
+	for repoURL, targetRepoInfo := range targetRepos {
+		cl := clients[targetRepoInfo.InstanceURL]
+		// If the source Manifest file does not contain a target repo,
+		// count every commit since target repo creation as an addition
+		ancestorCommittish := ""
+		if sourceRepoInfo, ok := sourceRepos[repoURL]; ok {
+			ancestorCommittish = sourceRepoInfo.Committish
+		}
+		go commits(cl, repoURL, targetRepoInfo.Committish, ancestorCommittish, commitsChan)
+	}
+	for i := 0; i < len(targetRepos); i++ {
+		res := <-commitsChan
+		if res.Err != nil {
+			outputChan <- additionsResult{Err: res.Err}
+			return
+		}
+		if len(res.Commits) > 0 {
+			repoCommits[res.RepoURL] = res.Commits
+		}
+	}
+	outputChan <- additionsResult{Additions: repoCommits}
+}
+
+// Changelog generates a changelog between 2 build numbers
+//
+// authenticator is an auth.Authenticator object that is used to build authenticated
+// Gitiles clients
+//
+// sourceBuildNum and targetBuildNum should be build numbers. Each should match
+// a tag that links directly to snapshot.xml
+// Ex. For /refs/tags/15049.0.0, the argument should be 15049.0.0
+//
+// The host should be the GoB instance that the manifest files are hosted in,
+// ex. "cos.googlesource.com"
+//
+// The repo should be the repository where the build manifest files
+// are located, ex. "cos/manifest-snapshots"
+//
+// Outputs two changelogs
+// The first changelog contains new commits that were added to the target
+// build starting from the source build number
+//
+// The second changelog contains all commits that are present in the source build
+// but not present in the target build
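+//
+// Illustrative usage (the build numbers here are hypothetical):
+//   additions, misses, err := Changelog(authenticator, "15050.0.0", "15051.0.0", "cos.googlesource.com", "cos/manifest-snapshots")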
+func Changelog(authenticator *auth.Authenticator, sourceBuildNum string, targetBuildNum string, host string, repo string) (map[string][]*Commit, map[string][]*Commit, error) {
+	clients := make(map[string]gitilesProto.GitilesClient)
+
+	// Since the manifest files are always hosted in the provided GoB host,
+	// create that client first so the host URL is known
+	manifestClient, err := client(authenticator, host)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Changelog: error creating client for GoB instance: %s:\n%v", host, err)
+	}
+	sourceRepos, err := mappedManifest(manifestClient, repo, sourceBuildNum)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Changelog: error retrieving mapped manifest for source build number: %s using manifest repository: %s:\n%v",
+			sourceBuildNum, repo, err)
+	}
+	targetRepos, err := mappedManifest(manifestClient, repo, targetBuildNum)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Changelog: error retrieving mapped manifest for target build number: %s using manifest repository: %s:\n%v",
+			targetBuildNum, repo, err)
+	}
+
+	clients[host] = manifestClient
+	err = createClients(clients, authenticator, sourceRepos)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Changelog: error creating source clients:\n%v", err)
+	}
+	err = createClients(clients, authenticator, targetRepos)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Changelog: error creating target clients:\n%v", err)
+	}
+
+	addChan := make(chan additionsResult, 1)
+	missChan := make(chan additionsResult, 1)
+	go additions(clients, sourceRepos, targetRepos, addChan)
+	go additions(clients, targetRepos, sourceRepos, missChan)
+	addRes := <-addChan
+	if addRes.Err != nil {
+		return nil, nil, fmt.Errorf("Changelog: failure when retrieving commit additions:\n%v", addRes.Err)
+	}
+	missRes := <-missChan
+	if missRes.Err != nil {
+		return nil, nil, fmt.Errorf("Changelog: failure when retrieving missed commits:\n%v", missRes.Err)
+	}
+
+	return addRes.Additions, missRes.Additions, nil
+}
diff --git a/src/pkg/changelog/changelog_test.go b/src/pkg/changelog/changelog_test.go
new file mode 100644
index 0000000..361750e
--- /dev/null
+++ b/src/pkg/changelog/changelog_test.go
@@ -0,0 +1,273 @@
+// Copyright 2020 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package changelog
+
+import (
+	"context"
+	"testing"
+
+	"go.chromium.org/luci/auth"
+	"go.chromium.org/luci/common/api/gerrit"
+	"go.chromium.org/luci/hardcoded/chromeinfra"
+)
+
+const cosInstance = "cos.googlesource.com"
+const defaultManifestRepo = "cos/manifest-snapshots"
+
+func getAuthenticator() *auth.Authenticator {
+	opts := chromeinfra.DefaultAuthOptions()
+	opts.Scopes = []string{gerrit.OAuthScope, auth.OAuthScopeEmail}
+	return auth.NewAuthenticator(context.Background(), auth.InteractiveLogin, opts)
+}
+
+func commitsMatch(commits []*Commit, expectedCommits []string) bool {
+	if len(commits) != len(expectedCommits) {
+		return false
+	}
+	for i, commit := range commits {
+		if commit == nil {
+			return false
+		}
+		if commit.SHA != expectedCommits[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func mappingInLog(log map[string][]*Commit, check []string) bool {
+	for _, check := range check {
+		if log, ok := log[check]; !ok || len(log) == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func TestChangelog(t *testing.T) {
+	authenticator := getAuthenticator()
+
+	// Test invalid source
+	additions, misses, err := Changelog(authenticator, "15", "15043.0.0", cosInstance, defaultManifestRepo)
+	if additions != nil {
+		t.Errorf("Changelog failed, expected nil additions, got %v", additions)
+	} else if misses != nil {
+		t.Errorf("Changelog failed, expected nil misses, got %v", misses)
+	} else if err == nil {
+		t.Errorf("Changelog failed, expected error, got nil")
+	}
+
+	// Test invalid target
+	additions, misses, err = Changelog(authenticator, "15043.0.0", "abx", cosInstance, defaultManifestRepo)
+	if additions != nil {
+		t.Errorf("Changelog failed, expected nil additions, got %v", additions)
+	} else if misses != nil {
+		t.Errorf("Changelog failed, expected nil misses, got %v", misses)
+	} else if err == nil {
+		t.Errorf("Changelog failed, expected error, got nil")
+	}
+
+	// Test invalid instance
+	additions, misses, err = Changelog(authenticator, "15036.0.0", "15041.0.0", "com", defaultManifestRepo)
+	if additions != nil {
+		t.Errorf("Changelog failed, expected nil additions, got %v", additions)
+	} else if misses != nil {
+		t.Errorf("Changelog failed, expected nil misses, got %v", misses)
+	} else if err == nil {
+		t.Errorf("Changelog failed, expected error, got nil")
+	}
+
+	// Test invalid manifest repo
+	additions, misses, err = Changelog(authenticator, "15036.0.0", "15041.0.0", cosInstance, "cos/not-a-repo")
+	if additions != nil {
+		t.Errorf("Changelog failed, expected nil additions, got %v", additions)
+	} else if misses != nil {
+		t.Errorf("Changelog failed, expected nil misses, got %v", misses)
+	} else if err == nil {
+		t.Errorf("Changelog failed, expected error, got nil")
+	}
+
+	// Test build number higher than latest release
+	additions, misses, err = Changelog(authenticator, "15036.0.0", "99999.0.0", cosInstance, defaultManifestRepo)
+	if additions != nil {
+		t.Errorf("Changelog failed, expected nil additions, got %v", additions)
+	} else if misses != nil {
+		t.Errorf("Changelog failed, expected nil misses, got %v", misses)
+	} else if err == nil {
+		t.Errorf("Changelog failed, expected error, got nil")
+	}
+
+	// Test 1 build number difference with only 1 repo change between them
+	// Ensure that commits are correctly inserted in proper order
+	source := "15050.0.0"
+	target := "15051.0.0"
+	expectedCommits := []string{
+		"6201c49afe667c8fa7796608a4d7162bb3f7f4f4",
+		"a8bcf0feaa0e3c0131a888fcd9d0dcbbe8c3850c",
+		"5e3ef32e062fb227aaa6b47138950557ec91d23e",
+		"654ed08e8a349e7199eb3a80b6d7704a20ff8ec4",
+		"d5c0e74fbb2a50517a1249cbbec4dcee3d049883",
+		"cd226061776dad6c0e35323f407eaa138795f4cc",
+		"4351d0dc5480e941fac96cb0ec898a87171eadda",
+		"cdbcf507749a86acad3e8787ffb3c3356ed76b3a",
+		"4fdd7f397bc09924e91f475d3ed55bb5a302bdaf",
+		"3adae69de78875a8d33061205357388a513ea51d",
+		"5fd85ec937d362984e5108762e8b5e20105a4219",
+		"03b6099c920c1b3cb4cbda2172089e80b4d4be6e",
+		"1febb203aaf99f00e5d9d80d965726458ba8348f",
+		"2de610687308b6ea00d9ac6190d83f0edb2a46b4",
+		"db3083c438442ea6ab34e84404b4602618d2e07b",
+		"13eb9486f2bf43d56ce58695df8461099fd7c314",
+		"12b8a449ef93289674d93f437c19a06530c2c966",
+		"6d9752b0abeeaf7438ab08ea7ff5b0f76c2dacca",
+		"8555ba160a5eee0be464b25a07abc6031dc9159c",
+		"a8c1c3c2971acc03f4246c20b1ddd5bb5376ded3",
+		"762495e014eaa74e3aa4d83caaaa778fcfb968a2",
+		"784782cd8c1d846c17541a3e527ad56857fe2e91",
+		"7c6916858860715db25eaadc2b3ec81865304095",
+		"76cc8bf290a133ee821a8a2b14207150de9a7803",
+		"dc07ec7806f249fdb0b7bda68c687a87b311c952",
+		"f18ad3b35466354d5a0e166008070f54a06759a6",
+		"34f008f664e11b6df2f06735b6db6d6a42804d25",
+		"a24eee7a6b6caed0448365e548e92724069a8448",
+		"64ddf2924656f07bd63269524ed1731a2357b82f",
+		"e40d4ce60313cd28ebf1c376860402f9b3d373cd",
+		"3018e2531a1f0f22c4d053ed0b8a5cc86ad81319",
+		"668cd418350d03e1535c7862ebe93801ace0b1c6",
+		"fabf26e3eab2af24371c48e19062d7c8df34bd9f",
+		"7b38982caecbeb16520b4dd84422ecad0edaf772",
+		"658380877ca2eedc3cd80d3b6daafa24ab96a261",
+		"63dee6c8cd318dfa20cfddf2e72243873e816046",
+		"bc194a3ce16407015da5bc8d46df55231cf4d625",
+		"ff75e90067c7c535116cb5566ebc14451785b36a",
+		"c64b1cc6b930024e77425fd105716ade26d0524c",
+		"d5123111900fd70d85b7acf5809df701da24f1ea",
+		"c617b261c68b52b0abefc0635c1ea03c4cb0cb11",
+		"a2619465e4eca49692d832b593cb205118042bc9",
+		"6f6451dd56a7fad25b2e8b31a053275adb2008a4",
+		"68d5d3901d5c3df44e3be8c3fac0c6b1e90d780c",
+		"308882e4e837f231e3ad0f37fd143cee419d816f",
+		"1cb20f5aa5a82a412d97fad7b9c13c87c9381f14",
+		"f6c0c6f1618676519efd74c8f946e191472b6a4e",
+		"dea6ca48a629e80cc2ffbf203c9cc1855a28a47e",
+		"fa0115b220b3471a1542b3b66463f9ec80c8c7f0",
+		"b815d624f7715ab51379e8a913c280cac1eafde4",
+		"39fe5d201b87e02baedf4da8b02523571c4ccbcc",
+		"58aff81e0829100cc9d3239791573300e2d2398d",
+		"cd570b8e278aca36f166eb84b5003eaee3c03ecc",
+		"50f9936fe8ab106d2716e007a342860c695f7822",
+		"2a1e98d6c3dca9b52bcb7b02c7a242c10c0a0de9",
+		"b9fe6cc174f215d576954e6b2c93bc4de8ba2c34",
+		"f78d275ec9d0c4061f75ae2f97f958657a71ebd1",
+		"315ea4a344e3f8b300e8c3e48fafc21eaee767fe",
+		"1c9392eb35c68ca38a1f0178cd191f07d387f52d",
+		"9cd44834d383b5414bd9bac873e9c620a67eff1d",
+		"e0f3f79316591affedeaf2702a350d3512bd6a69",
+		"148bba54f3762b23a79057825a763c1132bd1d55",
+		"48ce30dd18de40852cea15dccaaa833b4017ae10",
+		"474e61f82f79d9779b0e2c3bc63d920d9f75b5b6",
+		"b93f0e4f3edbe3e64b0128db38ee231a737f06c9",
+		"714065afa108556b6ff43ff312b731c239d6e551",
+		"45a780a84daa27307addd836df94afa2c70dccb6",
+		"0df346778d142f9c6bf221d67bdac96d9d636408",
+		"6ad098080fb6437da98511e56026476fa71cce87",
+		"3f2915159ab1e42b258ee78d2a71f2dc59d51d35",
+		"1d5a9ebc23d1455966963a042bd610fdb38cd705",
+		"e31b072bbc2d83db107d913a3f32d907de119ca2",
+		"6da63745bd4318577ab8937100871e654df04cb3",
+		"d5a54c19f7bf1f8250bc5ac779f80450764e836e",
+		"54c59bdcf9965dbb77a6dd9682f255e21e4821a1",
+		"67b538de711500bfb1ed5d322e916e8cd3f74700",
+		"2814ccbb44a3d19cb4d696705794ced3beb31ef3",
+		"deb92542c03e9096fe37d8833532a50a6bb1df3c",
+		"d2b9b62c2ad5440005b72826bb55a36dfc115ac2",
+		"da9cd84436f716c3c7a6d90e820afb87a9a218b9",
+		"d0937f57cd2904df1af7449f32c75aaadaeac2a2",
+		"65441913baef06967e59158f3848e41dce18b43a",
+		"7cc03e836eba4d13526969b84aaa8dd61d8b6216",
+		"dff08d118cab7f8416b8f171aac91b8ca3f6b44d",
+		"aedb933f853499a0c736deb2d2ab899b607aacee",
+		"aa592bf7b0b7b13eee2b20fa54fd81e11e96cf56",
+		"f495c107eefc879b10fdf2e3a2a0155259210dba",
+		"7e4e0964a1426d46cdbcbccd861cee7a106a9430",
+		"d0ca437a1ed89e2adbd6b2d1bd572b475cd1d8ec",
+		"0dce9e5070718b7ba950f0b6575bb3bbd0e362bc",
+		"ddd73889c36e93c6128a4d791b6d673cd655447e",
+		"04e70ee7abbb702e4939fef98d50b5e6cc018ccd",
+		"86da591dd3d8515ebf4d1eebc68a61092ad13e95",
+		"8676fbad9fa41e0d0f69dafb2b4f8bd4b5a3b3cc",
+		"b8b3a8cc67fcdf58d495489c19e5d3aa23d22563",
+		"7441c2cf859b84f7cedff8946dbd0c3dc7ef956b",
+		"7f3e0778e212c8a22f8262e2819a6aebfca8b879",
+		"a82b808965dbe304e0a95cb9534b09b3b5c0486a",
+		"0388f30783e2454ea9f0c3978f92c797fc0bdf20",
+		"67f6e97cee8a5b33f8e27b4d2426fb009c0ae435",
+		"094bef7b6bd0c034ea19aa3cb9744ca35998ecc8",
+		"ec07a4f7eb15d867e453c8c8991656b361a29882",
+		"0a304d6481d01d774fe97f31c9574c970fdb532f",
+		"3f77b91ad1abb2d2074286635927fa6472eb0a2e",
+		"ca721a37ec8edc8f1b8aeb4c143aa936dc032ac1",
+		"c0b7d2df81ae29869f9d7a1874b741eeec0d5d18",
+		"9bc12bb411f357188d008864f80dfba43210b9d8",
+		"bf0dd3757826b9bc9d7082f5f749ff7615d4bcb3",
+	}
+	additions, misses, err = Changelog(authenticator, source, target, cosInstance, defaultManifestRepo)
+	if err != nil {
+		t.Errorf("Changelog failed, expected no error, got %v", err)
+	} else if len(misses) != 0 {
+		t.Errorf("Changelog failed, expected empty misses list, got %v", misses)
+	} else if len(additions) != 1 {
+		t.Errorf("Changelog failed, expected only 1 repo in additions, got %v", additions)
+	} else if _, ok := additions["cos/overlays/board-overlays"]; !ok {
+		t.Errorf("Changelog failed, expected \"cos/overlays/board-overlays\" in additions, got %v", additions)
+	} else if changes, _ := additions["cos/overlays/board-overlays"]; len(changes) != 108 {
+		t.Errorf("Changelog failed, expected 108 changes for \"cos/overlays/board-overlays\", got %d", len(changes))
+	} else if !commitsMatch(additions["cos/overlays/board-overlays"], expectedCommits) {
+		t.Errorf("Changelog failed, Changelog output does not match expected commits or is not sorted")
+	}
+
+	// Test build numbers further apart from each other with multiple repo differences
+	// Also ensures that misses are correctly populated
+	source = "15020.0.0"
+	target = "15056.0.0"
+	additionRepos := []string{
+		"mirrors/cros/chromiumos/platform/crosutils",
+		"cos/manifest",
+		"mirrors/cros/chromiumos/platform/vboot_reference",
+		"mirrors/cros/chromiumos/platform/dev-util",
+		"mirrors/cros/chromiumos/platform/crostestutils",
+		"mirrors/cros/chromiumos/infra/proto",
+		"mirrors/cros/chromiumos/third_party/toolchain-utils",
+		"mirrors/cros/chromiumos/third_party/coreboot",
+		"cos/overlays/board-overlays",
+		"mirrors/cros/chromiumos/platform2",
+		"mirrors/cros/chromiumos/overlays/eclass-overlay",
+		"mirrors/cros/chromiumos/chromite",
+		"mirrors/cros/chromiumos/third_party/autotest",
+		"mirrors/cros/chromiumos/overlays/chromiumos-overlay",
+		"third_party/kernel",
+		"mirrors/cros/chromium/tools/depot_tools",
+		"mirrors/cros/chromiumos/repohooks",
+		"mirrors/cros/chromiumos/overlays/portage-stable",
+	}
+	additions, misses, err = Changelog(authenticator, source, target, cosInstance, defaultManifestRepo)
+	if err != nil {
+		t.Errorf("Changelog failed, expected no error, got %v", err)
+	} else if _, ok := misses["third_party/kernel"]; !ok || len(misses) != 1 {
+		t.Errorf("Changelog failed, expected miss list containing only \"third_party/kernel\", got %v", misses)
+	} else if !mappingInLog(additions, additionRepos) {
+		t.Errorf("Changelog failed, additions repo output does not match expected repos %v", additionRepos)
+	}
+}