// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gcs

import (
"bytes"
"context"
"fmt"
"os"
"path"
"path/filepath"
"sort"
"testing"
"time"
"cos.googlesource.com/cos/tools.git/src/pkg/fakes"
"github.com/google/go-cmp/cmp"
)

// TestListGCSBucket verifies that ListGCSBucket lists all object names in a
// GCS bucket that match a given prefix.
func TestListGCSBucket(t *testing.T) {
fakeGCS := fakes.GCSForTest(t)
fakeBucket := "cos-tools"
fakeGCSClient := fakeGCS.Client
ctx := context.Background()
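	// fakeData seeds the fake bucket. Note that one object holds the literal
	// string "empty" while another is a genuinely empty file, so listing must
	// not depend on object contents.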
var fakeData = []struct {
artifactPath string
artifactContent string
}{
{
"10000.00.00/lakitu/gpu_535_version",
"535.1.1",
},
{
"10000.00.00/lakitu/gpu_525_version",
"525.1.1",
},
{
"10000.00.00/lakitu/cpu_410_version",
"empty",
},
{
"10000.00.00/lakitu/empty_file",
"",
},
}
for _, data := range fakeData {
fakeGCS.Objects[path.Join("/", fakeBucket, data.artifactPath)] = []byte(data.artifactContent)
}
defer fakeGCS.Close()
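	// Each case lists with a different prefix; the final case uses a prefix
	// that matches nothing and expects a nil result.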
var testCases = []struct {
prefix string
expected []string
}{
{
"10000",
[]string{"10000.00.00/lakitu/gpu_535_version", "10000.00.00/lakitu/gpu_525_version", "10000.00.00/lakitu/cpu_410_version", "10000.00.00/lakitu/empty_file"},
},
{
"10000.00.00/lakitu/cpu_",
[]string{"10000.00.00/lakitu/cpu_410_version"},
},
{
"10000.00.00/lakitu/empty_file",
[]string{"10000.00.00/lakitu/empty_file"},
},
{
"999",
nil,
},
}
for index, tc := range testCases {
		t.Run(fmt.Sprintf("Test %d: List artifacts with prefix %s", index, tc.prefix), func(t *testing.T) {
actualResult, err := ListGCSBucket(ctx, fakeGCSClient, fakeBucket, tc.prefix)
if err != nil {
t.Fatalf("TestListArtifacts Failed: fail to list the artifact with prefix: %s from the fake GCS client in bucket: %s with error: %v", tc.prefix, fakeBucket, err)
}
sort.Strings(actualResult)
sort.Strings(tc.expected)
if !cmp.Equal(actualResult, tc.expected) {
t.Errorf("TestListArtifacts Failed: listing the artifact with prefix: %s from the fake GCS client in bucket: %s, the expected result is: %s, but the actual result is: %s.", tc.prefix, fakeBucket, tc.expected, actualResult)
}
})
}
}

func TestUploadDownloadDelete(t *testing.T) {
var err error
fakeGCS := fakes.GCSForTest(t)
ctx := context.Background()
testData := []struct {
path string
data string
}{
{
"a/b.txt",
"abc",
},
{
"a/c/d.txt",
"def",
},
{
"a/c/e.txt",
"ghi",
},
}
src := t.TempDir()
for _, data := range testData {
srcDir := path.Join(src, filepath.Dir(data.path))
err = os.MkdirAll(srcDir, 0777)
if err != nil {
t.Fatalf("could not make dir %s: %v", srcDir, err)
}
srcPath := path.Join(src, data.path)
err = os.WriteFile(srcPath, []byte(data.data), 0666)
if err != nil {
t.Fatalf("could not write data to file %s: %v", srcPath, err)
}
}
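	// Run the round trip twice: once with and once without a trailing slash
	// on the bucket prefix, since both forms should address the same objects.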
bucketPrefixes := []string{"tmp/test/prefix", "tmp/test/prefix/"}
for _, prefix := range bucketPrefixes {
dst := t.TempDir()
bucket := NewGCSBucket(fakeGCS.Client, "test", prefix)
defer func() {
err := bucket.DeleteDir(ctx, src)
if err != nil {
t.Fatalf("failed to clean up %s; you will need to clean it up manually", bucket.URI(src))
}
}()
err = bucket.UploadDir(ctx, src, src)
if err != nil {
t.Fatalf("could not upload path (%v)", err)
}
err = bucket.DownloadDir(ctx, src, dst)
if err != nil {
t.Fatalf("could not download dir (%v)", err)
}
for _, data := range testData {
dstPath := path.Join(dst, data.path)
contents, err := os.ReadFile(dstPath)
if err != nil {
t.Fatalf("could not read contents for %s: %v", dstPath, err)
}
if !bytes.Equal(contents, []byte(data.data)) {
t.Fatalf("wrong contents for %s (expected %s, got %s)", dstPath, contents, data.data)
}
}
}
}

func TestListDir(t *testing.T) {
var err error
fakeGCS := fakes.GCSForTest(t)
bucket := NewGCSBucket(fakeGCS.Client, "test", "tmp/test/prefix")
ctx := context.Background()
src := t.TempDir()
testPaths := []string{
"a/b.txt",
"a/c/d.txt",
"a/c/e.txt",
"f/g.txt",
}
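	// f/g.txt sits outside a/ so the ListDir call below can verify that
	// entries from sibling directories are not returned.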
for _, p := range testPaths {
srcDir := path.Join(src, filepath.Dir(p))
err = os.MkdirAll(srcDir, 0777)
if err != nil {
t.Fatalf("could not make dir %s: %v", srcDir, err)
}
srcPath := path.Join(src, p)
err = os.WriteFile(srcPath, []byte("abc"), 0666)
if err != nil {
t.Fatalf("could not write data to file %s: %v", srcPath, err)
}
}
defer func() {
err := bucket.DeleteDir(ctx, src)
if err != nil {
t.Fatalf("failed to clean up %s; you will need to clean it up manually", bucket.URI(src))
}
}()
err = bucket.UploadDir(ctx, src, src)
if err != nil {
t.Fatalf("could not upload path (%v)", err)
}
entries, err := bucket.ListDir(ctx, path.Join(src, "a"))
if err != nil {
t.Fatalf("could not list dir (%v)", err)
}
sort.Strings(entries)
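	// ListDir appears to be non-recursive: files directly under src/a are
	// returned as-is, while the subdirectory a/c collapses to a single entry
	// with a trailing "/", mirroring how GCS reports directory-like prefixes.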
expected := []string{path.Join(src, "a/b.txt"), path.Join(src, "a/c") + "/"}
sort.Strings(expected)
fmt.Printf("entries: %v\n", entries)
fmt.Printf("expected: %v\n", expected)
if diff := cmp.Diff(expected, entries); diff != "" {
t.Errorf("listed entries did not match expected;\ndiff: %s", diff)
}
}

func TestDownloadParallel(t *testing.T) {
var err error
fakeGCS := fakes.GCSForTest(t)
fakeGCS.Delay = time.Millisecond * 100
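	// Give every simulated download a fixed latency so that total wall time
	// can distinguish serial from parallel downloads in the assertions below.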
ctx := context.Background()
testData := []struct {
path string
data string
}{
{
"a/b.txt",
"abc",
},
{
"a/c/d.txt",
"def",
},
{
"a/c/e.txt",
"ghi",
},
{
"d.txt",
"jkl",
},
{
"e.txt",
"mno",
},
}
src := t.TempDir()
for _, data := range testData {
srcDir := path.Join(src, filepath.Dir(data.path))
err = os.MkdirAll(srcDir, 0777)
if err != nil {
t.Fatalf("could not make dir %s: %v", srcDir, err)
}
srcPath := path.Join(src, data.path)
err = os.WriteFile(srcPath, []byte(data.data), 0666)
if err != nil {
t.Fatalf("could not write data to file %s: %v", srcPath, err)
}
}
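	// Cover degenerate worker counts (-1, 0) as well as counts below, equal
	// to, and above the number of files. The timing bound below only tightens
	// for 1 <= workers < len(testData), so non-positive counts are presumably
	// treated as fully parallel by DownloadDirParallel.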
	workerCounts := []int{-1, 0, 1, 3, 5, 10}
	for _, workers := range workerCounts {
dst := t.TempDir()
bucket := NewGCSBucket(fakeGCS.Client, "test", "test-bucket")
defer func() {
err := bucket.DeleteDir(ctx, src)
if err != nil {
t.Fatalf("failed to clean up %s; you will need to clean it up manually", bucket.URI(src))
}
}()
err = bucket.UploadDir(ctx, src, src)
if err != nil {
t.Fatalf("could not upload path (%v)", err)
}
startTime := time.Now()
err = bucket.DownloadDirParallel(ctx, src, dst, workers)
if err != nil {
t.Fatalf("could not download dir (%v)", err)
}
// The time to download should be inversely proportional to the number of workers, with some overhead.
duration := time.Since(startTime)
maxDuration := fakeGCS.Delay
if workers > 0 && workers < len(testData) {
maxDuration = time.Duration(float64(fakeGCS.Delay) * float64(len(testData)) / float64(workers))
}
// Try to account for the overhead of a download.
maxDuration += 50 * time.Millisecond
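		// For example: 5 files, a 100ms delay, and 3 workers give a bound of
		// 100ms * 5 / 3 ≈ 167ms, plus the 50ms of slack.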
if duration > maxDuration {
t.Fatalf(
"parallel download with %v workers took too long; expected max duration of %v, got %v",
workers, maxDuration, duration,
)
}
for _, data := range testData {
dstPath := path.Join(dst, data.path)
contents, err := os.ReadFile(dstPath)
if err != nil {
t.Fatalf("could not read contents for %s: %v", dstPath, err)
}
if !bytes.Equal(contents, []byte(data.data)) {
t.Fatalf("wrong contents for %s (expected %s, got %s)", dstPath, contents, data.data)
}
}
}
}
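
// Usage sketch (not executed): the tests above exercise these helpers against
// fakes.GCSForTest, but the same calls should work against real GCS, assuming
// NewGCSBucket accepts the *storage.Client from cloud.google.com/go/storage
// that the fake mimics. The bucket name, prefix, and localDir below are
// hypothetical:
//
//	client, err := storage.NewClient(ctx)
//	if err != nil {
//		// handle error
//	}
//	bucket := NewGCSBucket(client, "my-bucket", "some/prefix")
//	// Mirror the tests, which pass the same path as source and destination.
//	if err := bucket.UploadDir(ctx, localDir, localDir); err != nil {
//		// handle error
//	}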