Add tests to gtest-parallel. (#31)

* Add tests to gtest-parallel.
diff --git a/gtest_parallel.py b/gtest_parallel.py
index f6afa36..c2bc6ac 100755
--- a/gtest_parallel.py
+++ b/gtest_parallel.py
@@ -177,11 +177,12 @@
   Logger, TestResults and TestTimes classes, and in case of failure, retries the
   test as specified by the --retry_failed flag.
   """
-  def __init__(self, times, logger, test_results, times_to_retry,
+  def __init__(self, times, logger, test_results, task_factory, times_to_retry,
                initial_execution_number):
     self.times = times
     self.logger = logger
     self.test_results = test_results
+    self.task_factory = task_factory
     self.times_to_retry = times_to_retry
     self.initial_execution_number = initial_execution_number
 
@@ -233,8 +234,9 @@
         execution_number = self.__get_next_execution_number(task.test_id)
        # We need to create a new Task instance. Each task represents a single test
         # execution, with its own runtime, exit code and log file.
-        task = Task(task.test_binary, task.test_name, task.test_command,
-                    execution_number, task.last_execution_time, task.output_dir)
+        task = self.task_factory(task.test_binary, task.test_name,
+                                 task.test_command, execution_number,
+                                 task.last_execution_time, task.output_dir)
 
     with self.lock:
       if task.exit_code != 0:
@@ -332,11 +334,6 @@
     json.dump(self.test_results, self.json_dump_file)
     self.json_dump_file.close()
 
-class DummyTimer(object):
-  def start(self):
-    pass
-  def cancel(self):
-    pass
 
 # Record of test runtimes. Has built-in locking.
 class TestTimes(object):
@@ -464,13 +461,15 @@
     return t
 
   try:
-    timeout.start()
+    if timeout:
+      timeout.start()
     worker_fn = WorkerFn(tasks)
     workers = [start_daemon(worker_fn) for _ in range(pool_size)]
     for worker in workers:
       worker.join()
   finally:
-    timeout.cancel()
+    if timeout:
+      timeout.cancel()
 
 
 def main():
@@ -552,8 +551,9 @@
     if e.errno != errno.EEXIST or not os.path.isdir(options.output_dir):
       raise e
 
-  timeout = (DummyTimer() if options.timeout is None
-             else threading.Timer(options.timeout, sigint_handler.interrupt))
+  timeout = None
+  if options.timeout is not None:
+    timeout = threading.Timer(options.timeout, sigint_handler.interrupt)
 
   test_results = None
   if options.dump_json_test_results is not None:
@@ -563,8 +563,8 @@
   times = TestTimes(save_file)
   logger = FilterFormat(options.output_dir)
 
-  task_manager = TaskManager(times, logger, test_results, options.retry_failed,
-                             options.repeat + 1)
+  task_manager = TaskManager(times, logger, test_results, Task,
+                             options.retry_failed, options.repeat + 1)
 
   tasks = find_tests(binaries, additional_args, options, times)
   logger.log_tasks(len(tasks))
diff --git a/gtest_parallel_tests.py b/gtest_parallel_tests.py
new file mode 100644
index 0000000..60bcf71
--- /dev/null
+++ b/gtest_parallel_tests.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python2
+# Copyright 2017 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import collections
+import gtest_parallel
+import unittest
+
+
+class LoggerMock(object):
+  def __init__(self):
+    self.runtimes = collections.defaultdict(list)
+    self.exit_codes = collections.defaultdict(list)
+    self.last_execution_times = collections.defaultdict(list)
+    self.execution_numbers = collections.defaultdict(list)
+
+  def log_exit(self, task):
+    self.runtimes[task.test_id].append(task.runtime_ms)
+    self.exit_codes[task.test_id].append(task.exit_code)
+    self.last_execution_times[task.test_id].append(task.last_execution_time)
+    self.execution_numbers[task.test_id].append(task.execution_number)
+
+
+class TimesMock(object):
+  def __init__(self):
+    self.last_execution_times = collections.defaultdict(list)
+
+  def record_test_time(self, test_binary, test_name, last_execution_time):
+    test_id = (test_binary, test_name)
+    self.last_execution_times[test_id].append(last_execution_time)
+
+
+class TestResultsMock(object):
+  def __init__(self):
+    self.results = []
+
+  def log(self, test_name, runtime_ms, actual_result):
+    self.results.append((test_name, runtime_ms, actual_result))
+
+
+class TaskMockFactory(object):
+  def __init__(self, test_data):
+    self.data = test_data
+    self.passed = []
+    self.failed = []
+
+  def get_task(self, test_id, execution_number=0):
+    task = TaskMock(test_id, execution_number, self.data[test_id])
+    if task.exit_code == 0:
+      self.passed.append(task)
+    else:
+      self.failed.append(task)
+    return task
+
+  def __call__(self, test_binary, test_name, test_command, execution_number,
+               last_execution_time, output_dir):
+    return self.get_task((test_binary, test_name), execution_number)
+
+
+class TaskMock(object):
+  def __init__(self, test_id, execution_number, test_data):
+    self.test_id = test_id
+    self.execution_number = execution_number
+
+    self.runtime_ms = test_data['runtime_ms'][execution_number]
+    self.exit_code = test_data['exit_code'][execution_number]
+    self.last_execution_time = (
+        test_data['last_execution_time'][execution_number])
+    self.test_command = None
+    self.output_dir = None
+
+    self.test_binary = test_id[0]
+    self.test_name = test_id[1]
+    self.task_id = (test_id[0], test_id[1], execution_number)
+
+  def run(self):
+    pass
+
+
+class TestTaskManager(unittest.TestCase):
+  def setUp(self):
+    self.times = TimesMock()
+    self.logger = LoggerMock()
+    self.test_results = TestResultsMock()
+
+    self.test_data = [
+        # Passing task
+        (('fake_binary', 'Fake.PassingTest'), {
+            'runtime_ms': [10],
+            'exit_code': [0],
+            'last_execution_time': [10],
+        }),
+        # Fails once, then succeeds
+        (('another_binary', 'Fake.Test.FailOnce'), {
+            'runtime_ms': [21, 22],
+            'exit_code': [3, 0],
+            'last_execution_time': [None, 22],
+        }),
+        # Fails twice, then succeeds
+        (('yet_another_binary', 'Fake.Test.FailTwice'), {
+            'runtime_ms': [23, 25, 24],
+            'exit_code': [2, 2, 0],
+            'last_execution_time': [None, None, 24],
+        }),
+        # Failing task
+        (('fake_binary', 'Fake.FailingTest'), {
+            'runtime_ms': [20, 30, 40],
+            'exit_code': [1, 1, 1],
+            'last_execution_time': [None, None, None],
+        })
+    ]
+
+  def test_run_task_basic(self):
+    repeat = 1
+    retry_failed = 0
+
+    task_mock_factory = TaskMockFactory(dict(self.test_data))
+    task_manager = gtest_parallel.TaskManager(
+        self.times, self.logger, self.test_results,
+        task_mock_factory, retry_failed, repeat)
+
+    for test_id, expected in self.test_data:
+      task = task_mock_factory.get_task(test_id)
+      task_manager.run_task(task)
+
+    self.assertEqual(len(task_manager.started), 0)
+    self.assertListEqual(
+        sorted(task.task_id for task in task_manager.passed),
+        sorted(task.task_id for task in task_mock_factory.passed))
+    self.assertListEqual(
+        sorted(task.task_id for task in task_manager.failed),
+        sorted(task.task_id for task in task_mock_factory.failed))
+
+    self.assertEqual(task_manager.global_exit_code, 1)
+
+
+if __name__ == '__main__':
+  unittest.main()