Rotate recover_duts log files

This CL limits the number of recover_duts log files to 10, at 5 MB per file.
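
For reference, a minimal standalone sketch of the rotation behavior this CL
configures via logging.handlers.RotatingFileHandler (the /tmp path and the
message loop below are illustrative only, not part of the change):

  import logging
  from logging import handlers

  formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
  handler = handlers.RotatingFileHandler(
      filename='/tmp/recover_duts.log',  # illustrative path only
      maxBytes=1024 * 5000,              # rotate once the file reaches ~5 MB
      backupCount=10)                    # keep at most 10 rotated backups
  handler.setFormatter(formatter)
  logger = logging.getLogger()
  logger.setLevel(logging.DEBUG)
  logger.addHandler(handler)

  # Backups are named recover_duts.log.1 .. recover_duts.log.10; the oldest
  # backup is dropped once the count is exceeded.
  for i in range(100000):
      logger.debug('message %d', i)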

TEST=Ran the script locally, exercised the new function.
BUG=chromium:535621

Change-Id: Iefac314a1e5c571880421e8c6f03531c828f194e
Previous-Reviewed-on: https://chromium-review.googlesource.com/302193
(cherry picked from commit fd6d70bda6834ea59ff4e08faefead0c21ffb0bb)
Reviewed-on: https://chromium-review.googlesource.com/302453
Reviewed-by: Fang Deng <fdeng@chromium.org>
Commit-Queue: Fang Deng <fdeng@chromium.org>
Tested-by: Fang Deng <fdeng@chromium.org>
diff --git a/recover_duts/recover_duts.py b/recover_duts/recover_duts.py
index b09a95e..ffaac0f 100755
--- a/recover_duts/recover_duts.py
+++ b/recover_duts/recover_duts.py
@@ -4,38 +4,59 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-# This module runs at system startup on Chromium OS test images. It runs through
-# a set of hooks to keep a DUT from being bricked without manual intervention.
-# Example hook:
-#   Check to see if ethernet is connected. If its not, unload and reload the
-#     ethernet driver.
+"""Recover duts.
+
+This module runs at system startup on Chromium OS test images. It runs through
+a set of hooks to keep a DUT from being bricked without manual intervention.
+Example hook:
+  Check to see if ethernet is connected. If it's not, unload and reload the
+    ethernet driver.
+"""
 
 import logging
 import os
 import subprocess
 import time
 
+from logging import handlers
+
 LOGGING_SUBDIR = '/var/log/recover_duts'
-LOG_FILENAME_FORMAT = os.path.join(LOGGING_SUBDIR,
-                                   'recover_duts_log_%Y%m%d_%H%M%S.txt')
+LOG_FILENAME = 'recover_duts.log'
 LOGGING_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
 LONG_REBOOT_DELAY = 300
 SLEEP_DELAY = 600
+LOG_FILE_BACKUP_COUNT = 10
+LOG_FILE_SIZE = 1024 * 5000  # 5000 KB (~5 MB) per log file
 
 
+def _setup_logging(log_file):
+  """Setup logging.
+
+  Args:
+    log_file: Path to the log file.
+  """
+  log_formatter = logging.Formatter(LOGGING_FORMAT)
+  handler = handlers.RotatingFileHandler(
+      filename=log_file, maxBytes=LOG_FILE_SIZE,
+      backupCount=LOG_FILE_BACKUP_COUNT)
+  handler.setFormatter(log_formatter)
+  logger = logging.getLogger()
+  log_level = logging.DEBUG
+  logger.setLevel(log_level)
+  logger.addHandler(handler)
+
 def main():
   if not os.path.isdir(LOGGING_SUBDIR):
     os.makedirs(LOGGING_SUBDIR)
 
-  log_filename = time.strftime(LOG_FILENAME_FORMAT)
-  logging.basicConfig(filename=log_filename, level=logging.DEBUG,
-                      format=LOGGING_FORMAT)
+  log_file = os.path.join(LOGGING_SUBDIR, LOG_FILENAME)
+  _setup_logging(log_file)
   hooks_dir = os.path.join(os.path.dirname(__file__), 'hooks')
 
   # Additional sleep as networking may not be up in the case of a long reboot.
   time.sleep(LONG_REBOOT_DELAY)
   try:
-    while(True):
+    while True:
       for script in os.listdir(hooks_dir):
         script = os.path.join(hooks_dir, script)
         if os.path.isfile(script) and script.endswith('.hook'):
@@ -49,8 +70,7 @@
           else:
             logging.warn('Running of %s failed with output:\n%s', script,
                          output)
-      else:
-        time.sleep(SLEEP_DELAY)
+      time.sleep(SLEEP_DELAY)
 
   except Exception as e:
     # Since this is run from an upstart job we want to ensure we log this into