blob: 37b14dcf577a62a20ee3d2746428a2c90d9c14dc [file] [log] [blame]
#! /usr/bin/python
"""Chronological analysis of special tasks on a host.
To analyze all special tasks that ran after/before a given job: job_id -id 123 -cutoff 5
To analyze all special tasks that ran on a host between 4-5 pm on 3/25/2014: host -host 123.123\
-start "2014-03-25 16:00:00" -end "2014-03-25 17:00:00
One can use the script to get host history information, figure out what jobs ran
after/before a failed SERVER_JOB, or just add clarity to jobs running on hosts.
import argparse
import datetime as datetime_base
from datetime import datetime
import logging
import sys
import common
from autotest_lib.client.common_lib import time_utils
from autotest_lib.frontend import setup_django_environment
from autotest_lib.frontend.afe import models
from autotest_lib.server.cros.dynamic_suite import job_status
TASK_LOGS = ('http://cautotest/tko/retrieve_logs.cgi?job=/results/hosts/'
def _parse_args(args):
description=(' job_id -id 123 -cutoff 5 or \n'
' host -name 123.123 '
'-start "2014-03-25 16:26:31" -end "2014-03-25 16:26:31"\n')
if not args:
print ('Too few arguments, execute \n%s\nor try '
'./ --help' % description)
parser = argparse.ArgumentParser(
description='A script to get the special tasks on a host or job.')
subparsers = parser.add_subparsers(help='Get tasks based on a job or host.')
parser_job = subparsers.add_parser('job', help='Per Job analysis mode.')
parser_job.add_argument('-id', help='job_id.')
parser_job.add_argument('-cutoff', default=5, type=int,
help='Hours after the job.')
parser_host = subparsers.add_parser('host', help='Per host analysis mode.')
help='Hostname for which you would like tasks.')
'-start', help='Start time. Eg: 2014-03-25 16:26:31')
'-end', help='End time Eg: 2014-03-25 18:26:31.')
return parser.parse_args(args)
def get_logs_for_tasks(task_ids):
"""Get links to the logs for the given task ids."""
tasks = models.SpecialTask.objects.filter(id__in=task_ids)
task_logs = {}
for task in tasks:
task_dict = {'hostname':,
'taskname': task.task.lower()}
task_logs[] = TASK_LOGS % task_dict
return task_logs
def _tasks_with_filter(**task_filter):
"""Get tasks applying a filter."""
tasks = models.SpecialTask.objects.filter(**task_filter)
task_logs = get_logs_for_tasks([ for task in tasks])
for task in tasks:
task_dict = {'task': task.task, 'id':,
'hqe': task.queue_entry_id,
'job': task.queue_entry.job_id if task.queue_entry else None,
'status': 'Passed' if task.success else 'Failed',
'logs': task_logs[], 'time': task.time_started}
print ('\t%(task)s (%(id)s), for (hqe %(hqe)s, job %(job)s) at '
'%(time)s [%(status)s]: %(logs)s' % task_dict)
def lookup_host(hostname, start, end):
    """Lookup tasks on a host, within the start and end times.

    @param hostname: Hostname whose tasks should be printed.
    @param start: Datetime lower bound (inclusive) on task start time.
    @param end: Datetime upper bound (inclusive) on task start time.
    """
    # NOTE(review): the call line was lost in extraction; the surviving
    # keyword arguments on the next line identify _tasks_with_filter as
    # the callee.
    _tasks_with_filter(
        host__hostname=hostname, time_started__gte=start, time_started__lte=end)
def lookup_job(job_id, cutoff=5, taskname=None, success=None):
"""Lookup tasks on a job, within cutoff of the job's start time."""
hqe = models.HostQueueEntry.objects.filter(job_id=job_id)
if len(hqe) > 1:
logging.error('Support for jobs with multiple hqes not implemented. '
'%s is one such job.', job_id)
hqe = hqe[0]
cutoff = hqe.started_on + datetime_base.timedelta(hours=cutoff)
print '\nThe tasks before the job were:\n'
lookup_host(, hqe.started_on - datetime_base.timedelta(minutes=5),
print ('\nJob %s (%s), started on %s on %s. Getting tasks before %s\n' %
(, job_id,, hqe.started_on, cutoff))
print '\nThe tasks after the job were:\n'
lookup_host(, hqe.started_on, cutoff)
if __name__ == '__main__':
args = _parse_args(sys.argv[1:])
if args.which == 'job':
lookup_job(, args.cutoff)
elif args.which == 'host':
datetime.strptime(args.start, time_utils.TIME_FMT),
datetime.strptime(args.end, time_utils.TIME_FMT))
print 'Unrecognized options. Try --help'