# pylint: disable=C0111
"""\
Functions to expose over the RPC interface.
For all modify* and delete* functions that ask for an 'id' parameter to
identify the object to operate on, the id may be either
* the database row ID
* the name of the object (label name, hostname, user login, etc.)
 * a dictionary containing uniquely identifying fields (this option should
   seldom be used)
When specifying foreign key fields (i.e. adding hosts to a label, or adding
users to an ACL group), the given value may be either the database row ID or the
name of the object.
All get* functions return lists of dictionaries. Each dictionary represents one
object and maps field names to values.
Some examples:
modify_host(2, hostname='myhost') # modify hostname of host with database ID 2
modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
modify_test('sleeptest', test_type='Client', params=', seconds=60')
delete_acl_group(1) # delete by ID
delete_acl_group('Everyone') # delete by name
acl_group_add_users('Everyone', ['mbligh', 'showard'])
get_jobs(owner='showard', status='Queued')
See doctests/001_rpc_test.txt for (lots) more examples.
"""
__author__ = 'showard@google.com (Steve Howard)'
import datetime
import common
from autotest_lib.client.common_lib import error, priorities
from autotest_lib.frontend.afe import models, model_logic, model_attributes
from autotest_lib.frontend.afe import control_file, rpc_utils
def get_parameterized_autoupdate_image_url(job):
"""Get the parameterized autoupdate image url from a parameterized job."""
known_test_obj = models.Test.smart_get('autoupdate_ParameterizedJob')
image_parameter = known_test_obj.testparameter_set.get(test=known_test_obj,
name='image')
para_set = job.parameterized_job.parameterizedjobparameter_set
job_test_para = para_set.get(test_parameter=image_parameter)
return job_test_para.parameter_value
# labels
def add_label(name, kernel_config=None, platform=None, only_if_needed=None):
return models.Label.add_object(
name=name, kernel_config=kernel_config, platform=platform,
only_if_needed=only_if_needed).id
def modify_label(id, **data):
models.Label.smart_get(id).update_object(data)
def delete_label(id):
models.Label.smart_get(id).delete()
def label_add_hosts(id, hosts):
host_objs = models.Host.smart_get_bulk(hosts)
label = models.Label.smart_get(id)
if label.platform:
models.Host.check_no_platform(host_objs)
label.host_set.add(*host_objs)
def label_remove_hosts(id, hosts):
host_objs = models.Host.smart_get_bulk(hosts)
models.Label.smart_get(id).host_set.remove(*host_objs)
def get_labels(**filter_data):
"""\
@returns A sequence of nested dictionaries of label information.
"""
return rpc_utils.prepare_rows_as_nested_dicts(
models.Label.query_objects(filter_data),
('atomic_group',))
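# Example usage (illustrative only; the label name is hypothetical and the
# host 'ipaj2' is borrowed from the module docstring):
#   label_id = add_label('bluetooth')
#   label_add_hosts('bluetooth', ['ipaj2'])
#   get_labels(name='bluetooth')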
# atomic groups
def add_atomic_group(name, max_number_of_machines=None, description=None):
return models.AtomicGroup.add_object(
name=name, max_number_of_machines=max_number_of_machines,
description=description).id
def modify_atomic_group(id, **data):
models.AtomicGroup.smart_get(id).update_object(data)
def delete_atomic_group(id):
models.AtomicGroup.smart_get(id).delete()
def atomic_group_add_labels(id, labels):
label_objs = models.Label.smart_get_bulk(labels)
models.AtomicGroup.smart_get(id).label_set.add(*label_objs)
def atomic_group_remove_labels(id, labels):
label_objs = models.Label.smart_get_bulk(labels)
models.AtomicGroup.smart_get(id).label_set.remove(*label_objs)
def get_atomic_groups(**filter_data):
return rpc_utils.prepare_for_serialization(
models.AtomicGroup.list_objects(filter_data))
# hosts
def add_host(hostname, status=None, locked=None, protection=None):
return models.Host.add_object(hostname=hostname, status=status,
locked=locked, protection=protection).id
def modify_host(id, **data):
rpc_utils.check_modify_host(data)
host = models.Host.smart_get(id)
rpc_utils.check_modify_host_locking(host, data)
host.update_object(data)
def modify_hosts(host_filter_data, update_data):
"""
@param host_filter_data: Filters out which hosts to modify.
@param update_data: A dictionary with the changes to make to the hosts.
"""
rpc_utils.check_modify_host(update_data)
hosts = models.Host.query_objects(host_filter_data)
# Check all hosts before changing data for exception safety.
for host in hosts:
rpc_utils.check_modify_host_locking(host, update_data)
for host in hosts:
host.update_object(update_data)
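# Example usage (illustrative only; the hostname prefix is hypothetical, and
# the filter relies on standard Django field lookups):
#   modify_hosts(host_filter_data={'hostname__startswith': 'lab1-'},
#                update_data={'locked': True})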
def host_add_labels(id, labels):
labels = models.Label.smart_get_bulk(labels)
host = models.Host.smart_get(id)
platforms = [label.name for label in labels if label.platform]
if len(platforms) > 1:
raise model_logic.ValidationError(
{'labels': 'Adding more than one platform label: %s' %
', '.join(platforms)})
if len(platforms) == 1:
models.Host.check_no_platform([host])
host.labels.add(*labels)
def host_remove_labels(id, labels):
labels = models.Label.smart_get_bulk(labels)
models.Host.smart_get(id).labels.remove(*labels)
def set_host_attribute(attribute, value, **host_filter_data):
"""
@param attribute string name of attribute
@param value string, or None to delete an attribute
@param host_filter_data filter data to apply to Hosts to choose hosts to act
upon
"""
assert host_filter_data # disallow accidental actions on all hosts
hosts = models.Host.query_objects(host_filter_data)
models.AclGroup.check_for_acl_violation_hosts(hosts)
for host in hosts:
host.set_or_delete_attribute(attribute, value)
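# Example usage (illustrative only; attribute name and hostname are
# hypothetical):
#   set_host_attribute('pool', 'suites', hostname='myhost')
#   set_host_attribute('pool', None, hostname='myhost')  # deletes it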
def delete_host(id):
models.Host.smart_get(id).delete()
def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
exclude_atomic_group_hosts=False, valid_only=True, **filter_data):
"""
@param multiple_labels: match hosts in all of the labels given. Should
be a list of label names.
@param exclude_only_if_needed_labels: Exclude hosts with at least one
"only_if_needed" label applied.
@param exclude_atomic_group_hosts: Exclude hosts that have one or more
atomic group labels associated with them.
"""
hosts = rpc_utils.get_host_query(multiple_labels,
exclude_only_if_needed_labels,
exclude_atomic_group_hosts,
valid_only, filter_data)
hosts = list(hosts)
models.Host.objects.populate_relationships(hosts, models.Label,
'label_list')
models.Host.objects.populate_relationships(hosts, models.AclGroup,
'acl_list')
models.Host.objects.populate_relationships(hosts, models.HostAttribute,
'attribute_list')
host_dicts = []
for host_obj in hosts:
host_dict = host_obj.get_object_dict()
host_dict['labels'] = [label.name for label in host_obj.label_list]
host_dict['platform'], host_dict['atomic_group'] = (rpc_utils.
find_platform_and_atomic_group(host_obj))
host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
host_dict['attributes'] = dict((attribute.attribute, attribute.value)
for attribute in host_obj.attribute_list)
host_dicts.append(host_dict)
return rpc_utils.prepare_for_serialization(host_dicts)
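# Example usage (illustrative only; hostnames are hypothetical):
#   get_hosts(hostname__in=['host1', 'host2'], valid_only=False)
#   get_hosts(multiple_labels=['bluetooth', 'remote_power'])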
def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
exclude_atomic_group_hosts=False, valid_only=True,
**filter_data):
"""
Same parameters as get_hosts().
@returns The number of matching hosts.
"""
hosts = rpc_utils.get_host_query(multiple_labels,
exclude_only_if_needed_labels,
exclude_atomic_group_hosts,
valid_only, filter_data)
return hosts.count()
# tests
def add_test(name, test_type, path, author=None, dependencies=None,
experimental=True, run_verify=None, test_class=None,
test_time=None, test_category=None, description=None,
sync_count=1):
return models.Test.add_object(name=name, test_type=test_type, path=path,
author=author, dependencies=dependencies,
experimental=experimental,
run_verify=run_verify, test_time=test_time,
test_category=test_category,
sync_count=sync_count,
test_class=test_class,
description=description).id
def modify_test(id, **data):
models.Test.smart_get(id).update_object(data)
def delete_test(id):
models.Test.smart_get(id).delete()
def get_tests(**filter_data):
return rpc_utils.prepare_for_serialization(
models.Test.list_objects(filter_data))
# profilers
def add_profiler(name, description=None):
return models.Profiler.add_object(name=name, description=description).id
def modify_profiler(id, **data):
models.Profiler.smart_get(id).update_object(data)
def delete_profiler(id):
models.Profiler.smart_get(id).delete()
def get_profilers(**filter_data):
return rpc_utils.prepare_for_serialization(
models.Profiler.list_objects(filter_data))
# users
def add_user(login, access_level=None):
return models.User.add_object(login=login, access_level=access_level).id
def modify_user(id, **data):
models.User.smart_get(id).update_object(data)
def delete_user(id):
models.User.smart_get(id).delete()
def get_users(**filter_data):
return rpc_utils.prepare_for_serialization(
models.User.list_objects(filter_data))
# acl groups
def add_acl_group(name, description=None):
group = models.AclGroup.add_object(name=name, description=description)
group.users.add(models.User.current_user())
return group.id
def modify_acl_group(id, **data):
group = models.AclGroup.smart_get(id)
group.check_for_acl_violation_acl_group()
group.update_object(data)
group.add_current_user_if_empty()
def acl_group_add_users(id, users):
group = models.AclGroup.smart_get(id)
group.check_for_acl_violation_acl_group()
users = models.User.smart_get_bulk(users)
group.users.add(*users)
def acl_group_remove_users(id, users):
group = models.AclGroup.smart_get(id)
group.check_for_acl_violation_acl_group()
users = models.User.smart_get_bulk(users)
group.users.remove(*users)
group.add_current_user_if_empty()
def acl_group_add_hosts(id, hosts):
group = models.AclGroup.smart_get(id)
group.check_for_acl_violation_acl_group()
hosts = models.Host.smart_get_bulk(hosts)
group.hosts.add(*hosts)
group.on_host_membership_change()
def acl_group_remove_hosts(id, hosts):
group = models.AclGroup.smart_get(id)
group.check_for_acl_violation_acl_group()
hosts = models.Host.smart_get_bulk(hosts)
group.hosts.remove(*hosts)
group.on_host_membership_change()
def delete_acl_group(id):
models.AclGroup.smart_get(id).delete()
def get_acl_groups(**filter_data):
acl_groups = models.AclGroup.list_objects(filter_data)
for acl_group in acl_groups:
acl_group_obj = models.AclGroup.objects.get(id=acl_group['id'])
acl_group['users'] = [user.login
for user in acl_group_obj.users.all()]
acl_group['hosts'] = [host.hostname
for host in acl_group_obj.hosts.all()]
return rpc_utils.prepare_for_serialization(acl_groups)
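# Example usage (illustrative only; group, user, and host names are
# hypothetical):
#   group_id = add_acl_group('my-team', description='Hosts for my team')
#   acl_group_add_users('my-team', ['alice', 'bob'])
#   acl_group_add_hosts('my-team', ['host1'])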
# jobs
def generate_control_file(tests=(), kernel=None, label=None, profilers=(),
client_control_file='', use_container=False,
profile_only=None, upload_kernel_config=False):
"""
Generates a client-side control file to load a kernel and run tests.
@param tests List of tests to run.
@param kernel A list of kernel info dictionaries configuring which kernels
to boot for this job and other options for them
@param label Name of label to grab kernel config from.
@param profilers List of profilers to activate during the job.
@param client_control_file The contents of a client-side control file to
run at the end of all tests. If this is supplied, all tests must be
client side.
TODO: in the future we should support server control files directly
to wrap with a kernel. That'll require changing the parameter
name and adding a boolean to indicate if it is a client or server
control file.
@param use_container unused argument today. TODO: Enable containers
on the host during a client side test.
@param profile_only A boolean that indicates what default profile_only
mode to use in the control file. Passing None will generate a
        control file that does not explicitly set the default mode at all.
@param upload_kernel_config: if enabled it will generate server control
file code that uploads the kernel config file to the client and
tells the client of the new (local) path when compiling the kernel;
the tests must be server side tests
@returns a dict with the following keys:
control_file: str, The control file text.
is_server: bool, is the control file a server-side control file?
synch_count: How many machines the job uses per autoserv execution.
synch_count == 1 means the job is asynchronous.
dependencies: A list of the names of labels on which the job depends.
"""
if not tests and not client_control_file:
return dict(control_file='', is_server=False, synch_count=1,
dependencies=[])
cf_info, test_objects, profiler_objects, label = (
rpc_utils.prepare_generate_control_file(tests, kernel, label,
profilers))
cf_info['control_file'] = control_file.generate_control(
tests=test_objects, kernels=kernel, platform=label,
profilers=profiler_objects, is_server=cf_info['is_server'],
client_control_file=client_control_file, profile_only=profile_only,
upload_kernel_config=upload_kernel_config)
return cf_info
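# Example usage (illustrative only; 'sleeptest' is the stock client test
# named in the module docstring):
#   info = generate_control_file(tests=['sleeptest'])
#   info['control_file']   # the generated control file text
#   info['is_server']      # False for a client-side test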
def create_parameterized_job(name, priority, test, parameters, kernel=None,
label=None, profilers=(), profiler_parameters=None,
use_container=False, profile_only=None,
upload_kernel_config=False, hosts=(),
meta_hosts=(), one_time_hosts=(),
atomic_group_name=None, synch_count=None,
is_template=False, timeout=None,
timeout_mins=None, max_runtime_mins=None,
run_verify=False, email_list='', dependencies=(),
reboot_before=None, reboot_after=None,
parse_failed_repair=None, hostless=False,
keyvals=None, drone_set=None, run_reset=True):
"""
Creates and enqueues a parameterized job.
    Most parameters are a combination of the parameters for
    generate_control_file() and create_job(), with the exception of:
@param test name or ID of the test to run
    @param parameters a map of parameter name ->
            tuple of (param value, param type)
    @param profiler_parameters a dictionary of parameters for the profilers:
            key: profiler name
            value: dict of param name -> tuple of (param value, param type)
"""
    # Capture all arguments now. They are passed to
    # rpc_utils.get_create_job_common_args(), which extracts the subset that
    # applies to rpc_utils.create_job_common(); that subset is then passed on
    # to create_job_common() below.
args = locals()
# Set up the parameterized job configs
test_obj = models.Test.smart_get(test)
control_type = test_obj.test_type
try:
label = models.Label.smart_get(label)
except models.Label.DoesNotExist:
label = None
kernel_objs = models.Kernel.create_kernels(kernel)
profiler_objs = [models.Profiler.smart_get(profiler)
for profiler in profilers]
parameterized_job = models.ParameterizedJob.objects.create(
test=test_obj, label=label, use_container=use_container,
profile_only=profile_only,
upload_kernel_config=upload_kernel_config)
parameterized_job.kernels.add(*kernel_objs)
for profiler in profiler_objs:
parameterized_profiler = models.ParameterizedJobProfiler.objects.create(
parameterized_job=parameterized_job,
profiler=profiler)
        # profiler_parameters may be None; treat that as an empty mapping.
        profiler_params = (profiler_parameters or {}).get(profiler.name, {})
for name, (value, param_type) in profiler_params.iteritems():
models.ParameterizedJobProfilerParameter.objects.create(
parameterized_job_profiler=parameterized_profiler,
parameter_name=name,
parameter_value=value,
parameter_type=param_type)
try:
for parameter in test_obj.testparameter_set.all():
if parameter.name in parameters:
param_value, param_type = parameters.pop(parameter.name)
parameterized_job.parameterizedjobparameter_set.create(
test_parameter=parameter, parameter_value=param_value,
parameter_type=param_type)
if parameters:
raise Exception('Extra parameters remain: %r' % parameters)
return rpc_utils.create_job_common(
parameterized_job=parameterized_job.id,
control_type=control_type,
**rpc_utils.get_create_job_common_args(args))
except:
parameterized_job.delete()
raise
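# Example usage (illustrative only; the test name, parameter, and hostname
# are hypothetical, and the parameter map follows the shape documented above):
#   create_parameterized_job(
#       name='kernel_build', priority=priorities.Priority.DEFAULT,
#       test='compile_kernel', parameters={'version': ('3.8', 'string')},
#       hosts=['host1'])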
def create_job(name, priority, control_file, control_type,
hosts=(), meta_hosts=(), one_time_hosts=(),
atomic_group_name=None, synch_count=None, is_template=False,
timeout=None, timeout_mins=None, max_runtime_mins=None,
run_verify=False, email_list='', dependencies=(),
reboot_before=None, reboot_after=None, parse_failed_repair=None,
hostless=False, keyvals=None, drone_set=None, image=None,
parent_job_id=None, test_retry=0, run_reset=True):
"""\
Create and enqueue a job.
@param name name of this job
@param priority Integer priority of this job. Higher is more important.
@param control_file String contents of the control file.
@param control_type Type of control file, Client or Server.
@param synch_count How many machines the job uses per autoserv execution.
synch_count == 1 means the job is asynchronous. If an atomic group is
given this value is treated as a minimum.
@param is_template If true then create a template job.
@param timeout Hours after this call returns until the job times out.
@param timeout_mins Minutes after this call returns until the job times
out.
@param max_runtime_mins Minutes from job starting time until job times out
@param run_verify Should the host be verified before running the test?
@param email_list String containing emails to mail when the job is done
@param dependencies List of label names on which this job depends
@param reboot_before Never, If dirty, or Always
@param reboot_after Never, If all tests passed, or Always
@param parse_failed_repair if true, results of failed repairs launched by
this job will be parsed as part of the job.
@param hostless if true, create a hostless job
@param keyvals dict of keyvals to associate with the job
@param hosts List of hosts to run job on.
@param meta_hosts List where each entry is a label name, and for each entry
one host will be chosen from that label to run the job on.
@param one_time_hosts List of hosts not in the database to run the job on.
@param atomic_group_name The name of an atomic group to schedule the job on.
@param drone_set The name of the drone set to run this test on.
@param image OS image to install before running job.
@param parent_job_id id of a job considered to be parent of created job.
@param test_retry: Number of times to retry test if the test did not
complete successfully. (optional, default: 0)
@param run_reset: Should the host be reset before running the test?
@returns The created Job id number.
"""
# Force control files to only contain ascii characters.
try:
control_file.encode('ascii')
    except (UnicodeDecodeError, UnicodeEncodeError) as e:
        # A str with non-ASCII bytes raises UnicodeDecodeError here; a
        # unicode string with non-ASCII characters raises UnicodeEncodeError.
        raise error.ControlFileMalformed(str(e))
if image is None:
return rpc_utils.create_job_common(
**rpc_utils.get_create_job_common_args(locals()))
# When image is supplied use a known parameterized test already in the
# database to pass the OS image path from the front end, through the
# scheduler, and finally to autoserv as the --image parameter.
# The test autoupdate_ParameterizedJob is in afe_autotests and used to
# instantiate a Test object and from there a ParameterizedJob.
known_test_obj = models.Test.smart_get('autoupdate_ParameterizedJob')
known_parameterized_job = models.ParameterizedJob.objects.create(
test=known_test_obj)
# autoupdate_ParameterizedJob has a single parameter, the image parameter,
# stored in the table afe_test_parameters. We retrieve and set this
# instance of the parameter to the OS image path.
image_parameter = known_test_obj.testparameter_set.get(test=known_test_obj,
name='image')
known_parameterized_job.parameterizedjobparameter_set.create(
test_parameter=image_parameter, parameter_value=image,
parameter_type='string')
# By passing a parameterized_job to create_job_common the job entry in
# the afe_jobs table will have the field parameterized_job_id set.
# The scheduler uses this id in the afe_parameterized_jobs table to
# match this job to our known test, and then with the
# afe_parameterized_job_parameters table to get the actual image path.
return rpc_utils.create_job_common(
parameterized_job=known_parameterized_job.id,
**rpc_utils.get_create_job_common_args(locals()))
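# Example usage (illustrative only; the job name and hostname are
# hypothetical, and the control file is generated from the stock 'sleeptest'):
#   job_id = create_job(name='sleeptest-run',
#                       priority=priorities.Priority.DEFAULT,
#                       control_file=generate_control_file(
#                           tests=['sleeptest'])['control_file'],
#                       control_type='Client',
#                       hosts=['myhost'])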
def abort_host_queue_entries(**filter_data):
"""\
Abort a set of host queue entries.
"""
query = models.HostQueueEntry.query_objects(filter_data)
    # Don't allow aborts on:
    #   1. Jobs that have already completed (whether or not they were aborted)
    #   2. Jobs that have already been aborted (but may not have completed)
query = query.filter(complete=False).filter(aborted=False)
models.AclGroup.check_abort_permissions(query)
host_queue_entries = list(query.select_related())
rpc_utils.check_abort_synchronous_jobs(host_queue_entries)
models.HostQueueEntry.abort_host_queue_entries(host_queue_entries)
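# Example usage (illustrative only; the job ID is hypothetical, and the
# filter relies on standard Django field lookups):
#   abort_host_queue_entries(job__id=42)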
def abort_special_tasks(**filter_data):
"""\
Abort the special task, or tasks, specified in the filter.
"""
query = models.SpecialTask.query_objects(filter_data)
special_tasks = query.filter(is_active=True)
for task in special_tasks:
task.abort()
def _call_special_tasks_on_hosts(task, hosts):
"""\
Schedules a set of hosts for a special task.
@returns A list of hostnames that a special task was created for.
"""
models.AclGroup.check_for_acl_violation_hosts(hosts)
for host in hosts:
models.SpecialTask.schedule_special_task(host, task)
    return sorted(host.hostname for host in hosts)
def reverify_hosts(**filter_data):
"""\
Schedules a set of hosts for verify.
@returns A list of hostnames that a verify task was created for.
"""
return _call_special_tasks_on_hosts(models.SpecialTask.Task.VERIFY,
models.Host.query_objects(filter_data))
def repair_hosts(**filter_data):
"""\
Schedules a set of hosts for repair.
@returns A list of hostnames that a repair task was created for.
"""
return _call_special_tasks_on_hosts(models.SpecialTask.Task.REPAIR,
models.Host.query_objects(filter_data))
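# Example usage (illustrative only; hostnames are hypothetical, and
# 'Repair Failed' is assumed to be a valid Host status value):
#   reverify_hosts(hostname__in=['host1', 'host2'])
#   repair_hosts(status='Repair Failed')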
def get_jobs(not_yet_run=False, running=False, finished=False, **filter_data):
"""\
Extra filter args for get_jobs:
-not_yet_run: Include only jobs that have not yet started running.
    -running: Include only jobs that have started running but for which not
        all hosts have completed.
-finished: Include only jobs for which all hosts have completed (or
aborted).
At most one of these three fields should be specified.
"""
filter_data['extra_args'] = rpc_utils.extra_job_filters(not_yet_run,
running,
finished)
job_dicts = []
jobs = list(models.Job.query_objects(filter_data))
models.Job.objects.populate_relationships(jobs, models.Label,
'dependencies')
models.Job.objects.populate_relationships(jobs, models.JobKeyval, 'keyvals')
for job in jobs:
job_dict = job.get_object_dict()
job_dict['dependencies'] = ','.join(label.name
for label in job.dependencies)
job_dict['keyvals'] = dict((keyval.key, keyval.value)
for keyval in job.keyvals)
if job.parameterized_job:
job_dict['image'] = get_parameterized_autoupdate_image_url(job)
job_dicts.append(job_dict)
return rpc_utils.prepare_for_serialization(job_dicts)
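# Example usage (the first call is from the module docstring; the job name
# prefix in the second is hypothetical):
#   get_jobs(owner='showard', status='Queued')
#   get_jobs(not_yet_run=True, name__startswith='nightly')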
def get_num_jobs(not_yet_run=False, running=False, finished=False,
**filter_data):
"""\
See get_jobs() for documentation of extra filter parameters.
"""
filter_data['extra_args'] = rpc_utils.extra_job_filters(not_yet_run,
running,
finished)
return models.Job.query_count(filter_data)
def get_jobs_summary(**filter_data):
"""\
Like get_jobs(), but adds a 'status_counts' field, which is a dictionary
mapping status strings to the number of hosts currently with that
status, i.e. {'Queued' : 4, 'Running' : 2}.
"""
jobs = get_jobs(**filter_data)
ids = [job['id'] for job in jobs]
all_status_counts = models.Job.objects.get_status_counts(ids)
for job in jobs:
job['status_counts'] = all_status_counts[job['id']]
return rpc_utils.prepare_for_serialization(jobs)
def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
"""\
Retrieves all the information needed to clone a job.
"""
job = models.Job.objects.get(id=id)
job_info = rpc_utils.get_job_info(job,
preserve_metahosts,
queue_entry_filter_data)
host_dicts = []
for host in job_info['hosts']:
host_dict = get_hosts(id=host.id)[0]
other_labels = host_dict['labels']
if host_dict['platform']:
other_labels.remove(host_dict['platform'])
host_dict['other_labels'] = ', '.join(other_labels)
host_dicts.append(host_dict)
for host in job_info['one_time_hosts']:
host_dict = dict(hostname=host.hostname,
id=host.id,
platform='(one-time host)',
locked_text='')
host_dicts.append(host_dict)
# convert keys from Label objects to strings (names of labels)
meta_host_counts = dict((meta_host.name, count) for meta_host, count
in job_info['meta_host_counts'].iteritems())
info = dict(job=job.get_object_dict(),
meta_host_counts=meta_host_counts,
hosts=host_dicts)
info['job']['dependencies'] = job_info['dependencies']
if job_info['atomic_group']:
info['atomic_group_name'] = (job_info['atomic_group']).name
else:
info['atomic_group_name'] = None
info['hostless'] = job_info['hostless']
info['drone_set'] = job.drone_set and job.drone_set.name
if job.parameterized_job:
info['job']['image'] = get_parameterized_autoupdate_image_url(job)
return rpc_utils.prepare_for_serialization(info)
# host queue entries
def get_host_queue_entries(**filter_data):
"""\
@returns A sequence of nested dictionaries of host and job information.
"""
return rpc_utils.prepare_rows_as_nested_dicts(
models.HostQueueEntry.query_objects(filter_data),
('host', 'atomic_group', 'job'))
def get_num_host_queue_entries(**filter_data):
"""\
Get the number of host queue entries associated with this job.
"""
return models.HostQueueEntry.query_count(filter_data)
def get_hqe_percentage_complete(**filter_data):
"""
Computes the fraction of host queue entries matching the given filter data
that are complete.
"""
query = models.HostQueueEntry.query_objects(filter_data)
complete_count = query.filter(complete=True).count()
total_count = query.count()
if total_count == 0:
return 1
return float(complete_count) / total_count
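# Example usage (illustrative only; the job ID is hypothetical):
#   get_hqe_percentage_complete(job__id=42)  # e.g. 0.75 when 3 of 4 are done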
# special tasks
def get_special_tasks(**filter_data):
return rpc_utils.prepare_rows_as_nested_dicts(
models.SpecialTask.query_objects(filter_data),
('host', 'queue_entry'))
# support for host detail view
def get_host_queue_entries_and_special_tasks(hostname, query_start=None,
query_limit=None):
"""
    @returns an interleaved list of HostQueueEntries and SpecialTasks,
            in approximate run order. Each dict contains keys for type, host,
            job, status, started_on, execution_path, and ID.
"""
    total_limit = None
    if query_limit is not None:
        # query_start may be None when only a limit is given.
        total_limit = (query_start or 0) + query_limit
filter_data = {'host__hostname': hostname,
'query_limit': total_limit,
'sort_by': ['-id']}
queue_entries = list(models.HostQueueEntry.query_objects(filter_data))
special_tasks = list(models.SpecialTask.query_objects(filter_data))
interleaved_entries = rpc_utils.interleave_entries(queue_entries,
special_tasks)
if query_start is not None:
interleaved_entries = interleaved_entries[query_start:]
if query_limit is not None:
interleaved_entries = interleaved_entries[:query_limit]
return rpc_utils.prepare_for_serialization(interleaved_entries)
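# Example usage (illustrative only; the hostname is hypothetical):
#   get_host_queue_entries_and_special_tasks('myhost', query_start=0,
#                                            query_limit=20)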
def get_num_host_queue_entries_and_special_tasks(hostname):
filter_data = {'host__hostname': hostname}
return (models.HostQueueEntry.query_count(filter_data)
+ models.SpecialTask.query_count(filter_data))
# recurring run
def get_recurring(**filter_data):
return rpc_utils.prepare_rows_as_nested_dicts(
models.RecurringRun.query_objects(filter_data),
('job', 'owner'))
def get_num_recurring(**filter_data):
return models.RecurringRun.query_count(filter_data)
def delete_recurring_runs(**filter_data):
to_delete = models.RecurringRun.query_objects(filter_data)
to_delete.delete()
def create_recurring_run(job_id, start_date, loop_period, loop_count):
owner = models.User.current_user().login
job = models.Job.objects.get(id=job_id)
return job.create_recurring_job(start_date=start_date,
loop_period=loop_period,
loop_count=loop_count,
owner=owner)
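# Example usage (illustrative only; the job ID is hypothetical and
# loop_period is assumed here to be a number of seconds):
#   create_recurring_run(42, datetime.datetime.now(), 3600, 10)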
# other
def echo(data=""):
"""\
    Returns the string passed in. Useful as a basic test that RPC calls can
    be made successfully.
"""
return data
def get_motd():
"""\
Returns the message of the day as a string.
"""
return rpc_utils.get_motd()
def get_static_data():
"""\
Returns a dictionary containing a bunch of data that shouldn't change
often and is otherwise inaccessible. This includes:
priorities: List of job priority choices.
default_priority: Default priority value for new jobs.
users: Sorted list of all users.
labels: Sorted list of all labels.
atomic_groups: Sorted list of all atomic groups.
tests: Sorted list of all tests.
profilers: Sorted list of all profilers.
current_user: Logged-in username.
host_statuses: Sorted list of possible Host statuses.
job_statuses: Sorted list of possible HostQueueEntry statuses.
    job_timeout_mins_default: The default job timeout length in minutes.
    job_max_runtime_mins_default: The default maximum job runtime in minutes.
parse_failed_repair_default: Default value for the parse_failed_repair job
option.
reboot_before_options: A list of valid RebootBefore string enums.
reboot_after_options: A list of valid RebootAfter string enums.
motd: Server's message of the day.
status_dictionary: A mapping from one word job status names to a more
informative description.
"""
job_fields = models.Job.get_field_dict()
default_drone_set_name = models.DroneSet.default_drone_set_name()
drone_sets = ([default_drone_set_name] +
sorted(drone_set.name for drone_set in
models.DroneSet.objects.exclude(
name=default_drone_set_name)))
result = {}
    result['priorities'] = priorities.Priority.choices()
    result['default_priority'] = 'Default'
    result['max_schedulable_priority'] = priorities.Priority.DEFAULT
result['users'] = get_users(sort_by=['login'])
result['labels'] = get_labels(sort_by=['-platform', 'name'])
result['atomic_groups'] = get_atomic_groups(sort_by=['name'])
result['tests'] = get_tests(sort_by=['name'])
result['profilers'] = get_profilers(sort_by=['name'])
result['current_user'] = rpc_utils.prepare_for_serialization(
models.User.current_user().get_object_dict())
result['host_statuses'] = sorted(models.Host.Status.names)
result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
result['job_timeout_mins_default'] = models.Job.DEFAULT_TIMEOUT_MINS
result['job_max_runtime_mins_default'] = (
models.Job.DEFAULT_MAX_RUNTIME_MINS)
result['parse_failed_repair_default'] = bool(
models.Job.DEFAULT_PARSE_FAILED_REPAIR)
result['reboot_before_options'] = model_attributes.RebootBefore.names
result['reboot_after_options'] = model_attributes.RebootAfter.names
result['motd'] = rpc_utils.get_motd()
result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled()
result['drone_sets'] = drone_sets
result['parameterized_jobs'] = models.Job.parameterized_jobs_enabled()
result['status_dictionary'] = {"Aborted": "Aborted",
"Verifying": "Verifying Host",
"Provisioning": "Provisioning Host",
"Pending": "Waiting on other hosts",
"Running": "Running autoserv",
"Completed": "Autoserv completed",
"Failed": "Failed to complete",
"Queued": "Queued",
"Starting": "Next in host's queue",
"Stopped": "Other host(s) failed verify",
"Parsing": "Awaiting parse of final results",
"Gathering": "Gathering log files",
"Template": "Template job for recurring run",
"Waiting": "Waiting for scheduler action",
"Archiving": "Archiving results",
"Resetting": "Resetting hosts"}
return result
def get_server_time():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M")