Example #1
def get_hosts(multiple_labels=[], exclude_only_if_needed_labels=False,
              **filter_data):
    """\
    multiple_labels: match hosts in all of the labels given.  Should be a
    list of label names.
    exclude_only_if_needed_labels: exclude hosts with at least one
    "only_if_needed" label applied.
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.Label,
                                               'label_list')
    models.Host.objects.populate_relationships(hosts, models.AclGroup,
                                               'acl_list')
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_dicts = []
    for host_obj in hosts:
        host_dict = host_obj.get_object_dict()
        host_dict['labels'] = [label.name for label in host_obj.label_list]
        host_dict['platform'], host_dict['atomic_group'] = (
                rpc_utils.find_platform_and_atomic_group(host_obj))
        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
                                       for attribute in host_obj.attribute_list)
        host_dicts.append(host_dict)
    return rpc_utils.prepare_for_serialization(host_dicts)
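
A minimal usage sketch for the function above, assuming it runs where this rpc_interface module and its Django models are importable; the label names and the 'locked' filter key are illustrative values, not part of the documented signature.

# Hypothetical call: hosts carrying both labels, skipping only_if_needed
# labels and locked machines (filter keys are ordinary Host field lookups).
hosts = get_hosts(multiple_labels=['label1', 'label2'],
                  exclude_only_if_needed_labels=True,
                  locked=False)
for host in hosts:
    # Each entry is a plain dict already prepared for serialization.
    print('%s labels=%s platform=%s'
          % (host['hostname'], host['labels'], host['platform']))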
Example #2
def get_detailed_test_views(**filter_data):
    test_views = models.TestView.list_objects(filter_data)

    tests_by_id = models.Test.objects.in_bulk([test_view['test_idx']
                                               for test_view in test_views])
    tests = tests_by_id.values()
    models.Test.objects.populate_relationships(tests, models.TestAttribute,
                                               'attributes')
    models.Test.objects.populate_relationships(tests, models.IterationAttribute,
                                               'iteration_attributes')
    models.Test.objects.populate_relationships(tests, models.IterationResult,
                                               'iteration_results')
    models.Test.objects.populate_relationships(tests, models.TestLabel,
                                               'labels')

    jobs_by_id = models.Job.objects.in_bulk([test_view['job_idx']
                                             for test_view in test_views])
    jobs = jobs_by_id.values()
    models.Job.objects.populate_relationships(jobs, models.JobKeyval,
                                              'keyvals')

    for test_view in test_views:
        test = tests_by_id[test_view['test_idx']]
        test_view['attributes'] = _attributes_to_dict(test.attributes)
        test_view['iterations'] = _format_iteration_keyvals(test)
        test_view['labels'] = [label.name for label in test.labels]

        job = jobs_by_id[test_view['job_idx']]
        test_view['job_keyvals'] = _job_keyvals_to_dict(job.keyvals)

    return rpc_utils.prepare_for_serialization(test_views)
Example #3
def get_jobs(not_yet_run=False, running=False, finished=False, **filter_data):
    """\
    Extra filter args for get_jobs:
    -not_yet_run: Include only jobs that have not yet started running.
    -running: Include only jobs that have started running but for which not
    all hosts have completed.
    -finished: Include only jobs for which all hosts have completed (or
    aborted).
    At most one of these three fields should be specified.
    """
    filter_data['extra_args'] = rpc_utils.extra_job_filters(not_yet_run,
                                                            running,
                                                            finished)
    job_dicts = []
    jobs = list(models.Job.query_objects(filter_data))
    models.Job.objects.populate_relationships(jobs, models.Label,
                                              'dependencies')
    models.Job.objects.populate_relationships(jobs, models.JobKeyval, 'keyvals')
    for job in jobs:
        job_dict = job.get_object_dict()
        job_dict['dependencies'] = ','.join(label.name
                                            for label in job.dependencies)
        job_dict['keyvals'] = dict((keyval.key, keyval.value)
                                   for keyval in job.keyvals)
        job_dicts.append(job_dict)
    return rpc_utils.prepare_for_serialization(job_dicts)
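
A hedged sketch of combining one of the three boolean flags with ordinary filter data; the owner value and sort_by key are assumptions about typical filter_data, not guaranteed parameters.

# Hypothetical call: finished jobs for one owner, newest first.
jobs = get_jobs(finished=True, owner='someuser', sort_by=['-id'])
for job in jobs:
    # 'dependencies' is a comma-separated string, 'keyvals' a plain dict.
    print('%s %s deps=%s keyvals=%s'
          % (job['id'], job['name'], job['dependencies'], job['keyvals']))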
Example #4
def get_acl_groups(**filter_data):
    acl_groups = models.AclGroup.list_objects(filter_data)
    for acl_group in acl_groups:
        acl_group_obj = models.AclGroup.objects.get(id=acl_group['id'])
        acl_group['users'] = [user.login
                              for user in acl_group_obj.users.all()]
        acl_group['hosts'] = [host.hostname
                              for host in acl_group_obj.hosts.all()]
    return rpc_utils.prepare_for_serialization(acl_groups)
Example #5
def get_jobs_summary(**filter_data):
    """\
    Like get_jobs(), but adds a 'status_counts' field, which is a dictionary
    mapping status strings to the number of hosts currently with that
    status, e.g. {'Queued' : 4, 'Running' : 2}.
    """
    jobs = get_jobs(**filter_data)
    ids = [job['id'] for job in jobs]
    all_status_counts = models.Job.objects.get_status_counts(ids)
    for job in jobs:
        job['status_counts'] = all_status_counts[job['id']]
    return rpc_utils.prepare_for_serialization(jobs)
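
A short sketch of the extra 'status_counts' field; the running=True flag simply reuses the get_jobs() filters shown earlier.

# Hypothetical call: per-status host counts for currently running jobs.
jobs = get_jobs_summary(running=True)
for job in jobs:
    # e.g. {'Queued': 4, 'Running': 2}
    print('%s %s' % (job['id'], job['status_counts']))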
Example #6
def get_hosts(plan_id):
    """
    Gets the hostnames of all the hosts in this test plan.

    Resolves host labels in the plan.
    """
    plan = models.Plan.smart_get(plan_id)

    hosts = set(plan.hosts.all().values_list('hostname', flat=True))
    for label in plan.host_labels.all():
        hosts.update(label.host_set.all().values_list('hostname', flat=True))

    return afe_rpc_utils.prepare_for_serialization(hosts)
Example #7
def get_group_counts(group_by, header_groups=[], fixed_headers={},
                     machine_label_headers={}, extra_select_fields={},
                     **filter_data):
    """
    Queries against TestView, grouping by the specified fields and computing
    counts for each group.
    * group_by should be a list of field names.
    * extra_select_fields can be used to specify additional fields to select
      (usually for aggregate functions).
    * header_groups can be used to get lists of unique combinations of group
      fields.  It should be a list of tuples of fields from group_by.  It's
      primarily for use by the spreadsheet view.
    * fixed_headers can map header fields to lists of values.  The header is
      guaranteed to return exactly those values.  This does not work together
      with header_groups.
    * machine_label_headers can specify special headers to be constructed from
      machine labels.  It should map arbitrary names to lists of machine labels.
      A field will be created with the given name containing a comma-separated
      list indicating which of the given machine labels are on each test.  This
      field can then be grouped on.

    Returns a dictionary with two keys:
    * header_values contains a list of lists, one for each header group in
      header_groups.  Each list contains all the values for the corresponding
      header group as tuples.
    * groups contains a list of dicts, one for each row.  Each dict contains
      keys for each of the group_by fields, plus a 'group_count' key for the
      total count in the group, plus keys for each of the extra_select_fields.
      The keys for the extra_select_fields are determined by the "AS" alias of
      the field.
    """
    extra_select_fields = dict(extra_select_fields)
    query = models.TestView.objects.get_query_set_with_joins(
        filter_data, include_host_labels=bool(machine_label_headers))
    query = models.TestView.query_objects(filter_data, initial_query=query)
    count_alias, count_sql = models.TestView.objects.get_count_sql(query)
    extra_select_fields[count_alias] = count_sql
    if 'test_idx' not in group_by:
        extra_select_fields['test_idx'] = 'test_idx'
    tko_rpc_utils.add_machine_label_headers(machine_label_headers,
                                            extra_select_fields)

    group_processor = tko_rpc_utils.GroupDataProcessor(query, group_by,
                                                       header_groups,
                                                       fixed_headers,
                                                       extra_select_fields)
    group_processor.process_group_dicts()
    return rpc_utils.prepare_for_serialization(group_processor.get_info_dict())
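
To make the documented return shape concrete, a hypothetical grouped query; the field names and the date filter are illustrative TestView lookups.

# Hypothetical call: count tests per (hostname, test_name) pair and
# collect header values for a spreadsheet-style view.
info = get_group_counts(group_by=['hostname', 'test_name'],
                        header_groups=[('hostname',), ('test_name',)],
                        test_started_time__gte='2012-01-01')
# info['header_values'] -> one list of value tuples per header group
# info['groups'] -> [{'hostname': ..., 'test_name': ...,
#                     'group_count': N, ...}, ...]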
Example #8
def get_latest_tests(group_by, header_groups=[], fixed_headers={},
                     extra_info=[], **filter_data):
    """
    Similar to get_status_counts, but returns only the latest test result per
    group.  It still returns the same information (i.e. with pass count etc.)
    for compatibility.  It includes an additional field "test_idx" with each
    group.
    @param extra_info: a list containing the field names that should be returned
                      with each cell. The fields are returned in the extra_info
                      field of the return dictionary.
    """
    # find latest test per group
    initial_query = models.TestView.objects.get_query_set_with_joins(
            filter_data)
    query = models.TestView.query_objects(filter_data,
                                          initial_query=initial_query,
                                          apply_presentation=False)
    query = query.exclude(status__in=tko_rpc_utils._INVALID_STATUSES)
    query = query.extra(
            select={'latest_test_idx' : 'MAX(%s)' %
                    models.TestView.objects.get_key_on_this_table('test_idx')})
    query = models.TestView.apply_presentation(query, filter_data)

    group_processor = tko_rpc_utils.GroupDataProcessor(query, group_by,
                                                       header_groups,
                                                       fixed_headers)
    group_processor.process_group_dicts()
    info = group_processor.get_info_dict()

    # fetch full info for these tests so we can access their statuses
    all_test_ids = [group['latest_test_idx'] for group in info['groups']]
    test_views = initial_query.in_bulk(all_test_ids)

    for group_dict in info['groups']:
        test_idx = group_dict.pop('latest_test_idx')
        group_dict['test_idx'] = test_idx
        test_view = test_views[test_idx]

        tko_rpc_utils.add_status_counts(group_dict, test_view.status)
        group_dict['extra_info'] = []
        for field in extra_info:
            group_dict['extra_info'].append(getattr(test_view, field))

    return rpc_utils.prepare_for_serialization(info)
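
A hedged sketch of extra_info in use; 'reason' and 'job_tag' are just two plausible TestView columns to pull along with each group.

# Hypothetical call: newest result per (hostname, test_name), plus the
# reason and job tag of that newest test.
info = get_latest_tests(group_by=['hostname', 'test_name'],
                        extra_info=['reason', 'job_tag'])
for group in info['groups']:
    # status counts, the winning test_idx and the requested extra fields
    print('%s %s test_idx=%s extra=%s'
          % (group['hostname'], group['test_name'],
             group['test_idx'], group['extra_info']))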
Example #9
def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
    """\
    Retrieves all the information needed to clone a job.
    """
    job = models.Job.objects.get(id=id)
    job_info = rpc_utils.get_job_info(job,
                                      preserve_metahosts,
                                      queue_entry_filter_data)

    host_dicts = []
    for host in job_info['hosts']:
        host_dict = get_hosts(id=host.id)[0]
        other_labels = host_dict['labels']
        if host_dict['platform']:
            other_labels.remove(host_dict['platform'])
        host_dict['other_labels'] = ', '.join(other_labels)
        host_dicts.append(host_dict)

    for host in job_info['one_time_hosts']:
        host_dict = dict(hostname=host.hostname,
                         id=host.id,
                         platform='(one-time host)',
                         locked_text='')
        host_dicts.append(host_dict)

    # convert keys from Label objects to strings (names of labels)
    meta_host_counts = dict((meta_host.name, count) for meta_host, count
                            in job_info['meta_host_counts'].iteritems())

    info = dict(job=job.get_object_dict(),
                meta_host_counts=meta_host_counts,
                hosts=host_dicts)
    info['job']['dependencies'] = job_info['dependencies']
    if job_info['atomic_group']:
        info['atomic_group_name'] = (job_info['atomic_group']).name
    else:
        info['atomic_group_name'] = None
    info['hostless'] = job_info['hostless']
    info['drone_set'] = job.drone_set and job.drone_set.name

    return rpc_utils.prepare_for_serialization(info)
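
A sketch of what comes back for a clone request; the job id is a placeholder.

# Hypothetical call: everything needed to clone job 42, keeping metahosts.
info = get_info_for_clone(42, preserve_metahosts=True)
# info['job']              -> job field dict plus its 'dependencies'
# info['meta_host_counts'] -> {label_name: count, ...}
# info['hosts']            -> host dicts with an extra 'other_labels' string
# info['atomic_group_name'], info['hostless'], info['drone_set']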
Example #10
def get_group_counts(group_by, header_groups=None, fixed_headers=None,
                     extra_select_fields=None, **filter_data):
    """
    Queries against TestView, grouping by the specified fields and computing
    counts for each group.
    * group_by should be a list of field names.
    * extra_select_fields can be used to specify additional fields to select
      (usually for aggregate functions).
    * header_groups can be used to get lists of unique combinations of group
      fields.  It should be a list of tuples of fields from group_by.  It's
      primarily for use by the spreadsheet view.
    * fixed_headers can map header fields to lists of values.  The header is
      guaranteed to return exactly those values.  This does not work together
      with header_groups.

    Returns a dictionary with two keys:
    * header_values contains a list of lists, one for each header group in
      header_groups.  Each list contains all the values for the corresponding
      header group as tuples.
    * groups contains a list of dicts, one for each row.  Each dict contains
      keys for each of the group_by fields, plus a 'group_count' key for the
      total count in the group, plus keys for each of the extra_select_fields.
      The keys for the extra_select_fields are determined by the "AS" alias of
      the field.
    """
    query = models.TestView.objects.get_query_set_with_joins(filter_data)
    # don't apply presentation yet, since we have extra selects to apply
    query = models.TestView.query_objects(filter_data, initial_query=query,
                                          apply_presentation=False)
    count_alias, count_sql = models.TestView.objects.get_count_sql(query)
    query = query.extra(select={count_alias: count_sql})
    if extra_select_fields:
        query = query.extra(select=extra_select_fields)
    query = models.TestView.apply_presentation(query, filter_data)

    group_processor = tko_rpc_utils.GroupDataProcessor(query, group_by,
                                                       header_groups or [],
                                                       fixed_headers or {})
    group_processor.process_group_dicts()
    return rpc_utils.prepare_for_serialization(group_processor.get_info_dict())
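
A hypothetical call to this variant, where extra_select_fields is an {alias: SQL} mapping passed straight to query.extra(); the aggregate expression is an assumption about the underlying columns.

# Hypothetical call: group by kernel and status, also selecting the
# newest test index per group under the alias 'latest_test_idx'.
info = get_group_counts(group_by=['kernel', 'status'],
                        extra_select_fields={
                                'latest_test_idx': 'MAX(test_idx)'})
for group in info['groups']:
    print('%s %s count=%s latest=%s'
          % (group['kernel'], group['status'],
             group['group_count'], group['latest_test_idx']))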
Example #11
def get_iteration_views(result_keys, **test_filter_data):
    """
    Similar to get_test_views, but returns a dict for each iteration rather
    than for each test.  Accepts the same filter data as get_test_views.

    @param result_keys: list of iteration result keys to include.  Only
            iterations containing all these keys will be included.
    @returns a list of dicts, one for each iteration.  Each dict contains:
            * all the same information as get_test_views()
            * all the keys specified in result_keys
            * an additional key 'iteration_index'
    """
    iteration_views = tko_rpc_utils.get_iteration_view_query(result_keys,
                                                             test_filter_data)

    final_filter_data = tko_rpc_utils.extract_presentation_params(
            test_filter_data)
    final_filter_data['no_distinct'] = True
    fields = (models.TestView.get_field_dict().keys() + result_keys +
              ['iteration_index'])
    iteration_dicts = models.TestView.list_objects(
            final_filter_data, initial_query=iteration_views, fields=fields)
    return rpc_utils.prepare_for_serialization(iteration_dicts)
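
A usage sketch with illustrative result keys and a test_name filter; only iterations reporting both keys come back.

# Hypothetical call: one dict per tbench iteration that reports both keys.
iterations = get_iteration_views(result_keys=['throughput', 'latency'],
                                 test_name='tbench')
for it in iterations:
    print('%s #%s throughput=%s latency=%s'
          % (it['test_idx'], it['iteration_index'],
             it['throughput'], it['latency']))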
Example #12
def get_hosts_and_tests():
    """\
    Gets every host that has had a benchmark run on it. Also returns a
    dictionary mapping the host names to the benchmarks.
    """

    host_info = {}
    q = (dbmodels.Q(test_name__startswith='kernbench') |
         dbmodels.Q(test_name__startswith='dbench') |
         dbmodels.Q(test_name__startswith='tbench') |
         dbmodels.Q(test_name__startswith='unixbench') |
         dbmodels.Q(test_name__startswith='iozone'))
    test_query = models.TestView.objects.filter(q).values(
        'test_name', 'hostname', 'machine_idx').distinct()
    for result_dict in test_query:
        hostname = result_dict['hostname']
        test = result_dict['test_name']
        machine_idx = result_dict['machine_idx']
        host_info.setdefault(hostname, {})
        host_info[hostname].setdefault('tests', [])
        host_info[hostname]['tests'].append(test)
        host_info[hostname]['id'] = machine_idx
    return rpc_utils.prepare_for_serialization(host_info)
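
A sketch of the mapping this returns; the hostnames, ids and test names below are placeholders.

# Hypothetical result shape:
# {'host1': {'id': 13, 'tests': ['kernbench', 'dbench']},
#  'host2': {'id': 27, 'tests': ['iozone']}}
host_info = get_hosts_and_tests()
for hostname, info in host_info.items():
    print('%s id=%s tests=%s' % (hostname, info['id'], info['tests']))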
Example #13
def get_host_queue_entries_and_special_tasks(hostname, query_start=None,
                                             query_limit=None):
    """
    @returns an interleaved list of HostQueueEntries and SpecialTasks,
            in approximate run order.  Each dict contains keys for type, host,
            job, status, started_on, execution_path, and ID.
    """
    total_limit = None
    if query_limit is not None:
        total_limit = query_start + query_limit
    filter_data = {'host__hostname': hostname,
                   'query_limit': total_limit,
                   'sort_by': ['-id']}

    queue_entries = list(models.HostQueueEntry.query_objects(filter_data))
    special_tasks = list(models.SpecialTask.query_objects(filter_data))

    interleaved_entries = rpc_utils.interleave_entries(queue_entries,
                                                       special_tasks)
    if query_start is not None:
        interleaved_entries = interleaved_entries[query_start:]
    if query_limit is not None:
        interleaved_entries = interleaved_entries[:query_limit]
    return rpc_utils.prepare_for_serialization(interleaved_entries)
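
A paging sketch for the interleaved history; the hostname and page size are placeholders, and note that query_start must accompany query_limit here since the two are added together above.

# Hypothetical call: second page of 20 entries of a host's history.
entries = get_host_queue_entries_and_special_tasks('host1.example.com',
                                                   query_start=20,
                                                   query_limit=20)
for entry in entries:
    # per the docstring: type, host, job, status, started_on,
    # execution_path and id
    print('%s %s %s' % (entry['type'], entry['status'], entry['started_on']))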
Example #14
def get_test_labels_for_tests(**test_filter_data):
    label_ids = models.TestView.objects.query_test_label_ids(test_filter_data)
    labels = models.TestLabel.list_objects({'id__in' : label_ids})
    return rpc_utils.prepare_for_serialization(labels)
Example #15
def get_users(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.User.list_objects(filter_data))
Example #16
def get_atomic_groups(**filter_data):
    return rpc_utils.prepare_for_serialization(
            models.AtomicGroup.list_objects(filter_data))
Example #17
def get_test_views(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.TestView.list_objects(filter_data))
Example #18
def get_static_data():
    result = {}
    group_fields = []
    for field in models.TestView.group_fields:
        if field in models.TestView.extra_fields:
            name = models.TestView.extra_fields[field]
        else:
            name = models.TestView.get_field_dict()[field].verbose_name
        group_fields.append((name.capitalize(), field))
    model_fields = [(field.verbose_name.capitalize(), field.column)
                    for field in models.TestView._meta.fields]
    extra_fields = [(field_name.capitalize(), field_sql)
                    for field_sql, field_name
                    in models.TestView.extra_fields.iteritems()]

    benchmark_key = {
        'kernbench' : 'elapsed',
        'dbench' : 'throughput',
        'tbench' : 'throughput',
        'unixbench' : 'score',
        'iozone' : '32768-4096-fwrite'
    }

    tko_perf_view = [
        ['Test Index', 'test_idx'],
        ['Job Index', 'job_idx'],
        ['Test Name', 'test_name'],
        ['Subdirectory', 'subdir'],
        ['Kernel Index', 'kernel_idx'],
        ['Status Index', 'status_idx'],
        ['Reason', 'reason'],
        ['Host Index', 'machine_idx'],
        ['Test Started Time', 'test_started_time'],
        ['Test Finished Time', 'test_finished_time'],
        ['Job Tag', 'job_tag'],
        ['Job Name', 'job_name'],
        ['Owner', 'job_owner'],
        ['Job Queued Time', 'job_queued_time'],
        ['Job Started Time', 'job_started_time'],
        ['Job Finished Time', 'job_finished_time'],
        ['Hostname', 'hostname'],
        ['Platform', 'platform'],
        ['Machine Owner', 'machine_owner'],
        ['Kernel Hash', 'kernel_hash'],
        ['Kernel Base', 'kernel_base'],
        ['Kernel', 'kernel'],
        ['Status', 'status'],
        ['Iteration Number', 'iteration'],
        ['Performance Keyval (Key)', 'iteration_key'],
        ['Performance Keyval (Value)', 'iteration_value'],
    ]

    result['group_fields'] = sorted(group_fields)
    result['all_fields'] = sorted(model_fields + extra_fields)
    result['test_labels'] = get_test_labels(sort_by=['name'])
    result['current_user'] = rpc_utils.prepare_for_serialization(
            afe_models.User.current_user().get_object_dict())
    result['benchmark_key'] = benchmark_key
    result['tko_perf_view'] = tko_perf_view
    result['tko_test_view'] = model_fields
    result['preconfigs'] = preconfigs.manager.all_preconfigs()
    result['motd'] = rpc_utils.get_motd()

    return result
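
A brief sketch of reading a couple of the returned keys; the values shown in comments come straight from the dictionaries defined above.

# Hypothetical client-side use of the static TKO data.
static = get_static_data()
print(static['benchmark_key']['dbench'])   # 'throughput'
print(static['group_fields'][:3])          # capitalized (name, field) pairs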
Example #19
def get_plan(id):
    return afe_rpc_utils.prepare_for_serialization(
            models.Plan.smart_get(id).get_object_dict())
Example #20
def get_static_data():
    """\
    Returns a dictionary containing a bunch of data that shouldn't change
    often and is otherwise inaccessible.  This includes:

    priorities: List of job priority choices.
    default_priority: Default priority value for new jobs.
    users: Sorted list of all users.
    labels: Sorted list of all labels.
    atomic_groups: Sorted list of all atomic groups.
    tests: Sorted list of all tests.
    profilers: Sorted list of all profilers.
    current_user: Logged-in username.
    host_statuses: Sorted list of possible Host statuses.
    job_statuses: Sorted list of possible HostQueueEntry statuses.
    job_timeout_default: The default job timeout length in hours.
    parse_failed_repair_default: Default value for the parse_failed_repair job
    option.
    reboot_before_options: A list of valid RebootBefore string enums.
    reboot_after_options: A list of valid RebootAfter string enums.
    motd: Server's message of the day.
    status_dictionary: A mapping from one word job status names to a more
            informative description.
    """

    job_fields = models.Job.get_field_dict()

    result = {}
    result['priorities'] = models.Job.Priority.choices()
    default_priority = job_fields['priority'].default
    default_string = models.Job.Priority.get_string(default_priority)
    result['default_priority'] = default_string
    result['users'] = get_users(sort_by=['login'])
    result['labels'] = get_labels(sort_by=['-platform', 'name'])
    result['atomic_groups'] = get_atomic_groups(sort_by=['name'])
    result['tests'] = get_tests(sort_by=['name'])
    result['profilers'] = get_profilers(sort_by=['name'])
    result['current_user'] = rpc_utils.prepare_for_serialization(
        thread_local.get_user().get_object_dict())
    result['host_statuses'] = sorted(models.Host.Status.names)
    result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
    result['job_timeout_default'] = models.Job.DEFAULT_TIMEOUT
    result['job_max_runtime_hrs_default'] = models.Job.DEFAULT_MAX_RUNTIME_HRS
    result['parse_failed_repair_default'] = bool(
        models.Job.DEFAULT_PARSE_FAILED_REPAIR)
    result['reboot_before_options'] = models.RebootBefore.names
    result['reboot_after_options'] = models.RebootAfter.names
    result['motd'] = rpc_utils.get_motd()

    result['status_dictionary'] = {"Aborted": "Aborted",
                                   "Verifying": "Verifying Host",
                                   "Pending": "Waiting on other hosts",
                                   "Running": "Running autoserv",
                                   "Completed": "Autoserv completed",
                                   "Failed": "Failed to complete",
                                   "Queued": "Queued",
                                   "Starting": "Next in host's queue",
                                   "Stopped": "Other host(s) failed verify",
                                   "Parsing": "Awaiting parse of final results",
                                   "Gathering": "Gathering log files",
                                   "Template": "Template job for recurring run"}
    return result
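
A sketch of picking a few documented keys out of the result; the example default-priority string is an assumption, while the status text comes from the mapping above.

# Hypothetical client-side use of the AFE static data bundle.
static = get_static_data()
print(static['default_priority'])              # e.g. 'Medium' (assumed)
print(static['host_statuses'][:3])             # sorted Host status names
print(static['status_dictionary']['Pending'])  # 'Waiting on other hosts'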
Example #21
def get_saved_queries(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.SavedQuery.list_objects(filter_data))