Example #1
def get_all_dimension_data_range(project):

    mtm = factory.get_mtm(project)
    data = mtm.get_all_dimension_data_range(None, None)
    mtm.disconnect()

    return data
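All of these helpers follow the same acquire/query/disconnect pattern around the model object returned by factory.get_mtm. A minimal sketch of the same call with a try/finally guard, so the connection is released even if the query raises; the factory module and the get_all_dimension_data_range signature are taken from the example above, while the wrapper name is hypothetical:

def get_all_dimension_data_range_safe(project):
    # Hypothetical variant of Example #1: same factory and model calls,
    # but the disconnect happens even when the query raises.
    mtm = factory.get_mtm(project)
    try:
        # None, None asks for the full available range, as in the example above
        return mtm.get_all_dimension_data_range(None, None)
    finally:
        mtm.disconnect()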
Example #2
def get_metrics_data(
    project, branch, revision, product_name=None, os_name=None,
    os_version=None, branch_version=None, processor=None, build_type=None,
    test_name=None, page_name=None
    ):
    """Return metrics data based on the parameters and optional filters."""

    ptm = factory.get_ptm(project)
    mtm = factory.get_mtm(project)

    # get the testrun ids from perftest
    test_run_ids = ptm.get_test_run_ids(
        branch, [revision], product_name, os_name, os_version,
        branch_version, processor, build_type, test_name
        )

    #test page metric
    metrics_data = mtm.get_metrics_data_from_test_run_ids(
        test_run_ids, page_name
        )

    ptm.disconnect()
    mtm.disconnect()

    return metrics_data
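A hedged usage sketch for get_metrics_data; the project, branch, revision, and filter values below are placeholders chosen only for illustration, not taken from a real deployment:

# Placeholder arguments; only project, branch, and revision are required.
metrics = get_metrics_data(
    "talos", "Mozilla-Inbound", "785345035a3b",
    product_name="Firefox",
    test_name="tp5o",
    page_name="amazon.com",
    )
print(metrics)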
Example #3
def get_metrics_data(project,
                     branch,
                     revision,
                     product_name=None,
                     os_name=None,
                     os_version=None,
                     branch_version=None,
                     processor=None,
                     build_type=None,
                     test_name=None,
                     page_name=None):
    """Return metrics data based on the parameters and optional filters."""

    ptm = factory.get_ptm(project)
    mtm = factory.get_mtm(project)

    # get the testrun ids from perftest
    test_run_ids = ptm.get_test_run_ids(branch, [revision], product_name,
                                        os_name, os_version, branch_version,
                                        processor, build_type, test_name)

    #test page metric
    metrics_data = mtm.get_metrics_data_from_test_run_ids(
        test_run_ids, page_name)

    ptm.disconnect()
    mtm.disconnect()

    return metrics_data
Example #4
def get_platforms_and_tests(project, product, branch, min_timestamp, max_timestamp):

    mtm = factory.get_mtm(project)
    data = mtm.get_platforms_and_tests(
        product, branch, min_timestamp, max_timestamp)
    mtm.disconnect()

    return data
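The repeated connect/call/disconnect shape can be factored into a small helper. This is a sketch only, not part of the original code; it assumes nothing beyond the factory module the examples already use:

def with_mtm(project, func, *args, **kwargs):
    # Hypothetical helper: open the metrics model, run one call, always disconnect.
    mtm = factory.get_mtm(project)
    try:
        return func(mtm, *args, **kwargs)
    finally:
        mtm.disconnect()

# With it, the body above reduces to:
# data = with_mtm(project, lambda m: m.get_platforms_and_tests(
#     product, branch, min_timestamp, max_timestamp))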
Example #5
def get_platforms_and_tests(project, product, branch, min_timestamp,
                            max_timestamp):

    mtm = factory.get_mtm(project)
    data = mtm.get_platforms_and_tests(product, branch, min_timestamp,
                                       max_timestamp)
    mtm.disconnect()

    return data
Example #6
def get_test_data_all_dimensions(project, product, branch, os, os_version,
                                 test, page, start_time, stop_time):

    mtm = factory.get_mtm(project)
    data = mtm.get_data_all_dimensions(product, branch, os, os_version, test,
                                       page, start_time, stop_time)
    mtm.disconnect()

    return data
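start_time and stop_time are passed straight through to the model. A usage sketch with a time window; the argument values, and the assumption that the bounds are epoch-second timestamps, are illustrative only:

import time

# Placeholder query window: the last seven days, assuming epoch-second timestamps.
stop_time = int(time.time())
start_time = stop_time - 7 * 24 * 60 * 60

data = get_test_data_all_dimensions(
    "talos", "Firefox", "Mozilla-Inbound", "linux", "Ubuntu 12.04",
    "tp5o", "amazon.com", start_time, stop_time)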
Example #7
def get_test_data_all_dimensions(
    project, product, branch, os, os_version, test, page,
    start_time, stop_time):

    mtm = factory.get_mtm(project)
    data = mtm.get_data_all_dimensions(
        product, branch, os, os_version, test, page, start_time,
        stop_time
        )
    mtm.disconnect()

    return data
Example #8
def get_metrics_summary(project,
                        branch,
                        revision,
                        product_name=None,
                        os_name=None,
                        os_version=None,
                        branch_version=None,
                        processor=None,
                        build_type=None,
                        test_name=None,
                        pushlog_project=None):
    """Return a metrics summary based on the parameters and optional filters."""

    plm = factory.get_plm(pushlog_project)
    ptm = factory.get_ptm(project)
    mtm = factory.get_mtm(project)

    # get the testrun ids from perftest
    test_run_ids = ptm.get_test_run_ids(branch, [revision], product_name,
                                        os_name, os_version, branch_version,
                                        processor, build_type, test_name)

    #test page metric
    metrics_data = mtm.get_metrics_summary(test_run_ids)

    metrics_data['product_info'] = {
        'version': branch_version,
        'name': product_name,
        'branch': branch,
        'revision': revision
    }

    #get push info
    push_data = plm.get_node_from_revision(revision, branch)
    metrics_data['push_data'] = push_data

    #get the products associated with this revision/branch combination
    products = ptm.get_revision_products(revision, branch)
    metrics_data['products'] = products

    plm.disconnect()
    ptm.disconnect()
    mtm.disconnect()

    return metrics_data
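Beyond the summary returned by mtm.get_metrics_summary, the function adds three keys of its own, visible in the code above: product_info, push_data, and products. A short sketch of reading them back; the argument values are placeholders:

summary = get_metrics_summary(
    "talos", "Mozilla-Inbound", "785345035a3b",
    product_name="Firefox", pushlog_project="pushlog")

print(summary['product_info'])   # version/name/branch/revision echoed back
print(summary['push_data'])      # node from plm.get_node_from_revision
print(summary['products'])       # products for this revision/branch pair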
Example #9
def get_metrics_summary(
    project, branch, revision, product_name=None, os_name=None,
    os_version=None, branch_version=None, processor=None, build_type=None,
    test_name=None, pushlog_project=None
    ):
    """Return a metrics summary based on the parameters and optional filters."""

    plm = factory.get_plm(pushlog_project)
    ptm = factory.get_ptm(project)
    mtm = factory.get_mtm(project)

    # get the testrun ids from perftest
    test_run_ids = ptm.get_test_run_ids(
        branch, [revision], product_name, os_name, os_version,
        branch_version, processor, build_type, test_name
        )

    #test page metric
    metrics_data = mtm.get_metrics_summary(test_run_ids)

    metrics_data['product_info'] = {
        'version': branch_version,
        'name': product_name,
        'branch': branch,
        'revision': revision
        }

    #get push info
    push_data = plm.get_node_from_revision(revision, branch)
    metrics_data['push_data'] = push_data

    #get the products associated with this revision/branch combination
    products = ptm.get_revision_products(revision, branch)
    metrics_data['products'] = products

    plm.disconnect()
    ptm.disconnect()
    mtm.disconnect()

    return metrics_data
Example #10
def get_application_log(project, revision):

    mtm = factory.get_mtm(project)
    log = mtm.get_application_log(revision)
    # disconnect explicitly, as the other helpers do
    mtm.disconnect()

    return log
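A context manager is another way to guarantee the disconnect. This is a sketch using the standard-library contextlib; the manager name is hypothetical, and only the factory module is assumed from the examples:

from contextlib import contextmanager

@contextmanager
def metrics_model(project):
    # Hypothetical wrapper: yield the model, disconnect no matter what.
    mtm = factory.get_mtm(project)
    try:
        yield mtm
    finally:
        mtm.disconnect()

# usage sketch:
# with metrics_model(project) as mtm:
#     log = mtm.get_application_log(revision)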
Example #11
def get_metrics_pushlog(
    project, branch, revision, product_name=None, os_name=None,
    os_version=None, branch_version=None, processor=None, build_type=None,
    test_name=None, page_name=None, pushes_before=None, pushes_after=None,
    pushlog_project=None
    ):
    """Return a metrics summary based on the parameters and optional filters."""

    plm = factory.get_plm(pushlog_project)
    ptm = factory.get_ptm(project)
    mtm = factory.get_mtm(project)

    aggregate_pushlog, changeset_lookup = plm.get_branch_pushlog_by_revision(
        revision, branch, pushes_before, pushes_after
        )

    pushlog_id_index_map = {}
    all_revisions = []

    for index, node in enumerate(aggregate_pushlog):

        pushlog_id_index_map[node['pushlog_id']] = index

        aggregate_pushlog[index]['metrics_data'] = []
        aggregate_pushlog[index]['dz_revision'] = ""
        aggregate_pushlog[index]['branch_name'] = branch

        changesets = changeset_lookup[node['pushlog_id']]

        #The revisions associated with a push are returned in reverse order
        #from the pushlog web service.  This orders them the same way tbpl
        #does.
        changesets['revisions'].reverse()

        #truncate the revision strings and collect them
        for cset_index, revision_data in enumerate(changesets['revisions']):

            full_revision = revision_data['revision']

            revision = mtm.truncate_revision(full_revision)
            changesets['revisions'][cset_index]['revision'] = revision

            all_revisions.append(revision)

        aggregate_pushlog[index]['revisions'] = changesets['revisions']

    # materialize the keys so pushlog_id_list is a real list under Python 3 too
    pushlog_id_list = list(pushlog_id_index_map.keys())

    # get the testrun ids from perftest
    filtered_test_run_ids = ptm.get_test_run_ids(
        branch, all_revisions, product_name, os_name, os_version,
        branch_version, processor, build_type, test_name
        )

    # get the test run ids associated with the pushlog ids
    pushlog_test_run_ids = mtm.get_test_run_ids_from_pushlog_ids(
        pushlog_ids=pushlog_id_list
        )

    # get intersection
    test_run_ids = list(
        set(filtered_test_run_ids).intersection(set(pushlog_test_run_ids)))

    # get the metrics data for the intersection
    metrics_data = mtm.get_metrics_data_from_test_run_ids(
        test_run_ids, page_name
        )

    #decorate aggregate_pushlog with the metrics data
    for d in metrics_data:

        pushlog_id = d['push_info'].get('pushlog_id', None)

        #A defined pushlog_id is required to decorate the correct push
        if not pushlog_id:
            continue

        pushlog_index = pushlog_id_index_map[pushlog_id]
        aggregate_pushlog[pushlog_index]['metrics_data'].append(d)
        aggregate_pushlog[pushlog_index]['dz_revision'] = d['test_build']['revision']

    plm.disconnect()
    ptm.disconnect()
    mtm.disconnect()

    return aggregate_pushlog
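Each entry of the returned aggregate_pushlog is a push decorated in the loop above with metrics_data, dz_revision, branch_name, and its reversed, truncated revisions. A sketch that walks the result; the argument values are placeholders:

pushlog = get_metrics_pushlog(
    "talos", "Mozilla-Inbound", "785345035a3b",
    product_name="Firefox", pushes_before=5, pushes_after=5,
    pushlog_project="pushlog")

for push in pushlog:
    # fields added to each push by get_metrics_pushlog itself
    print(push['branch_name'], push['dz_revision'],
          len(push['revisions']), len(push['metrics_data']))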
Example #12
def get_metrics_pushlog(project,
                        branch,
                        revision,
                        product_name=None,
                        os_name=None,
                        os_version=None,
                        branch_version=None,
                        processor=None,
                        build_type=None,
                        test_name=None,
                        page_name=None,
                        pushes_before=None,
                        pushes_after=None,
                        pushlog_project=None):
    """Return a metrics summary based on the parameters and optional filters."""

    plm = factory.get_plm(pushlog_project)
    ptm = factory.get_ptm(project)
    mtm = factory.get_mtm(project)

    aggregate_pushlog, changeset_lookup = plm.get_branch_pushlog_by_revision(
        revision, branch, pushes_before, pushes_after)

    pushlog_id_index_map = {}
    all_revisions = []

    for index, node in enumerate(aggregate_pushlog):

        pushlog_id_index_map[node['pushlog_id']] = index

        aggregate_pushlog[index]['metrics_data'] = []
        aggregate_pushlog[index]['dz_revision'] = ""
        aggregate_pushlog[index]['branch_name'] = branch

        changesets = changeset_lookup[node['pushlog_id']]

        #The revisions associated with a push are returned in reverse order
        #from the pushlog web service.  This orders them the same way tbpl
        #does.
        changesets['revisions'].reverse()

        #truncate the revision strings and collect them
        for cset_index, revision_data in enumerate(changesets['revisions']):

            full_revision = revision_data['revision']

            revision = mtm.truncate_revision(full_revision)
            changesets['revisions'][cset_index]['revision'] = revision

            all_revisions.append(revision)

        aggregate_pushlog[index]['revisions'] = changesets['revisions']

    # materialize the keys so pushlog_id_list is a real list under Python 3 too
    pushlog_id_list = list(pushlog_id_index_map.keys())

    # get the testrun ids from perftest
    filtered_test_run_ids = ptm.get_test_run_ids(branch, all_revisions,
                                                 product_name, os_name,
                                                 os_version, branch_version,
                                                 processor, build_type,
                                                 test_name)

    # get the test run ids associated with the pushlog ids
    pushlog_test_run_ids = mtm.get_test_run_ids_from_pushlog_ids(
        pushlog_ids=pushlog_id_list)

    # get intersection
    test_run_ids = list(
        set(filtered_test_run_ids).intersection(set(pushlog_test_run_ids)))

    # get the metrics data for the intersection
    metrics_data = mtm.get_metrics_data_from_test_run_ids(
        test_run_ids, page_name)

    #decorate aggregate_pushlog with the metrics data
    for d in metrics_data:

        pushlog_id = d['push_info'].get('pushlog_id', None)

        #A defined pushlog_id is required to decorate the correct push
        if not pushlog_id:
            continue

        pushlog_index = pushlog_id_index_map[pushlog_id]
        aggregate_pushlog[pushlog_index]['metrics_data'].append(d)
        aggregate_pushlog[pushlog_index]['dz_revision'] = d['test_build'][
            'revision']

    plm.disconnect()
    ptm.disconnect()
    mtm.disconnect()

    return aggregate_pushlog