def test_get_metrics_data(client, mtm, ptm, plm, monkeypatch):
    """
    Test metrics data retrieval through the web service.
    """

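    # setup_pushlog_walk_tests builds sample pushlog and perftest data
    # and returns a dict holding the branch, the sampled revisions, and
    # two special revisions: one whose tests fail and one that is
    # skipped entirely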
    setup_data = setup_pushlog_walk_tests(mtm, ptm, plm, monkeypatch, True)

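    # get_metric_values returns the set of metric keys that every page
    # entry is expected to contain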
    metric_values = get_metric_values()

    fail_revision = setup_data['fail_revision']
    skip_revision = setup_data['skip_revision']

    for index, revision in enumerate(setup_data['sample_revisions']):

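        # Endpoint shape: /<project>/testdata/metrics/<branch>/<revision>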
        uri = "/{0}/testdata/metrics/{1}/{2}".format(
            ptm.project, setup_data['branch'], revision
            )

        response = client.get(uri)

        if revision == fail_revision:
            # All tests should fail for this revision
            metric_struct = response.json[0]

            for page in metric_struct['pages']:

                page_metrics = metric_struct['pages'][page]

                assert page_metrics['h0_rejected'] == 1
                assert page_metrics['fdr'] == 1
                assert page_metrics['test_evaluation'] == 0

        elif revision == skip_revision:

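            # The skipped revision produces no metrics data at all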
            assert response.json == []

        else:
            # All tests should pass; index 0 is skipped (the first
            # revision in the walk has no parent push to compare
            # against)
            if index > 0:

                metric_struct = response.json[0]

                for page in metric_struct['pages']:

                    page_metrics = metric_struct['pages'][page]

                    assert page_metrics['h0_rejected'] == 0
                    assert page_metrics['fdr'] == 0
                    assert page_metrics['test_evaluation'] == 1

                    revision_metric_values = set(page_metrics.keys())

                    assert metric_values.issubset(revision_metric_values)


def test_get_metrics_data_with_parameters(
    client, mtm, ptm, plm, monkeypatch
    ):
    """
    Test metrics data retrieval with all available query parameters.
    """

    setup_data = setup_pushlog_walk_tests(mtm, ptm, plm, monkeypatch, True)

    metric_values = get_metric_values()

    sample_data = TestData(perftest_data())

    sample_data['test_build']['branch'] = setup_data['branch']
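    # _get_uri_parameters yields, for each supported query parameter, a
    # 'query_params' string that matches the sample data and a
    # companion 'fail_params' string that should match nothing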
    parameters = _get_uri_parameters(sample_data)

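    # Use a revision known to have complete metrics data as the
    # reference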
    revision = setup_data['sample_revisions'][3]

    uri = "/{0}/testdata/metrics/{1}/{2}".format(
        ptm.project, setup_data['branch'], revision
        )

    reference_response = client.get(uri)

    for params in parameters:

        uri_with_params = "{0}?{1}".format(
            uri, params['query_params']
            )
        fail_uri_with_params = "{0}?{1}".format(
            uri, params['fail_params']
            )

        success_response = client.get(uri_with_params)
        fail_response = client.get(fail_uri_with_params)

        if 'page_name' in params['query_params']:

            page_sample = reference_response.json[0].copy()
            page_filter = page_sample['pages']['three.com']
            page_sample['pages'] = {'three.com': page_filter}

            assert success_response.json[0] == page_sample

            # A page_name filter that matches no pages in the test data
            # returns an empty result set
            assert fail_response.json == []

        else:

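            # Matching non-page parameters should return the same data
            # as the unfiltered reference request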
            assert success_response.json[0] == reference_response.json[0]
            assert fail_response.json == []


def test_get_metrics_pushlog(client, mtm, ptm, plm, monkeypatch):
    """
    Test the metrics pushlog through the web service.
    """

    setup_data = setup_pushlog_walk_tests(mtm, ptm, plm, monkeypatch, True)

    target_revision_index = setup_data['target_revision_index']
    revision = setup_data['sample_revisions'][target_revision_index]

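    # The pushlog endpoint returns pushes around the target revision;
    # pushes that have datazilla data carry a dz_revision entry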
    uri = "/{0}/testdata/metrics/{1}/{2}/pushlog?".format(
        ptm.project, setup_data['branch'], revision)

    uri_and_params = (
        "{0}product=Firefox&pushes_before=5&"
        "pushes_after=5&test_name={1}&pushlog_project={2}"
        "&page_name={3}"
        )

    uri_and_params = uri_and_params.format(
        uri, "Talos tp5r", plm.project, "one.com"
        )

    response = client.get(uri_and_params)

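    # Count the pushes in the response that carry datazilla data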
    match_count = 0

    metric_values = get_metric_values()

    for push in response.json:

        if push['dz_revision']:

            match_count += 1

            # The datazilla revision should be the newest (last)
            # revision in the push
            assert push['revisions'][-1]['revision'] == push['dz_revision']

            for data in push['metrics_data']:
                for page_data in data['pages']:
                    metric_data_keys = data['pages'][page_data].keys()
                    assert metric_values.issubset(metric_data_keys)

    assert match_count == 2


def test_get_metrics_summary_with_parameters(
    client, mtm, ptm, plm, monkeypatch
    ):
    """
    Test the metrics data summary with all available query parameters.
    """

    setup_data = setup_pushlog_walk_tests(mtm, ptm, plm, monkeypatch, True)

    metric_values = get_metric_values()

    sample_data = TestData(perftest_data())

    sample_data['test_build']['branch'] = setup_data['branch']
    parameters = _get_uri_parameters(sample_data)

    revision = setup_data['sample_revisions'][3]

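    # The summary endpoint is exercised with product and
    # pushlog_project supplied on every request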
    uri = ("/{0}/testdata/metrics/{1}/{2}/summary?"
           "product=Firefox&pushlog_project={3}")

    uri = uri.format(
        ptm.project, setup_data['branch'], revision, plm.project
        )

    reference_response = client.get(uri)

    for params in parameters:

        if 'page_name' in params['query_params']:
            # page_name filtering is not exercised against the summary
            # endpoint
            continue

        uri_with_params = "{0}&{1}".format(
            uri, params['query_params']
            )
        fail_uri_with_params = "{0}&{1}".format(
            uri, params['fail_params']
            )

        success_response = client.get(uri_with_params)
        fail_response = client.get(fail_uri_with_params)

        # Compare key sets so the assertion does not depend on dict
        # ordering
        assert set(success_response.json.keys()) == \
            set(reference_response.json.keys())
        assert 'summary' not in fail_response.json