Example #1
def test_metrics_factory():

    # Build sample perftest data; it is not referenced again in this test.
    sample_data = TestData(perftest_data())

    metric_collection_data = get_metric_collection_data()

    mmf = MetricsMethodFactory(
        metric_collection_data['initialization_data']
        )

    m_one = mmf.get_metric_method(metric_collection_data['testsuite_name'])

    # Should have one cached metric method instance
    assert len(mmf.metric_method_instances) == 1

    # Retrieve the metric method again; there should still be one cached
    # metric method instance
    m_two = mmf.get_metric_method(metric_collection_data['testsuite_name'])

    assert len(mmf.metric_method_instances) == 1

    # get_metric_method should return a TtestMethod class instance for
    # the sample data
    tm = TtestMethod(metric_collection_data['initialization_data'])
    assert m_one.__class__.__name__ == tm.__class__.__name__
    assert m_two.__class__.__name__ == tm.__class__.__name__
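
The two assertions on len(mmf.metric_method_instances) only hold if the factory caches one metric method object per test suite and hands the same object back on repeated calls. The sketch below shows that caching pattern; it is an illustrative stand-in written for this page, not the project's actual MetricsMethodFactory or TtestMethod code, and it assumes the cache is keyed by the testsuite name passed to get_metric_method.

# Illustrative sketch only: names and internals are assumptions made to
# show the caching behavior the test above asserts on.
class TtestMethodStub(object):
    """Stand-in for the metric method class the factory returns."""

    def __init__(self, initialization_data):
        self.initialization_data = initialization_data


class CachingMetricsMethodFactory(object):
    """Builds at most one metric method object per test suite name."""

    def __init__(self, initialization_data):
        self.initialization_data = initialization_data
        # Cache of metric method instances, keyed (assumed) by suite name,
        # so a second lookup for the same suite reuses the first object.
        self.metric_method_instances = {}

    def get_metric_method(self, testsuite_name):
        if testsuite_name not in self.metric_method_instances:
            self.metric_method_instances[testsuite_name] = TtestMethodStub(
                self.initialization_data)
        return self.metric_method_instances[testsuite_name]

With a factory shaped like this, the first get_metric_method call populates the cache and the second call returns the same instance, which is what the two length checks above verify; the class name comparisons additionally pin the returned type to TtestMethod for the sample suite.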
Example #2
def _test_metric_evaluations(setup_data, mtm, target_pass_count):
    """
    The metrics data associated with the fail revision should evaluate
    to test failure. All other revisions should evaluate to test success.
    """

    fail_revision = setup_data['fail_revision']
    skip_revision = setup_data['skip_revision']

    metric_collection_data = get_metric_collection_data()
    mmf = MetricsMethodFactory(
        metric_collection_data['initialization_data']
        )

    mm = mmf.get_metric_method(setup_data['testsuite_name'])

    metric_fail_count = 0
    metric_pass_count = 0

    metric_summary_fail_count = 0
    metric_summary_pass_count = 0

    for revision in setup_data['sample_revisions']:

        metrics_data = mtm.get_metrics_data(revision)

        if revision == skip_revision:
            # We should have no data for the skip revision
            assert metrics_data == {}
            continue

        for key in metrics_data:

            for data in metrics_data[key]['values']:

                # Isolate the single metric value under evaluation.
                test_result = {data['metric_value_name']: data['value']}

                if data['metric_value_name'] == 'h0_rejected':
                    metric_evaluation = mm.evaluate_metric_result(
                        test_result
                        )
                    if metric_evaluation == False:
                        metric_fail_count += 1
                        # A failing evaluation should only come from the fail revision
                        assert revision == fail_revision
                    else:
                        metric_pass_count += 1
                        # Every other revision should evaluate to success
                        assert revision != fail_revision

                if data['metric_value_name'] == mm.SUMMARY_NAME:

                    summary_evaluation = mm.evaluate_metric_summary_result(
                        test_result
                        )

                    if summary_evaluation == False:
                        metric_summary_fail_count += 1
                        # A failing summary evaluation should only come from the fail revision
                        assert revision == fail_revision
                    else:
                        metric_summary_pass_count += 1
                        # Every other revision should evaluate to success
                        assert revision != fail_revision


    # The fail revision is expected to contribute exactly three failing
    # metric values; the assertions above ensure no other revision fails.
    target_fail_count = 3

    assert metric_fail_count == target_fail_count
    assert metric_pass_count == target_pass_count
    assert metric_summary_fail_count == target_fail_count
    assert metric_summary_pass_count == target_pass_count
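
In this helper a False return from evaluate_metric_result or evaluate_metric_summary_result counts as a failure, and the assertions require every failure to come from the fail revision. A minimal sketch of evaluation logic consistent with that convention follows; the h0_rejected interpretation and the SUMMARY_NAME value are assumptions made for illustration, not the project's actual metric method implementation.

# Illustrative sketch only: assumes a rejected null hypothesis (a truthy
# h0_rejected value) marks a regression, so evaluation returns False.
class SketchTtestMethod(object):

    # Assumed key for the suite level summary value; the test above reads
    # the real name from mm.SUMMARY_NAME.
    SUMMARY_NAME = 'summary_h0_rejected'

    def evaluate_metric_result(self, test_result):
        # Pass (True) when the null hypothesis was not rejected.
        return not test_result.get('h0_rejected', 0)

    def evaluate_metric_summary_result(self, test_result):
        # Same convention for the summary value.
        return not test_result.get(self.SUMMARY_NAME, 0)

Under a convention like this, only the metric values computed for the fail revision evaluate to failure, so metric_fail_count and metric_summary_fail_count each reach target_fail_count while every other revision feeds the pass counters.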