def run(ApplyVerdictToSummary, DynamicResultViewName, OperationType,
        ResultCount, MinResultCount, MaxResultCount, ReportGroup, DisplayName,
        PassedVerdictExplanation, FailedVerdictExplanation):
    """Evaluate a result-count verdict against a dynamic result view and
    submit the verdict plus drill-down data to the provider.

    Reads the module-global ``vdrvc_object`` (the dynamic result view
    handle; assumed to be set by the framework before this command runs --
    TODO confirm against caller).

    Returns False when any exception is raised during processing (after
    logging and submitting the error); otherwise returns None.
    """
    logger = PLLogger.GetLogger('methodology')
    global vdrvc_object
    try:
        drv = vdrvc_object
        # Subscribe to the view; if already subscribed (subscribe returns
        # False), refresh it instead so the data is current.
        if drv_utils.subscribe(drv) is False:
            drv_utils.refresh(drv)

        # Compare the view's actual ResultCount against the expected
        # ResultCount per OperationType; yields verdict + explanation text.
        # NOTE(review): Min/MaxResultCount parameters are not forwarded here
        # (literal 0s are passed) -- confirm whether BETWEEN is expected.
        verdict_data = pu.get_comparision_verdict_with_text(OperationType,
                                                            drv.Get('ResultCount'),
                                                            ResultCount,
                                                            0,
                                                            0,
                                                            'result count')

        # Collect column names and their display names for drill-down output.
        prq = drv.GetObject('PresentationResultQuery')
        col_names = prq.GetCollection('SelectProperties')
        col_display_names = drv_utils.get_column_display_names(drv, col_names)

        # Capture the currently-active group-by first (without re-grouping),
        # so the view's state can be restored afterwards.
        active_groupby = drv_utils.get_active_groupby(drv)
        active_view_data = drv_utils.get_drilldown_data(drv,
                                                        active_groupby,
                                                        col_names,
                                                        col_display_names,
                                                        False)

        # Build drill-down data for every group-by key; reuse the
        # already-fetched data for the active key instead of re-querying.
        group_order_list = drv_utils.get_export_groupby_ordered(col_names, False)
        viewdata = []
        for groupbyKey in reversed(group_order_list):
            if active_groupby == groupbyKey:
                viewdata.append(active_view_data)
            else:
                viewdata.append(drv_utils.get_drilldown_data(drv,
                                                             groupbyKey,
                                                             col_names,
                                                             col_display_names,
                                                             True))

        drilldown_data = drv_utils.get_formatted_drilldown_data(viewdata)
        provider_data = p.get_table_drv_drilldown_data(DynamicResultViewName,
                                                       verdict_data[pc.VERDICT],
                                                       verdict_data[pc.VERDICT_TEXT],
                                                       ApplyVerdictToSummary,
                                                       drilldown_data,
                                                       ReportGroup,
                                                       DisplayName,
                                                       PassedVerdictExplanation,
                                                       FailedVerdictExplanation)
        p.submit_provider_data(provider_data)
        # Revert the view to its original group-by and refresh so later
        # consumers see the pre-command configuration.
        drv_utils.set_groupby(drv, active_groupby)
        drv_utils.refresh(drv)

    except Exception as e:
        # `as` form is valid on Python 2.6+ and required on Python 3
        # (original used the legacy `except Exception, e:` spelling).
        stack_trace = traceback.format_exc()
        logger.LogError(stack_trace)
        p.submit_command_execution_error(DisplayName,
                                         str(e),
                                         stack_trace)
        return False
def test_get_comparision_verdict_with_text():
    """Verify verdict and explanation text for every comparison operation."""
    prop_name = 'row count'
    match_prefix = 'Actual row count matches expected row count. '
    mismatch_prefix = 'Actual row count does not match expected row count. '

    # Each case: (operation, actual, expected, min, max,
    #             expected verdict, expected text after the prefix)
    cases = [
        ('LESS_THAN', 10, 20, 0, 0, True,
         'Actual count: 10; expected count: less than 20.'),
        ('LESS_THAN', 22, 20, 0, 0, False,
         'Actual count: 22; expected count: less than 20.'),
        ('LESS_THAN_OR_EQUAL', 10, 10, 0, 0, True,
         'Actual count: 10; expected count: less than or equal to 10.'),
        ('GREATER_THAN', 10, 10, 0, 0, False,
         'Actual count: 10; expected count: greater than 10.'),
        ('GREATER_THAN_OR_EQUAL', 10, 10, 0, 0, True,
         'Actual count: 10; expected count: greater than or equal to 10.'),
        ('EQUAL', 10, 10, 0, 0, True,
         'Actual count: 10; expected count: equal to 10.'),
        ('NOT_EQUAL', 10, 10, 0, 0, False,
         'Actual count: 10; expected count: not equal to 10.'),
        # BETWEEN bounds are inclusive: 5 and 10 pass, 4 and 11 fail.
        ('BETWEEN', 5, 0, 5, 10, True,
         'Actual count: 5; expected count: between 5 and 10, inclusive.'),
        ('BETWEEN', 4, 0, 5, 10, False,
         'Actual count: 4; expected count: between 5 and 10, inclusive.'),
        ('BETWEEN', 10, 0, 5, 10, True,
         'Actual count: 10; expected count: between 5 and 10, inclusive.'),
        ('BETWEEN', 11, 0, 5, 10, False,
         'Actual count: 11; expected count: between 5 and 10, inclusive.'),
    ]

    for op, actual, expected, low, high, exp_verdict, detail in cases:
        data = utils.get_comparision_verdict_with_text(op, actual, expected,
                                                       low, high, prop_name)
        assert data[ProviderConst.VERDICT] is exp_verdict
        prefix = match_prefix if exp_verdict else mismatch_prefix
        assert data[ProviderConst.VERDICT_TEXT] == prefix + detail