def get_dbs_list(database):
    if database == "SUMMARY":
        return [get_active_results_db()]
    elif database == "ALL_ITERATION":
        return pu.get_db_files(get_active_results_db(), True)
    elif database == "LAST_ITERATION":
        return [pu.get_db_files(get_active_results_db(), False)[0]]
    else:
        raise RuntimeError('Invalid database selected: ' + str(database))
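# A minimal test sketch for get_dbs_list, assuming this module is importable
# as chart_script (a hypothetical name) so that get_active_results_db can be
# patched out -- which is exactly why it lives in its own function.
def test_get_dbs_list_sketch():
    import mock  # unittest.mock on Python 3
    import chart_script  # hypothetical module name for this file
    with mock.patch.object(chart_script, 'get_active_results_db',
                           return_value='/tmp/summary.db'):
        assert chart_script.get_dbs_list('SUMMARY') == ['/tmp/summary.db']
        try:
            chart_script.get_dbs_list('BOGUS')
            raise AssertionError('expected a RuntimeError')
        except RuntimeError:
            pass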
def create_error_chart():
    plLogger = PLLogger.GetLogger('Methodology')
    plLogger.LogDebug('AclBasicChartScript.create_error_chart()')

    # Use the summary database (the latter summary, with ACL enabled)
    db_list = [pu.get_active_result_db_filename()]

    queries = [('Out Of Seq', 'SELECT SUM(OutSeqFrameCount) FROM RxEotStreamResults'),
               ('Sequence Errors', 'SELECT SUM(DroppedFrameCount + ReorderedFrameCount + '
                'FcsErrorFrameCount + PrbsBitErrorCount + DuplicateFrameCount + '
                'LateFrameCount) FROM RxEotStreamResults'),
               ('CRC Errors', 'SELECT SUM(GeneratorCrcErrorFrameCount) FROM GeneratorPortResults'),
               ('Checksum Errors', 'SELECT SUM(GeneratorL3ChecksumErrorCount + '
                'GeneratorL4ChecksumErrorCount) FROM GeneratorPortResults'),
               ('Data Error', 'SELECT SUM(PrbsBitErrorCount) FROM AnalyzerPortResults')
               ]
    total_error_count = 0
    errors = []
    captions = []
    for caption, query in queries:
        # SUM() returns NULL (None) on an empty table; treat that as 0
        error_count = get_data_from_query(db_list, query)[0] or 0
        errors.append(error_count)
        captions.append(caption)
        total_error_count += error_count

    if total_error_count == 0:
        # Append a placeholder slice so the pie chart is not rendered empty
        errors.append(1)
        captions.append('No Errors')

    template_error_pie['series'][0]['data'] = list(zip(captions, errors))
    template_error_pie['xAxis']['categories'] = captions

    result_data = init_chart_data_dict("SUMMARY", template_error_pie)
    pdg.submit_provider_data(result_data)
    return ""
def test_get_db_files_single_true():
    result_file = os.path.join(os.getcwd(), TEST_DB_FILE)
    fail_message = ''
    try:
        utils.get_db_files(result_file, True)
    except Exception:
        exc_info = sys.exc_info()
        fail_list = traceback.format_exception_only(exc_info[0],
                                                    exc_info[1])
        if len(fail_list) == 1:
            fail_message = fail_list[0]
        else:
            fail_message = '\n'.join(fail_list)
    if 'ValueError' not in fail_message:
        raise AssertionError('Expected a ValueError but got: "' +
                             fail_message + '"')
def get_active_results_db():
    # In its own function to allow for easier unit testing using MagicMock
    return pu.get_active_result_db_filename()
def get_dbs(UseMultipleResultsDatabases, UseSummary):
    if UseSummary and not UseMultipleResultsDatabases:
        return [get_active_results_db()]
    return pu.get_db_files(get_active_results_db(), UseMultipleResultsDatabases)
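# get_active_results_db() is factored out precisely so tests can stub it with
# MagicMock. A minimal sketch, assuming this module imports as result_script
# (a hypothetical name):
def test_get_dbs_summary_sketch():
    from mock import MagicMock  # unittest.mock on Python 3
    import result_script  # hypothetical module name for this file
    result_script.get_active_results_db = MagicMock(
        return_value='/tmp/summary.db')
    # Summary mode without per-iteration databases returns only the active db
    assert result_script.get_dbs(False, True) == ['/tmp/summary.db']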
def run(ApplyVerdictToSummary, DynamicResultViewName, OperationType,
        ResultCount, MinResultCount, MaxResultCount, ReportGroup, DisplayName,
        PassedVerdictExplanation, FailedVerdictExplanation):
    logger = PLLogger.GetLogger('methodology')
    global vdrvc_object
    try:
        drv = vdrvc_object
        # subscribe or refresh
        if drv_utils.subscribe(drv) is False:
            drv_utils.refresh(drv)

        # get verdict and text
        verdict_data = pu.get_comparision_verdict_with_text(OperationType,
                                                            drv.Get('ResultCount'),
                                                            ResultCount,
                                                            0,
                                                            0,
                                                            'result count')

        # generate drill down data
        prq = drv.GetObject('PresentationResultQuery')
        col_names = prq.GetCollection('SelectProperties')
        col_display_names = drv_utils.get_column_display_names(drv, col_names)

        active_groupby = drv_utils.get_active_groupby(drv)
        active_view_data = drv_utils.get_drilldown_data(drv,
                                                        active_groupby,
                                                        col_names,
                                                        col_display_names,
                                                        False)

        group_order_list = drv_utils.get_export_groupby_ordered(col_names, False)
        viewdata = []
        for groupby_key in reversed(group_order_list):
            if active_groupby == groupby_key:
                viewdata.append(active_view_data)
            else:
                viewdata.append(drv_utils.get_drilldown_data(drv,
                                                             groupby_key,
                                                             col_names,
                                                             col_display_names,
                                                             True))

        drilldown_data = drv_utils.get_formatted_drilldown_data(viewdata)
        provider_data = p.get_table_drv_drilldown_data(DynamicResultViewName,
                                                       verdict_data[pc.VERDICT],
                                                       verdict_data[pc.VERDICT_TEXT],
                                                       ApplyVerdictToSummary,
                                                       drilldown_data,
                                                       ReportGroup,
                                                       DisplayName,
                                                       PassedVerdictExplanation,
                                                       FailedVerdictExplanation)
        p.submit_provider_data(provider_data)
        # revert drv config changes
        drv_utils.set_groupby(drv, active_groupby)
        drv_utils.refresh(drv)
        return True

    except Exception as e:
        stack_trace = traceback.format_exc()
        logger.LogError(stack_trace)
        p.submit_command_execution_error(DisplayName,
                                         str(e),
                                         stack_trace)
        return False
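# A failure-path sketch for run(), assuming this module imports as drv_command
# (a hypothetical name): force subscribe() to raise and verify that the error
# is reported through submit_command_execution_error and run() returns False.
def test_run_reports_errors_sketch():
    import mock
    import drv_command  # hypothetical module name for this file
    drv_command.vdrvc_object = mock.MagicMock()
    with mock.patch.object(drv_command, 'PLLogger', mock.MagicMock()), \
            mock.patch.object(drv_command.drv_utils, 'subscribe',
                              side_effect=RuntimeError('boom')), \
            mock.patch.object(drv_command.p,
                              'submit_command_execution_error') as submit:
        result = drv_command.run(True, 'MyDrv', 'EQUAL', 1, 0, 0,
                                 'MyGroup', 'MyCommand', 'pass', 'fail')
    assert result is False
    assert submit.called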
def test_get_db_files_multiple_true():
    result_file = os.path.join(os.getcwd(), TEST_MULTIPLE_DB_FILE)
    db_list = utils.get_db_files(result_file, True)
    assert len(db_list) == 2
    assert db_list[0] == os.path.join(os.getcwd(), os.path.normpath(TEST_MULTIPLE_DB_FILE_1))
    assert db_list[1] == os.path.join(os.getcwd(), os.path.normpath(TEST_MULTIPLE_DB_FILE_2))
def test_get_comparision_verdict_with_text():
    prop_name = 'row count'
    # 'LESS_THAN'
    data = utils.get_comparision_verdict_with_text('LESS_THAN', 10, 20, 0, 0, prop_name)
    assert data[ProviderConst.VERDICT] is True
    exp_str = 'Actual row count matches expected row count. Actual count: 10; ' + \
        'expected count: less than 20.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    data = utils.get_comparision_verdict_with_text('LESS_THAN', 22, 20, 0, 0, prop_name)
    assert data[ProviderConst.VERDICT] is False
    exp_str = 'Actual row count does not match expected row count. Actual count: 22; ' + \
        'expected count: less than 20.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    # 'LESS_THAN_OR_EQUAL'
    data = utils.get_comparision_verdict_with_text('LESS_THAN_OR_EQUAL', 10, 10, 0, 0, prop_name)
    assert data[ProviderConst.VERDICT] is True
    exp_str = 'Actual row count matches expected row count. Actual count: 10; ' + \
        'expected count: less than or equal to 10.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    # 'GREATER_THAN'
    data = utils.get_comparision_verdict_with_text('GREATER_THAN', 10, 10, 0, 0, prop_name)
    assert data[ProviderConst.VERDICT] is False
    exp_str = 'Actual row count does not match expected row count. Actual count: 10; ' + \
        'expected count: greater than 10.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    # 'GREATER_THAN_OR_EQUAL'
    data = utils.get_comparision_verdict_with_text('GREATER_THAN_OR_EQUAL',
                                                   10, 10, 0, 0, prop_name)
    assert data[ProviderConst.VERDICT] is True
    exp_str = 'Actual row count matches expected row count. Actual count: 10; ' + \
        'expected count: greater than or equal to 10.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    # 'EQUAL'
    data = utils.get_comparision_verdict_with_text('EQUAL', 10, 10, 0, 0, prop_name)
    assert data[ProviderConst.VERDICT] is True
    exp_str = 'Actual row count matches expected row count. Actual count: 10; ' + \
        'expected count: equal to 10.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    # 'NOT_EQUAL'
    data = utils.get_comparision_verdict_with_text('NOT_EQUAL', 10, 10, 0, 0, prop_name)
    assert data[ProviderConst.VERDICT] is False
    exp_str = 'Actual row count does not match expected row count. Actual count: 10; ' + \
        'expected count: not equal to 10.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    # 'BETWEEN'
    data = utils.get_comparision_verdict_with_text('BETWEEN', 5, 0, 5, 10, prop_name)
    assert data[ProviderConst.VERDICT] is True
    exp_str = 'Actual row count matches expected row count. Actual count: 5; ' + \
        'expected count: between 5 and 10, inclusive.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    data = utils.get_comparision_verdict_with_text('BETWEEN', 4, 0, 5, 10, prop_name)
    assert data[ProviderConst.VERDICT] is False
    exp_str = 'Actual row count does not match expected row count. Actual count: 4; ' + \
        'expected count: between 5 and 10, inclusive.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    data = utils.get_comparision_verdict_with_text('BETWEEN', 10, 0, 5, 10, prop_name)
    assert data[ProviderConst.VERDICT] is True
    exp_str = 'Actual row count matches expected row count. Actual count: 10; ' + \
        'expected count: between 5 and 10, inclusive.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str

    data = utils.get_comparision_verdict_with_text('BETWEEN', 11, 0, 5, 10, prop_name)
    assert data[ProviderConst.VERDICT] is False
    exp_str = 'Actual row count does not match expected row count. Actual count: 11; ' + \
        'expected count: between 5 and 10, inclusive.'
    assert data[ProviderConst.VERDICT_TEXT] == exp_str
def test_get_db_files_single_false():
    result_file = os.path.join(os.getcwd(), TEST_DB_FILE)
    db_list = utils.get_db_files(result_file, False)
    assert len(db_list) == 1
    assert db_list[0] == os.path.join(os.getcwd(), os.path.normpath(TEST_DB_FILE))