Example #1
def sensor_analysis_main(sensor_df, switch_params_aggregated_df,
                         report_columns_usage_dct, report_data_lst):
    """Main function to analyze zoning configuration"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['sensor_aggregated', 'Датчики']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if it was saved on a previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    sensor_aggregated_df, sensor_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'switch_params_aggregated', 'fabric_labels', 'sensor'
    ]

    # force run when any data from data_lst was not saved (file not found) or when
    # procedure execution is explicitly requested for the output data or for data used during function execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # current operation information string
        info = 'Generating sensor readings table'
        print(info, end=" ")

        # aggregated DataFrames
        sensor_aggregated_df = sensor_aggregation(sensor_df,
                                                  switch_params_aggregated_df)

        # after finish display status
        status_info('ok', max_title, len(info))

        # report tables
        sensor_report_df = sensor_report(sensor_aggregated_df, data_names,
                                         report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [sensor_aggregated_df, sensor_report_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)

    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        sensor_aggregated_df, sensor_report_df = verify_data(
            report_data_lst, data_names, *data_lst)

        data_lst = [sensor_aggregated_df, sensor_report_df]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return sensor_aggregated_df
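
# Usage sketch (an assumption, not from the source): the report_data_lst /
# report_steps_dct contract that every *_main function in these examples unpacks.
# The value layout (export flag, force-extract flag, name tag, step description)
# is inferred from the index accesses above; all concrete values below are
# hypothetical placeholders.
report_steps_dct = {
    # data_name: (export_to_excel_flag, force_extract_flag, name_tag, step_info)
    'sensor_aggregated': (1, 0, 'sensor', 'STEP: SENSOR READINGS ANALYSIS'),
}
report_data_lst = ['customer_name', 'dir_report', 'dir_data', 80, report_steps_dct]
*_, max_title, report_steps_dct = report_data_lst
print(report_steps_dct['sensor_aggregated'][3])  # service step information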
Example #2
def fabriclabels_main(switchshow_ports_df, switch_params_df, fabricshow_df,
                      ag_principal_df, report_data_lst):
    """Function to set Fabric labels"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    customer_name, report_path, _, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['fabric_labels']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if it was saved on a previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    fabricshow_ag_labels_df, = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = []
    # force run when any data from data_lst was not saved (file not found) or when
    # procedure execution is explicitly requested for the output data or for data used during function execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)

    if force_run:
        print('\nSETTING UP FABRIC NAMES AND LABELS ...\n')

        fabricshow_summary_df = auto_fabrics_labeling(switchshow_ports_df,
                                                      switch_params_df,
                                                      fabricshow_df,
                                                      report_data_lst)

        # display automatic fabric labeling
        info_labels = [
            'Fabric_name', 'Fabric_label', 'chassis_name',
            'Principal_switch_name', 'Fabric_ID', 'FC_Route', 'Total_switch',
            'Domain_IDs', 'Switch_names', 'Device_ports', 'Online_ports',
            'LS_type', 'Fabric_Name'
        ]
        # service file name for detailed information
        current_date = str(date.today())
        file_name = customer_name + '_' + report_steps_dct[
            'fabricshow_summary'][2] + '_' + current_date + '.xlsx'
        # file_name = customer_name + '_analysis_report_' + current_date + '.xlsx'
        print('\nAutomatic fabric labeling\n')
        # set option to show all columns
        with pd.option_context('display.max_columns', None,
                               'display.expand_frame_repr', False):
            # pd.set_option('max_columns', None)
            # pd.set_option('expand_frame_repr', False)
            print(fabricshow_summary_df.loc[:, info_labels])
        print(
            f"\nFor detailed switch port type and number statistics in each fabric, check the 'fabricshow_statistics' sheet of the '{file_name}' file in"
        )
        print(f'{report_path} directory')
        print('ATTN! CLOSE file after check\n')

        # ask user if automatic Fabric labeling needs to be corrected
        query = 'Do you want to change Fabrics Names or Labels? (y)es/(n)o: '
        reply = reply_request(query)
        if reply == 'y':
            # saving DataFrame to Excel to check during manual labeling if required
            save_xlsx_file(fabricshow_summary_df,
                           'fabricshow_summary',
                           report_data_lst,
                           force_flag=True)
            fabricshow_summary_df = manual_fabrics_labeling(
                fabricshow_summary_df, info_labels)

        # take all switches working in Native mode and all AG switches,
        # merge them into one DataFrame and identify which Fabrics they belong to using the fabricshow_summary DataFrame
        fabricshow_ag_labels_df = native_ag_labeling(fabricshow_df,
                                                     ag_principal_df,
                                                     fabricshow_summary_df)

        # # disable option to show all columns
        # pd.reset_option('max_columns')
        # pd.reset_option('expand_frame_repr')

        # create list with partitioned DataFrames
        data_lst = [fabricshow_ag_labels_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        fabricshow_ag_labels_df = verify_data(report_data_lst, data_names,
                                              *data_lst)
        data_lst = [fabricshow_ag_labels_df]
    # save data to excel file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return fabricshow_ag_labels_df
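
# Side note (illustrative, self-contained): pd.option_context, used above to
# print the fabricshow summary, widens the pandas display limits only inside
# the with-block and restores the defaults afterwards, so no pd.reset_option
# calls are needed. The toy data below is hypothetical.
import pandas as pd

toy_df = pd.DataFrame({f'col_{i}': range(3) for i in range(30)})
with pd.option_context('display.max_columns', None,
                       'display.expand_frame_repr', False):
    print(toy_df)  # all 30 columns are shown inside the context
print(toy_df)  # default truncated view is restored outside the context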
Example #3
def portinfo_extract(switch_params_lst, report_data_lst):
    """Function to extract switch port information"""
    
    # report_data_lst contains information: 
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['sfpshow', 'portcfgshow']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if it was saved on a previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    sfpshow_lst, portcfgshow_lst = data_lst

    # re-extract data from configuration files if any data from data_lst was not saved
    # (file not found) or the force extract flag is on
    force_run = verify_force_run(data_names, data_lst, report_steps_dct, max_title)

    # # data force extract check 
    # # list of keys for each data from data_lst representing if it is required 
    # # to re-collect or re-analyze data even they were obtained on previous iterations
    # force_extract_keys_lst = [report_steps_dct[data_name][1] for data_name in data_names]
    # # print data which were loaded but for which force extract flag is on
    # force_extract_check(data_names, data_lst, force_extract_keys_lst, max_title)
    
    # # when any of data_lst was not saved or 
    # # force extract flag is on then re-extract data from configuration files
    # if not all(data_lst) or any(force_extract_keys_lst):
    if force_run:    
        print('\nEXTRACTING SWITCH PORTS SFP, PORTCFG INFORMATION FROM SUPPORTSHOW CONFIGURATION FILES ...\n')   
        
        # extract chassis parameters names from init file
        switch_columns = columns_import('switch', max_title, 'columns')
        # number of switches to check
        switch_num = len(switch_params_lst)   
     
        # data imported from init file to extract values from config file
        params, params_add, comp_keys, match_keys, comp_dct = data_extract_objects('portinfo', max_title)
        portcfg_params = columns_import('portinfo', max_title, 'portcfg_params')
        # dictionary to save portcfg ALL information for all ports in fabric
        portcfgshow_dct = dict((key, []) for key in portcfg_params)
        # list to store only REQUIRED switch parameters
        # collecting sfpshow data for all switch ports during the loop
        sfpshow_lst = []
        # list to save portcfg information for all ports in fabric
        portcfgshow_lst = []

        
        # switch_params_lst [[switch_params_sw1], [switch_params_sw2]]
        # checking each switch for switch level parameters
        for i, switch_params_data in enumerate(switch_params_lst):       

            # data unpacking from iter param
            # dictionary with parameters for the current switch
            switch_params_data_dct = dict(zip(switch_columns, switch_params_data))
            switch_info_keys = ['configname', 'chassis_name', 'chassis_wwn', 'switch_index', 
                                'SwitchName', 'switchWwn']
            switch_info_lst = [switch_params_data_dct.get(key) for key in switch_info_keys]
            ls_mode_on = switch_params_data_dct['LS_mode'] == 'ON'
            
            sshow_file, _, _, switch_index, switch_name, *_ = switch_info_lst
            
            # current operation information string
            info = f'[{i+1} of {switch_num}]: {switch_name} ports sfp and cfg'
            print(info, end=" ")
            # search control dictionary. continue to check sshow_file until all parameter groups are found
            collected = {'sfpshow': False, 'portcfgshow': False}
            with open(sshow_file, encoding='utf-8', errors='ignore') as file:
                # check file until all groups of parameters extracted
                while not all(collected.values()):
                    line = file.readline()                        
                    if not line:
                        break
                    # sfpshow section start
                    if re.search(r'^(SWITCHCMD )?(/fabos/cliexec/)?sfpshow +-all *: *$', line) and not collected['sfpshow']:
                        collected['sfpshow'] = True
                        if ls_mode_on:
                            while not re.search(fr'^CURRENT CONTEXT -- {switch_index} *, \d+$',line):
                                line = file.readline()
                                if not line:
                                    break
                        while not re.search(r'^(real [\w.]+)|(\*\* SS CMD END \*\*)$',line):
                            line = file.readline()
                            match_dct = {match_key: comp_dct[comp_key].match(line) for comp_key, match_key in zip(comp_keys, match_keys)}
                            if match_dct[match_keys[0]]:
                                # dictionary to store all DISCOVERED switch ports information
                                # collecting data only for the logical switch in current loop
                                sfpshow_dct = {}
                                _, slot_num, port_num = line_to_list(comp_dct[comp_keys[0]], line)
                                # if switch has no slots then all ports have slot 0
                                slot_num = '0' if not slot_num else slot_num
                                while not re.match(r'\r?\n', line):
                                    line = file.readline()
                                    match_dct = {match_key: comp_dct[comp_key].match(line) for comp_key, match_key in zip(comp_keys, match_keys)}
                                    # power_match
                                    if match_dct[match_keys[1]]:
                                        sfp_power_lst = line_to_list(comp_dct[comp_keys[1]], line)
                                        # cut off RX or TX Power
                                        sfp_power_value_unit = sfp_power_lst[1:]
                                        for v, k in zip(sfp_power_value_unit[::2], sfp_power_value_unit[1::2]):
                                            if k == 'uWatts':
                                                k = 'uW'
                                            key = sfp_power_lst[0] + '_' + k
                                            sfpshow_dct[key] = v
                                    # transceiver_match
                                    elif match_dct[match_keys[2]]:
                                        sfpshow_dct[match_dct[match_keys[2]].group(1).rstrip()] = match_dct[match_keys[2]].group(2).rstrip()
                                    # no_sfp_match
                                    elif match_dct[match_keys[3]]:
                                        sfpshow_dct['Vendor Name'] = 'No SFP module'
                                    # not_available_match
                                    elif match_dct[match_keys[4]]:
                                        sfpshow_dct[match_dct[match_keys[4]].group(1).rstrip()] = match_dct[match_keys[4]].group(2).rstrip()
                                    # sfp_info_match
                                    elif match_dct[match_keys[5]]:
                                        sfpshow_dct[match_dct[match_keys[5]].group(1).rstrip()] = match_dct[match_keys[5]].group(2).rstrip()                                        
                                    if not line:
                                        break
                                    
                                # additional values which need to be added to the dictionary with all DISCOVERED parameters during the current loop iteration
                                # values extracted in manual mode. if the values order changes, change the keys order in the init.xlsx "chassis_params_add" column
                                sfpshow_port_values = [*switch_info_lst, slot_num, port_num]                                       
                                # adding additional parameters and values to the sfpshow_dct
                                update_dct(params_add, sfpshow_port_values, sfpshow_dct)               
                                # appending list with only REQUIRED port info for the current loop iteration to the list with all fabrics port info
                                sfpshow_lst.append([sfpshow_dct.get(param, None) for param in params])
                    # sfpshow section end
                    # portcfgshow section start
                    if re.search(r'^(SWITCHCMD )?(/fabos/cliexec/)?portcfgshow *: *$', line) and not collected['portcfgshow']:
                        collected['portcfgshow'] = True
                        if ls_mode_on:
                            while not re.search(fr'^CURRENT CONTEXT -- {switch_index} *, \d+$',line):
                                line = file.readline()
                                if not line:
                                    break
                        while not re.search(r'^(real [\w.]+)|(\*\* SS CMD END \*\*)$|No ports found in switch',line):
                            line = file.readline()
                            match_dct = {match_key: comp_dct[comp_key].match(line) for comp_key, match_key in zip(comp_keys, match_keys)}
                            # 'slot_port_line_match'
                            if match_dct[match_keys[6]]:
                                # dictionary to store all DISCOVERED switch ports information
                                portcfgshow_tmp_dct = {}
                                # extract slot and port numbers
                                slot_num, port_nums_str = line_to_list(comp_dct[comp_keys[6]], line)
                                port_nums_lst = port_nums_str.split()
                                port_nums = len(port_nums_lst)
                                # list with switch and slot information
                                switch_info_slot_lst = switch_info_lst.copy()
                                switch_info_slot_lst.append(slot_num)
                                # adding switch and slot information for each port to dictionary
                                for portcfg_param, switch_info_value in zip(portcfg_params[:7], switch_info_slot_lst):
                                    portcfgshow_tmp_dct[portcfg_param] = [switch_info_value for i in range(port_nums)]
                                # adding port numbers to dictionary    
                                portcfgshow_tmp_dct[portcfg_params[7]] = port_nums_lst                                
                                while not re.match(r'\r?\n', line):
                                    line = file.readline()
                                    match_dct = {match_key: comp_dct[comp_key].match(line) for comp_key, match_key in zip(comp_keys, match_keys)}
                                    # portcfg_match
                                    if match_dct[match_keys[7]]:
                                        # extract param name and values for each port and adding to dictionary
                                        param_name, param_values_str = line_to_list(comp_dct[comp_keys[7]], line)
                                        portcfgshow_tmp_dct[param_name] = param_values_str.split()
                                    if not line:
                                        break
                                # saving portcfg information of REQUIRED parameters from dictionary with DISCOVERED parameters
                                for portcfg_param in portcfg_params:
                                    portcfgshow_dct[portcfg_param].extend(portcfgshow_tmp_dct.get(portcfg_param, [None for i in range(port_nums)]))              
                    # portcfgshow section end
                     
            status_info('ok', max_title, len(info))

        # after checking all config files, create a list of lists from the dictionary. each nested list contains portcfg information for one port
        for portcfg_param in portcfg_params:
            portcfgshow_lst.append(portcfgshow_dct.get(portcfg_param))            
        portcfgshow_lst = list(zip(*portcfgshow_lst))
        
        # save extracted data to json file
        save_data(report_data_lst, data_names, sfpshow_lst, portcfgshow_lst)
    # verify if loaded data is empty after first iteration and replace information string with empty list
    else:
        sfpshow_lst, portcfgshow_lst = verify_data(report_data_lst, data_names, *data_lst)

    return sfpshow_lst, portcfgshow_lst
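
# The extraction loop above follows a recurring pattern: read the supportshow
# file line by line, flip a flag in the `collected` control dictionary when a
# section header matches, then consume lines until the section terminator.
# A minimal self-contained sketch of that pattern on an in-memory file
# (the sample text and markers are illustrative only):
import io
import re

text = io.StringIO('noise\nsfpshow -all:\nport 0 data\n** SS CMD END **\n')
collected = {'sfpshow': False}
section_lines = []
while not all(collected.values()):
    line = text.readline()
    if not line:
        break
    if re.search(r'^sfpshow +-all *: *$', line.rstrip()) and not collected['sfpshow']:
        collected['sfpshow'] = True
        # consume the section body until the end marker or EOF
        while not re.search(r'\*\* SS CMD END \*\*', line):
            line = text.readline()
            if not line:
                break
            section_lines.append(line)
print(section_lines)  # section body lines, end marker included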
Example #4
def switch_params_analysis_main(fabricshow_ag_labels_df, chassis_params_df,
                                switch_params_df, maps_params_df,
                                blade_module_loc_df, ag_principal_df,
                                report_data_lst):
    """Main function to create aggregated switch parameters table and report tables"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = [
        'report_columns_usage', 'switch_params_aggregated', 'Коммутаторы',
        'Фабрика', 'Параметры_коммутаторов', 'Лицензии',
        'Глобальные_параметры_фабрики'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if it was saved on a previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    report_columns_usage_dct, switch_params_aggregated_df, switches_report_df, fabric_report_df, \
        switches_parameters_report_df, licenses_report_df, global_fabric_parameters_report_df  = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'chassis_parameters', 'switch_parameters', 'switchshow_ports',
        'maps_parameters', 'blade_interconnect', 'fabric_labels'
    ]

    # clean fabricshow DataFrame from unnecessary data
    fabric_clean_df = fabric_clean(fabricshow_ag_labels_df)
    # force run when any data from data_lst was not saved (file not found) or when
    # procedure execution is explicitly requested for the output data or for data used during function execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:

        # import data with switch models, firmware, etc.
        switch_models_df = dataframe_import('switch_models', max_title)

        # current operation information string
        info = 'Generating aggregated switch parameters table'
        print(info, end=" ")

        # create aggregated table by joining DataFrames
        switch_params_aggregated_df, report_columns_usage_dct = \
            fabric_aggregation(fabric_clean_df, chassis_params_df, \
                switch_params_df, maps_params_df, switch_models_df, ag_principal_df)
        # add 'Device_Location' for Blade chassis switches
        switch_params_aggregated_df = fill_device_location(
            switch_params_aggregated_df, blade_module_loc_df)

        # after finish display status
        status_info('ok', max_title, len(info))

        # check if switch config files missing
        mask_fabric = switch_params_aggregated_df[[
            'Fabric_name', 'Fabric_label'
        ]].notna().all(axis=1)
        mask_no_config = switch_params_aggregated_df['chassis_name'].isna()
        missing_configs_num = switch_params_aggregated_df.loc[mask_no_config][
            'Fabric_name'].count()
        if missing_configs_num:
            info = f'{missing_configs_num} switch configuration{"s" if missing_configs_num > 1 else ""} MISSING'
            print(info, end=" ")
            status_info('warning', max_title, len(info))

        switches_report_df, fabric_report_df, switches_parameters_report_df, \
            licenses_report_df, global_fabric_parameters_report_df = \
                switchs_params_report(switch_params_aggregated_df, data_names, report_columns_usage_dct, max_title)

        # # partition aggregated DataFrame to required tables
        # switches_report_df, fabric_report_df,  \
        #     switches_parameters_report_df, licenses_report_df = \
        #         dataframe_segmentation(switch_params_aggregated_df, data_names[2:-1], \
        #             report_columns_usage_dct, max_title)

        # # global parameters are equal for all switches in one fabric thus checking Principal switches only
        # mask_principal = switch_params_aggregated_df['switchRole'] == 'Principal'
        # switch_params_principal_df = switch_params_aggregated_df.loc[mask_principal].copy()
        # global_fabric_parameters_report_df, = dataframe_segmentation(switch_params_principal_df, data_names[-1], \
        #             report_columns_usage_dct, max_title)

        # # drop rows with empty switch names columns
        # fabric_report_df.dropna(subset = ['Имя коммутатора'], inplace = True)
        # switches_parameters_report_df.dropna(subset = ['Имя коммутатора'], inplace = True)
        # licenses_report_df.dropna(subset = ['Имя коммутатора'], inplace = True)

        # # drop fabric_id if all have same value
        # if fabric_report_df['Fabric ID'].dropna().nunique() == 1:
        #     fabric_report_df.drop(columns=['Fabric ID'], inplace=True)

        # # TO_REMOVE No need to drop duplicates coz Principal switches only used before
        # # # parameters are equal for all switches in one fabric
        # # if report_columns_usage_dct['fabric_name_usage']:
        # #     global_fabric_parameters_report_df.drop_duplicates(subset=['Фабрика', 'Подсеть'], inplace=True)
        # # else:
        # #     global_fabric_parameters_report_df.drop_duplicates(subset=['Подсеть'], inplace=True)

        # global_fabric_parameters_report_df.reset_index(inplace=True, drop=True)

        # create list with partitioned DataFrames
        data_lst = [
            report_columns_usage_dct, switch_params_aggregated_df,
            switches_report_df, fabric_report_df,
            switches_parameters_report_df, licenses_report_df,
            global_fabric_parameters_report_df
        ]

        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        report_columns_usage_dct, switch_params_aggregated_df, switches_report_df, fabric_report_df,  \
            switches_parameters_report_df, licenses_report_df, global_fabric_parameters_report_df = verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [
            report_columns_usage_dct, switch_params_aggregated_df,
            switches_report_df, fabric_report_df,
            switches_parameters_report_df, licenses_report_df,
            global_fabric_parameters_report_df
        ]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names[1:], data_lst[1:]):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return report_columns_usage_dct, switch_params_aggregated_df, fabric_clean_df
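
# The missing-config check above uses a standard pandas idiom: build a boolean
# mask with isna()/notna() and count the rows it selects. A minimal
# self-contained illustration with hypothetical switch rows:
import pandas as pd

toy_df = pd.DataFrame({'Fabric_name': ['FAB_A', 'FAB_A', 'FAB_B'],
                       'chassis_name': ['ch1', None, 'ch3']})
mask_no_config = toy_df['chassis_name'].isna()
missing_configs_num = toy_df.loc[mask_no_config, 'Fabric_name'].count()
print(missing_configs_num)  # 1 -> switch present in fabricshow but config file missing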
Example #5
def errdump_main(errdump_df, switchshow_df, switch_params_aggregated_df,
                 portshow_aggregated_df, report_columns_usage_dct,
                 report_data_lst):
    """Main function to get most frequently appeared log messages"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['errdump_aggregated', 'raslog_counter', 'Журнал']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if it was saved on previous program execution iterations
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    errdump_aggregated_df, raslog_counter_df, raslog_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'chassis_parameters', 'switch_parameters', 'switchshow_ports',
        'maps_parameters', 'portshow_aggregated', 'fabric_labels'
    ]

    # force run when any data from data_lst was not saved (file not found) or when
    # procedure execution is explicitly requested for the output data or for data used during function execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # data imported from init file (regular expression patterns) to extract values from data columns
        # re_pattern list contains comp_keys, match_keys, comp_dct
        _, _, *re_pattern_lst = data_extract_objects('raslog', max_title)

        # current operation information string
        info = 'Counting RASLog messages'
        print(info, end=" ")

        # get aggregated DataFrames
        errdump_aggregated_df, raslog_counter_df, raslog_frequent_df = \
            errdump_aggregated(errdump_df, switchshow_df, switch_params_aggregated_df, portshow_aggregated_df, re_pattern_lst)
        # after finish display status
        status_info('ok', max_title, len(info))

        # partition aggregated DataFrame to required tables
        raslog_report_df = raslog_report(raslog_frequent_df, data_names,
                                         report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [errdump_aggregated_df, raslog_counter_df, raslog_report_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        errdump_aggregated_df, raslog_counter_df, raslog_report_df = \
            verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [errdump_aggregated_df, raslog_counter_df, raslog_report_df]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return errdump_aggregated_df, raslog_counter_df
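
# errdump_aggregated and raslog_counter internals are not shown in this example;
# counting the most frequent log messages in pandas typically reduces to a
# groupby/size aggregation. A minimal self-contained sketch of that idea
# (the column names here are hypothetical, not the module's actual schema):
import pandas as pd

toy_log_df = pd.DataFrame({'Fabric_name': ['FAB_A', 'FAB_A', 'FAB_A'],
                           'Message_ID': ['AN-1010', 'AN-1010', 'C2-5825']})
raslog_counter_toy_df = (toy_log_df.groupby(['Fabric_name', 'Message_ID'])
                         .size().reset_index(name='Quantity')
                         .sort_values(by='Quantity', ascending=False))
print(raslog_counter_toy_df)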
Example #6
def err_sfp_cfg_analysis_main(portshow_aggregated_df, sfpshow_df,
                              portcfgshow_df, report_columns_usage_dct,
                              report_data_lst):
    """Main function to add porterr, transceiver and portcfg information to portshow DataFrame"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst
    portshow_sfp_force_flag = False
    portshow_sfp_export_flag, *_ = report_steps_dct['portshow_sfp_aggregated']

    # names to save data obtained after current module execution
    data_names = [
        'portshow_sfp_aggregated', 'Ошибки', 'Параметры_SFP',
        'Параметры_портов'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if it was saved on a previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    portshow_sfp_aggregated_df, error_report_df, sfp_report_df, portcfg_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'portshow_aggregated', 'sfpshow', 'portcfgshow', 'portcmd',
        'switchshow_ports', 'switch_params_aggregated', 'fdmi',
        'device_rename', 'report_columns_usage_upd', 'nscamshow', 'nsshow',
        'alias', 'blade_servers', 'fabric_labels'
    ]

    # force run when any data from data_lst was not saved (file not found) or when
    # procedure execution is explicitly requested for the output data or for data used during function execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)

    if force_run:
        # import transceiver information from file
        sfp_model_df = dataframe_import('sfp_models', max_title)
        # current operation information string
        info = 'Updating connected devices table'
        print(info, end=" ")
        # add sfpshow, transceiver information and portcfg to aggregated portcmd DataFrame
        portshow_sfp_aggregated_df = port_complete(portshow_aggregated_df,
                                                   sfpshow_df, sfp_model_df,
                                                   portcfgshow_df)
        # after finish display status
        status_info('ok', max_title, len(info))

        # warning if UNKNOWN SFP present
        if (portshow_sfp_aggregated_df['Transceiver_Supported'] ==
                'Unknown SFP').any():
            info_columns = [
                'Fabric_name', 'Fabric_label', 'configname', 'chassis_name',
                'chassis_wwn', 'slot', 'port', 'Transceiver_Supported'
            ]
            portshow_sfp_info_df = portshow_sfp_aggregated_df.drop_duplicates(
                subset=info_columns).copy()
            unknown_count = len(portshow_sfp_info_df[
                portshow_sfp_info_df['Transceiver_Supported'] ==
                'Unknown SFP'])
            info = f'{unknown_count} {"port" if unknown_count == 1 else "ports"} with UNKNOWN supported SFP tag found'
            print(info, end=" ")
            status_info('warning', max_title, len(info))
            # ask whether to save portshow_sfp_aggregated_df
            if not portshow_sfp_export_flag:
                reply = reply_request(
                    "Do you want to save 'portshow_sfp_aggregated'? (y)es/(n)o: "
                )
                if reply == 'y':
                    portshow_sfp_force_flag = True

        # create report tables from the portshow_sfp_aggregated_df DataFrame
        error_report_df, sfp_report_df, portcfg_report_df = \
            create_report_tables(portshow_sfp_aggregated_df, data_names[1:], report_columns_usage_dct, max_title)
        # create list with partitioned DataFrames
        data_lst = [
            portshow_sfp_aggregated_df, error_report_df, sfp_report_df,
            portcfg_report_df
        ]
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and reset DataFrame if yes
    else:
        portshow_sfp_aggregated_df, error_report_df, sfp_report_df, portcfg_report_df \
            = verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [
            portshow_sfp_aggregated_df, error_report_df, sfp_report_df,
            portcfg_report_df
        ]
    # save data to excel file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        force_flag = False
        if data_name == 'portshow_sfp_aggregated':
            force_flag = portshow_sfp_force_flag
        save_xlsx_file(data_frame,
                       data_name,
                       report_data_lst,
                       force_flag=force_flag)

    return portshow_sfp_aggregated_df
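
# A minimal self-contained sketch (stub names, not the source helpers) of the
# forced-export dispatch used in the save loop above: one table is flagged for
# unconditional saving after user confirmation, all others honour the regular
# export settings.
def save_xlsx_stub(data_name, force_flag):
    # stand-in for save_xlsx_file(data_frame, data_name, report_data_lst, force_flag=...)
    print(f'{data_name}: force_flag={force_flag}')

portshow_sfp_force_flag = True  # e.g. set after the user replied 'y'
for data_name in ['portshow_sfp_aggregated', 'Ошибки', 'Параметры_SFP']:
    force_flag = (data_name == 'portshow_sfp_aggregated') and portshow_sfp_force_flag
    save_xlsx_stub(data_name, force_flag=force_flag)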
Example #7
def portcmd_analysis_main(portshow_df, switchshow_ports_df, switch_params_df,
                          switch_params_aggregated_df, isl_aggregated_df,
                          nsshow_df, nscamshow_df, ag_principal_df,
                          porttrunkarea_df, alias_df, fdmi_df, blade_module_df,
                          blade_servers_df, blade_vc_df, synergy_module_df,
                          synergy_servers_df, system_3par_df, port_3par_df,
                          report_columns_usage_dct, report_data_lst):
    """Main function to add connected devices information to portshow DataFrame"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = [
        'portshow_aggregated', 'storage_connection_statistics',
        'device_connection_statistics', 'device_rename',
        'report_columns_usage_upd', 'Серверы', 'Массивы', 'Библиотеки',
        'Микрокоды_HBA', 'Подключение_массивов', 'Подключение_библиотек',
        'Подключение_серверов', 'NPIV', 'Статистика_массивов',
        'Статистика_устройств'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    report_columns_usage_bckp = report_columns_usage_dct

    # load data if it was saved on a previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # flag to forcible save portshow_aggregated_df if required
    portshow_force_flag = False
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    portshow_aggregated_df, storage_connection_statistics_df, device_connection_statistics_df, \
        device_rename_df, report_columns_usage_dct, \
            servers_report_df, storage_report_df, library_report_df, hba_report_df, \
                storage_connection_df,  library_connection_df, server_connection_df, npiv_report_df, \
                    storage_connection_statistics_report_df, device_connection_statistics_report_df = data_lst
    nsshow_unsplit_df = pd.DataFrame()

    if not report_columns_usage_dct:
        report_columns_usage_dct = report_columns_usage_bckp

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'portcmd', 'switchshow_ports', 'switch_params_aggregated',
        'switch_parameters', 'chassis_parameters', 'fdmi', 'nscamshow',
        'nsshow', 'alias', 'blade_servers', 'fabric_labels', 'isl', 'trunk',
        'isl_aggregated', 'Параметры_SFP', 'portshow_sfp_aggregated'
    ]

    # force run when any data from data_lst was not saved (file not found) or when
    # procedure execution is explicitly requested for the output data or for data used during function execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # import data with switch models, firmware, etc.
        switch_models_df = dataframe_import('switch_models', max_title)
        # data imported from init file (regular expression patterns) to extract values from data columns
        # re_pattern list contains comp_keys, match_keys, comp_dct
        _, _, *re_pattern_lst = data_extract_objects('nameserver', max_title)

        oui_df = dataframe_import('oui',
                                  max_title,
                                  columns=['Connected_oui', 'type', 'subtype'])

        # current operation information string
        info = 'Generating connected devices table'
        print(info, end=" ")


        portshow_aggregated_df, alias_wwnn_wwnp_df, nsshow_unsplit_df, expected_ag_links_df = \
            portshow_aggregated(portshow_df, switchshow_ports_df, switch_params_df,
                                switch_params_aggregated_df, isl_aggregated_df, nsshow_df,
                                nscamshow_df, ag_principal_df, porttrunkarea_df, switch_models_df, alias_df,
                                oui_df, fdmi_df, blade_module_df,  blade_servers_df, blade_vc_df,
                                synergy_module_df, synergy_servers_df, system_3par_df, port_3par_df,
                                re_pattern_lst, report_data_lst)

        # after finish display status
        status_info('ok', max_title, len(info))
        # show warning if any UNKNOWN device class is found, if any PortSymb or NodeSymb is not parsed,
        # or if a new switch is found
        portshow_force_flag, nsshow_unsplit_force_flag, expected_ag_links_force_flag = \
            warning_notification(portshow_aggregated_df, switch_params_aggregated_df,
            nsshow_unsplit_df, expected_ag_links_df, report_data_lst)
        # correct device names manually
        portshow_aggregated_df, device_rename_df = \
            devicename_correction_main(portshow_aggregated_df, device_rename_df,
                                        report_columns_usage_dct, report_data_lst)
        # count Device_Host_Name instances for fabric_label, label and total in fabric
        portshow_aggregated_df = device_ports_per_group(portshow_aggregated_df)

        # count device connection statistics
        info = 'Counting device connection statistics'
        print(info, end=" ")
        storage_connection_statistics_df = storage_connection_statistics(
            portshow_aggregated_df, re_pattern_lst)
        device_connection_statistics_df = device_connection_statistics(
            portshow_aggregated_df)
        status_info('ok', max_title, len(info))

        servers_report_df, storage_report_df, library_report_df, hba_report_df, \
            storage_connection_df,  library_connection_df, server_connection_df, npiv_report_df, \
                storage_connection_statistics_report_df, device_connection_statistics_report_df  = \
                    create_report_tables(portshow_aggregated_df, storage_connection_statistics_df,
                                            device_connection_statistics_df, data_names[5:-2],
                                            report_columns_usage_dct, max_title)
        # create list with partitioned DataFrames
        data_lst = [
            portshow_aggregated_df, storage_connection_statistics_df,
            device_connection_statistics_df, device_rename_df,
            report_columns_usage_dct, servers_report_df, storage_report_df,
            library_report_df, hba_report_df, storage_connection_df,
            library_connection_df, server_connection_df, npiv_report_df,
            storage_connection_statistics_report_df,
            device_connection_statistics_report_df
        ]

        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
        save_xlsx_file(nsshow_unsplit_df,
                       'nsshow_unsplit',
                       report_data_lst,
                       force_flag=nsshow_unsplit_force_flag)
        save_xlsx_file(expected_ag_links_df,
                       'expected_ag_links',
                       report_data_lst,
                       force_flag=expected_ag_links_force_flag)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        portshow_aggregated_df, storage_connection_statistics_df, device_connection_statistics_df, \
            device_rename_df, report_columns_usage_dct, \
                servers_report_df, storage_report_df, library_report_df, hba_report_df, \
                    storage_connection_df, library_connection_df, server_connection_df, npiv_report_df, \
                        storage_connection_statistics_report_df, device_connection_statistics_report_df \
                            = verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [
            portshow_aggregated_df, storage_connection_statistics_df,
            device_connection_statistics_df, device_rename_df,
            report_columns_usage_dct, servers_report_df, storage_report_df,
            library_report_df, hba_report_df, storage_connection_df,
            library_connection_df, server_connection_df, npiv_report_df,
            storage_connection_statistics_report_df,
            device_connection_statistics_report_df
        ]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        force_flag = False
        if data_name == 'portshow_aggregated':
            force_flag = portshow_force_flag
        save_xlsx_file(data_frame,
                       data_name,
                       report_data_lst,
                       force_flag=force_flag)
    return portshow_aggregated_df
Example #8
def zoning_analysis_main(switch_params_aggregated_df, portshow_aggregated_df,
                         cfg_df, zone_df, alias_df, cfg_effective_df,
                         fcrfabric_df, lsan_df, peerzone_df,
                         report_columns_usage_dct, report_data_lst):
    """Main function to analyze zoning configuration"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = [
        'zoning_aggregated', 'alias_aggregated', 'zonemember_statistics',
        'portshow_zoned_aggregated', 'alias_statistics',
        'effective_cfg_statistics', 'Зонирование', 'Псевдонимы',
        'Зонирование_A&B', 'Порты_не_в_зонах', 'Порты_без_псевдономов',
        'Отсутсвуют_в_сети', 'Статистика_зон', 'Статистика_псевдонимов',
        'Статистика_конфигурации'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if it was saved on a previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    zoning_aggregated_df, alias_aggregated_df, zonemember_statistics_df, \
        portshow_zoned_aggregated_df, alias_statistics_df, effective_cfg_statistics_df, zoning_report_df, alias_report_df, \
            zoning_compare_report_df, unzoned_device_report_df, no_alias_device_report_df, zoning_absent_device_report_df,\
                zonemember_statistics_report_df,  alias_statistics_report_df, effective_cfg_statistics_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'cfg', 'cfg_effective', 'zone', 'alias', 'switch_params_aggregated',
        'switch_parameters', 'switchshow_ports', 'chassis_parameters',
        'portshow_aggregated', 'device_rename', 'report_columns_usage_upd',
        'portcmd', 'fdmi', 'nscamshow', 'nsshow', 'blade_servers',
        'fabric_labels'
    ]

    # force run when any data from data_lst was not saved (file not found) or when
    # procedure execution is explicitly requested for the output data or for data used during function execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # current operation information string
        info = 'Generating zoning table'
        print(info, end=" ")

        # aggregated DataFrames
        zoning_aggregated_df, alias_aggregated_df \
            = zoning_aggregated(switch_params_aggregated_df, portshow_aggregated_df,
                                    cfg_df, zone_df, alias_df, cfg_effective_df, fcrfabric_df, lsan_df, peerzone_df, report_data_lst)

        # create comprehensive statistics DataFrame with Fabric summaries and
        # zones statistics DataFrame without summaries
        zonemember_statistics_df, zonemember_zonelevel_stat_df = zonemember_statistics(
            zoning_aggregated_df, report_data_lst)
        # add zoning statistics notes, zone duplicates and zone pairs to zoning aggregated DataFrame
        zoning_aggregated_df = statistics_to_aggregated_zoning(
            zoning_aggregated_df, zonemember_zonelevel_stat_df)
        # check all fabric devices (Wwnp) for usage in zoning configuration
        portshow_zoned_aggregated_df = verify_cfg_type(portshow_aggregated_df,
                                                       zoning_aggregated_df,
                                                       ['PortName'])
        # create alias configuration statistics
        alias_statistics_df = alias_dashboard(alias_aggregated_df,
                                              portshow_zoned_aggregated_df)
        # create Effective zoning configuration summary statistics
        effective_cfg_statistics_df = cfg_dashborad(
            zonemember_statistics_df, portshow_zoned_aggregated_df,
            zoning_aggregated_df, alias_aggregated_df)
        # after finish display status
        status_info('ok', max_title, len(info))

        # report tables
        zoning_report_df, alias_report_df, zoning_compare_report_df, \
            unzoned_device_report_df, no_alias_device_report_df, zoning_absent_device_report_df, \
                zonemember_statistics_report_df, alias_statistics_report_df, effective_cfg_statistics_report_df = \
                    zoning_report_main(zoning_aggregated_df, alias_aggregated_df, portshow_zoned_aggregated_df,
                                        zonemember_statistics_df, alias_statistics_df, effective_cfg_statistics_df,
                                        data_names, report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [
            zoning_aggregated_df, alias_aggregated_df,
            zonemember_statistics_df, portshow_zoned_aggregated_df,
            alias_statistics_df, effective_cfg_statistics_df, zoning_report_df,
            alias_report_df, zoning_compare_report_df,
            unzoned_device_report_df, no_alias_device_report_df,
            zoning_absent_device_report_df, zonemember_statistics_report_df,
            alias_statistics_report_df, effective_cfg_statistics_report_df
        ]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)

    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        zoning_aggregated_df, alias_aggregated_df, zonemember_statistics_df, \
            portshow_zoned_aggregated_df, alias_statistics_df, effective_cfg_statistics_df, zoning_report_df, alias_report_df, \
                zoning_compare_report_df, unzoned_device_report_df, no_alias_device_report_df, zoning_absent_device_report_df, \
                    zonemember_statistics_report_df, alias_statistics_report_df, effective_cfg_statistics_report_df \
                    = verify_data(report_data_lst, data_names, *data_lst)

        data_lst = [
            zoning_aggregated_df, alias_aggregated_df,
            zonemember_statistics_df, portshow_zoned_aggregated_df,
            alias_statistics_df, effective_cfg_statistics_df, zoning_report_df,
            alias_report_df, zoning_compare_report_df,
            unzoned_device_report_df, no_alias_device_report_df,
            zoning_absent_device_report_df, zonemember_statistics_report_df,
            alias_statistics_report_df, effective_cfg_statistics_report_df
        ]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return zoning_aggregated_df, alias_aggregated_df, portshow_zoned_aggregated_df
Example #9
def synergy_system_extract(synergy_folder, report_data_lst):
    """Function to extract blade systems information"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['synergy_interconnect', 'synergy_servers']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if it was saved on a previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)

    # # unpacking from the loaded list with data
    # # pylint: disable=unbalanced-tuple-unpacking
    # module_comprehensive_lst, blades_comprehensive_lst, blade_vc_comprehensive_lst = data_lst

    # force run when any data from data_lst was not saved (file not found) or when
    # procedure execution is explicitly requested for the output data or for data used during function execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title)
    if force_run:

        # lists to store only REQUIRED information
        # collecting data for all blades during looping
        # list containing enclosure, blade and hba information for all blade systems

        # list containing enclosure and interconnect modules information for all blade systems

        synergy_module_columns = [
            'Enclosure_Name', 'Enclosure_SN', 'Enclosure_Type',
            'Interconnect_Bay', 'Interconnect_Model', 'Interconnect_SN',
            'Interconnect_Firmware', 'Interconnect_Name', 'NodeName',
            'Device_Location'
        ]

        synergy_server_columns = [
            'Enclosure_Name', 'Enclosure_Slot', 'Host_Name', 'name',
            'serverprofilename', 'Device_Model', 'Device_SN', 'Host_OS',
            'HBA_Description', 'Mezz_type', 'Connected_portWwn',
            'Mezz_location', 'Device_Location', 'HBA_Firmware'
        ]

        synergy_module_aggregated_df = pd.DataFrame(
            columns=synergy_module_columns)
        synergy_servers_aggregated_df = pd.DataFrame(
            columns=synergy_server_columns)

        if synergy_folder:
            print('\nEXTRACTING SYNERGY SYSTEM INFORMATION ...\n')

            # collect files with xlsm extension in the folder
            synergy_config_lst = find_files(synergy_folder,
                                            max_title,
                                            filename_extension='xlsm')
            # number of files to check
            configs_num = len(synergy_config_lst)

            if configs_num:

                # # data imported from init file to extract values from config file
                # enclosure_params, _, comp_keys, match_keys, comp_dct = data_extract_objects('blades', max_title)
                # module_params = columns_import('blades', max_title, 'module_params')
                # blade_params = columns_import('blades', max_title, 'blade_params')

                for i, synergy_config in enumerate(synergy_config_lst):
                    # file name with extension
                    configname_wext = os.path.basename(synergy_config)
                    # remove extension from filename
                    configname, _ = os.path.splitext(configname_wext)

                    # current operation information string
                    info = f'[{i+1} of {configs_num}]: {configname} system.'
                    print(info, end=" ")

                    syn_enclosure_df = pd.read_excel(synergy_config,
                                                     sheet_name='enclosures')
                    syn_module_df = pd.read_excel(
                        synergy_config, sheet_name='interconnectbays')

                    syn_server_hw_df = pd.read_excel(
                        synergy_config, sheet_name='server-hardware')
                    syn_server_fw_sw_df = pd.read_excel(
                        synergy_config, sheet_name='server-fw-sw')
                    syn_server_profile_connection_df = pd.read_excel(
                        synergy_config, sheet_name='server-prof-conn-details')

                    synergy_module_df = synergy_module(syn_enclosure_df,
                                                       syn_module_df)

                    if synergy_module_aggregated_df.empty:
                        synergy_module_aggregated_df = synergy_module_df
                    else:
                        synergy_module_aggregated_df = pd.concat(
                            [synergy_module_aggregated_df, synergy_module_df],
                            ignore_index=True)

                    synergy_server_wwn_df = synergy_server_wwn(
                        syn_server_hw_df)
                    synergy_profile_wwn_df = synergy_profile_wwn(
                        syn_server_profile_connection_df,
                        synergy_server_wwn_df)

                    # concatenate connection profile and server hardware
                    synergy_servers_df = pd.concat(
                        [synergy_server_wwn_df, synergy_profile_wwn_df],
                        ignore_index=True)
                    synergy_servers_df.drop_duplicates(inplace=True)

                    # add mezzanine firmware details
                    synergy_servers_df = synergy_mezz_fw(
                        syn_server_fw_sw_df, synergy_servers_df)
                    synergy_servers_df.sort_values(
                        by=['enclosurename', 'position', 'Mezz_WWPN'],
                        ignore_index=True,
                        inplace=True)

                    if synergy_servers_aggregated_df.empty:
                        synergy_servers_aggregated_df = synergy_servers_df
                    else:
                        synergy_servers_aggregated_df = pd.concat(
                            [
                                synergy_servers_aggregated_df,
                                synergy_servers_df
                            ],
                            ignore_index=True)

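                    # lowercase WWN columns (presumably to keep them comparable
                    # with switch-side WWPNs collected elsewhere)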
                    if synergy_module_aggregated_df['switchbasewwn'].notna().any():
                        synergy_module_aggregated_df['switchbasewwn'] = \
                            synergy_module_aggregated_df['switchbasewwn'].str.lower()
                    if synergy_servers_aggregated_df['Mezz_WWPN'].notna().any():
                        synergy_servers_aggregated_df['Mezz_WWPN'] = \
                            synergy_servers_aggregated_df['Mezz_WWPN'].str.lower()

                    if not synergy_servers_df.empty or not synergy_module_df.empty:
                        status_info('ok', max_title, len(info))
                    else:
                        status_info('no data', max_title, len(info))

                module_columns_dct = {
                    'enclosurename': 'Enclosure_Name',
                    'enclosure_serialnumber': 'Enclosure_SN',
                    'enclosuretype': 'Enclosure_Type',
                    'baynumber': 'Interconnect_Bay',
                    'interconnectmodel': 'Interconnect_Model',
                    'serialnumber': 'Interconnect_SN',
                    'switchfwversion': 'Interconnect_Firmware',
                    'hostname': 'Interconnect_Name',
                    'switchbasewwn': 'NodeName',
                    'device_location': 'Device_Location'
                }

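                # rename columns for the report and turn 'None'/'none'/blank cells into NaN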
                synergy_module_aggregated_df.rename(columns=module_columns_dct,
                                                    inplace=True)
                synergy_module_aggregated_df.replace(r'^None$|^none$|^ *$',
                                                     value=np.nan,
                                                     regex=True,
                                                     inplace=True)

                server_columns_dct = {
                    'enclosurename': 'Enclosure_Name',
                    'position': 'Enclosure_Slot',
                    'servername': 'Host_Name',
                    'model': 'Device_Model',
                    'serialnumber': 'Device_SN',
                    'oshint': 'Host_OS',
                    'Mezz': 'HBA_Description',
                    'Mezz_WWPN': 'Connected_portWwn',
                    'device_location': 'Device_Location',
                    'componentversion': 'HBA_Firmware'
                }

                synergy_servers_aggregated_df.rename(
                    columns=server_columns_dct, inplace=True)
                synergy_servers_aggregated_df.replace(r'^None$|^none$|^ *$',
                                                      value=np.nan,
                                                      regex=True,
                                                      inplace=True)

                data_lst = [
                    synergy_module_aggregated_df, synergy_servers_aggregated_df
                ]
                # save extracted data to json file
                save_data(report_data_lst, data_names, *data_lst)
        else:
            # current operation information string
            info = 'Collecting synergy details'
            print(info, end=" ")
            status_info('skip', max_title, len(info))
            data_lst = [
                synergy_module_aggregated_df, synergy_servers_aggregated_df
            ]
            # save empty data to json file
            save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        synergy_module_aggregated_df, synergy_servers_aggregated_df = verify_data(
            report_data_lst, data_names, *data_lst)
        data_lst = [
            synergy_module_aggregated_df, synergy_servers_aggregated_df
        ]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return synergy_module_aggregated_df, synergy_servers_aggregated_df
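
All of these main functions lean on the same persistence helpers (load_data, save_data), whose implementations are not part of these examples. The sketch below is a minimal guess at how the pair could work, assuming one JSON file per entry in data_names, stored in the save directory carried in position 2 of report_data_lst. The file layout, naming, and serialization format are illustrative assumptions, not the project's actual code.

import json
import os

import pandas as pd


def save_data(report_data_lst, data_names, *data_lst):
    """Hypothetical sketch: persist each object as a JSON file named after data_names."""
    # assumption: report_data_lst[2] is the directory for saved data
    _, _, save_dir, *_ = report_data_lst
    for data_name, data in zip(data_names, data_lst):
        file_path = os.path.join(save_dir, f'{data_name}.json')
        if isinstance(data, pd.DataFrame):
            data.to_json(file_path, orient='split')
        else:
            with open(file_path, 'w', encoding='utf-8') as file:
                json.dump(data, file, ensure_ascii=False)


def load_data(report_data_lst, *data_names):
    """Hypothetical sketch: return one object per name, None when nothing was saved."""
    _, _, save_dir, *_ = report_data_lst
    data_lst = []
    for data_name in data_names:
        file_path = os.path.join(save_dir, f'{data_name}.json')
        if os.path.isfile(file_path):
            data_lst.append(pd.read_json(file_path, orient='split'))
        else:
            data_lst.append(None)
    return data_lst
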
Example #10
def isl_main(fabricshow_ag_labels_df, switch_params_aggregated_df, report_columns_usage_dct,
             isl_df, trunk_df, fcredge_df, portshow_df, sfpshow_df, portcfgshow_df,
             switchshow_ports_df, report_data_lst):
    """Main function to create ISL and IFL report tables"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['isl_aggregated', 'isl_statistics', 'Межкоммутаторные_соединения', 'Межфабричные_соединения', 'Статистика_ISL']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')
    
    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = ['isl', 'trunk', 'fcredge', 'sfpshow', 'portcfgshow', 
                            'chassis_parameters', 'switch_parameters', 'switchshow_ports', 
                            'maps_parameters', 'blade_interconnect', 'fabric_labels']

    # force run when any data from data_lst was not saved (file not found) or 
    # procedure execution explicitly requested for output data or data used during fn execution  
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:

        # data imported from init file (regular expression patterns) to extract values from data columns
        # re_pattern list contains comp_keys, match_keys, comp_dct    
        _, _, *re_pattern_lst = data_extract_objects('common_regex', max_title)

        # current operation information string
        info = 'Generating ISL and IFL tables'
        print(info, end=" ")

        # get aggregated DataFrames
        isl_aggregated_df, fcredge_df = \
            isl_aggregated(fabricshow_ag_labels_df, switch_params_aggregated_df,
                           isl_df, trunk_df, fcredge_df, portshow_df, sfpshow_df,
                           portcfgshow_df, switchshow_ports_df, re_pattern_lst)

        isl_statistics_df = isl_statistics(isl_aggregated_df, re_pattern_lst, report_data_lst)

        # after finish display status
        status_info('ok', max_title, len(info))      

        # partition aggregated DataFrame to required tables
        isl_report_df, = dataframe_segmentation(isl_aggregated_df, [data_names[2]], report_columns_usage_dct, max_title)
        isl_report_df = translate_values(isl_report_df, translate_dct={'Yes': 'Да', 'No': 'Нет'})
        # if no trunks in fabric drop trunk columns
        if trunk_df.empty:
            isl_report_df.drop(columns=['Идентификатор транка', 'Deskew', 'Master'],
                               inplace=True)
        # check if IFL table required
        if not fcredge_df.empty:
            ifl_report_df, = dataframe_segmentation(fcredge_df, [data_names[3]], report_columns_usage_dct, max_title)
        else:
            ifl_report_df = fcredge_df.copy()

        isl_statistics_report_df = isl_statistics_report(isl_statistics_df, report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df = \
            verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return isl_aggregated_df, isl_statistics_df
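
isl_main above localizes report cell values through translate_values. That helper is not shown in these examples; a minimal sketch, assuming it is a thin wrapper around DataFrame.replace that returns the frame unchanged when no dictionary is passed, might look like this.

import pandas as pd


def translate_values(df, translate_dct=None):
    """Hypothetical sketch: replace cell values according to translate_dct."""
    if translate_dct:
        df = df.replace(translate_dct)
    return df


# usage with the same dictionary as in isl_main
sample_df = pd.DataFrame({'Trunk': ['Yes', 'No']})
print(translate_values(sample_df, translate_dct={'Yes': 'Да', 'No': 'Нет'}))
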
Example #11
def fabricstatistics_main(portshow_aggregated_df, switchshow_ports_df,
                          fabricshow_ag_labels_df, nscamshow_df, portshow_df,
                          report_columns_usage_dct, report_data_lst):
    """Main function to count Fabrics statistics"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['fabric_statistics', 'Статистика_фабрики']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were obtained on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    fabric_statistics_df, fabric_statistics_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'portcmd', 'switchshow_ports', 'switch_params_aggregated',
        'portshow_aggregated', 'switch_parameters', 'chassis_parameters',
        'fdmi', 'nscamshow', 'nsshow', 'alias', 'blade_servers',
        'fabric_labels'
    ]

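    # 'chassis_info_usage' flag (presumably controls chassis columns in the report table)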
    chassis_column_usage = report_columns_usage_dct['chassis_info_usage']
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # current operation information string
        info = 'Counting up Fabrics statistics'
        print(info, end=" ")

        fabric_statistics_df = statisctics_aggregated(portshow_aggregated_df,
                                                      switchshow_ports_df,
                                                      fabricshow_ag_labels_df,
                                                      nscamshow_df,
                                                      portshow_df,
                                                      report_data_lst)
        # after finish display status
        status_info('ok', max_title, len(info))
        # get report DataFrame
        fabric_statistics_report_df = statistics_report(
            fabric_statistics_df, chassis_column_usage, max_title)
        # create list with partitioned DataFrames
        data_lst = [fabric_statistics_df, fabric_statistics_report_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        fabric_statistics_df, fabric_statistics_report_df = \
            verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [fabric_statistics_df, fabric_statistics_report_df]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return fabric_statistics_df
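
Every main function above finishes with the same export loop over save_xlsx_file. The helper itself is outside these examples; the sketch below shows one plausible shape, assuming a single report workbook per customer (named from customer_name and report_path in report_data_lst) with one sheet per data name, written through the openpyxl engine. The workbook naming and the placeholder handling are assumptions for illustration only.

import os

import pandas as pd


def save_xlsx_file(data_frame, data_name, report_data_lst):
    """Hypothetical sketch: write a DataFrame as one sheet of the report workbook."""
    customer_name, report_path, *_ = report_data_lst
    # loaded data may be a placeholder string instead of a DataFrame; skip those
    if not isinstance(data_frame, pd.DataFrame):
        return
    file_path = os.path.join(report_path, f'{customer_name}_report.xlsx')
    # Excel limits sheet names to 31 characters
    sheet_name = str(data_name)[:31]
    if os.path.isfile(file_path):
        # append to the existing workbook, replacing the sheet if present
        with pd.ExcelWriter(file_path, mode='a', engine='openpyxl',
                            if_sheet_exists='replace') as writer:
            data_frame.to_excel(writer, sheet_name=sheet_name, index=False)
    else:
        with pd.ExcelWriter(file_path, engine='openpyxl') as writer:
            data_frame.to_excel(writer, sheet_name=sheet_name, index=False)
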