Example #1
def sensor_analysis_main(sensor_df, switch_params_aggregated_df,
                         report_columns_usage_dct, report_data_lst):
    """Main function to analyze zoning configuration"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['sensor_aggregated', 'Датчики']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    sensor_aggregated_df, sensor_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'switch_params_aggregated', 'fabric_labels', 'sensor'
    ]

    # force run when any data from data_lst was not saved (file not found) or
    # procedure execution explicitly requested for output data or data used during fn execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # current operation information string
        info = 'Generating sensor readings table'
        print(info, end=" ")

        # aggregated DataFrames
        sensor_aggregated_df = sensor_aggregation(sensor_df,
                                                  switch_params_aggregated_df)

        # after finish display status
        status_info('ok', max_title, len(info))

        # report tables
        sensor_report_df = sensor_report(sensor_aggregated_df, data_names,
                                         report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [sensor_aggregated_df, sensor_report_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)

    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        sensor_aggregated_df, sensor_report_df = verify_data(
            report_data_lst, data_names, *data_lst)

        data_lst = [sensor_aggregated_df, sensor_report_df]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return sensor_aggregated_df
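
All of these modules share a load/verify/analyze/save pattern. As a rough illustration of the decision step, here is a minimal sketch of what verify_force_run is assumed to do, based on the comments above and on the report_steps_dct usage in Example #13 (index 1 of each entry is taken to be the force-run flag; the real helper also prints status information using max_title):

def verify_force_run_sketch(data_names, data_lst, report_steps_dct,
                            analyzed_data_names=None):
    """Sketch: re-run the analysis step if any saved dataset is missing
    or a force flag is set for the output or the source data."""
    # data was not saved on a previous iteration (load_data returned None)
    if any(data is None for data in data_lst):
        return True
    # force flag (assumed at index 1 of the report_steps_dct entry) set
    # for output data or for any data used during function execution
    checked_names = list(data_names) + list(analyzed_data_names or [])
    return any(report_steps_dct[name][1] for name in checked_names)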
Example #2
def auto_fabrics_labeling(switchshow_ports_df, switch_params_df, fabricshow_df, report_data_lst):
    """Function to auto label fabrics  in fabricshow_df DataFrame"""

    # ls_type, customer defined fabric name, xisl usage mode
    switch_ls_type_df = verify_ls_type(switch_params_df)
    # count port type (F-port, E-port) and port state (Online) statistics for each switch in fabricshow
    fabricshow_porttype_state_df = fabricshow_porttype_state(switchshow_ports_df, switch_ls_type_df, fabricshow_df)
    # saving DataFrame to Excel if manual labeling required
    save_xlsx_file(fabricshow_porttype_state_df, 'fabricshow_statistics', report_data_lst, force_flag=True)
    # removing front domain and translate domain switches from DataFrame
    fabricshow_porttype_state_df = fabricshow_porttype_state_df.loc[fabricshow_porttype_state_df.Enet_IP_Addr != '0.0.0.0']
    # dividing fabricshow_porttype_state_df into groups. One group for each fabric
    fabricshow_grp = fabricshow_porttype_state_df.groupby(
        by=['chassis_name', 'Principal_switch_name', 'Principal_switch_wwn', 'Fabric_ID', 'FC_Route'], dropna=False)

    # apply fabricshow_summary to each fabric group to summarize the fabricshow_porttype_state_df DataFrame
    fabricshow_summary_df = fabricshow_grp.apply(fabricshow_summary)
    if fabricshow_summary_df.empty:
        print('\n')
        print('No PRINCIPAL switch found. Exiting ...')
        print('\n')
        sys.exit()
    # sort data so that each odd/even pair of rows represents a fabric pair
    fabricshow_summary_df = fabricshow_summary_df.reset_index().sort_values(
        by=['FC_Route', 'Total_switch', 'Domain_IDs', 'Switch_names'], 
        ascending=[False, False, True, True]).reset_index(drop=True)
    # label fabrics with the _auto_fabrics_labeling function
    fabricshow_summary_df[['Fabric_name', 'Fabric_label']] = \
        fabricshow_summary_df.apply(lambda row: pd.Series(_auto_fabrics_labeling(row)), axis=1)

    fabricshow_summary_df.sort_values(by=['Fabric_name', 'Fabric_label', 'Principal_switch_name', 'Domain_IDs'],
                                            inplace=True, ignore_index=True)
    return fabricshow_summary_df
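
A toy illustration of the two pandas idioms used above: summarising each group by returning a pd.Series from groupby().apply(), and filling two columns in one assignment from a row-wise apply. The data and the even/odd labeling rule are invented stand-ins for _auto_fabrics_labeling:

import pandas as pd

df = pd.DataFrame({'Principal_switch_wwn': ['wwn1', 'wwn1', 'wwn2'],
                   'Online_ports': [10, 12, 4]})
# one summary row per fabric group
summary_df = df.groupby('Principal_switch_wwn').apply(
    lambda grp: pd.Series({'Total_switch': len(grp),
                           'Online_ports': grp['Online_ports'].sum()}))
summary_df = summary_df.reset_index()
# returning pd.Series from apply fills two columns in one assignment
summary_df[['Fabric_name', 'Fabric_label']] = summary_df.apply(
    lambda row: pd.Series(['FABRIC', 'A' if row.name % 2 == 0 else 'B']),
    axis=1)
print(summary_df)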
Example #3
def list_to_dataframe(data_lst, report_data_lst, sheet_title_export, sheet_title_import=None,
                        columns=columns_import, columns_title_import='columns'):
    """Function to convert a list to a DataFrame, save it to the Excel report file
    and return the DataFrame
    """

    *_, max_title, _ = report_data_lst 
    
    # checks if columns were passed to function as a list
    if isinstance(columns, list):
        columns_title = columns
    # if not (default) then import columns from excel file
    else:
        columns_title = columns(sheet_title_import, max_title, columns_title_import)
    data_df = pd.DataFrame(data_lst, columns=columns_title)
    save_xlsx_file(data_df, sheet_title_export, report_data_lst)
    
    return data_df
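
A hypothetical call showing the explicit-columns path, with invented sheet name and row data; report_data_lst is assumed to come from the surrounding program:

rows = [['switch1', '0', 'Online'], ['switch1', '1', 'No_Light']]
switchshow_df = list_to_dataframe(rows, report_data_lst, 'switchshow_ports',
                                  columns=['switchName', 'slot', 'state'])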
Example #4
def download_summary(ns_3par_df, report_data_lst):
    """Function to print configurations download from STATs summary and
    save summary to file if user agreed"""

    if not 'STATs_status' in ns_3par_df.columns and \
        not 'Local_status' in ns_3par_df.columns:
        ns_3par_df['Status'] = 'skip'

    print('\n')
    print('3PAR Storage Systems configuration download summary')
    print(ns_3par_df)
    print('\n')

    query = 'Do you want to SAVE download SUMMARY? (y)es/(n)o: '
    reply = reply_request(query)
    if reply == 'y':
        save_xlsx_file(ns_3par_df,
                       'stats_summary',
                       report_data_lst,
                       force_flag=True)
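
reply_request is used throughout these modules; a minimal sketch of its assumed behaviour (prompt until the answer is in the accepted set, then return its first letter, so 'yes' becomes 'y' as the callers expect) would be:

def reply_request_sketch(query, accepted=('y', 'yes', 'n', 'no')):
    # keep asking until a valid answer is given; normalise to one letter
    while True:
        reply = input(query).strip().lower()
        if reply in accepted:
            return reply[0]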
Example #5
def define_device_to_rename(portshow_aggregated_df, device_rename_df,
                            max_title, force_form_update_flag,
                            force_change_data_lst, report_data_lst):
    """
    Function to define (create new, return previously saved or return empty) 
    device_rename_df DataFrame to apply device rename schema
    """

    # if device_rename_df DataFrame doesn't exist (1st iteration)
    # or force flag to change device_rename_df DataFrame is on
    # or some related DataFrames were forcibly changed
    if device_rename_df is None or force_form_update_flag:
        print('\n')
        if force_change_data_lst:
            print(
                f"Request to force change of {', '.join(force_change_data_lst)} data was received."
            )
        reply = reply_request(
            'Do you want to change auto assigned device names? (y)es/(n)o: ')
        if reply == 'y':
            # if device_rename_df DataFrame doesn't exist (1st iteration)
            if device_rename_df is None:
                # create new device rename DataFrame
                manual_device_rename_df = create_device_rename_form(
                    portshow_aggregated_df)
            else:
                # if any related DataFrames were forcibly changed, ask if the device rename form needs to be reset
                if force_change_data_lst:
                    reply = reply_request(
                        'Do you want to apply previously saved device rename schema? (y)es/(n)o: '
                    )
                    if reply == 'y':
                        print('\n')
                        return device_rename_df
                    else:
                        print('\n')
                        reply = reply_request(
                            'Do you want to reset device rename form? (y)es/(n)o: '
                        )
                        if reply == 'y':
                            # create new device rename DataFrame
                            manual_device_rename_df = create_device_rename_form(
                                portshow_aggregated_df)
                        else:
                            # use saved device rename DataFrame
                            manual_device_rename_df = device_rename_df.copy()
                else:
                    # if no force change in related DataFrames but device_rename_df DataFrame
                    # change initiated use saved device rename DataFrame
                    manual_device_rename_df = device_rename_df.copy()

            # save manual_device_rename_df DataFrame to excel file to use it as a form to fill in
            sheet_title = 'device_rename_form'
            file_path = save_xlsx_file(manual_device_rename_df,
                                       sheet_title,
                                       report_data_lst,
                                       force_flag=True)
            file_name = os.path.basename(file_path)
            file_directory = os.path.dirname(file_path)
            print(
                f"\nTo rename devices put new names into the '{file_name}' file, '{sheet_title}' sheet in\n'{file_directory}' directory"
            )
            print('ATTN! CLOSE file after changes were made\n')
            # complete the manual_device_rename_df form and import it
            reply = reply_request("When finished enter 'yes': ", ['yes'])
            if reply == 'y':
                print('\n')
                device_rename_df = dataframe_import(sheet_title,
                                                    max_title,
                                                    init_file=file_path,
                                                    header=2)

        else:
            # if auto-assigned names are kept, save an empty device_rename_df DataFrame
            device_rename_columns = [
                'Fabric_name', 'Device_Host_Name', 'Group_Name', 'deviceType',
                'deviceSubtype', 'Device_Host_Name_rename'
            ]
            device_rename_df = pd.DataFrame(columns=device_rename_columns)
    else:
        # check loaded device_rename_df DataFrame (if it's empty)
        device_rename_df = verify_data(report_data_lst, ['device_rename'],
                                       device_rename_df,
                                       show_status=False)

    return device_rename_df
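
A self-contained sketch of the Excel form round-trip this function assumes: write a DataFrame to a sheet, let the user edit it by hand, then read it back. header=2 mirrors the dataframe_import(..., header=2) call above and skips two title rows; file and column names are illustrative:

import pandas as pd

form_df = pd.DataFrame({'Device_Host_Name': ['esx01'],
                        'Device_Host_Name_rename': [None]})
# startrow=2 leaves room for two title rows above the header
form_df.to_excel('device_rename_form.xlsx', sheet_name='device_rename_form',
                 startrow=2, index=False)
# ... user edits the file by hand ...
edited_df = pd.read_excel('device_rename_form.xlsx',
                          sheet_name='device_rename_form', header=2)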
Example #6
def fabriclabels_main(switchshow_ports_df, switch_params_df, fabricshow_df,
                      ag_principal_df, report_data_lst):
    """Function to set Fabric labels"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    customer_name, report_path, _, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['fabric_labels']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    fabricshow_ag_labels_df, = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = []
    # force run when any data from data_lst was not saved (file not found) or
    # procedure execution explicitly requested for output data or data used during fn execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)

    if force_run:
        print('\nSETTING UP FABRIC NAMES AND LABELS ...\n')

        fabricshow_summary_df = auto_fabrics_labeling(switchshow_ports_df,
                                                      switch_params_df,
                                                      fabricshow_df,
                                                      report_data_lst)

        # display automatic fabric labeling
        info_labels = [
            'Fabric_name', 'Fabric_label', 'chassis_name',
            'Principal_switch_name', 'Fabric_ID', 'FC_Route', 'Total_switch',
            'Domain_IDs', 'Switch_names', 'Device_ports', 'Online_ports',
            'LS_type', 'Fabric_Name'
        ]
        # service file name for detailed information
        current_date = str(date.today())
        file_name = customer_name + '_' + report_steps_dct[
            'fabricshow_summary'][2] + '_' + current_date + '.xlsx'
        print('\nAutomatic fabrics labeling\n')
        # set option to show all columns
        with pd.option_context('display.max_columns', None,
                               'display.expand_frame_repr', False):
            print(fabricshow_summary_df.loc[:, info_labels])
        print(
            f"\nFor detailed switch port type and number statistics in each fabric check '{file_name}' file 'fabricshow_statistics' sheet in"
        )
        print(f'{report_path} directory')
        print('ATTN! CLOSE file after check\n')

        # ask user if automatic fabric labeling needs to be corrected
        query = 'Do you want to change Fabrics Names or Labels? (y)es/(n)o: '
        reply = reply_request(query)
        if reply == 'y':
            # saving DataFrame to Excel to check during manual labeling if required
            save_xlsx_file(fabricshow_summary_df,
                           'fabricshow_summary',
                           report_data_lst,
                           force_flag=True)
            fabricshow_summary_df = manual_fabrics_labeling(
                fabricshow_summary_df, info_labels)

        # take all switches working in Native mode and AG switches,
        # merge them into one DataFrame and identify which fabric they belong to using the fabricshow_summary DataFrame
        fabricshow_ag_labels_df = native_ag_labeling(fabricshow_df,
                                                     ag_principal_df,
                                                     fabricshow_summary_df)

        # create list with partitioned DataFrames
        data_lst = [fabricshow_ag_labels_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        fabricshow_ag_labels_df = verify_data(report_data_lst, data_names,
                                              *data_lst)
        data_lst = [fabricshow_ag_labels_df]
    # save data to excel file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return fabricshow_ag_labels_df
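
pd.option_context applies display options only inside the with-block and restores the defaults on exit, which is why no reset_option calls are needed afterwards. A quick demonstration:

import pandas as pd

df = pd.DataFrame({f'col{i}': range(3) for i in range(30)})
with pd.option_context('display.max_columns', None,
                       'display.expand_frame_repr', False):
    print(df)  # all 30 columns shown on one long line
print(df)      # back to the default truncated view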
Example #7
def switch_params_analysis_main(fabricshow_ag_labels_df, chassis_params_df,
                                switch_params_df, maps_params_df,
                                blade_module_loc_df, ag_principal_df,
                                report_data_lst):
    """Main function to create aggregated switch parameters table and report tables"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = [
        'report_columns_usage', 'switch_params_aggregated', 'Коммутаторы',
        'Фабрика', 'Параметры_коммутаторов', 'Лицензии',
        'Глобальные_параметры_фабрики'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    report_columns_usage_dct, switch_params_aggregated_df, switches_report_df, fabric_report_df, \
        switches_parameters_report_df, licenses_report_df, global_fabric_parameters_report_df  = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'chassis_parameters', 'switch_parameters', 'switchshow_ports',
        'maps_parameters', 'blade_interconnect', 'fabric_labels'
    ]

    # clean fabricshow DataFrame from unnecessary data
    fabric_clean_df = fabric_clean(fabricshow_ag_labels_df)
    # force run when any data from data_lst was not saved (file not found) or
    # procedure execution explicitly requested for output data or data used during fn execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:

        # import data with switch models, firmware, etc.
        switch_models_df = dataframe_import('switch_models', max_title)

        # current operation information string
        info = 'Generating aggregated switch parameters table'
        print(info, end=" ")

        # create aggregated table by joining DataFrames
        switch_params_aggregated_df, report_columns_usage_dct = \
            fabric_aggregation(fabric_clean_df, chassis_params_df, \
                switch_params_df, maps_params_df, switch_models_df, ag_principal_df)
        # add 'Device_Location' for Blade chassis switches
        switch_params_aggregated_df = fill_device_location(
            switch_params_aggregated_df, blade_module_loc_df)

        # after finish display status
        status_info('ok', max_title, len(info))

        # check if any switch config files are missing: switches that carry
        # fabric labels but have no chassis_name came from fabricshow only
        mask_fabric = switch_params_aggregated_df[[
            'Fabric_name', 'Fabric_label'
        ]].notna().all(axis=1)
        mask_no_config = switch_params_aggregated_df['chassis_name'].isna()
        missing_configs_num = switch_params_aggregated_df.loc[
            mask_fabric & mask_no_config]['Fabric_name'].count()
        if missing_configs_num:
            info = f'{missing_configs_num} switch configuration{"s" if missing_configs_num > 1 else ""} MISSING'
            print(info, end=" ")
            status_info('warning', max_title, len(info))

        switches_report_df, fabric_report_df, switches_parameters_report_df, \
            licenses_report_df, global_fabric_parameters_report_df = \
                switchs_params_report(switch_params_aggregated_df, data_names, report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [
            report_columns_usage_dct, switch_params_aggregated_df,
            switches_report_df, fabric_report_df,
            switches_parameters_report_df, licenses_report_df,
            global_fabric_parameters_report_df
        ]

        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        report_columns_usage_dct, switch_params_aggregated_df, switches_report_df, fabric_report_df,  \
            switches_parameters_report_df, licenses_report_df, global_fabric_parameters_report_df = verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [
            report_columns_usage_dct, switch_params_aggregated_df,
            switches_report_df, fabric_report_df,
            switches_parameters_report_df, licenses_report_df,
            global_fabric_parameters_report_df
        ]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names[1:], data_lst[1:]):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return report_columns_usage_dct, switch_params_aggregated_df, fabric_clean_df
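
A toy version of the missing-config check above, with invented data: switches that carry fabric labels but have no chassis_name appeared in fabricshow only, meaning their configuration file was not collected.

import pandas as pd

df = pd.DataFrame({'Fabric_name': ['FAB1', 'FAB1', None],
                   'Fabric_label': ['A', 'A', None],
                   'chassis_name': ['chs1', None, None]})
mask_fabric = df[['Fabric_name', 'Fabric_label']].notna().all(axis=1)
mask_no_config = df['chassis_name'].isna()
print((mask_fabric & mask_no_config).sum())  # -> 1 switch without a config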
Example #8
def errdump_main(errdump_df, switchshow_df, switch_params_aggregated_df,
                 portshow_aggregated_df, report_columns_usage_dct,
                 report_data_lst):
    """Main function to get most frequently appeared log messages"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['errdump_aggregated', 'raslog_counter', 'Журнал']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    errdump_aggregated_df, raslog_counter_df, raslog_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'chassis_parameters', 'switch_parameters', 'switchshow_ports',
        'maps_parameters', 'portshow_aggregated', 'fabric_labels'
    ]

    # force run when any data from data_lst was not saved (file not found) or
    # procedure execution explicitly requested for output data or data used during fn execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # data imported from init file (regular expression patterns) to extract values from data columns
        # re_pattern list contains comp_keys, match_keys, comp_dct
        _, _, *re_pattern_lst = data_extract_objects('raslog', max_title)

        # current operation information string
        info = 'Counting RASLog messages'
        print(info, end=" ")

        # get aggregated DataFrames
        errdump_aggregated_df, raslog_counter_df, raslog_frequent_df = \
            errdump_aggregated(errdump_df, switchshow_df, switch_params_aggregated_df, portshow_aggregated_df, re_pattern_lst)
        # after finish display status
        status_info('ok', max_title, len(info))

        # partition aggregated DataFrame to required tables
        raslog_report_df = raslog_report(raslog_frequent_df, data_names,
                                         report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [errdump_aggregated_df, raslog_counter_df, raslog_report_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        errdump_aggregated_df, raslog_counter_df, raslog_report_df = \
            verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [errdump_aggregated_df, raslog_counter_df, raslog_report_df]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return errdump_aggregated_df, raslog_counter_df
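
The counting itself lives in errdump_aggregated; a plausible core of it, with invented column names, is a groupby/size over repeated messages:

import pandas as pd

errdump_df = pd.DataFrame({'switchName': ['sw1', 'sw1', 'sw1', 'sw2'],
                           'Message_ID': ['AN-1010', 'AN-1010', 'C2-5825',
                                          'AN-1010']})
raslog_counter_df = (errdump_df
                     .groupby(['switchName', 'Message_ID'])
                     .size()
                     .reset_index(name='Quantity')
                     .sort_values(by='Quantity', ascending=False))
print(raslog_counter_df)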
Example #9
def err_sfp_cfg_analysis_main(portshow_aggregated_df, sfpshow_df,
                              portcfgshow_df, report_columns_usage_dct,
                              report_data_lst):
    """Main function to add porterr, transceiver and portcfg information to portshow DataFrame"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst
    portshow_sfp_force_flag = False
    portshow_sfp_export_flag, *_ = report_steps_dct['portshow_sfp_aggregated']

    # names to save data obtained after current module execution
    data_names = [
        'portshow_sfp_aggregated', 'Ошибки', 'Параметры_SFP',
        'Параметры_портов'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    portshow_sfp_aggregated_df, error_report_df, sfp_report_df, portcfg_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'portshow_aggregated', 'sfpshow', 'portcfgshow', 'portcmd',
        'switchshow_ports', 'switch_params_aggregated', 'fdmi',
        'device_rename', 'report_columns_usage_upd', 'nscamshow', 'nsshow',
        'alias', 'blade_servers', 'fabric_labels'
    ]

    # force run when any data from data_lst was not saved (file not found) or
    # procedure execution explicitly requested for output data or data used during fn execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)

    if force_run:
        # import transceiver information from file
        sfp_model_df = dataframe_import('sfp_models', max_title)
        # current operation information string
        info = 'Updating connected devices table'
        print(info, end=" ")
        # add sfpshow, transceiver information and portcfg to aggregated portcmd DataFrame
        portshow_sfp_aggregated_df = port_complete(portshow_aggregated_df,
                                                   sfpshow_df, sfp_model_df,
                                                   portcfgshow_df)
        # after finish display status
        status_info('ok', max_title, len(info))

        # warning if UNKNOWN SFP present
        if (portshow_sfp_aggregated_df['Transceiver_Supported'] ==
                'Unknown SFP').any():
            info_columns = [
                'Fabric_name', 'Fabric_label', 'configname', 'chassis_name',
                'chassis_wwn', 'slot', 'port', 'Transceiver_Supported'
            ]
            portshow_sfp_info_df = portshow_sfp_aggregated_df.drop_duplicates(
                subset=info_columns).copy()
            unknown_count = len(portshow_sfp_info_df[
                portshow_sfp_info_df['Transceiver_Supported'] ==
                'Unknown SFP'])
            info = f'{unknown_count} {"port" if unknown_count == 1 else "ports"} with UNKNOWN supported SFP tag found'
            print(info, end=" ")
            status_info('warning', max_title, len(info))
            # ask whether to save portshow_sfp_aggregated_df
            if not portshow_sfp_export_flag:
                reply = reply_request(
                    "Do you want to save 'portshow_sfp_aggregated'? (y)es/(n)o: "
                )
                if reply == 'y':
                    portshow_sfp_force_flag = True

        # create report tables from the portshow_sfp_aggregated_df DataFrame
        error_report_df, sfp_report_df, portcfg_report_df = \
            create_report_tables(portshow_sfp_aggregated_df, data_names[1:], report_columns_usage_dct, max_title)
        # saving data to json or csv file
        data_lst = [
            portshow_sfp_aggregated_df, error_report_df, sfp_report_df,
            portcfg_report_df
        ]
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and reset DataFrame if yes
    else:
        portshow_sfp_aggregated_df, error_report_df, sfp_report_df, portcfg_report_df \
            = verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [
            portshow_sfp_aggregated_df, error_report_df, sfp_report_df,
            portcfg_report_df
        ]
    # save data to excel file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        force_flag = False
        if data_name == 'portshow_sfp_aggregated':
            force_flag = portshow_sfp_force_flag
        save_xlsx_file(data_frame,
                       data_name,
                       report_data_lst,
                       force_flag=force_flag)

    return portshow_sfp_aggregated_df
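
A toy version of the unknown-SFP warning above: the aggregated table can hold several rows per physical port, so rows are deduplicated on the port identity columns before counting. Data is invented:

import pandas as pd

df = pd.DataFrame({'chassis_name': ['chs1'] * 3,
                   'slot': [0, 0, 0],
                   'port': [1, 1, 2],
                   'Transceiver_Supported': ['Unknown SFP', 'Unknown SFP',
                                             '16_Gbps']})
ports_df = df.drop_duplicates(subset=['chassis_name', 'slot', 'port'])
unknown_count = (ports_df['Transceiver_Supported'] == 'Unknown SFP').sum()
print(unknown_count)  # -> 1 port with an unknown SFP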
Example #10
def portcmd_analysis_main(portshow_df, switchshow_ports_df, switch_params_df,
                          switch_params_aggregated_df, isl_aggregated_df,
                          nsshow_df, nscamshow_df, ag_principal_df,
                          porttrunkarea_df, alias_df, fdmi_df, blade_module_df,
                          blade_servers_df, blade_vc_df, synergy_module_df,
                          synergy_servers_df, system_3par_df, port_3par_df,
                          report_columns_usage_dct, report_data_lst):
    """Main function to add connected devices information to portshow DataFrame"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = [
        'portshow_aggregated', 'storage_connection_statistics',
        'device_connection_statistics', 'device_rename',
        'report_columns_usage_upd', 'Серверы', 'Массивы', 'Библиотеки',
        'Микрокоды_HBA', 'Подключение_массивов', 'Подключение_библиотек',
        'Подключение_серверов', 'NPIV', 'Статистика_массивов',
        'Статистика_устройств'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    report_columns_usage_bckp = report_columns_usage_dct

    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # flag to forcibly save portshow_aggregated_df if required
    portshow_force_flag = False
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    portshow_aggregated_df, storage_connection_statistics_df, device_connection_statistics_df, \
        device_rename_df, report_columns_usage_dct, \
            servers_report_df, storage_report_df, library_report_df, hba_report_df, \
                storage_connection_df,  library_connection_df, server_connection_df, npiv_report_df, \
                    storage_connection_statistics_report_df, device_connection_statistics_report_df = data_lst
    nsshow_unsplit_df = pd.DataFrame()

    if not report_columns_usage_dct:
        report_columns_usage_dct = report_columns_usage_bckp

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'portcmd', 'switchshow_ports', 'switch_params_aggregated',
        'switch_parameters', 'chassis_parameters', 'fdmi', 'nscamshow',
        'nsshow', 'alias', 'blade_servers', 'fabric_labels', 'isl', 'trunk',
        'isl_aggregated', 'Параметры_SFP', 'portshow_sfp_aggregated'
    ]

    # force run when any data from data_lst was not saved (file not found) or
    # procedure execution explicitly requested for output data or data used during fn execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # import data with switch models, firmware, etc.
        switch_models_df = dataframe_import('switch_models', max_title)
        # data imported from init file (regular expression patterns) to extract values from data columns
        # re_pattern list contains comp_keys, match_keys, comp_dct
        _, _, *re_pattern_lst = data_extract_objects('nameserver', max_title)

        oui_df = dataframe_import('oui',
                                  max_title,
                                  columns=['Connected_oui', 'type', 'subtype'])

        # current operation information string
        info = 'Generating connected devices table'
        print(info, end=" ")

        portshow_aggregated_df, alias_wwnn_wwnp_df, nsshow_unsplit_df, expected_ag_links_df = \
            portshow_aggregated(portshow_df, switchshow_ports_df, switch_params_df,
                                switch_params_aggregated_df, isl_aggregated_df, nsshow_df,
                                nscamshow_df, ag_principal_df, porttrunkarea_df, switch_models_df, alias_df,
                                oui_df, fdmi_df, blade_module_df,  blade_servers_df, blade_vc_df,
                                synergy_module_df, synergy_servers_df, system_3par_df, port_3par_df,
                                re_pattern_lst, report_data_lst)

        # after finish display status
        status_info('ok', max_title, len(info))
        # show warning if any UNKNOWN device class was found, if any PortSymb or NodeSymb was not parsed,
        # or if a new switch was found
        portshow_force_flag, nsshow_unsplit_force_flag, expected_ag_links_force_flag = \
            warning_notification(portshow_aggregated_df, switch_params_aggregated_df,
            nsshow_unsplit_df, expected_ag_links_df, report_data_lst)
        # correct device names manually
        portshow_aggregated_df, device_rename_df = \
            devicename_correction_main(portshow_aggregated_df, device_rename_df,
                                        report_columns_usage_dct, report_data_lst)
        # count Device_Host_Name instances for fabric_label, label and total in fabric
        portshow_aggregated_df = device_ports_per_group(portshow_aggregated_df)

        # count device connection statistics
        info = 'Counting device connection statistics'
        print(info, end=" ")
        storage_connection_statistics_df = storage_connection_statistics(
            portshow_aggregated_df, re_pattern_lst)
        device_connection_statistics_df = device_connection_statistics(
            portshow_aggregated_df)
        status_info('ok', max_title, len(info))

        servers_report_df, storage_report_df, library_report_df, hba_report_df, \
            storage_connection_df,  library_connection_df, server_connection_df, npiv_report_df, \
                storage_connection_statistics_report_df, device_connection_statistics_report_df  = \
                    create_report_tables(portshow_aggregated_df, storage_connection_statistics_df,
                                            device_connection_statistics_df, data_names[5:-2],
                                            report_columns_usage_dct, max_title)
        # create list with partitioned DataFrames
        data_lst = [
            portshow_aggregated_df, storage_connection_statistics_df,
            device_connection_statistics_df, device_rename_df,
            report_columns_usage_dct, servers_report_df, storage_report_df,
            library_report_df, hba_report_df, storage_connection_df,
            library_connection_df, server_connection_df, npiv_report_df,
            storage_connection_statistics_report_df,
            device_connection_statistics_report_df
        ]

        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
        save_xlsx_file(nsshow_unsplit_df,
                       'nsshow_unsplit',
                       report_data_lst,
                       force_flag=nsshow_unsplit_force_flag)
        save_xlsx_file(expected_ag_links_df,
                       'expected_ag_links',
                       report_data_lst,
                       force_flag=expected_ag_links_force_flag)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        portshow_aggregated_df, storage_connection_statistics_df, device_connection_statistics_df, \
            device_rename_df, report_columns_usage_dct, \
                servers_report_df, storage_report_df, library_report_df, hba_report_df, \
                    storage_connection_df, library_connection_df, server_connection_df, npiv_report_df, \
                        storage_connection_statistics_report_df, device_connection_statistics_report_df \
                            = verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [
            portshow_aggregated_df, storage_connection_statistics_df,
            device_connection_statistics_df, device_rename_df,
            report_columns_usage_dct, servers_report_df, storage_report_df,
            library_report_df, hba_report_df, storage_connection_df,
            library_connection_df, server_connection_df, npiv_report_df,
            storage_connection_statistics_report_df,
            device_connection_statistics_report_df
        ]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        force_flag = False
        if data_name == 'portshow_aggregated':
            force_flag = portshow_force_flag
        save_xlsx_file(data_frame,
                       data_name,
                       report_data_lst,
                       force_flag=force_flag)
    return portshow_aggregated_df
Example #11
def zoning_analysis_main(switch_params_aggregated_df, portshow_aggregated_df,
                         cfg_df, zone_df, alias_df, cfg_effective_df,
                         fcrfabric_df, lsan_df, peerzone_df,
                         report_columns_usage_dct, report_data_lst):
    """Main function to analyze zoning configuration"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = [
        'zoning_aggregated', 'alias_aggregated', 'zonemember_statistics',
        'portshow_zoned_aggregated', 'alias_statistics',
        'effective_cfg_statistics', 'Зонирование', 'Псевдонимы',
        'Зонирование_A&B', 'Порты_не_в_зонах', 'Порты_без_псевдономов',
        'Отсутсвуют_в_сети', 'Статистика_зон', 'Статистика_псевдонимов',
        'Статистика_конфигурации'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    zoning_aggregated_df, alias_aggregated_df, zonemember_statistics_df, \
        portshow_zoned_aggregated_df, alias_statistics_df, effective_cfg_statistics_df, zoning_report_df, alias_report_df, \
            zoning_compare_report_df, unzoned_device_report_df, no_alias_device_report_df, zoning_absent_device_report_df,\
                zonemember_statistics_report_df,  alias_statistics_report_df, effective_cfg_statistics_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'cfg', 'cfg_effective', 'zone', 'alias', 'switch_params_aggregated',
        'switch_parameters', 'switchshow_ports', 'chassis_parameters',
        'portshow_aggregated', 'device_rename', 'report_columns_usage_upd',
        'portcmd', 'fdmi', 'nscamshow', 'nsshow', 'blade_servers',
        'fabric_labels'
    ]

    # force run when any data from data_lst was not saved (file not found) or
    # procedure execution explicitly requested for output data or data used during fn execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # current operation information string
        info = 'Generating zoning table'
        print(info, end=" ")

        # aggregated DataFrames
        zoning_aggregated_df, alias_aggregated_df \
            = zoning_aggregated(switch_params_aggregated_df, portshow_aggregated_df,
                                    cfg_df, zone_df, alias_df, cfg_effective_df, fcrfabric_df, lsan_df, peerzone_df, report_data_lst)

        # create comprehensive statistics DataFrame with Fabric summaries and
        # zones statistics DataFrame without summaries
        zonemember_statistics_df, zonemember_zonelevel_stat_df = zonemember_statistics(
            zoning_aggregated_df, report_data_lst)
        # add zoning statistics notes, zone duplicates and zone pairs to zoning aggregated DataFrame
        zoning_aggregated_df = statistics_to_aggregated_zoning(
            zoning_aggregated_df, zonemember_zonelevel_stat_df)
        # check all fabric devices (Wwnp) for usage in zoning configuration
        portshow_zoned_aggregated_df = verify_cfg_type(portshow_aggregated_df,
                                                       zoning_aggregated_df,
                                                       ['PortName'])
        # create alias configuration statistics
        alias_statistics_df = alias_dashboard(alias_aggregated_df,
                                              portshow_zoned_aggregated_df)
        # create Effective zoning configuration summary statistics
        effective_cfg_statistics_df = cfg_dashborad(
            zonemember_statistics_df, portshow_zoned_aggregated_df,
            zoning_aggregated_df, alias_aggregated_df)
        # after finish display status
        status_info('ok', max_title, len(info))

        # report tables
        zoning_report_df, alias_report_df, zoning_compare_report_df, \
            unzoned_device_report_df, no_alias_device_report_df, zoning_absent_device_report_df, \
                zonemember_statistics_report_df, alias_statistics_report_df, effective_cfg_statistics_report_df = \
                    zoning_report_main(zoning_aggregated_df, alias_aggregated_df, portshow_zoned_aggregated_df,
                                        zonemember_statistics_df, alias_statistics_df, effective_cfg_statistics_df,
                                        data_names, report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [
            zoning_aggregated_df, alias_aggregated_df,
            zonemember_statistics_df, portshow_zoned_aggregated_df,
            alias_statistics_df, effective_cfg_statistics_df, zoning_report_df,
            alias_report_df, zoning_compare_report_df,
            unzoned_device_report_df, no_alias_device_report_df,
            zoning_absent_device_report_df, zonemember_statistics_report_df,
            alias_statistics_report_df, effective_cfg_statistics_report_df
        ]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)

    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        zoning_aggregated_df, alias_aggregated_df, zonemember_statistics_df, \
            portshow_zoned_aggregated_df, alias_statistics_df, effective_cfg_statistics_df, zoning_report_df, alias_report_df, \
                zoning_compare_report_df, unzoned_device_report_df, no_alias_device_report_df, zoning_absent_device_report_df, \
                    zonemember_statistics_report_df, alias_statistics_report_df, effective_cfg_statistics_report_df \
                    = verify_data(report_data_lst, data_names, *data_lst)

        data_lst = [
            zoning_aggregated_df, alias_aggregated_df,
            zonemember_statistics_df, portshow_zoned_aggregated_df,
            alias_statistics_df, effective_cfg_statistics_df, zoning_report_df,
            alias_report_df, zoning_compare_report_df,
            unzoned_device_report_df, no_alias_device_report_df,
            zoning_absent_device_report_df, zonemember_statistics_report_df,
            alias_statistics_report_df, effective_cfg_statistics_report_df
        ]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return zoning_aggregated_df, alias_aggregated_df, portshow_zoned_aggregated_df
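
One plausible core of verify_cfg_type, which checks every fabric WWPN against the zoning configuration; this is a simplified sketch with invented data, while the real helper also distinguishes effective and defined configurations:

import pandas as pd

portshow_df = pd.DataFrame({'PortName': ['50:00:aa', '50:00:bb']})
zoned_wwns = pd.Series(['50:00:aa'])  # WWPNs found among zone members
portshow_df['cfg_type'] = portshow_df['PortName'].isin(zoned_wwns).map(
    {True: 'effective', False: 'unzoned'})
print(portshow_df)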
Example #12
def synergy_system_extract(synergy_folder, report_data_lst):
    """Function to extract blade systems information"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['synergy_interconnect', 'synergy_servers']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)

    # force run when any data from data_lst was not saved (file not found) or
    # procedure execution explicitly requested for output data or data used during fn execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title)
    if force_run:

        # column templates for the aggregated interconnect module and server
        # DataFrames collecting data for all blade systems during looping

        synergy_module_columns = [
            'Enclosure_Name', 'Enclosure_SN', 'Enclosure_Type',
            'Interconnect_Bay', 'Interconnect_Model', 'Interconnect_SN',
            'Interconnect_Firmware', 'Interconnect_Name', 'NodeName',
            'Device_Location'
        ]

        synergy_server_columns = [
            'Enclosure_Name', 'Enclosure_Slot', 'Host_Name', 'name',
            'serverprofilename', 'Device_Model', 'Device_SN', 'Host_OS',
            'HBA_Description', 'Mezz_type', 'Connected_portWwn',
            'Mezz_location', 'Device_Location', 'HBA_Firmware'
        ]

        synergy_module_aggregated_df = pd.DataFrame(
            columns=synergy_module_columns)
        synergy_servers_aggregated_df = pd.DataFrame(
            columns=synergy_server_columns)

        if synergy_folder:
            print('\nEXTRACTING SYNERGY SYSTEM INFORMATION ...\n')

            # collect files with xlsm extension in the folder
            synergy_config_lst = find_files(synergy_folder,
                                            max_title,
                                            filename_extension='xlsm')
            # number of files to check
            configs_num = len(synergy_config_lst)

            if configs_num:

                for i, synergy_config in enumerate(synergy_config_lst):
                    # file name with extension
                    configname_wext = os.path.basename(synergy_config)
                    # remove extension from filename
                    configname, _ = os.path.splitext(configname_wext)

                    # current operation information string
                    info = f'[{i+1} of {configs_num}]: {configname} system.'
                    print(info, end=" ")

                    syn_enclosure_df = pd.read_excel(synergy_config,
                                                     sheet_name='enclosures')
                    syn_module_df = pd.read_excel(
                        synergy_config, sheet_name='interconnectbays')

                    syn_server_hw_df = pd.read_excel(
                        synergy_config, sheet_name='server-hardware')
                    syn_server_fw_sw_df = pd.read_excel(
                        synergy_config, sheet_name='server-fw-sw')
                    syn_server_profile_connection_df = pd.read_excel(
                        synergy_config, sheet_name='server-prof-conn-details')

                    synergy_module_df = synergy_module(syn_enclosure_df,
                                                       syn_module_df)

                    if synergy_module_aggregated_df.empty:
                        synergy_module_aggregated_df = synergy_module_df
                    else:
                        synergy_module_aggregated_df = pd.concat(
                            [synergy_module_aggregated_df, synergy_module_df],
                            ignore_index=True)

                    synergy_server_wwn_df = synergy_server_wwn(
                        syn_server_hw_df)
                    synergy_profile_wwn_df = synergy_profile_wwn(
                        syn_server_profile_connection_df,
                        synergy_server_wwn_df)

                    # concatenate connection profile and server hardware
                    synergy_servers_df = pd.concat(
                        [synergy_server_wwn_df, synergy_profile_wwn_df],
                        ignore_index=True)
                    synergy_servers_df.drop_duplicates(inplace=True)

                    # add mezzanine firmware details
                    synergy_servers_df = synergy_mezz_fw(
                        syn_server_fw_sw_df, synergy_servers_df)
                    synergy_servers_df.sort_values(
                        by=['enclosurename', 'position', 'Mezz_WWPN'],
                        ignore_index=True,
                        inplace=True)

                    if synergy_servers_aggregated_df.empty:
                        synergy_servers_aggregated_df = synergy_servers_df
                    else:
                        synergy_servers_aggregated_df = pd.concat(
                            [
                                synergy_servers_aggregated_df,
                                synergy_servers_df
                            ],
                            ignore_index=True)

                    if synergy_module_aggregated_df['switchbasewwn'].notna(
                    ).any():
                        synergy_module_aggregated_df[
                            'switchbasewwn'] = synergy_module_aggregated_df[
                                'switchbasewwn'].str.lower()
                    if synergy_servers_aggregated_df['Mezz_WWPN'].notna().any(
                    ):
                        synergy_servers_aggregated_df[
                            'Mezz_WWPN'] = synergy_servers_aggregated_df[
                                'Mezz_WWPN'].str.lower()

                    if not synergy_servers_df.empty or not synergy_module_df.empty:
                        status_info('ok', max_title, len(info))
                    else:
                        status_info('no data', max_title, len(info))

                module_columns_dct = {
                    'enclosurename': 'Enclosure_Name',
                    'enclosure_serialnumber': 'Enclosure_SN',
                    'enclosuretype': 'Enclosure_Type',
                    'baynumber': 'Interconnect_Bay',
                    'interconnectmodel': 'Interconnect_Model',
                    'serialnumber': 'Interconnect_SN',
                    'switchfwversion': 'Interconnect_Firmware',
                    'hostname': 'Interconnect_Name',
                    'switchbasewwn': 'NodeName',
                    'device_location': 'Device_Location'
                }

                synergy_module_aggregated_df.rename(columns=module_columns_dct,
                                                    inplace=True)
                synergy_module_aggregated_df.replace(r'^None$|^none$|^ *$',
                                                     value=np.nan,
                                                     regex=True,
                                                     inplace=True)

                server_columns_dct = {
                    'enclosurename': 'Enclosure_Name',
                    'position': 'Enclosure_Slot',
                    'servername': 'Host_Name',
                    'model': 'Device_Model',
                    'serialnumber': 'Device_SN',
                    'oshint': 'Host_OS',
                    'Mezz': 'HBA_Description',
                    'Mezz_WWPN': 'Connected_portWwn',
                    'device_location': 'Device_Location',
                    'componentversion': 'HBA_Firmware'
                }

                synergy_servers_aggregated_df.rename(
                    columns=server_columns_dct, inplace=True)
                synergy_servers_aggregated_df.replace(r'^None$|^none$|^ *$',
                                                      value=np.nan,
                                                      regex=True,
                                                      inplace=True)

                data_lst = [
                    synergy_module_aggregated_df, synergy_servers_aggregated_df
                ]
                # save extracted data to json file
                save_data(report_data_lst, data_names, *data_lst)
        else:
            # current operation information string
            info = 'Collecting synergy details'
            print(info, end=" ")
            status_info('skip', max_title, len(info))
            data_lst = [
                synergy_module_aggregated_df, synergy_servers_aggregated_df
            ]
            # save empty data to json file
            save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty after first iteration and replace information string with empty DataFrame
    else:
        synergy_module_aggregated_df, synergy_servers_aggregated_df = verify_data(
            report_data_lst, data_names, *data_lst)
        data_lst = [
            synergy_module_aggregated_df, synergy_servers_aggregated_df
        ]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return synergy_module_aggregated_df, synergy_servers_aggregated_df
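
The empty-check-then-concat pattern inside the loop above can be expressed more idiomatically by collecting the per-file frames in a list and concatenating once at the end; a minimal equivalent sketch with invented frames:

import pandas as pd

per_config_frames = [pd.DataFrame({'slot': [1]}), pd.DataFrame({'slot': [2]})]
frames = [df for df in per_config_frames if not df.empty]
aggregated_df = (pd.concat(frames, ignore_index=True)
                 if frames else pd.DataFrame())
print(aggregated_df)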
Example #13
def fabric_main(fabricshow_ag_labels_df, chassis_params_df, \
    switch_params_df, maps_params_df, report_data_lst):
    """Main function to create tables"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = [
        'Коммутаторы', 'Фабрика', 'Глобальные_параметры_фабрики',
        'Параметры_коммутаторов', 'Лицензии'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    switches_report_df, fabric_report_df, global_fabric_parameters_report_df, \
        switches_parameters_report_df, licenses_report_df = data_lst

    # data force extract check
    # list of keys for each data item from data_lst indicating whether it is required
    # to re-collect or re-analyze data even if it was obtained on previous iterations
    force_extract_keys_lst = [
        report_steps_dct[data_name][1] for data_name in data_names
    ]
    # list with True (if data loaded) and/or False (if data was not found and None returned)
    data_check = force_extract_check(data_names, data_lst,
                                     force_extract_keys_lst, max_title)

    # flag if fabric labels were forced to be changed
    fabric_labels_change = bool(report_steps_dct['fabric_labels'][1])
    # initialize chassis information and fabric name columns usage flags
    report_columns_usage_dct = {
        'fabric_name_usage': True,
        'chassis_info_usage': True
    }
    # import data with switch models, firmware, etc.
    switch_models_df = dataframe_import('switch_models', max_title)
    # clean fabricshow DataFrame from unnecessary data
    fabric_clean_df = fabric_clean(fabricshow_ag_labels_df)
    # create aggregated table by joining DataFrames
    switch_params_aggregated_df, report_columns_usage_dct = \
        fabric_aggregation(fabric_clean_df, chassis_params_df, \
            switch_params_df, maps_params_df, switch_models_df)
    save_xlsx_file(switch_params_aggregated_df, 'switch_params_aggregated', \
        report_data_lst, report_type='analysis')

    # when no data is saved, the force extract flag is on or fabric labels have been changed,
    # then analyze extracted config data
    if not all(data_check) or any(
            force_extract_keys_lst) or fabric_labels_change:
        # information string if fabric labels force change was initiated
        # and statistics recounting is required
        if fabric_labels_change and not any(force_extract_keys_lst) and all(
                data_check):
            info = 'Switch information force extract due to change in fabric labeling'
            print(info, end=" ")
            status_info('ok', max_title, len(info))

        # partition aggregated DataFrame to required tables
        switches_report_df, fabric_report_df, global_fabric_parameters_report_df, \
            switches_parameters_report_df, licenses_report_df = \
                dataframe_segmentation(switch_params_aggregated_df, data_names, \
                    report_columns_usage_dct, max_title)

        # drop rows with empty switch name columns
        fabric_report_df.dropna(subset=['Имя коммутатора'], inplace=True)
        switches_parameters_report_df.dropna(subset=['Имя коммутатора'],
                                             inplace=True)
        licenses_report_df.dropna(subset=['Имя коммутатора'], inplace=True)

        # parameters are equal for all switches in one fabric
        if report_columns_usage_dct['fabric_name_usage']:
            global_fabric_parameters_report_df.drop_duplicates(
                subset=['Фабрика', 'Подсеть'], inplace=True)
        else:
            global_fabric_parameters_report_df.drop_duplicates(
                subset=['Подсеть'], inplace=True)
        global_fabric_parameters_report_df.reset_index(inplace=True, drop=True)

        # create list with partitioned DataFrames
        data_lst = [switches_report_df, fabric_report_df, global_fabric_parameters_report_df, \
            switches_parameters_report_df, licenses_report_df]

        # current operation information string
        info = 'Generating Fabric and Switches tables'
        print(info, end=" ")
        # after finish display status
        status_info('ok', max_title, len(info))

        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)

    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return switch_params_aggregated_df, report_columns_usage_dct, fabric_clean_df
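# Global fabric parameters repeat on every switch row of the aggregated table, so the
# drop_duplicates() step above collapses them to one row per fabric (or per subnet when
# fabric names are not used). A minimal sketch with hypothetical data:
import pandas as pd

global_params_demo_df = pd.DataFrame({'Фабрика': ['A', 'A', 'B'],
                                      'Подсеть': ['10.0.1.0', '10.0.1.0', '10.0.2.0'],
                                      'Timeout': [2000, 2000, 2000]})
global_params_demo_df.drop_duplicates(subset=['Фабрика', 'Подсеть'], inplace=True)
global_params_demo_df.reset_index(inplace=True, drop=True)
print(global_params_demo_df)  # one row per (fabric, subnet) pair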
Example No. 14
def isl_main(fabricshow_ag_labels_df, switch_params_aggregated_df, report_columns_usage_dct, 
    isl_df, trunk_df, fcredge_df, portshow_df, sfpshow_df, portcfgshow_df, switchshow_ports_df, report_data_lst):
    """Main function to create ISL and IFR report tables"""
    
    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['isl_aggregated', 'isl_statistics', 'Межкоммутаторные_соединения', 'Межфабричные_соединения', 'Статистика_ISL']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')
    
    # load data if they were saved on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = ['isl', 'trunk', 'fcredge', 'sfpshow', 'portcfgshow', 
                            'chassis_parameters', 'switch_parameters', 'switchshow_ports', 
                            'maps_parameters', 'blade_interconnect', 'fabric_labels']

    # force run when any data from data_lst was not saved (file not found) or 
    # procedure execution explicitly requested for output data or data used during fn execution  
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:

        # data imported from init file (regular expression patterns) to extract values from data columns
        # re_pattern list contains comp_keys, match_keys, comp_dct    
        _, _, *re_pattern_lst = data_extract_objects('common_regex', max_title)

        # current operation information string
        info = 'Generating ISL and IFL tables'
        print(info, end=" ")

        # get aggregated DataFrames
        isl_aggregated_df, fcredge_df = \
            isl_aggregated(fabricshow_ag_labels_df, switch_params_aggregated_df,
                           isl_df, trunk_df, fcredge_df, portshow_df, sfpshow_df,
                           portcfgshow_df, switchshow_ports_df, re_pattern_lst)

        isl_statistics_df = isl_statistics(isl_aggregated_df, re_pattern_lst, report_data_lst)

        # after finish display status
        status_info('ok', max_title, len(info))      

        # partition aggregated DataFrame to required tables
        isl_report_df, = dataframe_segmentation(isl_aggregated_df, [data_names[2]], report_columns_usage_dct, max_title)
        isl_report_df = translate_values(isl_report_df, translate_dct={'Yes': 'Да', 'No': 'Нет'})
        # if there are no trunks in the fabric, drop trunk columns
        if trunk_df.empty:
            isl_report_df.drop(columns=['Идентификатор транка', 'Deskew', 'Master'], inplace=True)
        # check if IFL table required
        if not fcredge_df.empty:
            ifl_report_df, = dataframe_segmentation(fcredge_df, [data_names[3]], report_columns_usage_dct, max_title)
        else:
            ifl_report_df = fcredge_df.copy()

        isl_statistics_report_df = isl_statistics_report(isl_statistics_df, report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df = \
            verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return isl_aggregated_df, isl_statistics_df
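# translate_values() above localises report cell values ('Yes'/'No' to 'Да'/'Нет').
# Its source is not shown here; assuming it wraps DataFrame.replace, a minimal
# stand-in (hypothetical simplification, not the project's actual helper) would be:
import pandas as pd

def translate_values_sketch(df, translate_dct):
    """Map cell values according to translate_dct (hypothetical simplification)."""
    return df.replace(translate_dct)

isl_demo_df = pd.DataFrame({'Trunk': ['Yes', 'No', 'Yes']})
print(translate_values_sketch(isl_demo_df, {'Yes': 'Да', 'No': 'Нет'}))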
Example No. 15
def fabricstatistics_main(portshow_aggregated_df, switchshow_ports_df,
                          fabricshow_ag_labels_df, nscamshow_df, portshow_df,
                          report_columns_usage_dct, report_data_lst):
    """Main function to count Fabrics statistics"""

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names to save data obtained after current module execution
    data_names = ['fabric_statistics', 'Статистика_фабрики']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were obtained on previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    fabric_statistics_df, fabric_statistics_report_df = data_lst

    # list of data to analyze from report_info table
    analyzed_data_names = [
        'portcmd', 'switchshow_ports', 'switch_params_aggregated',
        'portshow_aggregated', 'switch_parameters', 'chassis_parameters',
        'fdmi', 'nscamshow', 'nsshow', 'alias', 'blade_servers',
        'fabric_labels'
    ]

    chassis_column_usage = report_columns_usage_dct['chassis_info_usage']
    force_run = verify_force_run(data_names, data_lst, report_steps_dct,
                                 max_title, analyzed_data_names)
    if force_run:
        # current operation information string
        info = 'Counting up Fabrics statistics'
        print(info, end=" ")

        fabric_statistics_df = statisctics_aggregated(portshow_aggregated_df,
                                                      switchshow_ports_df,
                                                      fabricshow_ag_labels_df,
                                                      nscamshow_df,
                                                      portshow_df,
                                                      report_data_lst)
        # after finish display status
        status_info('ok', max_title, len(info))
        # get report DataFrame
        fabric_statistics_report_df = statistics_report(
            fabric_statistics_df, chassis_column_usage, max_title)
        # create list with partitioned DataFrames
        data_lst = [fabric_statistics_df, fabric_statistics_report_df]
        # saving data to json or csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        fabric_statistics_df, fabric_statistics_report_df = \
            verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [fabric_statistics_df, fabric_statistics_report_df]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return fabric_statistics_df
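# Every *_main function above follows the same load-or-recompute contract: load_data()
# returns previously cached DataFrames, verify_force_run() (or the force-extract checks)
# decides whether to rebuild them, and save_data() refreshes the cache. A minimal sketch
# of that contract with a hypothetical JSON cache file (the project's real helpers take
# report_data_lst and data names instead of a path):
import os
import pandas as pd

def load_or_build(cache_path, build_fn):
    """Return the cached DataFrame if the cache file exists, else build and cache it."""
    if os.path.exists(cache_path):
        return pd.read_json(cache_path)
    df = build_fn()          # expensive aggregation step
    df.to_json(cache_path)   # refresh the cache for the next run
    return df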