# Example 1
def switchs_params_report(switch_params_aggregated_df, data_names,
                          report_columns_usage_dct, max_title):
    """Function to create switch related report tables"""

    # split the aggregated DataFrame into the four switch-related tables
    (switches_report_df, fabric_report_df, switches_parameters_report_df,
     licenses_report_df) = dataframe_segmentation(
         switch_params_aggregated_df, data_names[2:-1],
         report_columns_usage_dct, max_title)

    # global parameters are identical for every switch in one fabric,
    # so only Principal switches need to be inspected
    principal_mask = switch_params_aggregated_df['switchRole'] == 'Principal'
    principal_switches_df = switch_params_aggregated_df.loc[principal_mask].copy()
    global_fabric_parameters_report_df, = dataframe_segmentation(
        principal_switches_df, data_names[-1],
        report_columns_usage_dct, max_title)

    # remove rows that carry no switch name
    for report_df in (fabric_report_df, switches_parameters_report_df,
                      licenses_report_df):
        report_df.dropna(subset=['Имя коммутатора'], inplace=True)

    # drop the Fabric ID column when all switches share a single value
    if fabric_report_df['Fabric ID'].dropna().nunique() == 1:
        fabric_report_df.drop(columns=['Fabric ID'], inplace=True)
    # drop the fabric name column when it is completely empty
    if fabric_report_df['Название фабрики'].isna().all():
        fabric_report_df.drop(columns=['Название фабрики'], inplace=True)

    global_fabric_parameters_report_df.reset_index(inplace=True, drop=True)

    return switches_report_df, fabric_report_df, switches_parameters_report_df, \
        licenses_report_df, global_fabric_parameters_report_df
# Example 2
def unzoned_device_report(portshow_cfg_aggregated_df, data_names,
                          report_columns_usage_dct, max_title):
    """
    Function to check all fabric devices for usage in zoning configuration and
    check if all fabric devices have aliases.
    Create unzoned devices and no aliases reports.
    """

    # switch and virtual connect ports are not part of zoning configuration by definition
    mask_not_switch_vc = ~portshow_cfg_aggregated_df.deviceType.isin(
        ['SWITCH', 'VC'])
    # show online ports only
    mask_online = portshow_cfg_aggregated_df['portState'] == 'Online'
    # Access Gateway switch connection information is excessive
    mask_native = portshow_cfg_aggregated_df['switchMode'] == 'Native'
    # show ports which are not part of any configuration
    mask_not_zoned = portshow_cfg_aggregated_df['cfg_type'].isna()
    # show devices that have no aliases
    mask_no_alias = portshow_cfg_aggregated_df['alias'].isna()

    unzoned_device_df = portshow_cfg_aggregated_df.loc[mask_native
                                                       & mask_online
                                                       & mask_not_switch_vc
                                                       & mask_not_zoned]
    # NOTE(review): the original called unzoned_device_df.dropna(axis='columns',
    # how='all') without assigning the result, which is a no-op (dropna returns
    # a new DataFrame). The matching call for no_alias_device_df was commented
    # out, so the dead call is removed here; if dropping empty columns is
    # actually desired, the result must be assigned back.

    no_alias_device_df = portshow_cfg_aggregated_df.loc[mask_native
                                                        & mask_online
                                                        & mask_not_switch_vc
                                                        & mask_no_alias]
    # create report DataFrames: slice required columns and translate headers
    # pylint: disable=unbalanced-tuple-unpacking
    unzoned_device_report_df, = dataframe_segmentation(
        unzoned_device_df, data_names[0], report_columns_usage_dct, max_title)
    no_alias_device_report_df, = dataframe_segmentation(
        no_alias_device_df, data_names[1], report_columns_usage_dct, max_title)

    return unzoned_device_report_df, no_alias_device_report_df
# Example 3
def storage_host_report(storage_host_aggregated_df, data_names,
                        report_columns_usage_dct, max_title):
    """Function to create storage_host and storage_host fabric_label comparison DataFrames"""

    # nothing to report on when no aggregated data exists
    if storage_host_aggregated_df.empty:
        return pd.DataFrame(), pd.DataFrame()

    storage_host_report_df = storage_host_aggregated_df.copy()
    # keep host-storage pairs located in the same fabric
    # or with the host imported into the storage fabric
    local_imported_mask = storage_host_aggregated_df['Fabric_host_status'].isin(
        ['local', 'remote_imported'])
    storage_host_valid_df = storage_host_aggregated_df.loc[local_imported_mask].copy()

    # drop uninformative columns from both tables
    storage_host_report_df = clean_storage_host(storage_host_report_df)
    storage_host_valid_df = clean_storage_host(storage_host_valid_df)

    # slice required columns and translate column names
    segmentation_args = (data_names[1:2], report_columns_usage_dct, max_title)
    storage_host_report_df, = dataframe_segmentation(storage_host_report_df,
                                                     *segmentation_args)
    storage_host_valid_df, = dataframe_segmentation(storage_host_valid_df,
                                                    *segmentation_args)

    # translate Yes/No cell values
    yes_no_translation = {'Yes': 'Да', 'No': 'Нет'}
    storage_host_report_df = translate_values(storage_host_report_df,
                                              yes_no_translation)
    storage_host_valid_df = translate_values(storage_host_valid_df,
                                             yes_no_translation)

    # build the fabric-label comparison table from the valid rows
    storage_host_compare_report_df = dataframe_slice_concatenate(
        storage_host_valid_df, column='Подсеть')
    return storage_host_report_df, storage_host_compare_report_df
# Example 4
def create_report_tables(port_complete_df, data_names,
                         report_columns_usage_dct, max_title):
    """Function to create required report DataFrames out of aggregated DataFrame"""

    # split the aggregated DataFrame into the three report tables
    # pylint: disable=unbalanced-tuple-unpacking
    errors_report_df, sfp_report_df, portcfg_report_df = dataframe_segmentation(
        port_complete_df, data_names, report_columns_usage_dct, max_title)

    # discard columns that contain no data at all
    for report_df in (errors_report_df, sfp_report_df, portcfg_report_df):
        report_df.dropna(axis=1, how='all', inplace=True)

    # keep only rows where an SFP module is actually installed
    sfp_installed_mask = ~sfp_report_df['Vendor Name'].str.contains(
        'No SFP module', na=False)
    sfp_report_df = sfp_report_df.loc[sfp_installed_mask]

    return errors_report_df, sfp_report_df, portcfg_report_df
# Example 5
def create_report(aggregated_df, data_name, translate_dct,
                  report_columns_usage_dct, max_title):
    """
    Auxiliary function to remove unnecessary columns from aggregated DataFrame and
    extract required columns and create report dataframe
    """

    # pylint: disable=unbalanced-tuple-unpacking
    cleaned_df = drop_columns(aggregated_df, report_columns_usage_dct)
    # translate status/note values into the report language
    note_columns = [
        'Fabric_device_status',
        'Target_Initiator_note',
        'Target_model_note',
        'Effective_cfg_usage_note',
    ]
    cleaned_df = translate_values(cleaned_df, translate_dct, note_columns)
    # slice the columns required for the report out of the cleaned DataFrame
    report_df, = dataframe_segmentation(cleaned_df, data_name,
                                        report_columns_usage_dct, max_title)
    return report_df
# Example 6
def absent_device(zoning_aggregated_df, data_name, translate_dct,
                  report_columns_usage_dct, max_title):
    """Function to create table with absent and unavailable remote devices in zoning configuration"""

    # rows whose remote device is missing or unreachable
    absent_mask = zoning_aggregated_df.Fabric_device_status.isin(
        ['absent', 'remote_na'])
    columns = [
        'Fabric_name', 'Fabric_label', 'cfg', 'cfg_type', 'zone_member',
        'alias_member', 'Fabric_device_status', 'zonemember_Fabric_name',
        'zonemember_Fabric_label', 'zone'
    ]
    absent_device_df = zoning_aggregated_df.loc[absent_mask, columns]
    # collapse duplicate device rows, joining the names of the zones
    # each device appears in into a single comma-separated cell
    group_columns = columns[:-1]
    absent_device_df = absent_device_df.groupby(
        group_columns, as_index=False, dropna=False).agg({'zone': ', '.join})
    absent_device_df = translate_values(absent_device_df, translate_dct,
                                        ['Fabric_device_status'])
    # slice required columns and translate column names
    zoning_absent_device_report_df, = dataframe_segmentation(
        absent_device_df, data_name, report_columns_usage_dct, max_title)
    return zoning_absent_device_report_df
# Example 7
def raslog_report(raslog_frequent_df, data_names, report_columns_usage_dct,
                  max_title):
    """Function to check if it is required to use chassis_name columns.

    RASLog sometimes uses its own chname, not equal to the switchname or
    chassis name, so it is better to keep the default chassis names visible
    even if dropping the chassis_name column was allowed earlier.
    """

    # operate on a copy so the caller's default settings stay untouched
    report_columns_usage_upd_dct = dict(report_columns_usage_dct)

    # when chassis_name would normally be dropped, keep it anyway
    # unless every chassis name matches its switch name
    if not report_columns_usage_upd_dct['chassis_info_usage']:
        names_match = (raslog_frequent_df.chassis_name ==
                       raslog_frequent_df.switchName)
        if not names_match.all():
            report_columns_usage_upd_dct['chassis_info_usage'] = True

    # slice required columns and translate column names
    raslog_report_df, = dataframe_segmentation(
        raslog_frequent_df, [data_names[2]],
        report_columns_usage_upd_dct, max_title)
    return raslog_report_df
# Example 8
def fabric_main(fabricshow_ag_labels_df, chassis_params_df, \
    switch_params_df, maps_params_df, report_data_lst):
    """Main function to create Fabric and Switches report tables.

    Aggregates chassis, switch and MAPS parameters into one DataFrame,
    partitions it into the report tables listed in data_names, saves the
    results, and returns the aggregated DataFrame together with the column
    usage settings and the cleaned fabricshow DataFrame.
    """

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names used to save the data obtained by this module
    data_names = [
        'Коммутаторы', 'Фабрика', 'Глобальные_параметры_фабрики',
        'Параметры_коммутаторов', 'Лицензии'
    ]
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')

    # load data if they were saved on a previous program execution iteration
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    switches_report_df, fabric_report_df, global_fabric_parameters_report_df, \
        switches_parameters_report_df, licenses_report_df = data_lst

    # data force extract check
    # list of keys for each data from data_lst representing if it is required
    # to re-collect or re-analyze data even if it was obtained on previous iterations
    force_extract_keys_lst = [
        report_steps_dct[data_name][1] for data_name in data_names
    ]
    # list with True (if data loaded) and/or False (if data was not found and None returned)
    data_check = force_extract_check(data_names, data_lst,
                                     force_extract_keys_lst, max_title)

    # flag showing whether fabric labels were forced to change
    fabric_labels_change = True if report_steps_dct['fabric_labels'][
        1] else False
    # initialization of chassis information and fabric name column usage
    report_columns_usage_dct = {
        'fabric_name_usage': True,
        'chassis_info_usage': True
    }
    # import data with switch models, firmware etc.
    switch_models_df = dataframe_import('switch_models', max_title)
    # clean fabricshow DataFrame from unnecessary data
    fabric_clean_df = fabric_clean(fabricshow_ag_labels_df)
    # create aggregated table by joining DataFrames
    switch_params_aggregated_df, report_columns_usage_dct = \
        fabric_aggregation(fabric_clean_df, chassis_params_df, \
            switch_params_df, maps_params_df, switch_models_df)
    save_xlsx_file(switch_params_aggregated_df, 'switch_params_aggregated', \
        report_data_lst, report_type = 'analysis')

    # when no data is saved, the force extract flag is on, or fabric labels
    # have been changed, analyze the extracted config data
    if not all(data_check) or any(
            force_extract_keys_lst) or fabric_labels_change:
        # information string if fabric labels force change was initiated
        # and statistics recounting is required
        if fabric_labels_change and not any(force_extract_keys_lst) and all(
                data_check):
            info = f'Switch information force extract due to change in Fabrics labeling'
            print(info, end=" ")
            status_info('ok', max_title, len(info))

        # partition aggregated DataFrame to required tables
        switches_report_df, fabric_report_df, global_fabric_parameters_report_df, \
            switches_parameters_report_df, licenses_report_df = \
                dataframe_segmentation(switch_params_aggregated_df, data_names, \
                    report_columns_usage_dct, max_title)

        # drop rows with empty switch name columns
        fabric_report_df.dropna(subset=['Имя коммутатора'], inplace=True)
        switches_parameters_report_df.dropna(subset=['Имя коммутатора'],
                                             inplace=True)
        licenses_report_df.dropna(subset=['Имя коммутатора'], inplace=True)

        # parameters are equal for all switches in one fabric, so keep one
        # row per fabric (keyed with or without the fabric name column)
        if report_columns_usage_dct['fabric_name_usage']:
            global_fabric_parameters_report_df.drop_duplicates(
                subset=['Фабрика', 'Подсеть'], inplace=True)
        else:
            global_fabric_parameters_report_df.drop_duplicates(
                subset=['Подсеть'], inplace=True)
        global_fabric_parameters_report_df.reset_index(inplace=True, drop=True)

        # create list with partitioned DataFrames
        data_lst = [switches_report_df, fabric_report_df, global_fabric_parameters_report_df, \
            switches_parameters_report_df, licenses_report_df]

        # current operation information string
        info = f'Generating Fabric and Switches tables'
        print(info, end=" ")
        # after finish display status
        status_info('ok', max_title, len(info))

        # saving DataFrames to csv file
        save_data(report_data_lst, data_names, *data_lst)
        # save_data(report_data_lst, data_auxillary_names, *data_auxillary_lst)

    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return switch_params_aggregated_df, report_columns_usage_dct, fabric_clean_df
# Example 9
def isl_main(fabricshow_ag_labels_df, switch_params_aggregated_df, report_columns_usage_dct, 
    isl_df, trunk_df, fcredge_df, portshow_df, sfpshow_df, portcfgshow_df, switchshow_ports_df, report_data_lst):
    """Main function to create ISL and IFL report tables.

    Aggregates inter-switch link, trunk and FC-router edge data, computes
    ISL statistics, partitions the results into report tables, saves them,
    and returns the aggregated ISL DataFrame and the statistics DataFrame.
    """

    # report_data_lst contains information:
    # customer_name, dir_report, dir to save obtained data, max_title, report_steps_dct
    *_, max_title, report_steps_dct = report_data_lst

    # names used to save the data obtained by this module
    data_names = ['isl_aggregated', 'isl_statistics', 'Межкоммутаторные_соединения', 'Межфабричные_соединения', 'Статистика_ISL']
    # service step information
    print(f'\n\n{report_steps_dct[data_names[0]][3]}\n')
    
    # loading data if they were saved on previous iterations
    data_lst = load_data(report_data_lst, *data_names)
    # unpacking DataFrames from the loaded list with data
    # pylint: disable=unbalanced-tuple-unpacking
    isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df = data_lst

    # list of data to analyze from the report_info table
    analyzed_data_names = ['isl', 'trunk', 'fcredge', 'sfpshow', 'portcfgshow', 
                            'chassis_parameters', 'switch_parameters', 'switchshow_ports', 
                            'maps_parameters', 'blade_interconnect', 'fabric_labels']

    # force run when any data from data_lst was not saved (file not found) or
    # procedure execution was explicitly requested for output data or data used during fn execution
    force_run = verify_force_run(data_names, data_lst, report_steps_dct, 
                                            max_title, analyzed_data_names)
    if force_run:

        # data imported from the init file (regular expression patterns) to extract values from data columns
        # re_pattern list contains comp_keys, match_keys, comp_dct
        _, _, *re_pattern_lst = data_extract_objects('common_regex', max_title)

        # current operation information string
        info = f'Generating ISL and IFL tables'
        print(info, end =" ")

        # get aggregated DataFrames
        isl_aggregated_df, fcredge_df = \
            isl_aggregated(fabricshow_ag_labels_df, switch_params_aggregated_df, 
            isl_df, trunk_df, fcredge_df, portshow_df, sfpshow_df, portcfgshow_df, switchshow_ports_df, re_pattern_lst)

        isl_statistics_df = isl_statistics(isl_aggregated_df, re_pattern_lst, report_data_lst)

        # after finish display status
        status_info('ok', max_title, len(info))      

        # partition aggregated DataFrame to required tables
        isl_report_df, = dataframe_segmentation(isl_aggregated_df, [data_names[2]], report_columns_usage_dct, max_title)
        isl_report_df = translate_values(isl_report_df, translate_dct={'Yes': 'Да', 'No': 'Нет'})
        # if there are no trunks in the fabric, drop the trunk columns
        if trunk_df.empty:
            isl_report_df.drop(columns = ['Идентификатор транка', 'Deskew', 'Master'], inplace = True)
        # check if the IFL table is required (only when FC-router edge links exist)
        if not fcredge_df.empty:
            ifl_report_df, = dataframe_segmentation(fcredge_df, [data_names[3]], report_columns_usage_dct, max_title)
        else:
            ifl_report_df = fcredge_df.copy()

        isl_statistics_report_df = isl_statistics_report(isl_statistics_df, report_columns_usage_dct, max_title)

        # create list with partitioned DataFrames
        data_lst = [isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df]
        # saving fabric_statistics and fabric_statistics_summary DataFrames to csv file
        save_data(report_data_lst, data_names, *data_lst)
    # verify if loaded data is empty and replace information string with empty DataFrame
    else:
        isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df = \
            verify_data(report_data_lst, data_names, *data_lst)
        data_lst = [isl_aggregated_df, isl_statistics_df, isl_report_df, ifl_report_df, isl_statistics_report_df]
    # save data to service file if it's required
    for data_name, data_frame in zip(data_names, data_lst):
        save_xlsx_file(data_frame, data_name, report_data_lst)

    return isl_aggregated_df, isl_statistics_df