def process(workbook: Any, content: str) -> None:
    """Process NAS_License worksheet

    :param workbook: target workbook to populate
    :param content: raw collected text to run the parser over
    """
    sheet = workbook.get_sheet_by_name('NAS_License')
    header_names = get_parser_header(NAS_LICENSE_TMPL)
    RowTuple = namedtuple('RowTuple', header_names)  # pylint: disable=invalid-name
    build_header(sheet, header_names)

    parsed_rows = run_parser_over(content, NAS_LICENSE_TMPL)

    last_col = last_row = 0
    # Data rows start at worksheet row 2 (row 1 holds the header).
    for row_idx, record in enumerate(map(RowTuple._make, parsed_rows), 2):
        # Columns are addressed by letter, starting at 'A'.
        for col_idx, value in enumerate(record._asdict().values(), ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'NASLicenseTable', 'NAS_License',
                         last_col, last_row)
def process(workbook: Any, content: str) -> None:
    """Process NAS Summary worksheet

    :param workbook: target workbook to populate
    :param content: raw collected text to run the parser over
    """
    sheet = workbook.get_sheet_by_name('NAS Summary')
    header_names = get_parser_header(NAS_SUMMARY_TMPL)
    RowTuple = namedtuple('RowTuple', header_names)  # pylint: disable=invalid-name
    build_header(sheet, header_names)

    parsed_rows = run_parser_over(content, NAS_SUMMARY_TMPL)

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(map(RowTuple._make, parsed_rows), 2):
        for col_idx, value in enumerate(record._asdict().values(), ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            if isinstance(value, str):
                cell.value = value.strip()
            else:
                # Multi-valued fields are rendered one item per line.
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'NASSummaryTable', 'NAS Summary',
                         last_col, last_row)
def process(workbook: Any, content: str) -> None:
    """Process Backend Storage SP DETAILS worksheet

    :param workbook: target workbook to populate
    :param content: raw collected text to run the parser over
    """
    sheet = workbook.get_sheet_by_name('Backend Storage SP DETAILS')
    header_names = get_parser_header(BACKEND_DETAILS_TMPL)
    RowTuple = namedtuple('RowTuple', header_names)  # pylint: disable=invalid-name
    build_header(sheet, header_names)

    parsed_rows = run_parser_over(content, BACKEND_DETAILS_TMPL)

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(map(RowTuple._make, parsed_rows), 2):
        for col_idx, value in enumerate(record._asdict().values(), ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'BackendStorageSPDETAILSTable',
                         'Backend Storage SP DETAILS', last_col, last_row)
def process(workbook: Any, content: str) -> None:
    """Process Disk Groups worksheet

    :param workbook: target workbook to populate
    :param content: raw collected text to run the parser over
    """
    sheet = workbook.get_sheet_by_name('Disk Groups')
    header_names = get_parser_header(DISK_GROUPS_TMPL)
    RowTuple = namedtuple('RowTuple', header_names)  # pylint: disable=invalid-name
    build_header(sheet, header_names)

    parsed_rows = run_parser_over(content, DISK_GROUPS_TMPL)

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(map(RowTuple._make, parsed_rows), 2):
        for col_idx, value in enumerate(record._asdict().values(), ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            # Column 'B' must stay textual; everything else may be numeric.
            if chr(col_idx) != 'B':
                set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'DiskGroupsTable', 'Disk Groups',
                         last_col, last_row)
def process(workbook: Any, content: str) -> None:
    """Process Disks (3Par) worksheet

    :param workbook: target workbook to populate
    :param content: raw collected text to run the parser over
    """
    sheet = workbook.get_sheet_by_name('Disks')
    header_names = get_parser_header(SHOWPD_TMPL)
    # The namedtuple must be created from the raw parser headers: the
    # display names below contain '(' and ')' and are not valid identifiers.
    RowTuple = namedtuple('RowTuple', header_names)  # pylint: disable=invalid-name
    header_names[7], header_names[8], header_names[11] = \
        'Total(MB)', 'Free(MB)', 'Cap(GB)'
    build_header(sheet, header_names)

    parsed_rows = run_parser_over(content, SHOWPD_TMPL)

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(map(RowTuple._make, parsed_rows), 2):
        for col_idx, value in enumerate(record._asdict().values(), ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'DisksTable', 'Disks', last_col, last_row)
def process(workbook: Any, content: str) -> None:
    """Process Storage Array Summary (3Par) worksheet

    :param workbook: target workbook to populate
    :param content: raw collected text to run the parser over
    """
    sheet = workbook.get_sheet_by_name('Storage Array Summary')
    header_names = get_parser_header(SHOWSYS_TMPL)
    RowTuple = namedtuple('RowTuple', header_names)  # pylint: disable=invalid-name
    build_header(sheet, header_names)

    parsed_rows = run_parser_over(content, SHOWSYS_TMPL)

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(map(RowTuple._make, parsed_rows), 2):
        for col_idx, value in enumerate(record._asdict().values(), ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'StorageArraySummaryTable',
                         'Storage Array Summary', last_col, last_row)
def process(workbook: Any, contents: list) -> None:
    """Process Drive List worksheet

    :param workbook: target workbook to populate
    :param contents: list of XML documents, one per collected node
    """
    worksheet_name = 'Drive List'
    sheet = workbook.get_sheet_by_name(worksheet_name)
    headers = [
        'hostname', 'cluster', 'lnum', 'baynum', 'lnn',
        'purpose_description', 'blocks', 'serial', 'wwn',
        'logical_block_length', 'ui_state', 'physical_block_length',
        'firmware/desired_firmware', 'firmware/current_firmware', 'id',
        'media_type', 'interface_type', 'handle', 'devname', 'chassis',
        'purpose', 'y_loc', 'x_loc', 'present', 'locnstr', 'model'
    ]
    build_header(sheet, headers)

    rows, errors = [], 0  # type: list, int
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        host = component_details['hostname']
        drive_list = []  # type: Iterable
        for entry in command_details:
            # Entries without a 'cmd' section raise TypeError; skip them.
            with suppress(TypeError):
                drives_content = collected_data(
                    entry, 'cmd',
                    'isi_for_array isi devices list --format?json')
                if drives_content:
                    drive_list, local_errors = process_drives(
                        drives_content, headers[2:])
                    errors += local_errors
        rows += [[host] + row for row in drive_list]

    if errors:
        print('{} bad jsons found in {}, '
              'some data will not be found in the output!'.format(
                  errors, worksheet_name))

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(rows, 2):
        for col_idx, value in enumerate(record, ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            if isinstance(value, str):
                cell.value = value.strip()
            else:
                # Multi-valued fields are rendered one item per line.
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'DriveListTable', 'Drive List',
                         last_col, last_row)
def process(workbook: Any, content: str) -> None:
    """Process SMB, NFS, Multiprotocol worksheets

    :param workbook: target workbook to populate
    :param content: raw collected text to run the parser over
    """
    sheets = ['SMB', 'NFS', 'Multiprotocol']
    # Per-sheet column headers (also the namedtuple fields for each sheet).
    row_tuples = {
        'SMB': [
            'ArrayName', 'DataMover', 'ShareName', 'SharePath', 'RootPath',
            'Type', 'umask', 'maxusr', 'netbios', 'comment'
        ],
        'NFS': [
            'Hostname', 'Server', 'MountedPath', 'FileSystem', 'Type', 'rw',
            'root', 'access'
        ],
        'Multiprotocol': ['Hostname', 'Server', 'MountedPath', 'Type'],
    }

    server_export_out = run_parser_over(content, SERVER_EXPORT_TMPL)
    # Group by the 'Type' column (index 3), then split into the three sheets.
    server_export_grouped = groupby(itemgetter(3), server_export_out)
    share, export, multi = classify_rows(server_export_grouped)

    for sheet_name, data_list in zip(sheets, [share, export, multi]):
        sheet = workbook.get_sheet_by_name(sheet_name)
        build_header(sheet, row_tuples[sheet_name])
        RowTuple = namedtuple('RowTuple', row_tuples[sheet_name])

        last_col = last_row = 0
        for row_idx, record in enumerate(map(RowTuple._make, data_list), 2):
            for col_idx, value in \
                    enumerate(record._asdict().values(), ord('A')):
                cell = sheet[column_format(col_idx) + str(row_idx)]
                if isinstance(value, str):
                    cell.value = value.strip()
                else:
                    # Multi-valued fields are rendered one item per line.
                    cell.alignment = Alignment(wrapText=True)
                    cell.value = '\n'.join(value)
                style_value_cell(cell)
                set_cell_to_number(cell)
                last_col, last_row = col_idx, row_idx

        sheet_process_output(sheet, '{}Table'.format(sheet_name), sheet_name,
                             last_col, last_row)
def process(workbook: Any, content: str) -> None:
    """Process Hosts (3Par) worksheet

    :param workbook: target workbook to populate
    :param content: raw collected text to run the parsers over
    """
    sheet = workbook.get_sheet_by_name('Hosts')
    # Combined header: base host columns plus the detail columns
    # (the first four detail columns duplicate the host identity).
    headers = list(concat([
        get_parser_header(SHOWHOST_TMPL),
        get_parser_header(SHOWHOST_LINES_TMPL)[4:],
    ]))
    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name
    build_header(sheet, headers)

    hosts_by_id = groupby(
        itemgetter(0, 1, 2, 3, 4),
        run_parser_over(content, SHOWHOST_TMPL))
    host_lines_by_id = groupby(
        itemgetter(0, 1, 2, 3),
        run_parser_over(content, SHOWHOST_LINES_TMPL))

    rows = []
    for key in hosts_by_id:
        # Hosts with no matching detail lines raise KeyError; skip them.
        with suppress(KeyError):
            for base_row, detail_row in zip(hosts_by_id[key],
                                            host_lines_by_id[key[:-1]]):
                rows.append(base_row + detail_row[4:])

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(map(RowTuple._make, rows), 2):
        for col_idx, value in enumerate(record._asdict().values(), ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'HostsTable', 'Hosts', last_col, last_row)
def process(workbook: Any, contents: list) -> None:
    """Process PStat worksheet

    :param workbook: target workbook to populate
    :param contents: list of XML documents, one per collected node
    """
    worksheet_name = 'PStat'
    sheet = workbook.get_sheet_by_name(worksheet_name)
    headers = ['Hostname'] + get_parser_header(PSTAT_TMPL)
    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name
    build_header(sheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        host = component_details['hostname']
        stats = []  # type: list
        for entry in command_details:
            # Entries without a 'cmd' section raise TypeError; skip them.
            with suppress(TypeError):
                stats_content = collected_alias_data(
                    entry, 'cmd', 'isi statistics pstat')
                if stats_content:
                    stats = run_parser_over(stats_content, PSTAT_TMPL)
        rows += [[host] + row for row in stats]

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(map(RowTuple._make, rows), 2):
        for col_idx, value in enumerate(record._asdict().values(), ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'PStatTable', 'PStat', last_col, last_row)
def process(workbook: Any, content: str) -> None:
    """Process System Details worksheet

    :param workbook: target workbook to populate
    :param content: raw collected text to run the parser over
    """
    sheet = workbook.get_sheet_by_name('System Details')
    header_names = get_parser_header(SYSTEM_DETAILS_TMPL)
    RowTuple = namedtuple('RowTuple', header_names)  # pylint: disable=invalid-name
    build_header(sheet, header_names)

    parsed_rows = run_parser_over(content, SYSTEM_DETAILS_TMPL)

    # Replace the third- and second-to-last fields of each entry with the
    # distinct detail values and their occurrence counts, respectively.
    for entry in parsed_rows:
        counts = Counter(entry[-3])
        entry[-3:-1] = list(counts), [str(total) for total in counts.values()]

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(map(RowTuple._make, parsed_rows), 2):
        for col_idx, value in enumerate(record._asdict().values(), ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            if isinstance(value, str):
                cell.value = value.strip()
            else:
                # Multi-valued fields are rendered one item per line.
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'SystemDetailsTable', 'System Details',
                         last_col, last_row)
def process(workbook: Any, contents: list) -> None:
    """Process SMB Shares by Zone worksheet

    :param workbook: target workbook to populate
    :param contents: list of XML documents, one per collected node
    """
    worksheet_name = 'SMB Shares by Zone'
    worksheet = workbook.get_sheet_by_name(worksheet_name)
    headers = [
        'hostname', 'file_create_mode', 'ntfs_acl_support',
        'file_filtering_enabled', 'directory_create_mask',
        'inheritable_path_acl', 'ca_write_integrity', 'file_create_mask',
        'path', 'access_based_enumeration_root_only',
        'allow_variable_expansion', 'id', 'hide_dot_files',
        'create_permissions', 'strict_locking', 'mangle_map', 'zid',
        'file_filter_type', 'browsable', 'description',
        'auto_create_directory', 'continuously_available',
        'directory_create_mode', 'allow_delete_readonly', 'ca_timeout',
        'access_based_enumeration', 'allow_execute_always', 'csc_policy',
        'permissions/permission_type', 'permissions/trustee/id',
        'permissions/permission', 'name', 'change_notify',
        'mangle_byte_start', 'impersonate_user', 'impersonate_guest',
        'oplocks', 'strict_flush', 'strict_ca_lockout'
    ]
    build_header(worksheet, headers)
    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        smb_zone, smb_alias_zone = [], []  # type: Iterable, Iterable
        host = component_details['hostname']
        for entry in command_details:
            # Entries without a 'cmd' section raise TypeError; skip them.
            with suppress(TypeError):
                smb_zone_alias_content = collected_alias_data(
                    entry, 'cmd', 'isi smb shares list by zone **format?json')
                smb_zone_content = collected_data(
                    entry, 'cmd', 'isi smb shares list by zone **format?json')
                # BUGFIX: the fallback previously kept `smb_zone` instead of
                # `smb_alias_zone`, which duplicated every non-alias row when
                # no alias data was collected (cf. the matching NFS routine).
                smb_alias_zone = process_smb_zones(
                    smb_zone_alias_content, headers[1:]) \
                    if smb_zone_alias_content else smb_alias_zone
                smb_zone = process_smb_zones(
                    ''.join(smb_zone_content), headers[1:]) \
                    if smb_zone_content else smb_zone
        smb_zone = list(smb_zone) + list(smb_alias_zone)
        rows += [[host] + row for row in smb_zone]
    final_col, final_row = 0, 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_n, row_tuple in enumerate(rows, 2):
        for col_n, col_value in enumerate(row_tuple, ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                # Multi-valued fields are rendered one item per line.
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            # Rows can be ragged, so track the widest row seen.
            final_col = max(final_col, col_n)
            final_row = row_n
    sheet_process_output(worksheet, 'SMBSharesByZoneTable',
                         'SMB Shares by Zone', final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process NFS Exports by Zone worksheet

    :param workbook: target workbook to populate
    :param contents: list of XML documents, one per collected node
    """
    worksheet_name = 'NFS Exports by Zone'
    sheet = workbook.get_sheet_by_name(worksheet_name)
    headers = [
        'hostname', 'encoding', 'map_retry', 'security_flavors',
        'write_unstable_reply', 'write_filesync_reply',
        'readdirplus_prefetch', 'block_size', 'write_transfer_size', 'id',
        'description', 'max_file_size', 'paths', 'write_unstable_action',
        'zone', 'name_max_size', 'can_set_time', 'read_transfer_multiple',
        'return_32bit_file_ids', 'write_transfer_multiple', 'all_dirs',
        'setattr_asynchronous', 'map_failure/primary_group',
        'map_failure/enabled', 'map_failure/secondary_groups',
        'map_failure/user', 'link_max', 'write_datasync_reply',
        'no_truncate', 'time_delta', 'snapshot', 'read_only',
        'map_lookup_uid', 'chown_restricted', 'write_datasync_action',
        'read_transfer_size', 'map_full', 'read_transfer_max_size',
        'map_root/primary_group', 'map_root/enabled',
        'map_root/secondary_groups', 'map_root/user',
        'map_non_root/primary_group', 'map_non_root/enabled',
        'map_non_root/secondary_groups', 'map_non_root/user', 'symlinks',
        'commit_asynchronous', 'write_filesync_action', 'case_insensitive',
        'readdirplus', 'write_transfer_max_size', 'directory_transfer_size',
        'case_preserving', 'read_only_clients', 'clients'
    ]
    build_header(sheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        host = component_details['hostname']
        nfs_zone, nfs_alias_zone = [], []  # type: Iterable, Iterable
        for entry in command_details:
            # Entries without a 'cmd' section raise TypeError; skip them.
            with suppress(TypeError):
                alias_content = collected_alias_data(
                    entry, 'cmd', 'isi nfs exports list by zone **format?json')
                zone_content = collected_data(
                    entry, 'cmd', 'isi nfs exports list by zone **format?json')
                if alias_content:
                    nfs_alias_zone = process_nfs_zones(
                        alias_content, headers[1:])
                if zone_content:
                    nfs_zone = process_nfs_zones(
                        ''.join(zone_content), headers[1:])
        nfs_zone = list(nfs_zone) + list(nfs_alias_zone)
        rows += [[host] + row for row in nfs_zone]

    last_col = last_row = 0
    # Data rows start at worksheet row 2; columns are lettered from 'A'.
    for row_idx, record in enumerate(rows, 2):
        for col_idx, value in enumerate(record, ord('A')):
            cell = sheet[column_format(col_idx) + str(row_idx)]
            if isinstance(value, str):
                cell.value = value.strip()
            else:
                # Multi-valued fields are rendered one item per line.
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            last_col, last_row = col_idx, row_idx

    sheet_process_output(sheet, 'NFSExportsByZoneTable',
                         'NFS Exports by Zone', last_col, last_row)