def process(workbook: Any, contents: list) -> None:
    """Process Storage Inventory worksheet.

    Builds one row per XML content document: hostname/model/OS from the
    component details, the maximum node count seen across commands, and the
    first parsed dedupe-stats row.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    """
    worksheet_name = 'Storage Inventory'
    worksheet = workbook.get_sheet_by_name(worksheet_name)
    # Fixed columns followed by whatever columns the dedupe template parses.
    headers = list(concat([
        ['Hostname', 'Model', 'OS', 'Nodes'],
        get_parser_header(DEDUPE_TMPL)
    ]))
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)
    rows = []
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        # Both values carry over between entries: an entry without the
        # relevant command output keeps the previously collected value.
        dedupe, nodes = [], 0  # type: (list, int)
        for entry in command_details:
            nodes_content = collected_data(
                entry, 'cmd', 'isi storagepool nodepools list')
            # First parsed field of each nodepool row is the node count;
            # keep the maximum across all nodepool listings.
            nodes = max(map(compose(int, itemgetter(0)), run_parser_over(
                nodes_content, NODES_TMPL))) if nodes_content else nodes
            dedupe_content = collected_data(entry, 'cmd', 'isi dedupe stats')
            dedupe = run_parser_over(
                dedupe_content, DEDUPE_TMPL) if dedupe_content else dedupe
        # NOTE(review): a parse result with a single row is replaced by a
        # blank 6-field row here (len > 1, not >= 1) — presumably the first
        # parsed row is a header/placeholder; confirm against DEDUPE_TMPL.
        dedupe = dedupe if len(dedupe) > 1 else [['', '', '', '', '', '']]
        # Only the first dedupe row is reported for the host.
        rows.append([
            component_details['hostname'], component_details['model'],
            component_details['os'], str(nodes), *dedupe[0]
        ])
    # Write rows starting at spreadsheet row 2 (row 1 holds the header);
    # columns are addressed by ASCII letter, so this assumes <= 26 columns.
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n
    # Register the populated range as a named table on the sheet.
    sheet_process_output(
        worksheet,
        'StorageInventoryTable',
        'Storage Inventory',
        final_col,
        final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Drive List worksheet.

    Emits one row per drive per host, prefixed with the hostname, and warns
    on stdout when malformed JSON payloads were skipped.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    """
    worksheet_name = 'Drive List'
    worksheet = workbook.get_sheet_by_name(worksheet_name)
    # headers[0:2] (hostname/cluster) are contextual; headers[2:] name the
    # JSON fields extracted by process_drives.
    headers = [
        'hostname', 'cluster', 'lnum', 'baynum', 'lnn',
        'purpose_description', 'blocks', 'serial', 'wwn',
        'logical_block_length', 'ui_state', 'physical_block_length',
        'firmware/desired_firmware', 'firmware/current_firmware', 'id',
        'media_type', 'interface_type', 'handle', 'devname', 'chassis',
        'purpose', 'y_loc', 'x_loc', 'present', 'locnstr', 'model'
    ]
    build_header(worksheet, headers)
    rows, errors = [], 0  # type: list, int
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        drive_list = []  # type: Iterable
        host = component_details['hostname']
        for entry in command_details:
            # suppress(TypeError) skips entries whose structure does not
            # match what collected_data/process_drives expect.
            with suppress(TypeError):
                drives_content = collected_data(
                    entry, 'cmd',
                    'isi_for_array isi devices list --format?json')
                # Keep the previous drive_list when this entry produced no
                # output; local_errors counts unparseable JSON chunks.
                drive_list, local_errors = process_drives(
                    drives_content,
                    headers[2:]) if drives_content else [drive_list, 0]
                errors += local_errors
        rows += [[host] + row for row in drive_list]
    if errors != 0:
        print('{} bad jsons found in {}, '
              'some data will not be found in the output!'.format(
                  errors, worksheet_name))
    # column_format maps the ordinal past 'Z' correctly (unlike plain chr),
    # which this wide sheet needs.
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(rows, 2):
        for col_n, col_value in \
                enumerate(row_tuple, ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                # Multi-valued fields are rendered one value per line.
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n
    sheet_process_output(worksheet,
                         'DriveListTable',
                         'Drive List',
                         final_col,
                         final_row)
def process(workbook: Any, contents: list) -> list:
    """Process SMB Shares List worksheet.

    Writes one (hostname, share name, path) row per share and returns the
    collected shares augmented with a protocol tag for downstream use.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    :return: list of [hostname, share, path, 'SMB'] rows
    """
    worksheet_name = 'SMB Shares List'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = ['Hostname', 'ShareName', 'Path']
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: Any
    bad_rows = 0
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        host = component_details['hostname']
        shares = []  # type: list
        for entry in command_details:
            shares_content = collected_data(
                entry, 'cmd', 'isi smb shares list --format?csv -a -z')
            # Re-parse when this entry carries CSV output; otherwise keep
            # (a copy of) what was collected so far.
            if shares_content:
                shares = list(csv.reader(shares_content.split('\n')))
            else:
                shares = list(shares)
            # Count malformed CSV records: more than one field but not the
            # expected two-field (name, path) shape.
            bad_rows += sum(
                1 for record in shares
                if len(record) != 2 and len(record) > 1)
        rows += [[host] + squash(record, 0, -1)
                 for record in shares if len(record) == 2]

    final_col = final_row = 0
    for row_n, raw_row in enumerate(rows, 2):
        record = RowTuple._make(raw_row)
        for col_n, col_value in enumerate(
                record._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet,
                         'SMBSharesListTable',
                         'SMB Shares List',
                         final_col,
                         final_row)
    if bad_rows:
        print('{} bad rows in smb shares csv!'.format(bad_rows))
    return [[row[0], row[1].split('/')[-1], row[1], 'SMB'] for row in rows]
def process(workbook: Any, contents: list) -> None:
    """Process Quotas worksheet.

    Writes one quota row per host, prefixed with the hostname.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    """
    worksheet_name = 'Quotas'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = [
        'Hostname', 'Path', 'Type', 'AppliesTo', 'Hard', 'Soft', 'Advisory',
        'PhysicalUsed', 'LogicalUsed', 'InodesCount'
    ]
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        host = component_details['hostname']
        quotas = []  # type: Iterable
        for entry in command_details:
            quotas_content = collected_data(
                entry, 'cmd', 'isi quota quotas list --format?json')
            # Entries without quota output keep the previously parsed list.
            if quotas_content:
                quotas = quotas_json(json.loads(quotas_content))
        rows.extend([host] + quota_row for quota_row in quotas)

    final_col = final_row = 0
    for row_n, raw_row in enumerate(rows, 2):
        record = RowTuple._make(raw_row)
        for col_n, col_value in enumerate(
                record._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'QuotasTable', 'Quotas', final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Sync Policies worksheet.

    Writes one SyncIQ policy row per host, prefixed with the hostname.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    """
    worksheet_name = 'Sync Policies'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    # headers[1:] name the JSON fields extracted by ordered_jsons.
    headers = [
        'hostname', 'name', 'schedule', 'source_root_path', 'enabled',
        'target_path', 'last_success', 'action', 'id', 'target_host'
    ]
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        host = component_details['hostname']
        sync_policies = []  # type: Iterable
        for entry in command_details:
            policies_content = collected_data(
                entry, 'cmd', 'isi sync policies list --format?json')
            # Entries without policy output keep the previously parsed list.
            if policies_content:
                sync_policies = ordered_jsons(
                    json.loads(policies_content), headers[1:])
        rows.extend([host] + policy_row for policy_row in sync_policies)

    final_col = final_row = 0
    for row_n, raw_row in enumerate(rows, 2):
        record = RowTuple._make(raw_row)
        for col_n, col_value in enumerate(
                record._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet,
                         'SyncPoliciesTable',
                         'Sync Policies',
                         final_col,
                         final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Storage Pool Summary worksheet.

    Writes one storage-pool row per host, prefixed with the hostname.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    """
    worksheet_name = 'Storage Pool Summary'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    # Column names after 'Hostname' come from the pool parser template.
    headers = ['Hostname'] + get_parser_header(POOL_TMPL)
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        host = component_details['hostname']
        pool = []  # type: list
        for entry in command_details:
            pool_content = collected_data(
                entry, 'cmd', 'isi storagepool list --format?list')
            # Entries without pool output keep the previously parsed rows.
            if pool_content:
                pool = run_parser_over(pool_content, POOL_TMPL)
        rows.extend([host] + pool_row for pool_row in pool)

    final_col = final_row = 0
    for row_n, raw_row in enumerate(rows, 2):
        record = RowTuple._make(raw_row)
        for col_n, col_value in enumerate(
                record._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet,
        'StoragePoolSummaryTable',
        'Storage Pool Summary',
        final_col,
        final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Zone List worksheet.

    Writes one access-zone row per host, prefixed with the hostname.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    """
    worksheet_name = 'Zone List'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = ['Hostname', 'Name', 'Path']
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        host = component_details['hostname']
        zones = []  # type: Iterable
        for entry in command_details:
            zones_content = collected_data(
                entry, 'cmd', 'isi zone zones list --format?list')
            # Entries without zone output keep the previously parsed rows.
            if zones_content:
                zones = run_parser_over(zones_content, ZONES_TMPL)
        rows.extend([host] + zone_row for zone_row in zones)

    final_col = final_row = 0
    for row_n, raw_row in enumerate(rows, 2):
        record = RowTuple._make(raw_row)
        for col_n, col_value in enumerate(
                record._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'ZoneListTable', 'Zone List', final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process HW Status worksheet.

    Writes one hardware-status row per host, prefixed with the hostname.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    """
    worksheet_name = 'HW Status'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = ['Hostname', 'Cluster', 'Component', 'Status']
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        host = component_details['hostname']
        hw_statuses = []  # type: Iterable
        for entry in command_details:
            status_content = collected_data(
                entry, 'cmd', 'isi_for_array isi_hw_status')
            # Entries without status output keep the previously split rows.
            if status_content:
                hw_statuses = hw_split(status_content)
        rows.extend([host] + status_row for status_row in hw_statuses)

    final_col = final_row = 0
    for row_n, raw_row in enumerate(rows, 2):
        record = RowTuple._make(raw_row)
        for col_n, col_value in enumerate(
                record._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet,
                         'HWStatusTable',
                         'HW Status',
                         final_col,
                         final_row)
def process(workbook: Any, contents: list) -> None:
    """Process SMB Shares by Zone worksheet.

    Combines plain-zone and alias-zone share listings per host and writes
    one row per share, prefixed with the hostname.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    """
    worksheet_name = 'SMB Shares by Zone'
    worksheet = workbook.get_sheet_by_name(worksheet_name)
    # headers[1:] name the JSON fields extracted by process_smb_zones.
    headers = [
        'hostname', 'file_create_mode', 'ntfs_acl_support',
        'file_filtering_enabled', 'directory_create_mask',
        'inheritable_path_acl', 'ca_write_integrity', 'file_create_mask',
        'path', 'access_based_enumeration_root_only',
        'allow_variable_expansion', 'id', 'hide_dot_files',
        'create_permissions', 'strict_locking', 'mangle_map', 'zid',
        'file_filter_type', 'browsable', 'description',
        'auto_create_directory', 'continuously_available',
        'directory_create_mode', 'allow_delete_readonly', 'ca_timeout',
        'access_based_enumeration', 'allow_execute_always', 'csc_policy',
        'permissions/permission_type', 'permissions/trustee/id',
        'permissions/permission', 'name', 'change_notify',
        'mangle_byte_start', 'impersonate_user', 'impersonate_guest',
        'oplocks', 'strict_flush', 'strict_ca_lockout'
    ]
    build_header(worksheet, headers)
    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        smb_zone, smb_alias_zone = [], []  # type: Iterable, Iterable
        host = component_details['hostname']
        for entry in command_details:
            # suppress(TypeError) skips entries whose structure does not
            # match what the collected_* helpers expect.
            with suppress(TypeError):
                smb_zone_alias_content = collected_alias_data(
                    entry, 'cmd',
                    'isi smb shares list by zone **format?json')
                smb_zone_content = collected_data(
                    entry, 'cmd',
                    'isi smb shares list by zone **format?json')
                # BUG FIX: when an entry has no alias content, keep the
                # previously collected alias rows (was `else smb_zone`,
                # which aliased the plain-zone rows and duplicated them in
                # the concatenation below). Mirrors the NFS-by-zone sheet.
                smb_alias_zone = process_smb_zones(
                    smb_zone_alias_content, headers[1:]) \
                    if smb_zone_alias_content else smb_alias_zone
                smb_zone = process_smb_zones(
                    ''.join(smb_zone_content), headers[1:]) \
                    if smb_zone_content else smb_zone
        # Plain-zone shares first, then alias-zone shares.
        smb_zone = list(smb_zone) + list(smb_alias_zone)
        rows += [[host] + row for row in smb_zone]
    # column_format maps ordinals past 'Z' correctly, which this wide
    # sheet needs; final_col tracks the widest row written.
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(rows, 2):
        for col_n, col_value in enumerate(row_tuple, ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                # Multi-valued fields are rendered one value per line.
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n if col_n > final_col else final_col
        final_row = row_n
    sheet_process_output(worksheet,
                         'SMBSharesByZoneTable',
                         'SMB Shares by Zone',
                         final_col,
                         final_row)
def process(workbook: Any, contents: list) -> None:
    """Process NFS Exports by Zone worksheet.

    Combines plain-zone and alias-zone export listings per host and writes
    one row per export, prefixed with the hostname.

    :param workbook: openpyxl-style workbook containing the target sheet
    :param contents: list of XML documents (one per host) to parse
    """
    worksheet_name = 'NFS Exports by Zone'
    worksheet = workbook.get_sheet_by_name(worksheet_name)
    # headers[1:] name the JSON fields extracted by process_nfs_zones.
    headers = [
        'hostname', 'encoding', 'map_retry', 'security_flavors',
        'write_unstable_reply', 'write_filesync_reply',
        'readdirplus_prefetch', 'block_size', 'write_transfer_size', 'id',
        'description', 'max_file_size', 'paths', 'write_unstable_action',
        'zone', 'name_max_size', 'can_set_time', 'read_transfer_multiple',
        'return_32bit_file_ids', 'write_transfer_multiple', 'all_dirs',
        'setattr_asynchronous', 'map_failure/primary_group',
        'map_failure/enabled', 'map_failure/secondary_groups',
        'map_failure/user', 'link_max', 'write_datasync_reply',
        'no_truncate', 'time_delta', 'snapshot', 'read_only',
        'map_lookup_uid', 'chown_restricted', 'write_datasync_action',
        'read_transfer_size', 'map_full', 'read_transfer_max_size',
        'map_root/primary_group', 'map_root/enabled',
        'map_root/secondary_groups', 'map_root/user',
        'map_non_root/primary_group', 'map_non_root/enabled',
        'map_non_root/secondary_groups', 'map_non_root/user', 'symlinks',
        'commit_asynchronous', 'write_filesync_action', 'case_insensitive',
        'readdirplus', 'write_transfer_max_size', 'directory_transfer_size',
        'case_preserving', 'read_only_clients', 'clients'
    ]
    build_header(worksheet, headers)
    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        nfs_zone, nfs_alias_zone = [], []  # type: Iterable, Iterable
        host = component_details['hostname']
        for entry in command_details:
            # suppress(TypeError) skips entries whose structure does not
            # match what the collected_* helpers expect.
            with suppress(TypeError):
                nfs_zone_alias_content = collected_alias_data(
                    entry, 'cmd',
                    'isi nfs exports list by zone **format?json')
                nfs_zone_content = collected_data(
                    entry, 'cmd',
                    'isi nfs exports list by zone **format?json')
                # Entries without content keep the previously parsed rows.
                nfs_alias_zone = process_nfs_zones(
                    nfs_zone_alias_content, headers[1:]) \
                    if nfs_zone_alias_content else nfs_alias_zone
                nfs_zone = process_nfs_zones(
                    ''.join(nfs_zone_content), headers[1:]) \
                    if nfs_zone_content else nfs_zone
        # Plain-zone exports first, then alias-zone exports.
        nfs_zone = list(nfs_zone) + list(nfs_alias_zone)
        rows += [[host] + row for row in nfs_zone]
    # column_format maps ordinals past 'Z' correctly, which this wide
    # sheet needs.
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(rows, 2):
        for col_n, col_value in enumerate(row_tuple, ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                # Multi-valued fields are rendered one value per line.
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            # CONSISTENCY FIX: track the widest row (was a plain
            # assignment, so a shorter final row could shrink the table
            # range). Matches the SMB-by-zone sheet.
            final_col = col_n if col_n > final_col else final_col
        final_row = row_n
    sheet_process_output(worksheet,
                         'NFSExportsByZoneTable',
                         'NFS Exports by Zone',
                         final_col,
                         final_row)