def process(workbook: Any, contents: list) -> None:
    """Process Storage Inventory worksheet

    Builds one row per host: hostname/model/OS/node-count plus the first
    parsed dedupe-stats row.

    :param workbook: workbook containing the 'Storage Inventory' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet_name = 'Storage Inventory'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    # fixed columns followed by the columns declared in the dedupe template
    headers = list(concat([
        ['Hostname', 'Model', 'OS', 'Nodes'],
        get_parser_header(DEDUPE_TMPL)
    ]))
    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    rows = []
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        dedupe, nodes = [], 0  # type: (list, int)
        for entry in command_details:
            nodes_content = collected_data(
                entry, 'cmd', 'isi storagepool nodepools list')
            # node count = max of the first parsed column across nodepools;
            # keep the previous value when the command output is missing
            nodes = max(map(compose(int, itemgetter(0)), run_parser_over(
                nodes_content, NODES_TMPL))) if nodes_content else nodes
            dedupe_content = collected_data(entry, 'cmd', 'isi dedupe stats')
            dedupe = run_parser_over(
                dedupe_content, DEDUPE_TMPL) if dedupe_content else dedupe
        # fall back to a blank 6-field row when dedupe stats were absent
        # NOTE(review): condition is `> 1`, so a single parsed dedupe row is
        # also replaced by blanks — looks intentional but verify
        dedupe = dedupe if len(dedupe) > 1 else [['', '', '', '', '', '']]
        rows.append([
            component_details['hostname'], component_details['model'],
            component_details['os'], str(nodes), *dedupe[0]
        ])

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet,
        'StorageInventoryTable',
        'Storage Inventory',
        final_col,
        final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Drive List worksheet

    Writes one row per drive per host; list-valued fields are rendered as
    multi-line wrapped cells.

    :param workbook: workbook containing the 'Drive List' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet_name = 'Drive List'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = [
        'hostname', 'cluster', 'lnum', 'baynum', 'lnn',
        'purpose_description', 'blocks', 'serial', 'wwn',
        'logical_block_length', 'ui_state', 'physical_block_length',
        'firmware/desired_firmware', 'firmware/current_firmware', 'id',
        'media_type', 'interface_type', 'handle', 'devname', 'chassis',
        'purpose', 'y_loc', 'x_loc', 'present', 'locnstr', 'model'
    ]
    build_header(worksheet, headers)

    rows, errors = [], 0  # type: (list, int)
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        drive_list = []  # type: Iterable
        host = component_details['hostname']
        for entry in command_details:
            # entries without the expected structure raise TypeError; skip
            with suppress(TypeError):
                drives_content = collected_data(
                    entry, 'cmd',
                    'isi_for_array isi devices list --format?json')
                # keep previous drive_list (and 0 new errors) when no output
                drive_list, local_errors = process_drives(
                    drives_content,
                    headers[2:]) if drives_content else [drive_list, 0]
                errors += local_errors
        rows += [[host] + row for row in drive_list]

    if errors != 0:
        print('{} bad jsons found in {}, '
              'some data will not be found in the output!'.format(
                  errors, worksheet_name))

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(rows, 2):
        for col_n, col_value in \
                enumerate(row_tuple, ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                # list-valued field: one item per line, wrapped
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'DriveListTable', 'Drive List',
                         final_col, final_row)
def process(workbook: Any, contents: list) -> list:
    """Process SMB Shares List worksheet

    :param workbook: workbook containing the 'SMB Shares List' sheet
    :param contents: XML report payloads, one per host
    :return: list of [hostname, share name, path, 'SMB'] rows for reuse
        by other worksheets
    """
    worksheet_name = 'SMB Shares List'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = ['Hostname', 'ShareName', 'Path']
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: Any
    bad_rows = 0
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        shares = []  # type: list
        host = component_details['hostname']
        for entry in command_details:
            shares_content = collected_data(
                entry, 'cmd', 'isi smb shares list --format?csv -a -z')
            shares = list(
                csv.reader(shares_content.split('\n')
                           ) if shares_content else shares)
            # a csv row with more than one field but not exactly two is
            # considered malformed and only counted, not emitted
            bad_rows += len(
                list(filter(lambda x: len(x) != 2 and len(x) > 1, shares)))
        rows += [[host] + squash(row, 0, -1)
                 for row in filter(lambda x: len(x) == 2, shares)]

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'SMBSharesListTable', 'SMB Shares List',
                         final_col, final_row)

    if bad_rows:
        print('{} bad rows in smb shares csv!'.format(bad_rows))

    # share name is the last path component of the share path
    return [[row[0], row[1].split('/')[-1], row[1], 'SMB'] for row in rows]
def process(workbook: Any, contents: list) -> None:
    """Process Virtual Disks worksheet

    Fills two side-by-side tables: family data (columns from 'A') and
    per-host presentation data (columns from 'E').

    :param workbook: workbook containing the 'Virtual Disks' sheet
    :param contents: XML report payloads
    """
    worksheet = workbook.get_sheet_by_name('Virtual Disks')

    family_excel_header = ['FamilyName', 'OperationalState', 'TotalSnapshots']
    host_excel_header = [
        'FamilyName', 'AllocatedCapacity', 'HostName', 'HostOSMode'
    ]
    FamilyTuple = namedtuple('FamilyTuple', family_excel_header)
    HostTuple = namedtuple('HostTuple', host_excel_header)

    build_header(worksheet, family_excel_header)
    build_header(worksheet, host_excel_header, 'E')

    # lower-case lookup keys matching the parsed XML structure
    family_header = ['familyname', 'operationalstate', 'totalsnapshots']
    host_header = [
        'familyname', 'allocatedcapacity', 'presentation/hostname',
        'presentation/hostosmode'
    ]

    family_data = []  # type: list
    host_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        family_data.extend(
            ordered_jsons(search_tag_value(parsed, 'object'), family_header))
        # host fields live under nested 'presentation' dicts, so flatten
        flattened = [
            flatten_dict(det)
            for det in search_tag_value(parsed, 'object')
        ]
        host_data.extend(ordered_jsons(flattened, host_header))

    final_col, final_row = write_excel(family_data, worksheet, FamilyTuple,
                                       'A')
    sheet_process_output(worksheet, 'FamilyTable', 'Virtual Disks',
                         final_col, final_row)

    final_col, final_row = write_excel(host_data, worksheet, HostTuple, 'E')
    sheet_process_output(worksheet, 'HostTable', 'Virtual Disks', final_col,
                         final_row, start_col=ord('E'))
def process(workbook: Any, contents: list) -> None:
    """Process Disk Group worksheet

    Fills two side-by-side tables: disk-group data (columns from 'A') and
    ungrouped-object data (columns from 'I').

    :param workbook: workbook containing the 'Disk Group' sheet
    :param contents: XML report payloads
    """
    worksheet = workbook.get_sheet_by_name('Disk Group')

    disk_excel_header = [
        'DiskGroupName', 'TotalDisks', 'DiskDriveType',
        'RequestedSparePolicy', 'CurrentSparePolicy', 'TotalStorageSpaceGB',
        'UsedStorageSpaceGB'
    ]
    object_excel_header = [
        'ObjectName', 'TotalUngroupedDisks', 'XMLCapacityGB'
    ]
    DiskTuple = namedtuple('DiskTuple', disk_excel_header)
    ObjectTuple = namedtuple('ObjectTuple', object_excel_header)

    build_header(worksheet, disk_excel_header)
    build_header(worksheet, object_excel_header, 'I')

    # lower-case lookup keys matching the parsed XML structure
    disk_header = [
        'diskgroupname', 'totaldisks', 'diskdrivetype',
        'requestedsparepolicy', 'currentsparepolicy', 'totalstoragespacegb',
        'usedstoragespacegb'
    ]
    object_header = ['objectname', 'totalungroupeddisks', 'xmlcapacitygb']

    disk_data = []  # type: list
    object_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        disk_data.extend(
            ordered_jsons(search_tag_value(parsed, 'object'), disk_header))
        object_data.extend(
            ordered_jsons(search_tag_value(parsed, 'object'), object_header))

    final_col, final_row = write_excel(disk_data, worksheet, DiskTuple, 'A')
    sheet_process_output(worksheet, 'DiskTable', 'Disk Group', final_col,
                         final_row)

    final_col, final_row = write_excel(object_data, worksheet, ObjectTuple,
                                       'I')
    sheet_process_output(worksheet, 'ObjectTable', 'Disk Group', final_col,
                         final_row, start_col=ord('I'))
def process(workbook: Any, contents: list) -> None:
    """Process Quotas worksheet

    :param workbook: workbook containing the 'Quotas' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet = workbook.get_sheet_by_name('Quotas')

    headers = [
        'Hostname', 'Path', 'Type', 'AppliesTo', 'Hard', 'Soft', 'Advisory',
        'PhysicalUsed', 'LogicalUsed', 'InodesCount'
    ]
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        host = search_tag_value(doc, 'component_details')['hostname']
        quotas = []  # type: Iterable
        for entry in search_tag_value(doc, 'command_details'):
            payload = collected_data(
                entry, 'cmd', 'isi quota quotas list --format?json')
            # keep the last non-empty parse if a later entry has no output
            if payload:
                quotas = quotas_json(json.loads(payload))
        rows += [[host] + quota_row for quota_row in quotas]

    final_col = final_row = 0
    for row_idx, record in enumerate(rows, start=2):
        named = RowTuple._make(record)
        for col_idx, value in enumerate(named._asdict().values(),
                                        start=ord('A')):
            cell = worksheet['{}{}'.format(chr(col_idx), row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_idx
        final_row = row_idx

    sheet_process_output(
        worksheet, 'QuotasTable', 'Quotas', final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Hosts worksheet

    :param workbook: workbook containing the 'Hosts' sheet
    :param contents: XML report payloads
    """
    worksheet = workbook.get_sheet_by_name('Hosts')

    host_excel_header = [
        'HostName', 'OperationalState', 'OSMode', 'HostType',
        'VirtualDiskName'
    ]
    HostTuple = namedtuple('HostTuple', host_excel_header)
    build_header(worksheet, host_excel_header)

    # lower-case lookup keys; virtual disk name sits under 'presentation'
    host_header = [
        'hostname', 'operationalstate', 'osmode', 'hosttype',
        'presentation/virtualdiskname'
    ]

    disk_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        flattened = [
            flatten_dict(det)
            for det in search_tag_value(parsed, 'object')
        ]
        disk_data += ordered_jsons(flattened, host_header)

    final_col, final_row = write_excel(disk_data, worksheet, HostTuple, 'A')
    sheet_process_output(worksheet, 'HostsTable', 'Hosts', final_col,
                         final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Storage Inventory worksheet

    :param workbook: workbook containing the 'Storage Inventory' sheet
    :param contents: XML report payloads
    """
    worksheet = workbook.get_sheet_by_name('Storage Inventory')

    system_excel_header = [
        'SystemObjectName', 'SystemType', 'FirmwareVersion',
        'TotalStorageSpace', 'AvailableStorageSpace', 'UsedStorageSpace'
    ]
    SystemTuple = namedtuple('SystemTuple', system_excel_header)
    build_header(worksheet, system_excel_header)

    # lower-case lookup keys matching the parsed XML structure
    system_header = [
        'objectname', 'systemtype', 'firmwareversion', 'totalstoragespace',
        'availablestoragespace', 'usedstoragespace'
    ]

    system_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        system_data.extend(
            ordered_jsons(search_tag_value(parsed, 'object'), system_header))

    final_col, final_row = write_excel(system_data, worksheet, SystemTuple,
                                       'A')
    sheet_process_output(worksheet, 'SystemTable', 'Storage Inventory',
                         final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Controller worksheet

    :param workbook: workbook containing the 'Controller' sheet
    :param contents: XML report payloads
    """
    worksheet_name = 'Controller'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    excel_header = [
        'controllername', 'datablocksize', 'modelnumber', 'productnumber',
        'serialnumber', 'firmwareversion', 'cachecondition', 'readcapacity',
        'writecapacity', 'mirrorcapacity', 'portname', 'topology',
        'hostportaddress', 'switchtype'
    ]
    build_header(worksheet, excel_header)
    RowTuple = namedtuple('RowTuple', excel_header)

    # slash-separated lookup paths into the flattened XML dicts
    header = [
        'controllername', 'datablocksize', 'modelnumber', 'productnumber',
        'serialnumber', 'firmwareversion', 'cachememory/cachecondition',
        'cachememory/readcapacity', 'cachememory/writecapacity',
        'cachememory/mirrorcapacity', 'hostports/hostport/portname',
        'hostports/hostport/topology', 'hostports/hostport/hostportaddress',
        'deviceports/deviceport/switchtype'
    ]

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        raw_data = [
            flatten_dict(det_dict)
            for det_dict in search_tag_value(doc, 'object')
        ]
        for main_dict in raw_data:
            # only dicts exposing the first six controller fields are
            # actual controller objects
            entry = list(ordered_jsons([main_dict], header[:6]))
            if entry:
                # hostport/deviceport may hold lists of per-port dicts;
                # merge each into a single dict so the second flatten
                # produces the slash-paths used in `header`
                main_dict['hostports/hostport'] = merge_dicts(
                    main_dict['hostports/hostport'])
                main_dict['deviceports/deviceport'] = merge_dicts(
                    main_dict['deviceports/deviceport'])
                main_dict = flatten_dict(main_dict)
                rows += ordered_jsons([main_dict], header)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'ControllerTable', 'Controller',
                         final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Sync Policies worksheet

    :param workbook: workbook containing the 'Sync Policies' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet = workbook.get_sheet_by_name('Sync Policies')

    headers = [
        'hostname', 'name', 'schedule', 'source_root_path', 'enabled',
        'target_path', 'last_success', 'action', 'id', 'target_host'
    ]
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        host = search_tag_value(doc, 'component_details')['hostname']
        sync_policies = []  # type: Iterable
        for entry in search_tag_value(doc, 'command_details'):
            payload = collected_data(
                entry, 'cmd', 'isi sync policies list --format?json')
            # keep the last non-empty parse when a later entry lacks output
            if payload:
                sync_policies = ordered_jsons(json.loads(payload),
                                              headers[1:])
        rows += [[host] + policy for policy in sync_policies]

    final_col = final_row = 0
    for row_idx, record in enumerate(rows, start=2):
        named = RowTuple._make(record)
        for col_idx, value in enumerate(named._asdict().values(),
                                        start=ord('A')):
            cell = worksheet['{}{}'.format(chr(col_idx), row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_idx
        final_row = row_idx

    sheet_process_output(worksheet, 'SyncPoliciesTable', 'Sync Policies',
                         final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Storage Pool Summary worksheet

    :param workbook: workbook containing the 'Storage Pool Summary' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet = workbook.get_sheet_by_name('Storage Pool Summary')

    headers = ['Hostname'] + get_parser_header(POOL_TMPL)
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        host = search_tag_value(doc, 'component_details')['hostname']
        pool = []  # type: list
        for entry in search_tag_value(doc, 'command_details'):
            pool_content = collected_data(
                entry, 'cmd', 'isi storagepool list --format?list')
            # keep the last non-empty parse when a later entry lacks output
            if pool_content:
                pool = run_parser_over(pool_content, POOL_TMPL)
        rows += [[host] + pool_row for pool_row in pool]

    final_col = final_row = 0
    for row_idx, record in enumerate(rows, start=2):
        named = RowTuple._make(record)
        for col_idx, value in enumerate(named._asdict().values(),
                                        start=ord('A')):
            cell = worksheet['{}{}'.format(chr(col_idx), row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_idx
        final_row = row_idx

    sheet_process_output(
        worksheet, 'StoragePoolSummaryTable', 'Storage Pool Summary',
        final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Zone List worksheet

    :param workbook: workbook containing the 'Zone List' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet = workbook.get_sheet_by_name('Zone List')

    headers = ['Hostname', 'Name', 'Path']
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        host = search_tag_value(doc, 'component_details')['hostname']
        zones = []  # type: Iterable
        for entry in search_tag_value(doc, 'command_details'):
            zones_content = collected_data(
                entry, 'cmd', 'isi zone zones list --format?list')
            # keep the last non-empty parse when a later entry lacks output
            if zones_content:
                zones = run_parser_over(zones_content, ZONES_TMPL)
        rows += [[host] + zone_row for zone_row in zones]

    final_col = final_row = 0
    for row_idx, record in enumerate(rows, start=2):
        named = RowTuple._make(record)
        for col_idx, value in enumerate(named._asdict().values(),
                                        start=ord('A')):
            cell = worksheet['{}{}'.format(chr(col_idx), row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_idx
        final_row = row_idx

    sheet_process_output(
        worksheet, 'ZoneListTable', 'Zone List', final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process PStat worksheet

    :param workbook: workbook containing the 'PStat' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet = workbook.get_sheet_by_name('PStat')

    headers = ['Hostname'] + get_parser_header(PSTAT_TMPL)
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        host = search_tag_value(doc, 'component_details')['hostname']
        stats = []  # type: list
        for entry in search_tag_value(doc, 'command_details'):
            # malformed entries raise TypeError; skip them silently
            with suppress(TypeError):
                stats_content = collected_alias_data(
                    entry, 'cmd', 'isi statistics pstat')
                if stats_content:
                    stats = run_parser_over(stats_content, PSTAT_TMPL)
        rows += [[host] + stat_row for stat_row in stats]

    final_col = final_row = 0
    for row_idx, record in enumerate(rows, start=2):
        named = RowTuple._make(record)
        for col_idx, value in enumerate(named._asdict().values(),
                                        start=ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_idx), row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_idx
        final_row = row_idx

    sheet_process_output(worksheet, 'PStatTable', 'PStat', final_col,
                         final_row)
def process(workbook: Any, contents: Iterable) -> None:
    """Process Volumes Status worksheet

    :param workbook: workbook containing the 'Volumes' sheet
    :param contents: iterable of (system-name command output, volumes XML)
        pairs, one pair per system
    """
    worksheet_name = 'Volumes'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(SYSTEM_NAME_TMPL)
    headers += [
        'VolumeId', 'VolumeName', 'Size', 'Size_MiB', 'Capacity', 'Serial',
        'cg_Name', 'sg_Name', 'SgSnapshotOf', 'Locked', 'SnapshotTime',
        'SnapshotTimeOnMaster', 'PoolName', 'UsedCapacity_GB',
        'LockedByPool', 'SnapshotInternalRole', 'Mirrored', 'Compressed',
        'Ratio', 'Saving', 'Online', 'MetadataMismatch'
    ]
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    # `headers` is intentionally rebound: from here on it holds the
    # attribute lookup paths into the flattened volume XML
    headers = [
        'id/@value', 'name/@value', 'size/@value', 'size_MiB/@value',
        'capacity/@value', 'serial/@value', 'cg_name/@value',
        'sg_name/@value', 'sg_snapshot_of/@value', 'locked/@value',
        'snapshot_time/@value', 'snapshot_time_on_master/@value',
        'pool_name/@value', 'used_capacity/@value', 'locked_by_pool/@value',
        'snapshot_internal_role/@value', 'mirrored/@value',
        'compressed/@value', 'ratio/@value', 'saving/@value',
        'online/@value', 'metadata_mismatch/@value'
    ]

    rows = []  # type: list
    for sys_content, content in contents:
        # first parsed row carries the system-name columns (a list)
        system_name = run_parser_over(sys_content, SYSTEM_NAME_TMPL)[0]
        # drop the first line of the payload before XML parsing —
        # presumably a non-XML preamble; TODO confirm against collector
        volumes_content = '\n'.join(content.split('\n')[1:])
        doc = xmltodict.parse(volumes_content)
        command_details = search_tag_value(doc, 'volume')
        flat_data = [flatten_dict(data) for data in command_details]
        volumes = ordered_jsons(flat_data, headers)
        rows += [system_name + row for row in volumes]

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            # column B stays text-formatted — presumably an identifier
            # that must not be coerced to a number; TODO confirm
            if chr(col_n) != 'B':
                set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'VolumesTable', 'Volumes', final_col,
                         final_row)
def process(workbook: Any, contents: list) -> None:
    """Process HW Status worksheet

    :param workbook: workbook containing the 'HW Status' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet = workbook.get_sheet_by_name('HW Status')

    headers = ['Hostname', 'Cluster', 'Component', 'Status']
    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        host = search_tag_value(doc, 'component_details')['hostname']
        hw_statuses = []  # type: Iterable
        for entry in search_tag_value(doc, 'command_details'):
            status_content = collected_data(entry, 'cmd',
                                            'isi_for_array isi_hw_status')
            # keep the last non-empty parse when a later entry lacks output
            if status_content:
                hw_statuses = hw_split(status_content)
        rows += [[host] + status_row for status_row in hw_statuses]

    final_col = final_row = 0
    for row_idx, record in enumerate(rows, start=2):
        named = RowTuple._make(record)
        for col_idx, value in enumerate(named._asdict().values(),
                                        start=ord('A')):
            cell = worksheet['{}{}'.format(chr(col_idx), row_idx)]
            cell.value = value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_idx
        final_row = row_idx

    sheet_process_output(worksheet, 'HWStatusTable', 'HW Status', final_col,
                         final_row)
def luns_occurrences(cmd_out: list, headers: list) -> list:
    """Creates a list for map_details

    For each map entry, extracts the two map-level fields named by
    ``headers[:2]`` and appends the list of lun-level values named by
    ``headers[2]``.

    :param headers: lookup keys — two map-level keys followed by one
        lun-level key
    :param cmd_out: parsed map entries (dicts)
    :return: rows of [map field 1, map field 2, [lun values]]
    """
    lun_rows = []  # type: list
    for entry in cmd_out:
        flat_data_map = [flatten_dict(entry)]
        maps = ordered_jsons(flat_data_map, headers[:2])
        lun_details = search_tag_value(entry, 'lun')
        # Normalize 'lun' to a list of flattened dicts. BUG FIX: when
        # lun_details was neither a list nor a dict, flat_luns was left
        # undefined (NameError on the first entry) or stale from the
        # previous iteration; default to an empty list instead.
        if isinstance(lun_details, list):
            flat_luns = [flatten_dict(data) for data in lun_details]
        elif isinstance(lun_details, dict):
            flat_luns = [flatten_dict(lun_details)]
        else:
            flat_luns = []
        luns = [flat_lun[headers[2]] for flat_lun in flat_luns]
        lun_rows += [row + [luns] for row in maps]
    return lun_rows
def process(workbook: Any, contents: list) -> None:
    """Process Disks worksheet

    :param workbook: workbook containing the 'Disks' sheet
    :param contents: XML report payloads
    """
    worksheet = workbook.get_sheet_by_name('Disks')

    disks_excel_header = [
        'DiskName', 'OperationalState', 'EnclosureDiskBays',
        'DiskBayNumber', 'ShelfNumber', 'DiskGroupName', 'DiskType',
        'ModelNumber', 'FormattedCapacity', 'Occupancy'
    ]
    HostTuple = namedtuple('HostTuple', disks_excel_header)
    build_header(worksheet, disks_excel_header)

    # lookup keys matching the parsed XML structure
    disks_header = [
        'diskname', 'operationalstate', 'EnclosureDiskBays', 'diskbaynumber',
        'shelfnumber', 'diskgroupname', 'disktype', 'modelnumber',
        'formattedcapacity', 'occupancy'
    ]

    disk_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        flattened = [
            flatten_dict(det)
            for det in search_tag_value(parsed, 'object')
        ]
        disk_data += ordered_jsons(flattened, disks_header)

    final_col, final_row = write_excel(disk_data, worksheet, HostTuple, 'A')
    sheet_process_output(
        worksheet, 'DisksTable', 'Disks', final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process Disk Enclosure worksheet

    (Docstring corrected: it previously said 'Controller', but this
    function fills the 'Disk Enclosure' sheet.)

    :param workbook: workbook containing the 'Disk Enclosure' sheet
    :param contents: XML report payloads
    """
    worksheet = workbook.get_sheet_by_name('Disk Enclosure')

    excel_header = [
        'Name', 'Type', 'DiskSlotType', 'Transport', 'ProductId',
        'ProductNumber', 'DiskSlot'
    ]
    RowTuple = namedtuple('RowTuple', excel_header)
    build_header(worksheet, excel_header)

    # slash-separated lookup paths into the flattened XML dicts
    header = [
        'objectname', 'objecttype', 'diskslottype', 'transport', 'productid',
        'productnum', 'diskslot/name'
    ]

    disk_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        flattened = [
            flatten_dict(det)
            for det in search_tag_value(parsed, 'object')
        ]
        for main_dict in flattened:
            # when a 'diskslot' key survives flattening, collapse its
            # per-slot dicts into one and flatten again so that
            # 'diskslot/name' resolves
            if list(ordered_jsons([main_dict], ['diskslot'])):
                main_dict['diskslot'] = merge_dicts(main_dict['diskslot'])
                main_dict = flatten_dict(main_dict)
            disk_data += ordered_jsons([main_dict], header)

    final_col, final_row = write_excel(disk_data, worksheet, RowTuple, 'A')
    sheet_process_output(worksheet, 'DiskEnclosureTable', 'DiskEnclosure',
                         final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process SMB Shares by Zone worksheet

    :param workbook: workbook containing the 'SMB Shares by Zone' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet_name = 'SMB Shares by Zone'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = [
        'hostname', 'file_create_mode', 'ntfs_acl_support',
        'file_filtering_enabled', 'directory_create_mask',
        'inheritable_path_acl', 'ca_write_integrity', 'file_create_mask',
        'path', 'access_based_enumeration_root_only',
        'allow_variable_expansion', 'id', 'hide_dot_files',
        'create_permissions', 'strict_locking', 'mangle_map', 'zid',
        'file_filter_type', 'browsable', 'description',
        'auto_create_directory', 'continuously_available',
        'directory_create_mode', 'allow_delete_readonly', 'ca_timeout',
        'access_based_enumeration', 'allow_execute_always', 'csc_policy',
        'permissions/permission_type', 'permissions/trustee/id',
        'permissions/permission', 'name', 'change_notify',
        'mangle_byte_start', 'impersonate_user', 'impersonate_guest',
        'oplocks', 'strict_flush', 'strict_ca_lockout'
    ]
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        smb_zone, smb_alias_zone = [], []  # type: (Iterable, Iterable)
        host = component_details['hostname']
        for entry in command_details:
            # malformed entries raise TypeError; skip them
            with suppress(TypeError):
                smb_zone_alias_content = collected_alias_data(
                    entry, 'cmd',
                    'isi smb shares list by zone **format?json')
                smb_zone_content = collected_data(
                    entry, 'cmd',
                    'isi smb shares list by zone **format?json')
                # BUG FIX: previously fell back to `smb_zone` here, which
                # clobbered the alias results with the non-alias ones and
                # could duplicate rows; keep the prior alias value instead
                # (mirrors the NFS Exports by Zone sibling).
                smb_alias_zone = process_smb_zones(
                    smb_zone_alias_content, headers[1:]) \
                    if smb_zone_alias_content else smb_alias_zone
                smb_zone = process_smb_zones(
                    ''.join(smb_zone_content), headers[1:]) \
                    if smb_zone_content else smb_zone
        smb_zone = list(smb_zone) + list(smb_alias_zone)
        rows += [[host] + row for row in smb_zone]

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(rows, 2):
        for col_n, col_value in \
                enumerate(row_tuple, ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                # list-valued field: one item per line, wrapped
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n if col_n > final_col else final_col
        final_row = row_n

    sheet_process_output(worksheet, 'SMBSharesByZoneTable',
                         'SMB Shares by Zone', final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Process NFS Exports by Zone worksheet

    :param workbook: workbook containing the 'NFS Exports by Zone' sheet
    :param contents: XML report payloads, one per host
    """
    worksheet_name = 'NFS Exports by Zone'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = [
        'hostname', 'encoding', 'map_retry', 'security_flavors',
        'write_unstable_reply', 'write_filesync_reply',
        'readdirplus_prefetch', 'block_size', 'write_transfer_size', 'id',
        'description', 'max_file_size', 'paths', 'write_unstable_action',
        'zone', 'name_max_size', 'can_set_time', 'read_transfer_multiple',
        'return_32bit_file_ids', 'write_transfer_multiple', 'all_dirs',
        'setattr_asynchronous', 'map_failure/primary_group',
        'map_failure/enabled', 'map_failure/secondary_groups',
        'map_failure/user', 'link_max', 'write_datasync_reply',
        'no_truncate', 'time_delta', 'snapshot', 'read_only',
        'map_lookup_uid', 'chown_restricted', 'write_datasync_action',
        'read_transfer_size', 'map_full', 'read_transfer_max_size',
        'map_root/primary_group', 'map_root/enabled',
        'map_root/secondary_groups', 'map_root/user',
        'map_non_root/primary_group', 'map_non_root/enabled',
        'map_non_root/secondary_groups', 'map_non_root/user', 'symlinks',
        'commit_asynchronous', 'write_filesync_action', 'case_insensitive',
        'readdirplus', 'write_transfer_max_size', 'directory_transfer_size',
        'case_preserving', 'read_only_clients', 'clients'
    ]
    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')
        nfs_zone, nfs_alias_zone = [], []  # type: (Iterable, Iterable)
        host = component_details['hostname']
        for entry in command_details:
            # malformed entries raise TypeError; skip them
            with suppress(TypeError):
                nfs_zone_alias_content = collected_alias_data(
                    entry, 'cmd',
                    'isi nfs exports list by zone **format?json')
                nfs_zone_content = collected_data(
                    entry, 'cmd',
                    'isi nfs exports list by zone **format?json')
                # keep previous values when the entry has no output
                nfs_alias_zone = process_nfs_zones(
                    nfs_zone_alias_content, headers[1:]) \
                    if nfs_zone_alias_content else nfs_alias_zone
                nfs_zone = process_nfs_zones(
                    ''.join(nfs_zone_content), headers[1:]) \
                    if nfs_zone_content else nfs_zone
        # combine the plain and alias results into a single row set
        nfs_zone = list(nfs_zone) + list(nfs_alias_zone)
        rows += [[host] + row for row in nfs_zone]

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(rows, 2):
        for col_n, col_value in \
                enumerate(row_tuple, ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                # list-valued field: one item per line, wrapped
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'NFSExportsByZoneTable',
                         'NFS Exports by Zone', final_col, final_row)