def process(workbook: Any, contents: list) -> None:
    """Fill the Controller worksheet from parsed XML command output.

    :param workbook: workbook containing the 'Controller' sheet
    :param contents: raw XML documents, one per controller dump
    """
    worksheet = workbook.get_sheet_by_name('Controller')

    excel_header = [
        'controllername', 'datablocksize', 'modelnumber', 'productnumber',
        'serialnumber', 'firmwareversion', 'cachecondition', 'readcapacity',
        'writecapacity', 'mirrorcapacity', 'portname', 'topology',
        'hostportaddress', 'switchtype'
    ]
    build_header(worksheet, excel_header)
    RowTuple = namedtuple('RowTuple', excel_header)

    # Paths into the flattened XML dicts, in excel_header column order.
    header = [
        'controllername', 'datablocksize', 'modelnumber', 'productnumber',
        'serialnumber', 'firmwareversion', 'cachememory/cachecondition',
        'cachememory/readcapacity', 'cachememory/writecapacity',
        'cachememory/mirrorcapacity', 'hostports/hostport/portname',
        'hostports/hostport/topology', 'hostports/hostport/hostportaddress',
        'deviceports/deviceport/switchtype'
    ]

    rows = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        flattened = [
            flatten_dict(obj) for obj in search_tag_value(parsed, 'object')
        ]
        for item in flattened:
            # Only objects exposing the six controller base fields count.
            if not list(ordered_jsons([item], header[:6])):
                continue
            # Collapse the nested port structures before re-flattening.
            item['hostports/hostport'] = merge_dicts(
                item['hostports/hostport'])
            item['deviceports/deviceport'] = merge_dicts(
                item['deviceports/deviceport'])
            rows += ordered_jsons([flatten_dict(item)], header)

    final_col = final_row = 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, value in enumerate(
                row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col, final_row = col_n, row_n

    sheet_process_output(
        worksheet, 'ControllerTable', 'Controller', final_col, final_row)
def smb_flatten(smb_dict: dict) -> dict:
    """Flatten the permissions list of dictionaries for SMB jsons.

    :param smb_dict: SMB entry whose 'permissions' key is a list of dicts
    :return: fully flattened copy of ``smb_dict``
    """
    merged = defaultdict(list)  # type: dict
    for perm in smb_dict['permissions']:
        flat_perm = flatten_dict(perm)
        # Group every permission value under its (flattened) key.
        for key in flat_perm:
            merged[key].append(flat_perm[key])
    smb_dict['permissions'] = merged
    return flatten_dict(smb_dict)
def process(workbook: Any, contents: list) -> None:
    """Fill the Hosts worksheet from parsed XML command output.

    :param workbook: workbook containing the 'Hosts' sheet
    :param contents: raw XML documents to mine for host objects
    """
    worksheet = workbook.get_sheet_by_name('Hosts')

    host_excel_header = [
        'HostName', 'OperationalState', 'OSMode', 'HostType',
        'VirtualDiskName'
    ]
    HostTuple = namedtuple('HostTuple', host_excel_header)
    build_header(worksheet, host_excel_header)

    # Flattened-XML keys in host_excel_header column order.
    host_header = [
        'hostname', 'operationalstate', 'osmode', 'hosttype',
        'presentation/virtualdiskname'
    ]

    disk_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        flattened = [
            flatten_dict(obj) for obj in search_tag_value(parsed, 'object')
        ]
        disk_data += ordered_jsons(flattened, host_header)

    final_col, final_row = write_excel(disk_data, worksheet, HostTuple, 'A')
    sheet_process_output(
        worksheet, 'HostsTable', 'Hosts', final_col, final_row)
def process(workbook: Any, contents: Iterable) -> None:
    """Fill the Volumes worksheet from system-name and volume XML output.

    :param workbook: workbook containing the 'Volumes' sheet
    :param contents: pairs of (system-name command output, volumes output)
    """
    worksheet = workbook.get_sheet_by_name('Volumes')

    excel_header = get_parser_header(SYSTEM_NAME_TMPL)
    excel_header += [
        'VolumeId', 'VolumeName', 'Size', 'Size_MiB', 'Capacity', 'Serial',
        'cg_Name', 'sg_Name', 'SgSnapshotOf', 'Locked', 'SnapshotTime',
        'SnapshotTimeOnMaster', 'PoolName', 'UsedCapacity_GB',
        'LockedByPool', 'SnapshotInternalRole', 'Mirrored', 'Compressed',
        'Ratio', 'Saving', 'Online', 'MetadataMismatch'
    ]
    RowTuple = namedtuple('RowTuple', excel_header)
    build_header(worksheet, excel_header)

    # Flattened-XML attribute paths for the volume columns above.
    xml_paths = [
        'id/@value', 'name/@value', 'size/@value', 'size_MiB/@value',
        'capacity/@value', 'serial/@value', 'cg_name/@value',
        'sg_name/@value', 'sg_snapshot_of/@value', 'locked/@value',
        'snapshot_time/@value', 'snapshot_time_on_master/@value',
        'pool_name/@value', 'used_capacity/@value', 'locked_by_pool/@value',
        'snapshot_internal_role/@value', 'mirrored/@value',
        'compressed/@value', 'ratio/@value', 'saving/@value',
        'online/@value', 'metadata_mismatch/@value'
    ]

    rows = []  # type: list
    for sys_content, content in contents:
        system_name = run_parser_over(sys_content, SYSTEM_NAME_TMPL)[0]
        # Drop the first line of output before feeding the XML parser.
        volumes_content = '\n'.join(content.split('\n')[1:])
        parsed = xmltodict.parse(volumes_content)
        flat_data = [
            flatten_dict(entry)
            for entry in search_tag_value(parsed, 'volume')
        ]
        for volume in ordered_jsons(flat_data, xml_paths):
            rows.append(system_name + volume)

    final_col = final_row = 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, value in enumerate(
                row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(value)
            style_value_cell(cell)
            # Column B is intentionally not converted to a number
            # (kept as text).
            if chr(col_n) != 'B':
                set_cell_to_number(cell)
            final_col, final_row = col_n, row_n

    sheet_process_output(
        worksheet, 'VolumesTable', 'Volumes', final_col, final_row)
def luns_occurrences(cmd_out: list, headers: list) -> list:
    """Create rows of map details, each with its list of LUNs appended.

    :param cmd_out: parsed map entries (nested dict-like objects)
    :param headers: keys to extract; ``headers[:2]`` identify the map,
        ``headers[2]`` is the LUN key inside each 'lun' entry
    :return: one row per matching map entry, ending with the LUN list
    """
    lun_rows = []  # type: list
    for entry in cmd_out:
        maps = ordered_jsons([flatten_dict(entry)], headers[:2])
        lun_details = search_tag_value(entry, 'lun')
        # 'lun' may come back as a list of dicts or a single dict.
        if isinstance(lun_details, list):
            flat_luns = [flatten_dict(data) for data in lun_details]
        elif isinstance(lun_details, dict):
            flat_luns = [flatten_dict(lun_details)]
        else:
            # Bug fix: 'flat_luns' was previously left unbound here,
            # raising NameError when the 'lun' tag is absent or has an
            # unexpected shape. Treat that case as "no LUNs".
            flat_luns = []
        luns = [flat_lun[headers[2]] for flat_lun in flat_luns]
        lun_rows += [row + [luns] for row in maps]
    return lun_rows
def process(workbook: Any, contents: list) -> None:
    """Fill the Disk Enclosure worksheet from parsed XML output.

    :param workbook: workbook containing the 'Disk Enclosure' sheet
    :param contents: raw XML documents to mine for enclosure objects
    """
    worksheet = workbook.get_sheet_by_name('Disk Enclosure')

    excel_header = [
        'Name', 'Type', 'DiskSlotType', 'Transport', 'ProductId',
        'ProductNumber', 'DiskSlot'
    ]
    RowTuple = namedtuple('RowTuple', excel_header)
    build_header(worksheet, excel_header)

    # Flattened-XML keys in excel_header column order.
    header = [
        'objectname', 'objecttype', 'diskslottype', 'transport',
        'productid', 'productnum', 'diskslot/name'
    ]

    disk_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        flattened = [
            flatten_dict(obj) for obj in search_tag_value(parsed, 'object')
        ]
        for item in flattened:
            # Skip objects that do not carry a 'diskslot' entry.
            if not list(ordered_jsons([item], ['diskslot'])):
                continue
            item['diskslot'] = merge_dicts(item['diskslot'])
            disk_data += ordered_jsons([flatten_dict(item)], header)

    final_col, final_row = write_excel(disk_data, worksheet, RowTuple, 'A')
    sheet_process_output(
        worksheet, 'DiskEnclosureTable', 'DiskEnclosure',
        final_col, final_row)
def process(workbook: Any, contents: list) -> None:
    """Fill the Virtual Disks worksheet with family and host tables.

    Two side-by-side tables are written: disk families starting at
    column A and host presentations starting at column E.

    :param workbook: workbook containing the 'Virtual Disks' sheet
    :param contents: raw XML documents to mine for disk-family objects
    """
    worksheet = workbook.get_sheet_by_name('Virtual Disks')

    family_excel_header = [
        'FamilyName', 'OperationalState', 'TotalSnapshots'
    ]
    host_excel_header = [
        'FamilyName', 'AllocatedCapacity', 'HostName', 'HostOSMode'
    ]
    FamilyTuple = namedtuple('FamilyTuple', family_excel_header)
    HostTuple = namedtuple('HostTuple', host_excel_header)
    build_header(worksheet, family_excel_header)
    build_header(worksheet, host_excel_header, 'E')  # second table

    family_header = ['familyname', 'operationalstate', 'totalsnapshots']
    host_header = [
        'familyname', 'allocatedcapacity', 'presentation/hostname',
        'presentation/hostosmode'
    ]

    family_data = []  # type: list
    host_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        family_data += list(
            ordered_jsons(search_tag_value(parsed, 'object'),
                          family_header))
        flattened = [
            flatten_dict(obj) for obj in search_tag_value(parsed, 'object')
        ]
        host_data += ordered_jsons(flattened, host_header)

    final_col, final_row = write_excel(
        family_data, worksheet, FamilyTuple, 'A')
    sheet_process_output(
        worksheet, 'FamilyTable', 'Virtual Disks', final_col, final_row)

    final_col, final_row = write_excel(host_data, worksheet, HostTuple, 'E')
    sheet_process_output(
        worksheet, 'HostTable', 'Virtual Disks', final_col, final_row,
        start_col=ord('E'))
def process_drives(content: str, headers: list) -> tuple:
    """Process json lines for the Drive List sheet.

    Each non-empty line is expected to look like
    ``<cluster>: <json array of drive objects>``.

    :param content: newline-separated command output
    :param headers: keys to extract from each flattened drive json
    :return: tuple of (rows, count of lines whose json failed to parse)
    """
    rows, bad_jsons = [], 0
    for line in content.split('\n'):
        tokens = line.split()
        if not tokens:
            # Bug fix: blank lines (e.g. from a trailing newline) used to
            # raise IndexError on line.split()[0]; skip them instead.
            continue
        cluster = tokens[0].strip(':')
        jsons = ' '.join(tokens[1:])
        try:
            for json_entry in json.loads(jsons):
                rows.append(
                    [cluster]
                    + first(ordered_jsons([flatten_dict(json_entry)],
                                          headers)))
        except json.JSONDecodeError:
            bad_jsons += 1
    return rows, bad_jsons
def process_nfs_zones(content: str, headers: list) -> Generator:
    """Process jsons for the NFS Zones sheet.

    :param content: raw command output converted to json strings
    :param headers: keys to extract from each flattened json
    :return: generator of ordered rows
    """
    parsed = []  # type: list
    for chunk in convert_jsons(content):
        # json.loads() does not work on multiple lists of jsons in one
        # string, so they must be split apart first.
        # NOTE(review): the split drops the ']'/'[' delimiters from the
        # fragments -- confirm convert_jsons output keeps these loadable.
        if ']\n[' in chunk:
            parsed += [json.loads(part) for part in chunk.split(']\n[')]
        else:
            parsed.append(json.loads(chunk))
    flattened = [flatten_dict(item) for item in parsed]
    return ordered_jsons(flattened, headers)
def process(workbook: Any, contents: list) -> None:
    """Fill the Disks worksheet from parsed XML command output.

    :param workbook: workbook containing the 'Disks' sheet
    :param contents: raw XML documents to mine for disk objects
    """
    worksheet = workbook.get_sheet_by_name('Disks')

    disks_excel_header = [
        'DiskName', 'OperationalState', 'EnclosureDiskBays',
        'DiskBayNumber', 'ShelfNumber', 'DiskGroupName', 'DiskType',
        'ModelNumber', 'FormattedCapacity', 'Occupancy'
    ]
    HostTuple = namedtuple('HostTuple', disks_excel_header)
    build_header(worksheet, disks_excel_header)

    # Flattened-XML keys in disks_excel_header column order.
    disks_header = [
        'diskname', 'operationalstate', 'EnclosureDiskBays',
        'diskbaynumber', 'shelfnumber', 'diskgroupname', 'disktype',
        'modelnumber', 'formattedcapacity', 'occupancy'
    ]

    disk_data = []  # type: list
    for content in contents:
        parsed = xmltodict.parse(content)
        flattened = [
            flatten_dict(obj) for obj in search_tag_value(parsed, 'object')
        ]
        disk_data += ordered_jsons(flattened, disks_header)

    final_col, final_row = write_excel(disk_data, worksheet, HostTuple, 'A')
    sheet_process_output(
        worksheet, 'DisksTable', 'Disks', final_col, final_row)