def process(workbook: Any, contents: list) -> None:
    """Process Storage Inventory worksheet

    :param workbook:
    :param contents:
    """
    worksheet_name = 'Storage Inventory'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = list(concat([
        ['Hostname', 'Model', 'OS', 'Nodes'],
        get_parser_header(DEDUPE_TMPL)
    ]))

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    rows = []
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')

        dedupe, nodes = [], 0  # type: (list, int)
        for entry in command_details:
            nodes_content = collected_data(
                entry, 'cmd', 'isi storagepool nodepools list')
            nodes = max(map(compose(int, itemgetter(0)), run_parser_over(
                nodes_content, NODES_TMPL))) if nodes_content else nodes

            dedupe_content = collected_data(entry, 'cmd', 'isi dedupe stats')
            dedupe = run_parser_over(
                dedupe_content, DEDUPE_TMPL) if dedupe_content else dedupe
        dedupe = dedupe if len(dedupe) > 1 else [['', '', '', '', '', '']]

        rows.append([
            component_details['hostname'],
            component_details['model'],
            component_details['os'],
            str(nodes),
            *dedupe[0]
        ])

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet,
        'StorageInventoryTable',
        'Storage Inventory',
        final_col,
        final_row)
def process(workbook: Any, content: str) -> tuple:
    """Process Storage-Array-Summary worksheet

    Also returns the array names, models and revisions used in other sheets

    :param workbook:
    :param content:
    :return:
    """
    worksheet = workbook.get_sheet_by_name('Storage-Array-Summary')

    headers = list(concat([
        get_parser_header(ARRAY_NAME_TMPL),
        get_parser_header(GET_ARRAY_UID_TMPL),
        get_parser_header(GET_AGENT_TMPL)
    ]))

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)

    cmd_arrayname_out = run_parser_over(content, ARRAY_NAME_TMPL)
    cmd_getarrayuid_out = run_parser_over(content, GET_ARRAY_UID_TMPL)
    cmd_getagent_out = run_parser_over(content, GET_AGENT_TMPL)

    # noinspection PyTypeChecker
    cmd_out = map(compose(list, concat),
                  zip(cmd_arrayname_out, cmd_getarrayuid_out,
                      cmd_getagent_out))

    array_names = defaultdict(str)  # type: defaultdict
    rows = check_empty_arrays(list(unique(cmd_out, key=itemgetter(0, 1))))

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        array_names[worksheet['Q{}'.format(row_n)].value] = \
            worksheet['A{}'.format(row_n)].value
        final_row = row_n

    sheet_process_output(
        worksheet,
        'StorageArraySummaryTable',
        'Storage-Array-Summary',
        final_col,
        final_row)

    array_models = groupby(itemgetter(12), rows)
    array_revisions = groupby(itemgetter(10), rows)
    return array_names, array_models, array_revisions
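# Illustrative wiring (a sketch; the caller shown here is hypothetical and not
# part of this module): the `array_names` mapping returned above is the same
# dict that the SnapView and MirrorView-S processors later in this codebase
# expect as their `array_names` argument.
#
#     array_names, array_models, array_revisions = process(workbook, content)
#     snapview.process(workbook, content, array_names)       # SnapView sheet
#     mirrorview_s.process(workbook, content, array_names)   # MirrorView-S sheet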
def process(workbook: Any, content: str) -> None:
    """Process dskgrp_summary worksheet

    :param workbook:
    :param content:
    """
    worksheet_name = 'dskgrp_summary'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(DSKRGP_TMPL)

    RowTuple = namedtuple('RowTuple', headers)

    headers[5], headers[6], headers[8] = \
        'diskspeed(RPM)', 'disksize(MB)', 'totalcapacity(MB)'
    build_header(worksheet, headers)
    worksheet['E1'].comment = Comment(legend, '')

    dskgrp_summary_out = run_parser_over(content, DSKRGP_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(
            map(RowTuple._make, dskgrp_summary_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet,
        'DskgrpSummaryTable',
        'dskgrp_summary',
        final_col,
        final_row)
def process(workbook: Any, content: str) -> None:
    """Process Target Ports worksheet (XtremIO)

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('Target Ports')

    headers = get_parser_header(SHOW_TARGETS_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)
    show_targets_out = run_parser_over(content, SHOW_TARGETS_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(
            map(RowTuple._make, show_targets_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'TargetPortsTable', 'Target Ports', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process NAS Summary worksheet

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('NAS Summary')

    headers = get_parser_header(NAS_SUMMARY_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)
    nas_summary_out = run_parser_over(content, NAS_SUMMARY_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(
            map(RowTuple._make, nas_summary_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'NASSummaryTable', 'NAS Summary', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process list_WWN worksheet

    :param workbook:
    :param content:
    """
    worksheet_name = 'list_WWN'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(LSTWWN_TMPL)

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    list_wwn_out = run_parser_over(content, LSTWWN_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, list_wwn_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            if chr(col_n) not in ('B', 'F'):
                set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet,
        'ListWWNTable',
        'list_WWN',
        final_col,
        final_row)
def main(templates: Any, input_files: list, output_file: str) -> None:
    """Celerra Entry point

    :param templates:
    :param input_files:
    :param output_file:
    """
    workbook = Workbook()

    raw_content_patterns = ('*cmd_outputs/hostname', )

    with open(templates, 'r') as temp:
        temp_data = xmltodict.parse(''.join(temp.readlines()))['sgr']

    with open(input_files[0], 'r') as inp:
        data = ''.join(inp.readlines())

    extracted_data = run_parser_over(data, temp_data['template'])
    fname = temp_data['fname']
    sheet = temp_data['sheet']

    # raw_content = load_raw_content(tuple(input_files), raw_content_patterns)
    # system_details_content = get_relevant_content(
    #     raw_content, (raw_content_patterns[0], ), '*' * 20 + '\n')

    workbook.save(output_file)
def process(workbook: Any, content: str) -> None:
    """Process Storage Array Summary (3Par) worksheet

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('Storage Array Summary')

    headers = get_parser_header(SHOWSYS_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)
    showsys_out = run_parser_over(content, SHOWSYS_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, showsys_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'StorageArraySummaryTable', 'Storage Array Summary',
        final_col, final_row)
def process(workbook: Any, content: str, array_names: dict) -> None:
    """Process SnapView worksheet

    :param workbook:
    :param content:
    :param array_names:
    """
    worksheet = workbook.get_sheet_by_name('SnapView')

    headers = get_parser_header(SNAP_VIEW_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)

    snap_view_out = check_empty_arrays(
        take_array_names(array_names,
                         run_parser_over(content, SNAP_VIEW_TMPL)))

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, snap_view_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'SnapViewTable', 'SnapView', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process Software-Packages worksheet

    :param workbook:
    :param content:
    """
    worksheet_name = 'Software-Packages'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(NDU_TMPL)

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    cmd_ndu_out = run_parser_over(content, NDU_TMPL)
    cmd_ndu_out = check_empty_arrays(
        list(unique(cmd_ndu_out, key=itemgetter(0, 1))))

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, cmd_ndu_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            if cell.value != '-':
                cell.value = str.strip(col_value, '-')
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'SoftwarePackagesTable', 'Software-Packages',
        final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process Pools worksheet

    :param workbook:
    :param content:
    """
    worksheet_name = 'Pools'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(POOLS_TMPL)

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    pools_out = run_parser_over(content, POOLS_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, pools_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'PoolsTable', 'Pools', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process Backend Storage SP DETAILS worksheet

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('Backend Storage SP DETAILS')

    headers = get_parser_header(BACKEND_DETAILS_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)
    backend_details_out = run_parser_over(content, BACKEND_DETAILS_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(
            map(RowTuple._make, backend_details_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'BackendStorageSPDETAILSTable',
        'Backend Storage SP DETAILS', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process Disks (3Par) worksheet

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('Disks')

    headers = get_parser_header(SHOWPD_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    headers[7], headers[8], headers[11] = 'Total(MB)', 'Free(MB)', 'Cap(GB)'
    build_header(worksheet, headers)
    show_pd_out = run_parser_over(content, SHOWPD_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, show_pd_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'DisksTable', 'Disks', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process access_initiator worksheet

    :param workbook:
    :param content:
    """
    worksheet_name = 'access_initiator'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(ACSINIT_TMPL)

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    access_initiator_out = run_parser_over(content, ACSINIT_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(
            map(RowTuple._make, access_initiator_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'AccessInitiatorTable', 'access_initiator',
        final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process NAS_License worksheet

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('NAS_License')

    headers = get_parser_header(NAS_LICENSE_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)
    nas_license_out = run_parser_over(content, NAS_LICENSE_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(
            map(RowTuple._make, nas_license_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'NASLicenseTable', 'NAS_License', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process device_name_list worksheet

    :param workbook:
    :param content:
    """
    worksheet_name = 'device_name_list'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(DEVNM_TMPL)

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    device_name_list_out = run_parser_over(content, DEVNM_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(
            map(RowTuple._make, device_name_list_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            if chr(col_n) != 'B':
                set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'DevNameListTable', 'device_name_list',
        final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process Disk Groups worksheet

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('Disk Groups')

    headers = get_parser_header(DISK_GROUPS_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)
    disk_groups_out = run_parser_over(content, DISK_GROUPS_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(
            map(RowTuple._make, disk_groups_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            if chr(col_n) not in ('B', ):
                set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'DiskGroupsTable', 'Disk Groups', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process Data Protection Groups worksheet (XtremIO)

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('Data Protection Groups')

    headers = get_parser_header(SHOW_DATA_PROTECTION_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)
    show_data_protection_out = run_parser_over(
        content, SHOW_DATA_PROTECTION_TMPL)

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(
            map(RowTuple._make, show_data_protection_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'DataProtectionGroupsTable', 'Data Protection Groups',
        final_col, final_row)
def process(workbook: Any, content: str, array_names: dict) -> None:
    """Process MirrorView-S worksheet

    :param workbook:
    :param content:
    :param array_names:
    """
    worksheet = workbook.get_sheet_by_name('MirrorView-S')

    headers = get_parser_header(MIRROR_VIEW_S_TMPL)

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)

    cmd_mirror_view_s_out = check_empty_arrays(
        take_array_names(array_names,
                         run_parser_over(content, MIRROR_VIEW_S_TMPL)))
    rows = unique(cmd_mirror_view_s_out, key=itemgetter(0))

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'MirrorViewSTable', 'MirrorView-S', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process Hosts (3Par) worksheet

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('Hosts')

    headers = list(concat([
        get_parser_header(SHOWHOST_TMPL),
        get_parser_header(SHOWHOST_LINES_TMPL)[4:],
    ]))

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)

    show_hosts_out = groupby(
        itemgetter(0, 1, 2, 3, 4), run_parser_over(content, SHOWHOST_TMPL))
    show_hosts_lines_out = groupby(
        itemgetter(0, 1, 2, 3), run_parser_over(content, SHOWHOST_LINES_TMPL))

    rows = []
    for idfier in show_hosts_out:
        with suppress(KeyError):
            for host_line, details_line in \
                    zip(show_hosts_out[idfier],
                        show_hosts_lines_out[idfier[:-1]]):
                rows.append(host_line + details_line[4:])

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'HostsTable', 'Hosts', final_col, final_row)
def process(workbook: Workbook, content: str) -> list:
    """Process SP-Frontend-Ports worksheet

    :param workbook:
    :param content:
    :return:
    """
    worksheet_name = 'SP-Frontend-Ports'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = list(concat([
        get_parser_header(PORT_TMPL),
        get_parser_header(SPPORTSPEED_TMPL)[3:],
    ]))

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)

    cmd_port_out = run_parser_over(content, PORT_TMPL)
    cmd_spportspeed_out = run_parser_over(content, SPPORTSPEED_TMPL)

    common_columns = (0, 1, 2)
    common_columns_getter = itemgetter(*common_columns)
    cmd_merged_out = join(
        common_columns_getter, cmd_port_out,
        common_columns_getter, cmd_spportspeed_out)

    rows = map(
        compose(list, concat, juxt(first, compose(drop(3), second))),
        cmd_merged_out)
    rows = check_empty_arrays(list(unique(rows, key=common_columns_getter)))

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'SPFrontendPortsTable', 'SP-Frontend-Ports',
        final_col, final_row)

    return rows
def process(workbook: Any, contents: Iterable) -> None:
    """Process Volumes Status worksheet

    :param workbook:
    :param contents:
    """
    worksheet_name = 'Volumes'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(SYSTEM_NAME_TMPL)
    headers += [
        'VolumeId', 'VolumeName', 'Size', 'Size_MiB', 'Capacity', 'Serial',
        'cg_Name', 'sg_Name', 'SgSnapshotOf', 'Locked', 'SnapshotTime',
        'SnapshotTimeOnMaster', 'PoolName', 'UsedCapacity_GB', 'LockedByPool',
        'SnapshotInternalRole', 'Mirrored', 'Compressed', 'Ratio', 'Saving',
        'Online', 'MetadataMismatch'
    ]

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    headers = [
        'id/@value', 'name/@value', 'size/@value', 'size_MiB/@value',
        'capacity/@value', 'serial/@value', 'cg_name/@value',
        'sg_name/@value', 'sg_snapshot_of/@value', 'locked/@value',
        'snapshot_time/@value', 'snapshot_time_on_master/@value',
        'pool_name/@value', 'used_capacity/@value', 'locked_by_pool/@value',
        'snapshot_internal_role/@value', 'mirrored/@value',
        'compressed/@value', 'ratio/@value', 'saving/@value',
        'online/@value', 'metadata_mismatch/@value'
    ]

    rows = []  # type: list
    for sys_content, content in contents:
        system_name = run_parser_over(sys_content, SYSTEM_NAME_TMPL)[0]
        volumes_content = '\n'.join(content.split('\n')[1:])
        doc = xmltodict.parse(volumes_content)
        command_details = search_tag_value(doc, 'volume')
        flat_data = [flatten_dict(data) for data in command_details]
        volumes = ordered_jsons(flat_data, headers)
        rows += [system_name + row for row in volumes]

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            if chr(col_n) != 'B':
                set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'VolumesTable', 'Volumes', final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process Storage Controllers worksheet

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('Storage Controllers')

    headers = list(concat([
        get_parser_header(STORAGE_CONTROLLERS_TMPL),
        get_parser_header(STORAGE_VERSION_TMPL)[1:],
        get_parser_header(STORAGE_CAPACITY_TMPL)[1:]
    ]))

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)

    storage_controllers_out = run_parser_over(
        content, STORAGE_CONTROLLERS_TMPL)
    storage_version_out = run_parser_over(content, STORAGE_VERSION_TMPL)
    storage_capacity_out = run_parser_over(content, STORAGE_CAPACITY_TMPL)

    common_columns = (0, )
    rows = multiple_join(
        common_columns,
        [storage_controllers_out, storage_version_out, storage_capacity_out])

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'StorageControllersTable', 'Storage Controllers',
        final_col, final_row)
def process(workbook: Any, content: str) -> None:
    """Process Disk Drives worksheet

    :param workbook:
    :param content:
    """
    worksheet_name = 'Disk Drives'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = list(concat([
        get_parser_header(SYSTEM_NAME_TMPL),
        get_parser_header(DISK_TMPL)[2:],
    ]))

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    system_drivers_out = run_parser_over(content, SYSTEM_NAME_TMPL)
    disk_drivers_out = run_parser_over(content, DISK_TMPL)

    common_columns = (0, 1)
    rows = multiple_join(
        common_columns, [system_drivers_out, disk_drivers_out])

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'DiskDrivesTable', 'Disk Drives', final_col, final_row)
def process(content: str) -> list:
    """Process clusters (XtremIO)

    :param content:
    :return:
    """
    clusters_info_out = run_parser_over(content, SHOW_CLUSTERS_INFO_TMPL)

    clusters_data = [(
        "Cluster Names:",
        ', '.join(cluster[0] for cluster in clusters_info_out),
        '', '')]

    return clusters_data
def process(workbook: Any, content: str) -> None:
    """Process Initiators and Groups worksheet (XtremIO)

    :param workbook:
    :param content:
    """
    worksheet = workbook.get_sheet_by_name('Initiators and Groups')

    headers = list(concat([
        get_parser_header(SHOW_INITIATORS_TMPL),
        get_parser_header(SHOW_INITIATOR_GROUPS_TMPL)[2:],
    ]))

    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)

    show_initiators_out = run_parser_over(content, SHOW_INITIATORS_TMPL)
    show_initiator_groups_out = run_parser_over(
        content, SHOW_INITIATOR_GROUPS_TMPL)

    common_columns = (0, 1)
    rows = multiple_join(
        common_columns, [show_initiators_out, show_initiator_groups_out])

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            set_cell_to_number(cell)
            style_value_cell(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'InitiatorsGroupsTable', 'Initiators and Groups',
        final_col, final_row)
def process(workbook: Any, content: str, sg_data: list) -> list:
    """Process LUNs worksheet

    :param workbook:
    :param content:
    :param sg_data:
    """
    worksheet_name = 'LUNs'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(GETLUN_TMPL)

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    cmd_getlun_out = run_parser_over(content, GETLUN_TMPL)
    cmd_getlun_out = check_empty_arrays(
        list(unique(cmd_getlun_out, key=itemgetter(0, 1))))

    expanded_luns = [[*entry[:-1], get_luns(entry[-1])] for entry in sg_data]

    sg_dict = {}  # type: dict
    for entry in expanded_luns:
        for lun in entry[-1]:
            sg_dict[entry[0], lun[1]] = entry[1], lun[0]

    for row in cmd_getlun_out:
        if (row[0], row[1]) in sg_dict.keys():
            row[3], row[4] = sg_dict[row[0], row[1]][0], \
                sg_dict[row[0], row[1]][1]
        else:
            row[3], row[4] = ('No Storage Group Found', '')
        row[12] = capacity_conversion(row[11])

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, cmd_getlun_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            if chr(col_n) == 'K':
                set_cell_to_number(cell, '0.00000')
            else:
                set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'LUNSTable', 'LUNs', final_col, final_row)

    return cmd_getlun_out
def process(workbook: Any, content: str) -> None:
    """Process SMB, NFS, Multiprotocol worksheets

    :param workbook:
    :param content:
    """
    sheets = ['SMB', 'NFS', 'Multiprotocol']

    row_tuples = dict()
    row_tuples['SMB'] = [
        'ArrayName', 'DataMover', 'ShareName', 'SharePath', 'RootPath',
        'Type', 'umask', 'maxusr', 'netbios', 'comment'
    ]
    row_tuples['NFS'] = [
        'Hostname', 'Server', 'MountedPath', 'FileSystem', 'Type', 'rw',
        'root', 'access'
    ]
    row_tuples['Multiprotocol'] = ['Hostname', 'Server', 'MountedPath', 'Type']

    server_export_out = run_parser_over(content, SERVER_EXPORT_TMPL)
    server_export_grouped = groupby(itemgetter(3), server_export_out)
    share, export, multi = classify_rows(server_export_grouped)

    for sheet, data_list in zip(sheets, [share, export, multi]):
        worksheet = workbook.get_sheet_by_name(sheet)
        build_header(worksheet, row_tuples[sheet])
        RowTuple = namedtuple('RowTuple', row_tuples[sheet])

        final_col, final_row = 0, 0
        for row_n, row_tuple in enumerate(map(RowTuple._make, data_list), 2):
            for col_n, col_value in \
                    enumerate(row_tuple._asdict().values(), ord('A')):
                cell = worksheet['{}{}'.format(column_format(col_n), row_n)]
                if isinstance(col_value, str):
                    cell.value = str.strip(col_value)
                else:
                    cell.alignment = Alignment(wrapText=True)
                    cell.value = '\n'.join(col_value)
                style_value_cell(cell)
                set_cell_to_number(cell)
                final_col = col_n
            final_row = row_n

        sheet_process_output(
            worksheet, '{}Table'.format(sheet), sheet, final_col, final_row)
def process(workbook: Any, content: str) -> list:
    """Process Disks worksheet

    :param workbook:
    :param content:
    """
    worksheet_name = 'Disks'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = get_parser_header(GETDISK_TMPL)

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    cmd_disks_out = run_parser_over(content, GETDISK_TMPL)
    cmd_disks_out = check_empty_arrays(
        list(unique(cmd_disks_out, key=itemgetter(0, 1, 2, 3))))

    for row in cmd_disks_out:
        row[7] = capacity_conversion(row[6])

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, cmd_disks_out), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                cell.alignment = Alignment(wrapText=True)
                cell.value = str.strip('\n'.join(col_value))
            style_value_cell(cell)
            if chr(col_n) == 'H':
                set_cell_to_number(cell, '0.00000')
            else:
                set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet, 'DisksTable', 'Disks', final_col, final_row)

    return cmd_disks_out
def process(workbook: Any, contents: list) -> None:
    """Process Storage Pool Summary worksheet

    :param workbook:
    :param contents:
    """
    worksheet_name = 'Storage Pool Summary'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = ['Hostname'] + get_parser_header(POOL_TMPL)

    RowTuple = namedtuple('RowTuple', headers)

    build_header(worksheet, headers)

    rows = []  # type: list
    for content in contents:
        doc = xmltodict.parse(content)
        component_details = search_tag_value(doc, 'component_details')
        command_details = search_tag_value(doc, 'command_details')

        pool = []  # type: list
        host = component_details['hostname']
        for entry in command_details:
            pool_content = collected_data(
                entry, 'cmd', 'isi storagepool list --format=list')
            pool = run_parser_over(
                pool_content, POOL_TMPL) if pool_content else pool
        rows += [[host] + row for row in pool]

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet,
        'StoragePoolSummaryTable',
        'Storage Pool Summary',
        final_col,
        final_row)