def parse_wmi_table(info, key="Name"):
    """Parse WMI agent output into a dict of tables keyed by table name.

    ``info`` is consumed as an iterator of pre-split rows (each row is a
    list of string fields) — TODO confirm against the agent section format.
    An optional leading ``sampletime`` row supplies a timestamp and sampling
    frequency that are passed on to every table. A row of the form
    ``[tablename]`` starts a new named table (multi-table input); otherwise
    the whole input is treated as one unnamed table.

    Returns the ``parsed`` dict populated by ``_prepare_wmi_table``
    (one entry per table; exact value type is defined by that helper).
    """
    parsed = {}
    info_iter = iter(info)
    try:
        # read input line by line. rows with [] start the table name.
        # Each table has to start with a header line
        line = next(info_iter)
        timestamp, frequency = None, None
        if line[0] == "sampletime":
            # optional global header: ["sampletime", <timestamp>, <frequency>]
            timestamp, frequency = int(line[1]), int(line[2])
            line = next(info_iter)
        while True:
            if len(line) == 1 and line[0].startswith("["):
                # multi-table input
                tablename = regex(r"\[(.*)\]").search(line[0]).group(1)
                # Did subsection get WMI timeout?
                line = next(info_iter)
            else:
                # single-table input
                tablename = ""
            # `line` is now the table's header row; the helper decides whether
            # a synthetic WMI status column has to be appended to data rows.
            missing_wmi_status, current_table =\
                _prepare_wmi_table(parsed, tablename, line, key, timestamp, frequency)
            # read table content
            line = next(info_iter)
            while not line[0].startswith("["):
                # pad with an "OK" status cell only when the header lacked one
                current_table.add_row(line + ['OK'] * bool(missing_wmi_status))
                line = next(info_iter)
    except (StopIteration, ValueError):
        # regular end of block
        # (ValueError presumably stems from the int() conversions above —
        #  malformed sampletime rows end parsing early; verify intent)
        pass
    return parsed
def _legacy_docker_parse_table(rows, keys):
    '''docker provides us with space separated tables with field containing spaces e.g.:

    TYPE TOTAL ACTIVE SIZE RECLAIMABLE
    Images 7 6 2.076 GB 936.9 MB (45%)
    Containers 22 0 2.298 GB 2.298 GB (100%)
    Local Volumes 5 5 304 B 0 B (0%)
    '''
    if not rows or not rows[0]:
        return []

    header = rows[0][0]

    def _column_span(name):
        # Locate a column by its upper-cased header label, including any
        # padding spaces that follow it; (0, 0) yields "" for absent columns.
        label = name.upper()
        found = regex(label + r'\ *').search(header)
        if found is None:
            return (0, 0)
        begin, stop = found.start(), found.end()
        if stop - begin == len(label):
            # no trailing padding matched -> last column, slice to end of line
            stop = None
        return (begin, stop)

    spans = [_column_span(name) for name in keys]

    parsed = []
    for entry in rows[1:]:
        if not entry:
            continue
        try:
            record = {name: entry[0][lo:hi].strip() for name, (lo, hi) in zip(keys, spans)}
        except IndexError:
            continue
        parsed.append(record)
    return parsed
def parse_runmqsc_display_output(info, group_by_object):
    """Parse the output of IBM MQ ``runmqsc`` DISPLAY commands.

    ``info`` is iterated as single-element rows ``(line, )`` of raw text.
    Lines matching ``QMNAME(...) STATUS(...) NOW(...)`` introduce a queue
    manager; subsequent ``KEY(value)`` attribute lines are accumulated and,
    on the next ``AMQxxxx:`` group marker (or at end of input), recorded
    under ``"<qmname>:<object>"`` where ``<object>`` is the attribute named
    by ``group_by_object``. SYSTEM and AMQ.MQEXPLORER objects are skipped.

    Returns a dict mapping queue-manager names to ``{'STATUS', 'NOW'}``
    dicts and grouped object names to their attribute dicts.
    """
    re_intro = regex(r"^QMNAME\((.*)\)[\s]*STATUS\((.*?)\)[\s]*NOW\((.*)\)")
    re_group = regex(r"^AMQ\d+\w?: [^.]*.")
    re_key = regex(r"[\s]*[A-Z0-9]+\(")
    re_second_column = regex(r" [A-Z0-9]+\(")
    re_key_value = regex(r"([A-Z0-9]+)\((.*)\)[\s]*")

    def record_attribute(s, attributes, parsed):
        # Store one KEY(value) pair; silently ignore non-matching text.
        pair = re_key_value.match(s)
        if pair is None:
            return
        key = pair.group(1)
        value = pair.group(2).strip()
        attributes[key] = value

    def record_group(qmname, attributes, parsed):
        # Flush accumulated attributes under "<qmname>:<object>", skipping
        # internal SYSTEM / MQ Explorer objects.
        obj = attributes.get(group_by_object)
        if obj is not None and not obj.startswith(
                ("SYSTEM", "AMQ.MQEXPLORER")):
            obj = "%s:%s" % (qmname, obj)
            parsed.setdefault(obj, {})
            parsed[obj].update(attributes)

    def lookahead(iterable):
        """
        Pass through all values from the given iterable, augmented by the
        information if there are more values to come after the current one
        (True), or if it is the last value (False).
        """
        sentinel = object()
        previous = sentinel
        for value in iter(iterable):
            if previous is not sentinel:
                yield previous, True
            previous = value
        # BUGFIX: only emit the trailing pair if we saw at least one value;
        # previously an empty iterable yielded the sentinel itself, which the
        # caller then failed to unpack (TypeError).
        if previous is not sentinel:
            yield previous, False

    parsed: Dict[Any, Any] = {}
    attributes: Dict[Any, Any] = {}
    for (line, ), has_more in lookahead(info):
        intro_line = re_intro.match(line)
        if intro_line:
            if attributes:
                # NOTE(review): relies on an intro line having appeared before
                # any attribute lines, otherwise qmname would be unbound.
                record_group(qmname, attributes, parsed)  # type: ignore[has-type]
                attributes.clear()
            qmname = intro_line.group(1)
            qmstatus = intro_line.group(2)
            now = intro_line.group(3)
            parsed[qmname] = {'STATUS': qmstatus, 'NOW': now}
            continue
        if re_group.match(line) or not has_more:
            # group separator or end of input: flush the pending object
            if attributes:
                record_group(qmname, attributes, parsed)
                attributes.clear()
            continue
        if re_key.match(line):
            # runmqsc prints two attribute columns; a second KEY( starting
            # after column 39 indicates the two-column layout.
            if re_second_column.match(line[39:]):
                first_half = line[:40]
                second_half = line[40:]
                record_attribute(first_half, attributes, parsed)
                record_attribute(second_half, attributes, parsed)
            else:
                record_attribute(line, attributes, parsed)
    return parsed