def multi_match_generator(cls, data, parser, mm_key): if not hasattr(data, "items"): # Is a list, process each list item for item in data: for _ in cls.multi_match_generator(item, parser, mm_key="@"): yield _ return onlyif = parser.get("onlyif", None) if onlyif is not None and not hasattr(onlyif, "items"): onlyif = {"key": onlyif} # Decide how to iterate on the data # Options are: # Return result_dict per match in dict (if: data is dict) # Return one result_dict for whole dict (if: data is dict) if mm_key == "@" or parser.get("match_all", False): # Treat the entire data as a single match # Returns a single result_dict data = [(None, data)] else: # Each matching key is a separate result_dict data = data.items() for (k, v) in data: if onlyif is not None: if not hasattr(onlyif, "items"): onlyif = {"key": onlyif} value = cls.get_value(v, onlyif["key"], None) if value is None: continue elif "regex" in onlyif: rex = re.compile(onlyif["regex"], re.I) if not rex.search(value): continue else: if not bool(value): continue result_dict = OrderedDict() for mm_parser in parser["keys"]: for mm_result_dict in cls.get_result_dicts(v, mm_parser, mm_key=k, onlyif=onlyif): result_dict.update(mm_result_dict) if len(result_dict) > 0: result_dict.labels = parser.get("labels", None) yield result_dict
def build_result(self, parser, result_dict):
    """Assemble the final result from *result_dict* and the parser config.

    Applies "defaults", renames keys via "map" (mapping a key to None
    drops it), optionally wraps the result under "pretty_name", and
    carries over any ``labels`` attribute from *result_dict*.
    """
    defaults_dict = parser.get("defaults", {})
    result = OrderedDict()
    result.update(defaults_dict)
    result.update(result_dict)
    # A None key comes from "match the whole dict" mode; drop it.
    result.pop(None, None)
    if "map" in parser:
        for (old, new) in parser["map"].items():
            if new is None:
                # Mapping to None deletes the key; tolerate it being
                # absent (plain .pop(old) would raise KeyError).
                result.pop(old, None)
            elif old in result:
                result[new] = result.pop(old)
    # NOTE(review): defaults are applied a second time here, so a default
    # overrides any value produced above -- presumably intentional
    # ("forced" values); confirm before changing.
    if "defaults" in parser:
        for (k, v) in parser["defaults"].items():
            result[k] = v
    if "pretty_name" in parser:
        result = OrderedDict([
            ("value", result),
            ("pretty_name", parser["pretty_name"]),
        ])
    if hasattr(result_dict, "labels"):
        result.labels = result_dict.labels
    return result
def multi_match_generator(cls, data, parser, mm_key): if not hasattr(data, "items"): # Is a list, process each list item for item in data: for _ in cls.multi_match_generator(item, parser, mm_key="@"): yield _ return onlyif = parser.get("onlyif", None) if onlyif is not None and not hasattr(onlyif, "items"): onlyif = {"key": onlyif} # Decide how to iterate on the data # Options are: # Return result_dict per match in dict (if: data is dict) # Return one result_dict for whole dict (if: data is dict) if mm_key == "@" or parser.get("match_all", False): # Treat the entire data as a single match # Returns a single result_dict data = [(None, data)] else: # Each matching key is a separate result_dict data = data.items() for (k, v) in data: if onlyif is not None: if not hasattr(onlyif, "items"): onlyif = {"key": onlyif} value = cls.get_value(v, onlyif["key"], None) if value is None: continue elif "regex" in onlyif: rex = re.compile(onlyif["regex"], re.I) if not rex.search(value): continue # Check for maxage key in onlyif. If it exists, parse it as Splunk relative time syntax and compare to parsed input "value" elif "maxage" in onlyif: age = parse(value) if not onlyif["maxage"].startswith( "-"): # Assume we want dates in the past print( '\033[91m' + 'WARNING: maxage must be prepended with "-" Please correct this in your configuration file.' + '\033[0m') onlyif["maxage"] = "-%s" % onlyif["maxage"] ageout = timeParser(onlyif["maxage"]).replace(tzinfo=None) if age < ageout: continue else: if not bool(value): continue result_dict = OrderedDict() for mm_parser in parser["keys"]: for mm_result_dict in cls.get_result_dicts(v, mm_parser, mm_key=k, onlyif=onlyif): result_dict.update(mm_result_dict) if result_dict: result_dict.labels = parser.get("labels", None) yield result_dict