def match_and_append(self, compiled_matchers, om, found, rm):
    """Append om to rm for the first matcher whose regex matches it.

    The generated id is recorded in found so the same process object
    map is never appended twice. Matching stops at the first matcher
    that accepts the process.
    """
    for matcher in compiled_matchers:
        # Build the text the regex is tested against, honoring the
        # matcher's choice to ignore command-line parameters.
        if matcher['ignoreParametersWhenModeling']:
            match_text = om.procName
            identifier_params = None
        else:
            match_text = (om.procName + ' ' + om.parameters).rstrip()
            identifier_params = om.parameters

        if matcher['regex_search_function'](match_text):
            om.setOSProcessClass = matcher['getPrimaryDmdId']
            om.id = self.prepId(
                getProcessIdentifier(om.procName, identifier_params))

            if om.id not in found:
                found[om.id] = True
                rm.append(om)

            # Stop once a match is found.
            return
def onSuccess(self, results, config):
    """Build events and datapoint values from WMI process query results.

    results maps query objects (with a .wql attribute) to result rows.
    Correlates Win32_Process rows to configured process datasources,
    tracks PIDs across cycles to detect restarts, and aggregates
    Win32_PerfFormattedData_PerfProc_Process datapoints per component.

    Returns the new_data() dict with 'events' and 'values' populated.
    """
    data = self.new_data()

    datasource_by_pid = {}
    metrics_by_component = collections.defaultdict(
        lambda: collections.defaultdict(list))

    # Used for process restart checking.
    if not hasattr(self, 'previous_pids_by_component'):
        self.previous_pids_by_component = collections.defaultdict(set)

    pids_by_component = collections.defaultdict(set)

    # Match datasources in a stable, user-controlled order.
    sorted_datasource = sorted(
        config.datasources,
        key=lambda x: x.params.get('sequence', 0))

    # Win32_Process: Counts and correlation to performance table.
    process_key = [x for x in results if 'Win32_Process' in x.wql][0]
    for item in results[process_key]:
        processText = get_processText(item)

        for datasource in sorted_datasource:
            regex = re.compile(datasource.params['regex'])

            # Zenoss 4.2 2013-10-15 RPS style.
            if 'replacement' in datasource.params:
                matcher = OSProcessDataMatcher(
                    includeRegex=datasource.params['includeRegex'],
                    excludeRegex=datasource.params['excludeRegex'],
                    replaceRegex=datasource.params['replaceRegex'],
                    replacement=datasource.params['replacement'],
                    primaryUrlPath=datasource.params['primaryUrlPath'],
                    generatedId=datasource.params['generatedId'])

                if not matcher.matches(processText):
                    continue

            # Zenoss 4.2 intermediate style
            elif hasattr(OSProcess, 'matchRegex'):
                excludeRegex = re.compile(
                    datasource.params['excludeRegex'])

                basic_match = OSProcess.matchRegex(
                    regex, excludeRegex, processText)

                if not basic_match:
                    continue

                capture_match = OSProcess.matchNameCaptureGroups(
                    regex, processText, datasource.component)

                if not capture_match:
                    continue

            # Zenoss 4.1-4.2 style.
            else:
                if datasource.params['ignoreParameters']:
                    processText = item.ExecutablePath or item.Name

                name, args = get_processNameAndArgs(item)
                if datasource.params['ignoreParameters']:
                    proc_id = getProcessIdentifier(name, None)
                else:
                    proc_id = getProcessIdentifier(name, args)

                if datasource.component != prepId(proc_id):
                    continue

            datasource_by_pid[item.ProcessId] = datasource
            pids_by_component[datasource.component].add(item.ProcessId)

            # Track process count. Append 1 each time we find a
            # match because the generic aggregator below will sum
            # them up to the total count.
            metrics_by_component[
                datasource.component][COUNT_DATAPOINT].append(1)

            # Don't continue matching once a match is found.
            break

    # Send process status events.
    for datasource in config.datasources:
        component = datasource.component

        if COUNT_DATAPOINT in metrics_by_component[component]:
            severity = 0
            summary = 'matching processes running'

            # Process restart checking.
            previous_pids = self.previous_pids_by_component.get(component)
            current_pids = pids_by_component.get(component)

            # No restart if there are no current or previous PIDs.
            if previous_pids and current_pids:
                # Only consider PID changes a restart if all PIDs
                # matching the process changed.
                if current_pids.isdisjoint(previous_pids):
                    summary = 'matching processes restarted'

                    # If the process is configured to alert on
                    # restart, the first "up" won't be a clear.
                    if datasource.params['alertOnRestart']:
                        severity = datasource.params['severity']
        else:
            severity = datasource.params['severity']
            summary = 'no matching processes running'

            # Add a 0 count for process that aren't running.
            metrics_by_component[component][COUNT_DATAPOINT].append(0)

        data['events'].append({
            'device': datasource.device,
            'component': component,
            'eventClass': datasource.eventClass,
            'eventGroup': 'Process',
            'summary': summary,
            'severity': severity,
        })

    # Prepare for next cycle's restart check by merging current
    # process PIDs with previous. This is to catch restarts that
    # stretch across more than subsequent cycles.
    self.previous_pids_by_component.update(
        (c, p) for c, p in pids_by_component.iteritems() if p)

    # Win32_PerfFormattedData_PerfProc_Process: Datapoints.
    perf_keys = [x for x in results if 'Win32_Perf' in x.wql]
    if perf_keys:
        for item in results[perf_keys[0]]:
            if item.IDProcess not in datasource_by_pid:
                continue

            datasource = datasource_by_pid[item.IDProcess]
            for point in datasource.points:
                if point.id == COUNT_DATAPOINT:
                    continue

                # Fetch the raw attribute first so the conversion
                # failure log can reference it. The original logged
                # `value` in the (TypeError, ValueError) handler, but
                # the failed `value = int(...)` assignment left
                # `value` unbound there, raising NameError (or
                # logging a stale value from a prior iteration).
                try:
                    raw_value = getattr(item, point.id)
                except AttributeError:
                    LOG.warn(
                        "%s %s: %s not in result",
                        datasource.device,
                        datasource.component,
                        point.id)
                    continue

                try:
                    value = int(raw_value)
                except (TypeError, ValueError):
                    LOG.warn(
                        "%s %s %s: Couldn't convert %r to integer",
                        datasource.device,
                        datasource.component,
                        point.id,
                        raw_value)
                else:
                    metrics_by_component[datasource.component][
                        point.id].append(value)

    # Aggregate and store datapoint values.
    for component, points in metrics_by_component.iteritems():
        for point, values in points.iteritems():
            if point in NON_AGGREGATED_DATAPOINTS:
                value = values[0]
            else:
                value = sum(values)

            data['values'][component][point] = (value, 'N')

    # Send overall clear.
    data['events'].append({
        'device': config.id,
        'severity': Event.Clear,
        'eventClass': Status_OSProcess,
        'summary': 'process scan successful',
    })

    generateClearAuthEvents(config, data['events'])

    return data
def onSuccess(self, results, config):
    """Build events and datapoint values from WMI process query results.

    results maps query objects (with a .wql attribute) to result rows.
    Correlates Win32_Process rows to configured process datasources,
    tracks PIDs across cycles to detect restarts, and aggregates
    Win32_PerfFormattedData_PerfProc_Process datapoints per component.

    Returns the new_data() dict with 'events' and 'values' populated.
    """
    data = self.new_data()

    datasource_by_pid = {}
    metrics_by_component = collections.defaultdict(
        lambda: collections.defaultdict(list))

    # Used for process restart checking.
    if not hasattr(self, 'previous_pids_by_component'):
        self.previous_pids_by_component = collections.defaultdict(set)

    pids_by_component = collections.defaultdict(set)

    # Win32_Process: Counts and correlation to performance table.
    process_key = [x for x in results if 'Win32_Process' in x.wql][0]
    for item in results[process_key]:
        processText = get_processText(item)

        for datasource in config.datasources:
            regex = re.compile(datasource.params['regex'])

            # Zenoss 4.2 2013-10-15 RPS style.
            if 'replacement' in datasource.params:
                matcher = OSProcessDataMatcher(
                    includeRegex=datasource.params['includeRegex'],
                    excludeRegex=datasource.params['excludeRegex'],
                    replaceRegex=datasource.params['replaceRegex'],
                    replacement=datasource.params['replacement'],
                    primaryUrlPath=datasource.params['primaryUrlPath'],
                    generatedId=datasource.params['generatedId'])

                if not matcher.matches(processText):
                    continue

            # Zenoss 4.2 intermediate style
            elif hasattr(OSProcess, 'matchRegex'):
                excludeRegex = re.compile(
                    datasource.params['excludeRegex'])

                basic_match = OSProcess.matchRegex(
                    regex, excludeRegex, processText)

                if not basic_match:
                    continue

                capture_match = OSProcess.matchNameCaptureGroups(
                    regex, processText, datasource.component)

                if not capture_match:
                    continue

            # Zenoss 4.1-4.2 style.
            else:
                if datasource.params['ignoreParameters']:
                    processText = item.ExecutablePath or item.Name

                name, args = get_processNameAndArgs(item)
                if datasource.params['ignoreParameters']:
                    proc_id = getProcessIdentifier(name, None)
                else:
                    proc_id = getProcessIdentifier(name, args)

                if datasource.component != prepId(proc_id):
                    continue

            datasource_by_pid[item.ProcessId] = datasource
            pids_by_component[datasource.component].add(item.ProcessId)

            # Track process count. Append 1 each time we find a
            # match because the generic aggregator below will sum
            # them up to the total count.
            metrics_by_component[
                datasource.component][COUNT_DATAPOINT].append(1)

    # Send process status events.
    for datasource in config.datasources:
        component = datasource.component

        if COUNT_DATAPOINT in metrics_by_component[component]:
            severity = 0
            summary = 'matching processes running'

            # Process restart checking.
            previous_pids = self.previous_pids_by_component.get(component)
            current_pids = pids_by_component.get(component)

            # No restart if there are no current or previous PIDs.
            if previous_pids and current_pids:
                # Only consider PID changes a restart if all PIDs
                # matching the process changed.
                if current_pids.isdisjoint(previous_pids):
                    summary = 'matching processes restarted'

                    # If the process is configured to alert on
                    # restart, the first "up" won't be a clear.
                    if datasource.params['alertOnRestart']:
                        severity = datasource.params['severity']
        else:
            severity = datasource.params['severity']
            summary = 'no matching processes running'

            # Add a 0 count for process that aren't running.
            metrics_by_component[component][COUNT_DATAPOINT].append(0)

        data['events'].append({
            'device': datasource.device,
            'component': component,
            'eventClass': datasource.eventClass,
            'eventGroup': 'Process',
            'summary': summary,
            'severity': severity,
        })

    # Prepare for next cycle's restart check by merging current
    # process PIDs with previous. This is to catch restarts that
    # stretch across more than subsequent cycles.
    self.previous_pids_by_component.update(
        (c, p) for c, p in pids_by_component.iteritems() if p)

    # Win32_PerfFormattedData_PerfProc_Process: Datapoints.
    # Guard against a missing Win32_Perf result set; the original
    # indexed [0] unconditionally, raising IndexError when no perf
    # query returned. This also matches the sibling onSuccess variant.
    perf_keys = [x for x in results if 'Win32_Perf' in x.wql]
    if perf_keys:
        for item in results[perf_keys[0]]:
            if item.IDProcess not in datasource_by_pid:
                continue

            datasource = datasource_by_pid[item.IDProcess]
            for point in datasource.points:
                if point.id == COUNT_DATAPOINT:
                    continue

                # Fetch the raw attribute first so the conversion
                # failure log can reference it. The original logged
                # `value` in the (TypeError, ValueError) handler, but
                # the failed `value = int(...)` assignment left
                # `value` unbound there, raising NameError (or
                # logging a stale value from a prior iteration).
                try:
                    raw_value = getattr(item, point.id)
                except AttributeError:
                    LOG.warn(
                        "%s %s: %s not in result",
                        datasource.device,
                        datasource.component,
                        point.id)
                    continue

                try:
                    value = int(raw_value)
                except (TypeError, ValueError):
                    LOG.warn(
                        "%s %s %s: Couldn't convert %r to integer",
                        datasource.device,
                        datasource.component,
                        point.id,
                        raw_value)
                else:
                    metrics_by_component[datasource.component][
                        point.id].append(value)

    # Aggregate and store datapoint values.
    for component, points in metrics_by_component.iteritems():
        for point, values in points.iteritems():
            if point in NON_AGGREGATED_DATAPOINTS:
                value = values[0]
            else:
                value = sum(values)

            data['values'][component][point] = (value, 'N')

    # Send overall clear.
    data['events'].append({
        'device': config.id,
        'severity': Event.Clear,
        'eventClass': Status_OSProcess,
        'summary': 'process scan successful',
    })

    return data