def process_zbx_metrics(self):
    """Read stored metrics and forward the zabbix-bound ones.

    Args:
        None

    Returns:
        a list of errors, if any
    """
    # Load everything currently persisted to disk.
    stored_metrics = self.metric_manager.read_metrics()

    # Only the zabbix subset is processed here.
    zbx_metrics = self.metric_manager.filter_zbx_metrics(stored_metrics)
    zbx_errors = self._process_normal_metrics(zbx_metrics)

    # Report our own processing stats as metrics; writing them to disk
    # means they can be retried later if sending fails.
    stats_metrics = [
        UniqueMetric(self._hostname, 'zagg.server.metrics.count', len(zbx_metrics)),
        UniqueMetric(self._hostname, 'zagg.server.metrics.errors', len(zbx_errors)),
    ]
    self.metric_manager.write_metrics(stats_metrics)

    return zbx_errors
def process_hb_metrics(self):
    """Read stored metrics and process the heartbeat ones.

    Heartbeats are processed before normal metrics elsewhere because
    they ensure the host exists in zabbix.

    Args:
        None

    Returns:
        a list of errors, if any
    """
    # Load everything currently persisted to disk.
    stored_metrics = self.metric_manager.read_metrics()

    # Filter down to the heartbeat subset and process it.
    hb_metrics = self.metric_manager.filter_heartbeat_metrics(stored_metrics)
    hb_errors = self._process_heartbeat_metrics(hb_metrics)

    # Report our own processing stats as metrics; writing them to disk
    # means they can be retried later if sending fails.
    stats_metrics = [
        UniqueMetric(self._hostname, 'zagg.server.heartbeat.count', len(hb_metrics)),
        UniqueMetric(self._hostname, 'zagg.server.heartbeat.errors', len(hb_errors)),
    ]
    self.metric_manager.write_metrics(stats_metrics)

    return hb_errors
def _process_heartbeat_metrics(self, hb_metrics):
    """Processes heartbeat metrics.

    This ensures that the referenced templates, hostgroups and host
    entries exist in zabbix, then sends a value for the heartbeat item
    of each host.

    Args:
        hb_metrics: a list of heartbeat metrics to process.

    Returns:
        a list of errors, if any
    """
    errors = []

    all_templates = []
    all_hostgroups = []

    # Collect all templates and hostgroups so we only process them 1 time.
    for hb_metric in hb_metrics:
        all_templates.extend(hb_metric.value['templates'])
        all_hostgroups.extend(hb_metric.value['hostgroups'])

    # Handle the Templates
    errors.extend(self._handle_templates(all_templates))

    # Handle the Hostgroups
    errors.extend(self._handle_hostgroups(all_hostgroups))

    for i, hb_metric in enumerate(hb_metrics):
        try:
            templates = hb_metric.value['templates']
            hostgroups = hb_metric.value['hostgroups']

            # Make sure there is a host entry in zabbix
            host_res = self.zbxapi.ensure_host_exists(hb_metric.host, templates, hostgroups)

            # Actually do the heartbeat now
            hbum = UniqueMetric(hb_metric.host, 'heartbeat.ping', 1)
            hb_res = self.zbxsender.send([hbum])

            # Remove the metric if we were able to successfully heartbeat
            if host_res and hb_res:
                self.logger.info("Sending heartbeat metric %s for host [%s] to Zabbix: success",
                                 i + 1, hb_metric.host)

                # We've successfully sent the heartbeat, so remove it from disk
                self.metric_manager.remove_metrics(hb_metric)
            else:
                raise Exception("Error while sending to zabbix")

        # Reason: disable pylint broad-except because we want to process as much as possible
        # Status: permanently disabled
        # pylint: disable=broad-except
        except Exception as error:
            # BUG FIX: the original logged `error.message`, which is
            # deprecated, absent on many exception types, and removed in
            # Python 3; str(error) is always safe.
            self.logger.info("Sending heartbeat metric %s for host [%s] to Zabbix: FAILED: %s",
                             i + 1, hb_metric.host, str(error))
            errors.append(error)

    return errors
def add_metric(self, metrics, host=None, synthetic=False, key_tags=None):
    """Create unique metrics from key/value pairs.

    Tags configured under 'metadata_rules' are attached to each metric
    whose key matches the rule's regex; runtime key_tags override any
    configured tags.

    Args:
        metrics: dict of metric key -> value
        host: host to report the metrics under; defaults to self.host
        synthetic: when True (and no host given), use the configured
            synthetic cluster-wide host name
        key_tags: optional dict of tags applied to every metric,
            overriding tags from the config rules

    Returns:
        None (metrics are appended to self.unique_metrics)
    """
    if not key_tags:
        key_tags = {}

    if synthetic and not host:
        host = self.config['synthetic_clusterwide']['host']['name']
    elif not host:
        host = self.host

    hawk_metrics = []
    config_rules = self.config.get('metadata_rules') or []

    for key, value in metrics.iteritems():
        # BUG FIX: build the tag dict fresh for each key. The original
        # created a single dict before the loop, so tags matched by a rule
        # for one key leaked onto every subsequent key's metric, and all
        # metrics aliased the very same dict object.
        metric_tags = {}

        # check config rules - add tags that match this key
        for rule in config_rules:
            compiled_rule = re.compile(rule.get('regex'))
            if compiled_rule.match(key):
                metric_tags.update(rule.get('tags') or {})

        # override configuration with runtime parameters
        metric_tags.update(key_tags)

        hawk_metrics.append(UniqueMetric(host, key, value, tags=metric_tags))

    self.unique_metrics += hawk_metrics
def add_dynamic_metric(self, discovery_key, macro_string, macro_array, host=None, synthetic=False):
    """Create a dynamic item prototype for Zabbix low-level discovery.

    Builds a key/value pair of the form:

        discovery_key = '{"data": [
            {"{#macro_string}": "macro_array[0]"},
            {"{#macro_string}": "macro_array[1]"},
        ]}'

    Args:
        discovery_key: the zabbix discovery rule key
        macro_string: the macro name (e.g. '#OSD')
        macro_array: iterable of macro values, one entry per discovered item
        host: host to report under; defaults to self.host
        synthetic: when True (and no host given), use the configured
            synthetic cluster-wide host name

    Returns:
        None (the metric is appended to self.unique_metrics)
    """
    if synthetic and not host:
        host = self.config['synthetic_clusterwide']['host']['name']
    elif not host:
        host = self.host

    # One {"{macro}": value} entry per discovered item.
    entries = [{'{%s}' % macro_string: item} for item in macro_array]
    payload = json.dumps({'data': entries})

    self.unique_metrics.append(UniqueMetric(host, discovery_key, payload))
def post(self, request, *args, **kwargs):
    """Handle the REST POST: persist the submitted metric to disk."""
    metric = UniqueMetric.from_request(request.data)
    manager = MetricManager('/tmp/metrics')
    manager.write_metrics(metric)
    return Response({"success": True})
def add_heartbeat(self, heartbeat, host=None):
    """Queue a heartbeat unique metric to send to zagg.

    Args:
        heartbeat: object carrying .templates and .hostgroups lists
        host: host to report under; defaults to self.host
    """
    host = host or self.host
    self.unique_metrics.append(
        UniqueMetric.create_heartbeat(host,
                                      heartbeat.templates,
                                      heartbeat.hostgroups,
                                     )
    )
def add_pcp_metrics(self, pcp_metrics, host=None):
    """Evaluate a list of pcp metrics via pminfo and queue the results.

    Args:
        pcp_metrics: list of pcp metric names to fetch
        host: host to report under; defaults to self.host
    """
    host = host or self.host

    fetched = pminfo.get_metrics(pcp_metrics)
    for name, value in fetched.iteritems():
        self.unique_metrics.append(UniqueMetric(host, name, value))
def add_zabbix_keys(self, zabbix_keys, host=None):
    """Queue unique metrics built from zabbix key/value pairs.

    Args:
        zabbix_keys: dict of zabbix item key -> value
        host: host to report under; defaults to self.host
    """
    host = host or self.host
    self.unique_metrics += [UniqueMetric(host, key, value)
                            for key, value in zabbix_keys.iteritems()]
def post(self, request, *args, **kwargs):
    """Handle the REST POST: write the submitted metric to every configured target.

    Args:
        request: the incoming REST request whose .data holds the metric

    Returns:
        a Response reporting success
    """
    config_file = '/etc/openshift_tools/zagg_server.yaml'

    # BUG FIX: the original `yaml.load(file(config_file))` never closed the
    # file handle and used the unsafe full YAML loader; `with open` guarantees
    # the handle is closed and safe_load is sufficient for a plain config file.
    with open(config_file) as yaml_file:
        config = yaml.safe_load(yaml_file)

    # Persist a fresh copy of the metric to each configured target path.
    for target in config['targets']:
        new_metric = UniqueMetric.from_request(request.data)
        mymm = MetricManager(target['path'])
        mymm.write_metrics(new_metric)

    return Response({"success": True})
def run(self):
    """Send a heartbeat to every target defined in the config file.

    Args:
        None

    Returns:
        None
    """
    hostname = socket.gethostname()

    for target in self.config["targets"]:
        manager = MetricManager(target["path"])
        heartbeat = UniqueMetric.create_heartbeat(hostname,
                                                  self.config["templates"],
                                                  self.config["hostgroups"])
        print("Writing heartbeat to %s/%s" % (target["path"], heartbeat.filename))
        manager.write_metrics(heartbeat)
def run(self):
    """Send a heartbeat (fixed template/hostgroup) to every configured target.

    Args:
        None

    Returns:
        None
    """
    hostname = socket.gethostname()

    for target in self.config['targets']:
        manager = MetricManager(target['path'])
        heartbeat = UniqueMetric.create_heartbeat(hostname,
                                                  ['Template Heartbeat'],
                                                  ['Linux servers'])
        print('Writing heartbeat to %s/%s' % (target['path'], heartbeat.filename))
        manager.write_metrics(heartbeat)
def add_metric(self, metrics, host=None, synthetic=False, key_tags=None):
    """Queue unique metrics built from zabbix key/value pairs.

    Args:
        metrics: dict of metric key -> value
        host: host to report under; defaults to self.host
        synthetic: when True (and no host given), use the configured
            synthetic cluster-wide host name
        key_tags: optional tags passed through to each UniqueMetric
    """
    if synthetic and not host:
        host = self.config['synthetic_clusterwide']['host']['name']
    elif not host:
        host = self.host

    self.unique_metrics += [UniqueMetric(host, key, value, tags=key_tags)
                            for key, value in metrics.iteritems()]
def run(self):
    """Send a heartbeat to every target (by name) defined in the config file.

    Args:
        None

    Returns:
        None
    """
    hostname = socket.gethostname()

    for target in self.config['targets']:
        manager = MetricManager(target['name'])
        heartbeat = UniqueMetric.create_heartbeat(hostname,
                                                  self.config['templates'],
                                                  self.config['hostgroups'])
        print('Writing heartbeat to %s' % (target['name']))
        manager.write_metrics(heartbeat)
def process_metric():
    ''' Receive POSTs to the '/metric' URL endpoint and process/save them '''
    if request.method != 'POST':
        flask_app.logger.error('Unexpectedly received non-POST request (GET?)')
        return jsonify({"success": False})

    payload = request.get_json()

    # Persist a fresh copy of the metric to each configured target.
    for target in flask_app.config['targets']:
        metric = UniqueMetric.from_request(payload)
        MetricManager(target['name']).write_metrics(metric)

    return jsonify({"success": True})
def _process_heartbeat_metrics(self, hb_metrics):
    """Processes heartbeat metrics.

    Ensures the referenced templates, hostgroups and host entries exist
    in zabbix, then sends a value for the heartbeat item of each host.

    Args:
        hb_metrics: a list of heartbeat metrics to process.

    Returns:
        nothing
    """
    templates = []
    hostgroups = []

    # Gather every template/hostgroup up front so each is handled once.
    for hb_metric in hb_metrics:
        templates.extend(hb_metric.value['templates'])
        hostgroups.extend(hb_metric.value['hostgroups'])

    # Make sure each distinct template exists in zabbix.
    for template in set(templates):
        self.zbxapi.ensure_template_exists(template)

    # Make sure each distinct hostgroup exists in zabbix.
    for hostgroup in set(hostgroups):
        self.zbxapi.ensure_hostgroup_exists(hostgroup)

    for hb_metric in hb_metrics:
        # Ensure the host entry exists in zabbix.
        host_ok = self.zbxapi.ensure_host_exists(hb_metric.host,
                                                 hb_metric.value['templates'],
                                                 hb_metric.value['hostgroups'])

        # Now send the actual heartbeat ping.
        ping = UniqueMetric(hb_metric.host, 'heartbeat.ping', 1)
        send_ok = self.zbxsender.send([ping])

        if host_ok and send_ok:
            # Successfully sent the heartbeat, so remove it from disk.
            self.metric_manager.remove_metrics(hb_metric)
        else:
            # TODO: add logging of the failure, and signal of failure
            # For now, we'll just leave them on disk and try again
            pass
def process_metric():
    ''' Receive POSTs to the '/metric' URL endpoint and process/save them '''
    if request.method != 'POST':
        flask_app.logger.error('Unexpectedly received non-POST request (GET?)')
        return jsonify({"success": False})

    config_file = '/etc/openshift_tools/zagg_server.yaml'

    # BUG FIX: the original `yaml.load(file(config_file))` never closed the
    # file handle and used the unsafe full YAML loader; `with open` guarantees
    # the handle is closed and safe_load is sufficient for a plain config file.
    with open(config_file) as yaml_file:
        config = yaml.safe_load(yaml_file)

    # Renamed the local from `json` to `payload` so it no longer shadows
    # the json module.
    payload = request.get_json()

    # Persist a fresh copy of the metric to each configured target path.
    for target in config['targets']:
        new_metric = UniqueMetric.from_request(payload)
        mymm = MetricManager(target['path'])
        mymm.write_metrics(new_metric)

    return jsonify({"success": True})