def test_get_routingkey(self):
    event = self.get_ref_event()
    rk = get_routingkey(event)
    self.assertEqual(rk, self.refrk)

    event = self.get_ref_event()
    event['source_type'] = 'caps'
    rk = get_routingkey(event)
    self.assertEqual(rk, self.refrk)
def test_get_routingkey_overrides_source_type(self):
    event = self.get_ref_event()
    del event['source_type']
    rk = get_routingkey(event)
    self.assertEqual(rk, self.refrk)
    self.assertEqual(event['source_type'], 'resource')

    del event['resource']
    rk = get_routingkey(event)
    self.assertEqual(rk, self.refrk_component)
    self.assertNotIn('resource', event)
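# A minimal sketch inferred from the two tests above (an assumption, not
# taken from the source): the routing key appears to be the dot-joined
# event fields, with ``resource`` appended only for resource events, and
# ``source_type`` normalized from the presence of a 'resource' key.
def _sketch_get_routingkey(event):
    # Both tests suggest source_type is rewritten from the presence of a
    # 'resource' key, overriding any provided value ('caps' above).
    event['source_type'] = 'resource' if 'resource' in event \
        else 'component'

    parts = [
        event['connector'],
        event['connector_name'],
        event['event_type'],
        event['source_type'],
        event['component']
    ]

    # A KeyError propagates for incomplete events, matching
    # test_get_routingkey_raise further below.
    if event['source_type'] == 'resource':
        parts.append(event['resource'])

    return '.'.join(parts)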
def publish_event(self, name, event, sleep=2):
    rk = get_routingkey(event)

    self.logger.debug("Sending event {}".format(name))
    self.amqp.publish(event, rk, 'canopsis.events')

    time.sleep(sleep)
def _publish_event(event):
    rk = event.get('rk', get_routingkey(event))

    self.logger.info(u"Sending event {}".format(rk))
    self.logger.debug(event)

    publish(
        event=event,
        rk=rk,
        publisher=self.amqp
    )
def canopsis_event(self, event, exchange_name='canopsis.events',
                   retries=3, wait=1):
    """
    Shortcut to self.json_document: builds the routing key for you
    from the event.

    Required event keys:
        connector
        connector_name
        event_type
        source_type
        component
        resource (if source_type == 'resource')

    :param event: valid Canopsis event.
    :param exchange_name: exchange to publish to.
    :raises KeyError: on invalid event, if the routing key cannot be built.
    """
    return self.json_document(event, exchange_name, get_routingkey(event),
                              retries=retries, wait=wait)
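# Usage sketch for canopsis_event above (field values are hypothetical;
# ``publisher`` stands for an instance of the class defining the helper):
def _example_canopsis_event(publisher):
    event = {
        'connector': 'cli',
        'connector_name': 'send_event',
        'event_type': 'check',
        'source_type': 'resource',
        'component': 'localhost',
        'resource': 'cpu_load'
    }
    # The routing key is built internally via get_routingkey(event);
    # a KeyError is raised if any required key is missing.
    return publisher.canopsis_event(event)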
def get_last_state(self, event):
    """
    Retrieve the last event state from the database.

    This is a subset of the information returned by a find query,
    focused on the state.
    """
    existing_event = self.get(get_routingkey(event), {})

    return existing_event.get('state', self.default_state)
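# Usage sketch for get_last_state above (``manager`` is an assumed
# stand-in for the storage-backed object defining the method). The routing
# key built from the event doubles as the stored document's id.
def _example_last_state(manager, event):
    # Returns the stored state, or manager.default_state if the event
    # was never stored.
    return manager.get_last_state(event)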
def test_01(self):
    event = forger(
        connector='unittest',
        source_type='component',
        connector_name='test1',
        event_type='log'
    )
    rk = get_routingkey(event)
    print(rk)
    print(event)
def publish(event, publisher, rk=None, exchange=None, logger=None, **kwargs):
    """Task dedicated to publishing an event from an engine.

    :param dict event: event to send.
    :param publisher: object in charge of publishing the event. Its method
        ``publish`` takes three parameters: the ``event``, the ``rk``
        related to the event and an ``exchange name``.
    :param str rk: routing key to use. If None, use get_routingkey(event).
    :param str exchange: exchange name. If None, use
        ``publisher.exchange_name_events``.
    """
    if exchange is None:
        exchange = publisher.exchange_name_events

    if rk is None:
        rk = get_routingkey(event)

    publisher.publish(event, rk, exchange)
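# Minimal sketch of the fallback behaviour above, using a stub publisher
# (the stub class and its print output are assumptions for illustration):
class _StubPublisher(object):
    exchange_name_events = 'canopsis.events'

    def publish(self, event, rk, exchange):
        print('{} -> {} ({})'.format(rk, exchange, event))

# With no explicit rk, the task derives one from the event:
#     publish(event, _StubPublisher())
# With an explicit rk and exchange, both are used verbatim:
#     publish(event, _StubPublisher(), rk='a.b.c.d.e', exchange='amq.direct')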
def canopsis_event(self, event, exchange_name='canopsis.events',
                   retries=3, wait=1):
    """
    Send an event to canopsis.

    :param event dict: a canopsis event (as a dictionary).
    :param exchange_name str: the name of the exchange to publish to.
    :param retries int: the number of times the publication should be
        retried in case of failure.
    :param wait float: the number of seconds to wait before retrying
        the publication.
    :raises KeyError: on invalid event, if the routing key cannot be built.
    :raises AmqpPublishError: when all retries failed, raise this error.
    :raises TypeError: when the document cannot be serialized.
    """
    return self.json_document(event, exchange_name, get_routingkey(event),
                              retries=retries, wait=wait)
def publish(event, publisher, rk=None, exchange=None, **kwargs):
    """Task dedicated to publishing an event from an engine.

    :param dict event: event to send.
    :param publisher: resource able to publish the event with a publish
        method. This method takes three parameters: the event, the rk
        related to the event and an exchange name.
    :param str rk: routing key to use. If None, use get_routingkey(event).
    :param str exchange: exchange name. If None, use
        publisher.exchange_name_events.
    """
    if exchange is None:
        exchange = publisher.exchange_name_events

    if rk is None:
        rk = get_routingkey(event)

    publisher.publish(event, rk, exchange)
def consume_dispatcher(self, event, *args, **kargs):
    selector = self.get_ready_record(event)

    if selector:
        event_id = event['_id']

        # Load the associated class
        selector = Selector(
            storage=self.storage,
            record=selector,
            logging_level=self.logging_level)
        name = selector.display_name

        self.logger.debug('----------SELECTOR----------\n')
        self.logger.debug(u'Selector {} found, start processing..'.format(
            name
        ))

        update_extra_fields = {}

        # The selector event has to be published when dostate is true.
        if selector.dostate:
            rk, selector_event, publish_ack = selector.event()

            # Keep previous metrics to detect any difference on the
            # next turn.
            previous_metrics = {}
            for metric in selector_event['perf_data_array']:
                previous_metrics[metric['metric']] = metric['value']

            update_extra_fields['previous_metrics'] = previous_metrics

            do_publish_event = selector.have_to_publish(selector_event)

            if do_publish_event:
                update_extra_fields['last_publication_date'] = \
                    selector.last_publication_date
                self.publish_event(
                    selector,
                    rk,
                    selector_event,
                    publish_ack
                )

            # Once the selector is computed, the SLA may also have to be
            # computed.
            if selector.dosla:
                self.logger.debug('----------SLA----------\n')

                # Retrieve user UI settings.
                # This template should always be set.
                template = selector.get_sla_output_tpl()
                # Duration of the timewindow computation
                timewindow = selector.get_sla_timewindow()
                sla_warning = selector.get_sla_warning()
                sla_critical = selector.get_sla_critical()
                alert_level = selector.get_alert_level()
                display_name = selector.display_name

                rk = get_routingkey(selector_event)

                sla = Sla(
                    self.storage,
                    rk,
                    template,
                    timewindow,
                    sla_warning,
                    sla_critical,
                    alert_level,
                    display_name,
                    logger=self.logger
                )
                self.publish_sla_event(
                    sla.get_event(),
                    display_name
                )
        else:
            self.logger.debug(u'Nothing to do with selector {}'.format(
                name
            ))

        # Update crecord information
        self.crecord_task_complete(event_id, update_extra_fields)

    self.nb_beat += 1
def test_get_routingkey_raise(self):
    event = {}

    with self.assertRaises(KeyError):
        get_routingkey(event)
def work(self, event, *xargs, **kwargs):
    rk = get_routingkey(event)
    default_action = self.configuration.get('default_action', 'pass')

    # List of supported actions
    rules = self.configuration.get('rules', [])
    to_apply = []

    self.logger.debug(u'event {}'.format(event))

    # When rules are configured, check black and white lists
    # according to the JSON configuration.
    for filterItem in rules:
        actions = filterItem.get('actions')
        name = filterItem.get('name', 'no_name')

        self.logger.debug(u'rule {}'.format(filterItem))
        self.logger.debug(u'filter is {}'.format(filterItem['mfilter']))

        # Try filter rules on the current event
        if filterItem['mfilter'] and check(filterItem['mfilter'], event):
            self.logger.debug(
                u'Event: {}, filter matches'.format(event['rk'])
            )

            for action in actions:
                if action['type'].lower() == 'drop':
                    self.apply_actions(event, to_apply)
                    return self.a_drop(event, None, name)
                to_apply.append((name, action))

            if filterItem.get('break', 0):
                self.logger.debug(
                    u' + Filter {} broke the next filters processing'
                    .format(
                        filterItem.get('name', 'filter')
                    )
                )
                break

    if len(to_apply):
        if self.apply_actions(event, to_apply):
            self.logger.debug(
                u'Event before sent to next engine: %s' % event
            )
            event['rk'] = event['_id'] = get_routingkey(event)
            return event

    # No rules matched
    if default_action == 'drop':
        self.logger.debug("Event '%s' dropped by default action" % rk)
        self.drop_event_count += 1
        return DROP

    self.logger.debug("Event '%s' passed by default action" % rk)
    self.pass_event_count += 1

    self.logger.debug(u'Event before sent to next engine: %s' % event)
    event['rk'] = event['_id'] = get_routingkey(event)

    return event
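# Illustrative rule configuration consumed by work() above. The structure
# follows the keys the code reads ('name', 'mfilter', 'actions', 'break');
# the filter and action values are hypothetical. 'mfilter' is evaluated by
# check(), which appears to take a MongoDB-style filter document.
_example_rules = [
    {
        'name': 'drop_unittest_events',
        'mfilter': {'connector': 'unittest'},
        'actions': [{'type': 'drop'}]
    },
    {
        'name': 'pass_and_stop',
        'mfilter': {'source_type': 'resource'},
        'actions': [{'type': 'pass'}],
        'break': 1
    }
]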
def work(self, event, *xargs, **kwargs):
    rk = get_routingkey(event)
    default_action = self.configuration.get('default_action', 'pass')

    # List of supported actions
    rules = self.configuration.get('rules', [])
    to_apply = []

    self.logger.debug(u'event {}'.format(event))

    # When rules are configured, check black and white lists
    # according to the JSON configuration.
    for filterItem in rules:
        actions = filterItem.get('actions')
        name = filterItem.get('name', 'no_name')

        self.logger.debug(u'rule {}'.format(filterItem))
        self.logger.debug(u'filter is {}'.format(filterItem['mfilter']))

        # Try filter rules on the current event
        if filterItem['mfilter'] and check(filterItem['mfilter'], event):
            self.logger.debug(u'Event: {}, filter matches'.format(
                event.get('rk', event)))

            if 'pbehaviors' in filterItem:
                pbehaviors = filterItem.get('pbehaviors', {})
                list_in = pbehaviors.get('in', [])
                list_out = pbehaviors.get('out', [])

                if list_in or list_out:
                    pbm = singleton_per_scope(PBehaviorManager)
                    cm = singleton_per_scope(ContextGraph)
                    entity = cm.get_entity(event)
                    entity_id = cm.get_entity_id(entity)

                    result = pbm.check_pbehaviors(
                        entity_id, list_in, list_out
                    )

                    if not result:
                        break

            for action in actions:
                if action['type'].lower() == 'drop':
                    self.apply_actions(event, to_apply)
                    return self.a_drop(event, None, name)
                to_apply.append((name, action))

            if filterItem.get('break', 0):
                self.logger.debug(
                    u' + Filter {} broke the next filters processing'
                    .format(filterItem.get('name', 'filter')))
                break

    if len(to_apply):
        if self.apply_actions(event, to_apply):
            self.logger.debug(
                u'Event before sent to next engine: %s' % event)
            event['rk'] = event['_id'] = get_routingkey(event)
            return event

    # No rules matched
    if default_action == 'drop':
        self.logger.debug("Event '%s' dropped by default action" % rk)
        self.drop_event_count += 1
        return DROP

    self.logger.debug("Event '%s' passed by default action" % rk)
    self.pass_event_count += 1

    self.logger.debug(u'Event before sent to next engine: %s' % event)
    event['rk'] = event['_id'] = get_routingkey(event)

    return event
def event(self):
    # Get state information from aggregation
    states, state, ack_count, wstate_for_ack, infobagot = self.getState()
    information = None

    if state == -1 and ack_count == -1:
        state = 0
        information = u'No event matched by selector {}'.format(
            self.display_name
        )

    # Build output
    total = 0
    total_error = 0
    worst_state_with_ack = 0

    for s in states:
        states[s] = int(states[s])
        total += states[s]
        if s > 0:
            total_error += states[s]

    # Computed state when not all events are acknowledged
    computed_state = wstate_for_ack

    # Is the event produced by the selector acknowledged?
    if ack_count >= (total_error + infobagot) and ack_count > 0:
        send_ack = True

        # All matched events were acknowledged, so the user chooses
        # which state to use. It is either:

        # - the worst state computed from all matched events,
        #   acknowledged or not
        if self.state_when_all_ack == 'worststate':
            if state != -1:
                computed_state = state
            else:
                computed_state = 0

        # - a fixed state defined by the user
        if self.state_when_all_ack in self.states_labels:
            computed_state = self.states_labels[self.state_when_all_ack]
    else:
        send_ack = False

    # Display purposes
    if ack_count == -1:
        ack_count = 0

    self.logger.debug(u' + state: {}'.format(state))

    perf_data_array = []
    self.logger.debug(u' + total: {}'.format(total))

    # Create the perfdata array and generate output data
    output = self.output_tpl.replace('[TOTAL]', str(total))

    # Produce metrics for acks
    if ack_count:
        perf_data_array.append({
            "metric": 'cps_sel_ack',
            "value": ack_count,
            "max": total
        })
        output = output.replace('[ACK]', str(ack_count))

    output_data = {}

    # Metrics and output computation
    for i in [0, 1, 2, 3]:
        value = 0
        if i in states:
            value = states[i]

        metric = u'cps_sel_state_{}'.format(i)
        output = output.replace(self.template_replace[i], str(value))
        output_data[metric] = value
        perf_data_array.append({
            "metric": metric,
            "value": value,
            "max": total
        })
        self.logger.info(u'metric {} : {}'.format(metric, value))

    perf_data_array.append({
        "metric": "cps_sel_total",
        "value": total
    })
    output_data['total'] = total

    # Build the event
    event = forger(
        connector="canopsis",
        connector_name="engine",
        event_type="selector",
        source_type="component",
        component=self.display_name,
        state=computed_state,
        output=output,
        perf_data_array=perf_data_array,
        display_name=self.display_name
    )

    # Build the routing key
    rk = get_routingkey(event)

    return (rk, event, send_ack)
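# Illustrative output template for the replacement logic above. '[TOTAL]'
# and '[ACK]' are taken from the code; the per-state placeholder names
# below are assumptions, since the real tokens come from
# ``self.template_replace``.
_example_output_tpl = (
    '[TOTAL] events matched ([ACK] acknowledged): '
    '[STATE_0] ok, [STATE_1] minor, [STATE_2] major, [STATE_3] critical'
)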
def work(self, event, *args, **kwargs):
    # Load the associated class
    selector = Selector(
        storage=self.storage,
        record=Record(event),
        logging_level=self.logging_level
    )
    name = selector.display_name

    self.logger.debug(u'Start processing selector: {0}'.format(name))

    # The selector event has to be published when dostate is true.
    if selector.dostate:
        rk, event, publish_ack = selector.event()

        # Keep previous metrics to detect any difference on the next turn
        selector.data['previous_metrics'] = {
            metric['metric']: metric['value']
            for metric in event['perf_data_array']
        }

        do_publish_event = selector.have_to_publish(event)

        if do_publish_event:
            self.publish_event(selector, rk, event, publish_ack)

        # Once the selector is computed, the SLA may also have to be
        # computed.
        if selector.dosla:
            self.logger.debug(u'Start processing SLA')

            # Retrieve user UI settings.
            # This template should always be set.
            template = selector.get_sla_output_tpl()
            # Duration of the timewindow computation
            timewindow = selector.get_sla_timewindow()
            sla_warning = selector.get_sla_warning()
            sla_critical = selector.get_sla_critical()
            alert_level = selector.get_alert_level()
            display_name = selector.display_name

            rk = get_routingkey(event)

            sla = Sla(
                self.storage,
                rk,
                template,
                timewindow,
                sla_warning,
                sla_critical,
                alert_level,
                display_name,
                logger=self.logger
            )
            self.publish_sla_event(
                sla.get_event(),
                display_name
            )
    else:
        self.logger.debug(u'Nothing to do')

    selector.save()
def get_events(self, alarm):
    """
    Rebuild events from alarm history.

    :param alarm: Alarm to use for events reconstruction
    :type alarm: dict
    :returns: Array of events
    """
    storage = self.alerts_storage
    alarm_id = alarm[storage.DATA_ID]
    alarm = alarm[storage.VALUE]

    entity = self.context_manager.get_entities_by_id(alarm_id)
    try:
        entity = entity[0]
    except IndexError:
        entity = {}

    no_author_types = ['stateinc', 'statedec', 'statusinc', 'statusdec']
    check_referer_types = [
        'ack',
        'ackremove',
        'cancel',
        'uncancel',
        'declareticket',
        'assocticket',
        'changestate'
    ]

    typemap = {
        'stateinc': Check.EVENT_TYPE,
        'statedec': Check.EVENT_TYPE,
        'statusinc': Check.EVENT_TYPE,
        'statusdec': Check.EVENT_TYPE,
        'ack': 'ack',
        'ackremove': 'ackremove',
        'cancel': 'cancel',
        'uncancel': 'uncancel',
        'declareticket': 'declareticket',
        'assocticket': 'assocticket',
        'changestate': States.changestate.value,
        'snooze': 'snooze'
    }
    valmap = {
        'stateinc': Check.STATE,
        'statedec': Check.STATE,
        'changestate': Check.STATE,
        'statusinc': Check.STATUS,
        'statusdec': Check.STATUS,
        'assocticket': 'ticket',
        'snooze': 'duration'
    }

    events = []
    eventmodel = self.context_manager.get_event(entity)

    try:
        eventmodel.pop("_id")
        eventmodel.pop("depends")
        eventmodel.pop("impact")
        eventmodel.pop("infos")
        eventmodel.pop("measurements")
        eventmodel.pop("type")
    except KeyError:
        # FIXME: a logger would be nice
        pass

    for step in alarm[AlarmField.steps.value]:
        event = eventmodel.copy()
        event['timestamp'] = step['t']
        event['output'] = step['m']

        if step['_t'] in valmap:
            field = valmap[step['_t']]
            event[field] = step['val']

        if step['_t'] not in no_author_types:
            event[self.AUTHOR] = step['a']

        if step['_t'] in check_referer_types:
            event['ref_rk'] = get_routingkey(event)

        if Check.STATE not in event:
            event[Check.STATE] = get_last_state(alarm)

        # Fall back to step['_t'] as event_type when no mapping exists.
        event['event_type'] = typemap.get(step['_t'], step['_t'])

        for field in self.extra_fields:
            if field in alarm[AlarmField.extra.value]:
                event[field] = alarm[AlarmField.extra.value][field]

        events.append(event)

    return events
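# Worked illustration of the step mapping in get_events above (all values
# are hypothetical). A 'changestate' step such as
#     {'_t': 'changestate', 't': 1500000000, 'm': 'manual change',
#      'val': 2, 'a': 'root'}
# would yield an event with:
#     event['timestamp'] = 1500000000           # step['t']
#     event['output'] = 'manual change'         # step['m']
#     event[Check.STATE] = 2                    # via valmap['changestate']
#     event[self.AUTHOR] = 'root'               # not in no_author_types
#     event['ref_rk'] = get_routingkey(event)   # in check_referer_types
#     event['event_type'] = States.changestate.value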