Ejemplo n.º 1
0
    def send_stat_event(self):
        """Publish a check event carrying the pass/drop counters, then reset them."""
        dropped = self.drop_event_count
        passed = self.pass_event_count

        # Gauges attached to the stat event: one for passed, one for dropped.
        metrics = [
            {'metric': 'pass_event', 'value': passed, 'type': 'GAUGE'},
            {'metric': 'drop_event', 'value': dropped, 'type': 'GAUGE'},
        ]

        stat_event = cevent.forger(connector="cengine",
                                   connector_name="engine",
                                   event_type="check",
                                   source_type="resource",
                                   resource=self.amqp_queue + '_data',
                                   state=0,
                                   state_type=1,
                                   output="%s event dropped since %s" %
                                   (dropped, self.beat_interval),
                                   perf_data_array=metrics)

        self.logger.debug("%s event dropped since %s" %
                          (dropped, self.beat_interval))
        self.logger.debug("%s event passed since %s" %
                          (passed, self.beat_interval))

        self.amqp.publish(stat_event, cevent.get_routingkey(stat_event),
                          self.amqp.exchange_name_events)

        # Counters only cover a single beat interval.
        self.drop_event_count = 0
        self.pass_event_count = 0
Ejemplo n.º 2
0
	def send_stat_event(self):
		"""Forge and publish the drop/pass statistics event, then zero the counters."""
		output = "%s event dropped since %s" % (self.drop_event_count, self.beat_interval)

		event = cevent.forger(
			connector="cengine",
			connector_name="engine",
			event_type="check",
			source_type="resource",
			resource=self.amqp_queue + '_data',
			state=0,
			state_type=1,
			output=output,
			perf_data_array=[
				{'metric': 'pass_event', 'value': self.pass_event_count, 'type': 'GAUGE'},
				{'metric': 'drop_event', 'value': self.drop_event_count, 'type': 'GAUGE'},
			],
		)

		self.logger.debug(output)
		self.logger.debug("%s event passed since %s" % (self.pass_event_count, self.beat_interval))

		self.amqp.publish(event, cevent.get_routingkey(event), self.amqp.exchange_name_events)

		# Reset per-interval counters once published.
		self.drop_event_count = 0
		self.pass_event_count = 0
Ejemplo n.º 3
0
	def _beat(self):
		"""Periodic heartbeat: log throughput statistics, optionally publish
		them as a check event, reset the per-interval counters, then invoke
		the beat() hook.

		Rates are derived from self.counter_event / self.counter_worktime
		accumulated since the previous beat over self.beat_interval seconds.
		"""
		self.logger.debug("Beat: %s event(s), %s error" % (self.counter_event, self.counter_error))
		
		# Warn when the internal queue starts backing up.
		if not self.input_queue.empty():
			size = self.input_queue.qsize()
			if size > 110:
				self.logger.info("%s event(s) in internal queue" % size)
			
		evt_per_sec = 0
		sec_per_evt = 0
		
		if self.counter_event:
			evt_per_sec = float(self.counter_event) / self.beat_interval
			self.logger.debug(" + %0.2f event(s)/seconds" % evt_per_sec)
		
		if self.counter_worktime and self.counter_event:
			sec_per_evt = self.counter_worktime / self.counter_event
			self.logger.debug(" + %0.5f seconds/event" % sec_per_evt)
			
		# Counters only cover a single beat interval; reset before next one.
		self.counter_error = 0
		self.counter_event = 0
		self.counter_worktime = 0
		
		## Submit event
		if self.send_stats_event:
			# Check state from per-event processing time:
			# 0 = ok, 1 = warning, 2 = critical.
			state = 0
			
			if sec_per_evt > self.thd_warn_sec_per_evt:
				state = 1
				
			if sec_per_evt > self.thd_crit_sec_per_evt:
				state = 2
			
			perf_data_array = [
				{'retention': self.perfdata_retention, 'metric': 'cps_queue_size', 'value': self.input_queue.qsize(), 'unit': 'evt' },
				{'retention': self.perfdata_retention, 'metric': 'cps_evt_per_sec', 'value': round(evt_per_sec,2), 'unit': 'evt/sec' },
				{'retention': self.perfdata_retention, 'metric': 'cps_sec_per_evt', 'value': round(sec_per_evt,5), 'unit': 'sec/evt', 'warn': 0.5, 'crit': 0.8 },
			]
			
			event = cevent.forger(
				connector = "cengine",
				connector_name = "engine",
				event_type = "check",
				source_type="resource",
				resource=self.amqp_queue,
				state=state,
				state_type=1,
				output="%0.2f evt/sec, %0.5f sec/evt" % (evt_per_sec, sec_per_evt),
				perf_data_array=perf_data_array
			)
			
			rk = cevent.get_routingkey(event)
			self.amqp.publish(event, rk, self.amqp.exchange_name_events)
		
		# Delegate to the subclass beat() hook; never let it kill the loop.
		try:
			self.beat()
		except Exception, err:
			self.logger.error("Beat raise exception: %s" % err)
			traceback.print_exc(file=sys.stdout)
Ejemplo n.º 4
0
    def test_01(self):
        event = cevent.forger(connector='unittest',
                              connector_name='test1',
                              event_type='log')
        rk = cevent.get_routingkey(event)

        print rk
        print event
Ejemplo n.º 5
0
def send_events(n, rate=0, burst=10):
    """Publish `n` benchmark events on AMQP and wait for the last one to land.

    :param n: number of events to send.
    :param rate: target throughput in events/second (0 = unthrottled).
    :param burst: number of events sent between two throttling pauses.
    :return: seconds between the last event's emission timestamp and its
             appearance in storage, or None on timeout / interruption.
    """
    i = 0

    logger.info("Send %s events" % n)
    if (rate):
        logger.info(" + @ %s events/second (%s events/5min)" % (rate,
                                                                (rate * 300)))
        # Pause budget per burst; the original ((float(n)/rate)/n)*burst
        # reduces to burst/rate since n cancels out.
        time_break = float(burst) / rate

    time_start_burst = time.time()
    start_time = time.time()
    while RUN and i < n:
        event = base_component_event.copy()

        # Unique component per event so each one gets its own routing key.
        event['component'] += str(i)
        event['bench_timestamp'] = time.time()

        rk = cevent.get_routingkey(event)
        amqp.publish(event, rk, amqp.exchange_name_events)

        # Throttle once per burst to approximate the requested rate.
        if (rate and (i % burst == 0)):
            elapsed = time.time() - time_start_burst
            if (time_break > elapsed):
                time.sleep(time_break - elapsed)
            time_start_burst = time.time()

        i += 1

    duration = time.time() - start_time
    logger.info(" + Done, elapsed: %.3f ms (%s events/second)" %
                ((duration * 1000), int(n / duration)))

    # Poll storage until the last published event (rk) shows up.
    elapsed = None
    logger.info("Wait last record ...")
    timeout = time.time() + 300
    while RUN:
        raw = storage.find_one({'_id': rk}, mfields={'bench_timestamp': 1})
        if raw:
            elapsed = time.time() - float(raw['bench_timestamp'])
            storage.get_backend('events').remove({'_id': rk}, safe=True)
            logger.info(" + Done, Delta: %.3f s" % elapsed)
            break

        if time.time() > timeout:
            logger.info(" + Fail, timeout")
            break

        time.sleep(0.001)

    return elapsed
Ejemplo n.º 6
0
def send_events(n, rate=0, burst=10):
	"""Push n benchmark events through AMQP, then wait for the final record."""
	sent = 0

	logger.info("Send %s events" % n)
	if rate:
		# 1/10 or 1/15
		logger.info(" + @ %s events/second (%s events/5min)" % (rate, (rate*300)))
		pause = ((float(n) / rate) / n) * burst

	burst_started = time.time()
	begin = time.time()
	while RUN and sent < n:
		evt = base_component_event.copy()

		# One distinct component (hence routing key) per event.
		evt['component'] += str(sent)
		evt['bench_timestamp'] = time.time()

		rk = cevent.get_routingkey(evt)
		amqp.publish(evt, rk, amqp.exchange_name_events)

		# Sleep once per burst so the average rate matches the request.
		if rate and sent % burst == 0:
			spent = time.time() - burst_started
			if pause > spent:
				time.sleep(pause - spent)
			burst_started = time.time()

		sent += 1

	total = time.time() - begin
	logger.info(" + Done, elapsed: %.3f ms (%s events/second)" % ((total*1000), int(n/total)))

	# Get last event
	record = None
	delta = None
	logger.info("Wait last record ...")
	deadline = time.time() + 300
	while RUN:
		found = storage.find_one({'_id': rk}, mfields={'bench_timestamp': 1})
		if found:
			delta = time.time() - float(found['bench_timestamp'])
			storage.get_backend('events').remove({'_id': rk}, safe=True)
			logger.info(" + Done, Delta: %.3f s" % delta)
			break

		if time.time() > deadline:
			logger.info(" + Fail, timeout")
			break

		time.sleep(0.001)

	return delta
Ejemplo n.º 7
0
    def set_derogation_state(self, derogation, active):
        """Sync the stored 'active' flag with `active`; publish a log event when it flips."""
        was_active = derogation.get('active', False)
        name = derogation.get('crecord_name', None)
        notify = False
        state = 0

        # Only a real transition (inactive -> active or back) is persisted
        # and notified.
        if active and not was_active:
            self.logger.info(
                "%s (%s) is now active" %
                (derogation['crecord_name'], derogation['_id']))
            self.storage.update(derogation['_id'], {'active': True})
            notify = True
        elif not active and was_active:
            self.logger.info(
                "%s (%s) is now inactive" %
                (derogation['crecord_name'], derogation['_id']))
            self.storage.update(derogation['_id'], {'active': False})
            notify = True

        if not notify:
            return

        if active:
            output = "Derogation '%s' is now active" % name
            state = 1
        else:
            output = "Derogation '%s' is now inactive" % name

        tags = derogation.get('tags', None)
        self.logger.debug(" + Tags: '%s' (%s)" % (tags, type(tags)))

        # Normalize tags: a bare string becomes a one-element list,
        # anything that is not a list is dropped.
        if isinstance(tags, (str, unicode)):
            tags = [tags]
        if not isinstance(tags, list) or tags == "":
            tags = None

        event = cevent.forger(connector="cengine",
                              connector_name="engine",
                              event_type="log",
                              source_type="component",
                              component=NAME,
                              state=state,
                              output=output,
                              long_output=derogation.get('description', None),
                              tags=tags)
        self.amqp.publish(event, cevent.get_routingkey(event),
                          self.amqp.exchange_name_events)
Ejemplo n.º 8
0
	def set_derogation_state(self, derogation, active):
		"""Update a derogation's stored 'active' flag; emit a log event on any change."""
		currently_active = derogation.get('active', False)
		name = derogation.get('crecord_name', None)
		notify = False
		state = 0

		if active:
			if not currently_active:
				self.logger.info("%s (%s) is now active" % (derogation['crecord_name'], derogation['_id']))
				self.storage.update(derogation['_id'], {'active': True})
				notify = True
		elif currently_active:
			self.logger.info("%s (%s) is now inactive" % (derogation['crecord_name'], derogation['_id']))
			self.storage.update(derogation['_id'], {'active': False})
			notify = True

		if notify:
			if active:
				state = 1
				output = "Derogation '%s' is now active" % name
			else:
				output = "Derogation '%s' is now inactive" % name

			raw_tags = derogation.get('tags', None)
			self.logger.debug(" + Tags: '%s' (%s)" % (raw_tags, type(raw_tags)))

			# A bare string becomes a one-element list; any other non-list
			# value is discarded.
			if isinstance(raw_tags, str) or isinstance(raw_tags, unicode):
				raw_tags = [raw_tags]
			if not isinstance(raw_tags, list) or raw_tags == "":
				raw_tags = None

			log_event = cevent.forger(
				connector="cengine",
				connector_name="engine",
				event_type="log",
				source_type="component",
				component=NAME,
				state=state,
				output=output,
				long_output=derogation.get('description', None),
				tags=raw_tags,
			)
			routing_key = cevent.get_routingkey(log_event)

			self.amqp.publish(log_event, routing_key, self.amqp.exchange_name_events)
Ejemplo n.º 9
0
    def beat(self):
        """Reload topologies when needed, recompute each topology's state
        from the current event states, persist it and publish one
        'topology' event per topology.

        Runs its body only when a topology was (re)loaded, a beat was
        explicitly requested (self.doBeat) or normal_beat_interval elapsed.
        """
        loaded_topo = self.topo_load()

        if loaded_topo or self.doBeat or int(
                time.time()) >= (self.lastBeat + self.normal_beat_interval):

            self.lastBeat = int(time.time())

            if loaded_topo:
                self.ids = []

                # Parse topo
                for topo in self.topos:
                    self.logger.debug(
                        "Parse topo '%s': %s Nodes with %s Conns" %
                        (topo['crecord_name'], len(
                            topo['nodes']), len(topo['conns'])))

                    topo['ids'] = self.topo_extractIds(topo)

                    topo['nodesById'] = {}

                    for key in topo['nodes']:
                        node = topo['nodes'][key]

                        _id = node['_id']

                        # Assign a state-calculation function to each node
                        # that does not have one yet.
                        if not node.get('calcul_state', None):
                            if node.get('event_type', None) == 'operator':
                                node[
                                    'calcul_state'] = self.topo_getOperator_fn(
                                        _id)
                                # Suffix a random number so several instances
                                # of the same operator get distinct ids.
                                _id = "%s-%s" % (_id, int(random() * 10000))
                                node['_id'] = _id
                            else:
                                node['calcul_state'] = self.default_Operator_fn

                        topo['nodesById'][_id] = node
                        node['childs'] = []

                    self.logger.debug("Fill node's childs")
                    self.topo_fillChilds(topo)

            # Get all states of all topos
            self.stateById = {}
            records = self.storage.find(
                mfilter={'_id': {
                    '$in': self.ids
                }},
                mfields=['state', 'state_type', 'previous_state'],
                namespace='events')
            for record in records:
                # previous_state falls back to the current state when absent.
                self.stateById[record['_id']] = {
                    'state': record['state'],
                    'state_type': record.get('state_type', 1),
                    'previous_state': record.get('previous_state',
                                                 record['state'])
                }

            # Get state by topo
            for topo in self.topos:
                ## Parse tree for calcul state
                self.logger.debug(" + Calcul state:")
                states_info = self.topo_getState(topo)

                self.logger.debug("'%s': State: %s" %
                                  (topo['crecord_name'], states_info))
                self.storage.update(topo['_id'],
                                    {'state': states_info['state']})

                event = cevent.forger(
                    connector=NAME,
                    connector_name="engine",
                    event_type="topology",
                    source_type="component",
                    component=topo['crecord_name'],
                    state=states_info['state'],
                    state_type=states_info['state_type'],
                    output="",
                    long_output="",
                    #perf_data =			None,
                    #perf_data_array =	[],
                    display_name=topo.get('display_name', None))

                # Extra fields
                event['nestedTree'] = self.topo_dump4Ui(topo)

                rk = cevent.get_routingkey(event)

                self.logger.debug("Publish event on %s" % rk)
                self.amqp.publish(event, rk, self.amqp.exchange_name_events)

            self.doBeat = False
Ejemplo n.º 10
0
    def beat(self):
        """Periodic consolidation pass.

        Loads newly enabled consolidation records, discards deleted ones,
        and for each record whose aggregation interval has elapsed:
        aggregates the matching metrics horizontally (aggregation_method),
        then vertically (each consolidation_method), and publishes the
        result as a perf-data 'consolidation' event on AMQP. Engine status
        and timestamps are written back to storage; worktime is accumulated
        in self.counter_worktime.
        """
        beat_start = time.time()

        self.clean_consolidations()

        # Pick up consolidation records that are enabled but not loaded yet.
        non_loaded_records = self.storage.find(
            {
                '$and': [{
                    'crecord_type': 'consolidation'
                }, {
                    'enable': True
                }, {
                    'loaded': {
                        '$ne': True
                    }
                }]
            },
            namespace="object")

        if len(non_loaded_records) > 0:
            for item in non_loaded_records:
                self.logger.info("New consolidation found '%s', load" %
                                 item.name)
                self.load(item)

        # Forget records that have been deleted from storage.
        for _id in self.records.keys():
            exists = self.storage.find_one({'_id': _id})
            if not exists:
                self.logger.info("%s deleted, remove from record list" %
                                 self.records[_id]['crecord_name'])
                del (self.records[_id])

        for record in self.records.values():
            consolidation_last_timestamp = self.timestamps[record.get('_id')]

            aggregation_interval = record.get('aggregation_interval',
                                              self.default_interval)
            current_interval = int(time.time()) - consolidation_last_timestamp

            self.logger.debug(
                'current interval: %s , consolidation interval: %s' %
                (current_interval, aggregation_interval))
            # Only recompute once the record's aggregation interval elapsed.
            if current_interval >= aggregation_interval:
                self.logger.debug('Compute new consolidation for: %s' %
                                  record.get('crecord_name', 'No name found'))

                output_message = None
                # Record filter, further restricted to non-internal metrics.
                mfilter = json.loads(record.get('mfilter'))
                mfilter = {
                    '$and': [mfilter, {
                        'me': {
                            '$nin': internal_metrics
                        }
                    }]
                }
                #self.logger.debug('the mongo filter is: %s' % mfilter)
                metric_list = self.manager.store.find(mfilter=mfilter)
                self.logger.debug('length of matching metric list is: %i' %
                                  metric_list.count())

                aggregation_method = record.get('aggregation_method', False)
                consolidation_methods = record.get('consolidation_method',
                                                   False)

                if not isinstance(consolidation_methods, list):
                    consolidation_methods = [consolidation_methods]

                mType = mUnit = mMin = mMax = None
                values = []

                # First pass: per metric, track global min/max/unit and
                # aggregate each metric's points into a single value.
                for index, metric in enumerate(metric_list):
                    if index == 0:
                        #mType = metric.get('t')
                        mMin = metric.get('mi')
                        mMax = metric.get('ma')
                        mUnit = metric.get('u')
                        if 'sum' in consolidation_methods:
                            # For 'sum' the max is the sum of the metric maxes.
                            maxSum = mMax
                    else:
                        if metric.get('mi') < mMin:
                            mMin = metric.get('mi')
                        if metric.get('ma') > mMax:
                            mMax = metric.get('ma')
                        if 'sum' in consolidation_methods:
                            maxSum += metric.get('ma')
                        if metric.get('u') != mUnit:
                            output_message = "warning : too many units"

                    self.logger.debug(' + Get points for: %s , %s , %s, %s' %
                                      (metric.get('_id'), metric.get('co'),
                                       metric.get('re', ''), metric.get('me')))

                    # Start from the previous consolidation timestamp when it
                    # is close enough (60s tolerance), else from now minus the
                    # aggregation interval.
                    if int(
                            time.time()
                    ) - aggregation_interval <= consolidation_last_timestamp + 60:
                        tstart = consolidation_last_timestamp
                        #self.logger.debug('   +   Use original tstart: %i' % consolidation_last_timestamp)
                    else:
                        tstart = int(time.time()) - aggregation_interval
                        #self.logger.debug('   +   new tstart: %i' % tstart)

                    self.logger.debug(
                        '   +   from: %s to %s' %
                        (datetime.fromtimestamp(tstart).strftime(
                            '%Y-%m-%d %H:%M:%S'),
                         datetime.fromtimestamp(
                             time.time()).strftime('%Y-%m-%d %H:%M:%S')))

                    list_points = self.manager.get_points(
                        tstart=tstart,
                        tstop=time.time(),
                        _id=metric.get('_id'))
                    self.logger.debug(
                        '   +   Values on interval: %s' %
                        ' '.join([str(value[1]) for value in list_points]))

                    if list_points:
                        # Horizontal aggregation; without a known function,
                        # fall back to the latest point value.
                        fn = self.get_math_function(aggregation_method)
                        if fn:
                            point_value = fn(
                                [value[1] for value in list_points])
                        else:
                            point_value = list_points[len(list_points) - 1][1]
                        values.append(point_value)

                self.logger.debug(
                    '   +   Summary of horizontal aggregation "%s":' %
                    aggregation_method)
                self.logger.debug(values)

                # NOTE(review): these 'return' statements abort the whole
                # beat, not just this record — 'continue' may have been
                # intended; confirm before relying on multi-record behavior.
                if not consolidation_methods:
                    self.storage.update(record.get('_id'), {
                        'output_engine':
                        "No second aggregation function given"
                    })
                    return

                if len(values) == 0:
                    self.logger.debug('  +  No values')
                    self.storage.update(
                        record.get('_id'), {
                            'output_engine': "No input values",
                            'consolidation_ts': int(time.time())
                        })
                    self.timestamps[record.get('_id')] = int(time.time())
                    return

                # Second pass: vertical consolidation across metrics, one
                # perf-data entry per consolidation method.
                list_perf_data = []
                for function_name in consolidation_methods:
                    fn = self.get_math_function(function_name)

                    if not fn:
                        self.logger.debug(
                            'No function given for second aggregation')
                        self.storage.update(
                            record.get('_id'), {
                                'output_engine':
                                "No function given for second aggregation"
                            })
                        return

                    # NOTE(review): unreachable — an empty `values` already
                    # returned above.
                    if len(values) == 0:
                        if not output_message:
                            self.storage.update(record.get('_id'),
                                                {'output_engine': "No result"})
                        else:
                            self.storage.update(
                                record.get('_id'), {
                                    'output_engine':
                                    "there are issues : %s warning : No result"
                                    % output_message
                                })

                    value = fn(values)

                    self.logger.debug(' + Result of aggregation for "%s": %f' %
                                      (function_name, value))

                    list_perf_data.append({
                        'metric':
                        function_name,
                        'value':
                        roundSignifiantDigit(value, 3),
                        "unit":
                        mUnit,
                        'max':
                        maxSum if function_name == 'sum' else mMax,
                        'min':
                        mMin,
                        'type':
                        'GAUGE'
                    })

                # Timestamp the point at the middle of the elapsed interval.
                point_timestamp = int(time.time()) - current_interval / 2

                event = cevent.forger(
                    connector="consolidation",
                    connector_name="engine",
                    event_type="consolidation",
                    source_type="resource",
                    component=record['component'],
                    resource=record['resource'],
                    state=0,
                    timestamp=point_timestamp,
                    state_type=1,
                    output="Consolidation: '%s' successfully computed" %
                    record.get('crecord_name', 'No name'),
                    long_output="",
                    perf_data=None,
                    perf_data_array=list_perf_data,
                    display_name=record['crecord_name'])
                rk = cevent.get_routingkey(event)
                self.counter_event += 1
                self.amqp.publish(event, rk, self.amqp.exchange_name_events)

                self.logger.debug('The following event was sent:')
                self.logger.debug(event)

                # Record engine status, with unit warnings when present.
                if not output_message:
                    engine_output = '%s : Computation done. Next Computation in %s s' % (
                        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        str(aggregation_interval))
                    self.storage.update(record.get('_id'),
                                        {'output_engine': engine_output})
                else:
                    engine_output = '%s : Computation done but there are issues : "%s" . Next Computation in %s s' % (
                        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        output_message, str(aggregation_interval))
                    self.storage.update(record.get('_id'),
                                        {'output_engine': engine_output})

                self.storage.update(record.get('_id'),
                                    {'consolidation_ts': int(time.time())})
                self.timestamps[record.get('_id')] = int(time.time())

        self.counter_worktime += time.time() - beat_start
Ejemplo n.º 11
0
def parse_trap(mib, trap_oid, agent, varBinds):
    """Convert a received SNMP trap into an event and publish it on AMQP.

    :param mib: MIB wrapper used to resolve trap_oid into a notification.
    :param trap_oid: OID of the received trap.
    :param agent: emitting agent identifier (becomes the event component).
    :param varBinds: trap variable bindings used to fill the summary's
        '%s' placeholders.
    :return: None; also returns None early when the notification cannot
        be parsed.
    """

    notification = mib.get_notification(trap_oid)

    ## Parse trap
    if notification:
        try:
            logger.info(
                "[%s][%s] %s-%s: %s (%s)" %
                (agent, mib.name, notification['SEVERITY'],
                 notification['STATE'], notification['TYPE'], trap_oid))
        except Exception, err:
            logger.error(
                "Impossible to parse notification, check mib conversion ...")
            return None

        # NOTE(review): 'arguments' is assigned but never used below.
        arguments = notification['ARGUMENTS']
        summary = notification['SUMMARY']

        # Number of '%s' placeholders to substitute from the varbinds.
        nb_string_arg = summary.count('%s')

        if varBinds and nb_string_arg:
            for i in range(nb_string_arg):
                logger.debug(" + Get value %s" % i)
                value = None
                oid, components = varBinds[i]
                component = components[0]
                if component != None:
                    #value = component._componentValues[0]
                    # Keep the last non-empty component value.
                    for info in component._componentValues:
                        if info:
                            value = str(info)

                    logger.debug("   + %s" % value)

                # Substitute placeholders one at a time, left to right.
                if value:
                    summary = summary.replace('%s', value, 1)

        logger.info(" + Summary: %s" % summary)

        component = agent
        resource = mib.name
        source_type = 'resource'
        state = severity_to_state[notification['SEVERITY']]
        output = notification['TYPE']
        long_output = summary

        ## convert trap to event
        event = cevent.forger(connector='snmp',
                              connector_name=DAEMON_NAME,
                              component=component,
                              resource=resource,
                              timestamp=None,
                              source_type=source_type,
                              event_type='trap',
                              state=state,
                              output=output,
                              long_output=long_output)

        #own fields
        event['snmp_severity'] = notification['SEVERITY']
        event['snmp_state'] = notification['STATE']
        event['snmp_oid'] = trap_oid

        logger.debug("Event: %s" % event)
        ## send event on amqp
        key = cevent.get_routingkey(event)
        myamqp.publish(event, key, myamqp.exchange_name_events)
Ejemplo n.º 12
0
	def _beat(self):
		"""Heartbeat: at most once a minute, compute throughput statistics,
		optionally publish them as a check event, reset the counters, then
		invoke the beat() hook.
		"""

		now = int(time.time())

		# Throttle stats emission to once every 60 seconds.
		if self.last_stat + 60 <= now:
			self.logger.debug(" + Send stats")
			self.last_stat = now

			evt_per_sec = 0
			sec_per_evt = 0
			
			if self.counter_event:
				evt_per_sec = float(self.counter_event) / self.beat_interval
				self.logger.debug(" + %0.2f event(s)/seconds" % evt_per_sec)
			
			if self.counter_worktime and self.counter_event:
				sec_per_evt = self.counter_worktime / self.counter_event
				self.logger.debug(" + %0.5f seconds/event" % sec_per_evt)
			
			## Submit event
			# Only publish when events were actually processed this period.
			if self.send_stats_event and self.counter_event != 0:
				# Check state from per-event processing time thresholds:
				# 0 = ok, 1 = warning, 2 = critical.
				state = 0
				
				if sec_per_evt > self.thd_warn_sec_per_evt:
					state = 1
					
				if sec_per_evt > self.thd_crit_sec_per_evt:
					state = 2
				
				perf_data_array = [
					{'retention': self.perfdata_retention, 'metric': 'cps_evt_per_sec', 'value': round(evt_per_sec,2), 'unit': 'evt' },
					{'retention': self.perfdata_retention, 'metric': 'cps_sec_per_evt', 'value': round(sec_per_evt,5), 'unit': 's',
						'warn': self.thd_warn_sec_per_evt,
						'crit': self.thd_crit_sec_per_evt
					},
				]

				self.logger.debug(" + State: %s" % state)
				
				event = cevent.forger(
					connector = "cengine",
					connector_name = "engine",
					event_type = "check",
					source_type="resource",
					resource=self.amqp_queue,
					state=state,
					state_type=1,
					output="%0.2f evt/sec, %0.5f sec/evt" % (evt_per_sec, sec_per_evt),
					perf_data_array=perf_data_array
				)
				
				rk = cevent.get_routingkey(event)
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)
			

			# Counters cover a single stats period only.
			self.counter_error = 0
			self.counter_event = 0
			self.counter_worktime = 0

		# Delegate to the subclass beat() hook; never let it kill the loop.
		try:
			self.beat()
		except Exception, err:
			self.logger.error("Beat raise exception: %s" % err)
			traceback.print_exc(file=sys.stdout)
Ejemplo n.º 13
0
def send_event(routing_key=None):
	"""Forge a Canopsis event from HTTP parameters and publish it on AMQP.

	:param routing_key: optional dotted routing key
		``connector.connector_name.event_type.source_type.component[.resource]``
		whose segments pre-fill the corresponding event fields; any field it
		does not supply must come from the request parameters.
	:return: ``{'total': 1, 'success': True, 'data': {'event': event}}`` on
		success, or an :class:`HTTPError` (400/403) on invalid input.
	"""
	account = get_account()

	if not check_group_rights(account, group_managing_access):
		return HTTPError(403, 'Insufficient rights')

	connector = None
	connector_name = None
	event_type = None
	source_type = None
	component = None
	resource = None
	state = None
	state_type = None
	perf_data = None
	perf_data_array = None
	output = None
	long_output = None

	#--------------------explode routing key----------
	if routing_key:
		logger.debug('The routing key is : %s' % str(routing_key))

		routing_key = routing_key.split('.')
		# Valid keys have 5 segments (component event) or 6 (resource event)
		if len(routing_key) > 6 or len(routing_key) < 5:
			logger.error('Bad routing key')
			return HTTPError(400, 'Bad routing key')

		connector = routing_key[0]
		connector_name = routing_key[1]
		event_type = routing_key[2]
		source_type = routing_key[3]
		component = routing_key[4]
		# BUGFIX: guard the optional 6th segment; a valid 5-segment key
		# used to raise IndexError here
		if len(routing_key) == 6 and routing_key[5]:
			resource = routing_key[5]

	#-----------------------get params-------------------
	# Each mandatory field may come from the routing key or from the
	# request parameters; a missing field aborts with HTTP 400.
	if not connector:
		connector = request.params.get('connector', default=None)
		if not connector:
			logger.error('No connector argument')
			return HTTPError(400, 'Missing connector argument')

	if not connector_name:
		connector_name = request.params.get('connector_name', default=None)
		if not connector_name:
			logger.error('No connector name argument')
			return HTTPError(400, 'Missing connector name argument')

	if not event_type:
		event_type = request.params.get('event_type', default=None)
		if not event_type:
			logger.error('No event_type argument')
			return HTTPError(400, 'Missing event type argument')

	if not source_type:
		source_type = request.params.get('source_type', default=None)
		if not source_type:
			logger.error('No source_type argument')
			return HTTPError(400, 'Missing source type argument')

	if not component:
		component = request.params.get('component', default=None)
		if not component:
			logger.error('No component argument')
			return HTTPError(400, 'Missing component argument')

	if not resource:
		resource = request.params.get('resource', default=None)
		if not resource:
			logger.error('No resource argument')
			return HTTPError(400, 'Missing resource argument')

	if not state:
		state = request.params.get('state', default=None)
		if not state:
			logger.error('No state argument')
			return HTTPError(400, 'Missing state argument')

	if not state_type:
		state_type = request.params.get('state_type', default=1)

	if not output:
		output = request.params.get('output', default=None)

	if not long_output:
		long_output = request.params.get('long_output', default=None)

	if not perf_data:
		perf_data = request.params.get('perf_data', default=None)

	if not perf_data_array:
		perf_data_array = request.params.get('perf_data_array', default=None)

	# BUGFIX: perf_data_array is optional; json.loads(None) used to raise
	# TypeError. Decode only when a value was actually supplied, and turn
	# malformed JSON into a clean 400 instead of a 500.
	if perf_data_array and not isinstance(perf_data_array, list):
		try:
			perf_data_array = json.loads(perf_data_array)
		except ValueError:
			logger.error('Invalid perf_data_array argument')
			return HTTPError(400, 'Invalid perf_data_array argument')

	# BUGFIX: reject non-numeric state/state_type with a 400 instead of
	# crashing with ValueError
	try:
		state = int(state)
		state_type = int(state_type)
	except (TypeError, ValueError):
		logger.error('Invalid state or state_type argument')
		return HTTPError(400, 'state and state_type must be integers')

	#------------------------------forging event----------------------------------
	event = cevent.forger(
				connector=connector,
				connector_name=connector_name,
				event_type=event_type,
				source_type=source_type,
				component=component,
				resource=resource,
				state=state,
				state_type=state_type,
				output=output,
				long_output=long_output,
				perf_data=perf_data,
				perf_data_array=perf_data_array,
			)

	logger.debug(type(perf_data_array))
	logger.debug(perf_data_array)
	logger.debug('The forged event is : ')
	logger.debug(str(event))

	#------------------------------AMQP Part--------------------------------------
	key = cevent.get_routingkey(event)

	amqp.publish(event, key, amqp.exchange_name_events)

	logger.debug('Amqp event published')

	return {'total': 1, 'success': True, 'data': {'event': event}}
Ejemplo n.º 14
0
	def beat(self):
		"""Periodic consolidation pass.

		For every loaded consolidation record whose aggregation interval has
		elapsed: select the matching metrics, aggregate each metric's points
		over the time window, consolidate the per-metric values into single
		figures, publish them as a Canopsis event and write the run timestamp
		back into the record.
		"""
		self.logger.debug("Consolidate metrics:")

		now = time.time()
		beat_elapsed = 0

		self.load_consolidation()

		for record in self.records.values():
			
			#self.logger.debug("Raw: %s" % record)

			_id = record.get('_id')
			name = record.get('crecord_name')

			aggregation_interval = record.get('aggregation_interval')

			self.logger.debug("'%s':" % name)
			self.logger.debug(" + interval: %s" % aggregation_interval)

			# First run: no previous timestamp recorded, so elapsed is 0
			last_run = record.get('consolidation_ts', now)

			elapsed = now - last_run

			self.logger.debug(" + elapsed: %s" % elapsed)

			# elapsed == 0 means first run; otherwise wait a full interval
			if elapsed == 0 or elapsed >= aggregation_interval:
				self.logger.debug("Step 1: Select metrics")

				mfilter = json.loads(record.get('mfilter'))
				self.logger.debug(' + mfilter: %s' % mfilter)

				# Exclude internal metrics
				mfilter = {'$and': [mfilter, {'me': {'$nin':internal_metrics}}]}

				metric_list = self.manager.store.find(mfilter=mfilter)

				self.logger.debug(" + %s metrics found" % metric_list.count())

				if not metric_list.count():
					self.storage.update(_id, { 'output_engine': "No metrics, check your filter" })
					continue

				aggregation_method = record.get('aggregation_method')
				self.logger.debug(" + aggregation_method: %s" % aggregation_method)

				consolidation_methods = record.get('consolidation_method')
				if not isinstance(consolidation_methods, list):
					consolidation_methods = [ consolidation_methods ]

				self.logger.debug(" + consolidation_methods: %s" % consolidation_methods)

				mType = mUnit = mMin = mMax = None

				# Get metrics: collect ids while tracking the global min/max,
				# the common unit, and (for 'sum') the sum of the maxima
				metrics = []
				for index, metric in enumerate(metric_list):
					if  index == 0 :
						#mType = metric.get('t')
						mMin = metric.get('mi')
						mMax = metric.get('ma')
						mUnit = metric.get('u')
						if 'sum' in consolidation_methods:
							maxSum = mMax
					else:
						if  metric.get('mi') < mMin :
							mMin = metric.get('mi')
						if metric.get('ma') > mMax :
							mMax = metric.get('ma')
						if 'sum' in consolidation_methods and mMax:
							maxSum += metric.get('ma')
						if metric.get('u') != mUnit :
							self.logger.warning("%s: too many units" % name)
							# NOTE(review): output_message is set but never
							# used afterwards — presumably meant to reach the
							# event output; confirm before relying on it
							output_message = "warning : too many units"

					self.logger.debug(' + %s , %s , %s, %s' % (
						metric.get('_id'),
						metric.get('co'),
						metric.get('re',''),
						metric.get('me'))
					)

					metrics.append(metric.get('_id'))

				self.logger.debug(' + mMin: %s' % mMin)
				self.logger.debug(' + mMax: %s' % mMax)
				self.logger.debug(' + mUnit: %s' % mUnit)

				self.logger.debug("Step 2: Aggregate (%s)" % aggregation_method)

				# Set time range: never reach further back than two intervals
				tstart = last_run

				if elapsed == 0 or last_run < (now - 2 * aggregation_interval):
					tstart = now - aggregation_interval

				self.logger.debug(
					" + From: %s To %s "% 
					(datetime.fromtimestamp(tstart).strftime('%Y-%m-%d %H:%M:%S'),
					datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
				)

				# Aggregate each metric's points into one value per metric
				values = []
				for mid in metrics:
					points = self.manager.get_points(tstart=tstart, tstop=now, _id=mid)
					fn = self.get_math_function(aggregation_method)

					pValues = [point[1] for point in points]

					if not len(pValues):
						continue

					values.append(fn(pValues))

				self.logger.debug(" + %s values" % len(values))

				if not len(values):
					self.storage.update(_id, { 'output_engine': "No values, check your interval" })
					continue

				self.logger.debug("Step 3: Consolidate (%s)" % consolidation_methods)

				# Reduce the per-metric values into one figure per
				# consolidation method
				perf_data_array = []
				
				for consolidation_method in consolidation_methods:
					fn = self.get_math_function(consolidation_method)
					value = fn(values)

					self.logger.debug(" + %s: %s %s" % (consolidation_method, value, mUnit))

					perf_data_array.append({
						'metric' : consolidation_method,
						'value' : roundSignifiantDigit(value,3),
						"unit": mUnit,
						'max': maxSum if consolidation_method == 'sum' else mMax,
						'min': mMin,
						'type': 'GAUGE'
					}) 

				self.logger.debug("Step 4: Send event")

				event = cevent.forger(
					connector ="consolidation",
					connector_name = "engine",
					event_type = "consolidation",
					source_type = "resource",
					component = record['component'],
					resource=record['resource'],
					state=0,
					timestamp=now,
					state_type=1,
					output="Consolidation: '%s' successfully computed" % name,
					long_output="",
					perf_data=None,
					perf_data_array=perf_data_array,
					display_name=name
				)
				rk = cevent.get_routingkey(event)
				self.counter_event += 1
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)

				self.timestamps[_id] = now

				self.logger.debug("Step 5: Update configuration")

				beat_elapsed = time.time() - now

				self.storage.update(_id, {
					'consolidation_ts': int(now),
					'nb_items': len(metrics),
					'output_engine': "Computation done in %.2fs (%s/%s)" % (beat_elapsed, len(values), len(metrics))
				})

			else:
				self.logger.debug("Not the moment")

		if not beat_elapsed:
			beat_elapsed = time.time() - now

		self.counter_worktime += beat_elapsed
Ejemplo n.º 15
0
    def event(self):
        """Build the Canopsis event representing this selector's state.

        Caches the event in ``self.last_event``, persists the routing key
        on first computation, and returns a ``(routing_key, event)`` tuple.
        """
        ### Transform Selector to Canopsis Event
        self.logger.debug("To Event:")

        # Get state
        (states, state, state_type) = self.getState()

        # Cast the per-state counters to int and compute the grand total
        for key in states:
            states[key] = int(states[key])
        total = sum(states.values())

        self.logger.debug(" + state: %s" % state)
        self.logger.debug(" + state_type: %s" % state_type)

        long_output = ""
        output = ""

        self.logger.debug(" + total: %s" % total)

        # One metric per state level (0..3), plus the overall total
        output_data = {}
        perf_data_array = []
        for level in range(4):
            metric = self.sel_metric_name % level
            count = states.get(level, 0)
            output_data[metric] = count
            perf_data_array.append({
                "metric": metric,
                "value": count,
                "max": total
            })

        perf_data_array.append({
            "metric": self.sel_metric_prefix + "total",
            "value": total
        })

        output_data['total'] = total

        # Fill Output template
        self.logger.debug(" + output TPL: %s" % self.output_tpl)
        output = self.output_tpl
        for key, count in output_data.items():
            output = output.replace("{%s}" % key, str(count))

        display_name = self.data.get("display_name", None)

        # Debug
        self.logger.debug(" + Display Name: %s" % display_name)
        self.logger.debug(" + output: %s" % output)
        self.logger.debug(" + long_output: %s" % long_output)
        self.logger.debug(" + perf_data_array: %s" % perf_data_array)

        # Build Event
        event = cevent.forger(
            connector="selector",
            connector_name="engine",
            event_type="selector",
            source_type="component",
            component=self.name,
            #resource=None,
            state=state,
            state_type=state_type,
            output=output,
            long_output=long_output,
            perf_data=None,
            perf_data_array=perf_data_array,
            display_name=display_name)

        # Extra field
        event["selector_id"] = str(self._id)

        # Build RK
        rk = cevent.get_routingkey(event)

        # Persist the routing key the first time it is computed
        if not self.rk:
            self.logger.debug("Set RK to '%s'" % rk)
            self.storage.update(self._id, {'rk': rk})
            self.rk = rk

        # Cache event
        self.last_event = event

        return (rk, event)
Ejemplo n.º 16
0
def parse_trap(mib, trap_oid, agent, varBinds):

	notification = mib.get_notification(trap_oid)

	## Parse trap
	if notification:
		try:
			logger.info("[%s][%s] %s-%s: %s (%s)" % (agent, mib.name, notification['SEVERITY'], notification['STATE'], notification['TYPE'], trap_oid))
		except Exception, err:
			logger.error("Impossible to parse notification, check mib conversion ...")
			return None
		
		arguments = notification['ARGUMENTS']
		summary	  = notification['SUMMARY']
		
		nb_string_arg = summary.count('%s')
		
		if varBinds and nb_string_arg:
			for i in range(nb_string_arg):
				logger.debug(" + Get value %s" % i)
				value = None
				oid, components = varBinds[i]
				component = components[0]
				if component != None:
					#value = component._componentValues[0]
					for info in component._componentValues:
						if info:
							value = str(info)

					logger.debug("   + %s" % value)

				if value:
					summary = summary.replace('%s', value, 1)
							

		logger.info(" + Summary: %s" % summary)

		component = agent
		resource = mib.name
		source_type = 'resource'
		state = severity_to_state[notification['SEVERITY']]
		output = notification['TYPE']
		long_output = summary

		## convert trap to event
		event = cevent.forger(
				connector='snmp',
				connector_name=DAEMON_NAME,
				component=component,
				resource=resource,
				timestamp=None,
				source_type=source_type,
				event_type='trap',
				state=state,
				output=output,
				long_output=long_output)

		#own fields
		event['snmp_severity'] = notification['SEVERITY']
		event['snmp_state'] = notification['STATE']
		event['snmp_oid'] = trap_oid

		logger.debug("Event: %s" % event)
		## send event on amqp
		key = cevent.get_routingkey(event)						
		myamqp.publish(event, key, myamqp.exchange_name_events)
Ejemplo n.º 17
0
def send_events(n, rate=0, burst=10):
    """Publish *n* benchmark events on AMQP and measure end-to-end latency.

    :param n: number of events to send
    :param rate: target events/second (0 = send unthrottled)
    :param burst: number of events sent between two rate-limiting pauses
    :return: seconds between the last publish and its appearance in storage,
        or None when nothing was sent or the wait timed out
    """
    i = 0

    logger.info("Send %s events" % n)

    # BUGFIX: n == 0 used to divide by zero in the rate computation and
    # leave rk/benchId unbound further down
    if n <= 0:
        logger.info(" + Nothing to send")
        return None

    if rate:
        # 1/10 ou 1/15
        logger.info(" + @ %s events/second (%s events/5min)" % (rate, (rate * 300)))
        # time_target = time.time() + float(n)/rate
        # Pause length applied after every `burst` events to hold the rate
        time_break = ((float(n) / rate) / n) * burst
        # logger.info(" + @ %s events / %s seconds" % (rate, burst))
        # logger.info(" + sleep %s seconds / %s events" % (time_break, burst))

    rk = None
    benchId = None

    time_start_burst = time.time()
    start_time = time.time()
    while RUN and i < n:
        event = base_component_event.copy()

        # event['component'] += str(i)
        event["component"] += "bench"

        # Inject a critical state every 300 events
        if i % 300 == 0:
            event["state"] = 2

        event["bench_timestamp"] = time.time()
        benchId = i
        event["benchId"] = benchId

        rk = cevent.get_routingkey(event)
        amqp.publish(event, rk, amqp.exchange_name_events)

        # Rate limiting: sleep off whatever the burst finished early
        if rate and (i % burst == 0):
            elapsed = time.time() - time_start_burst
            if time_break > elapsed:
                time.sleep(time_break - elapsed)
            time_start_burst = time.time()

        i += 1

    duration = time.time() - start_time
    # Guard against a zero duration on extremely fast runs
    evt_rate = int(n / duration) if duration else n
    logger.info(" + Done, elapsed: %.3f ms (%s events/second)" % ((duration * 1000), evt_rate))

    # BUGFIX: when RUN was cleared before any publish, rk/benchId were
    # unbound and the wait loop below raised NameError
    if rk is None:
        logger.info(" + No event sent")
        return None

    # Get last event
    record = None
    elapsed = None
    logger.info("Wait last record ('%s' %s) ..." % (rk, benchId))
    timeout = time.time() + 300
    while RUN:
        raw = storage.find_one({"_id": rk, "benchId": benchId}, mfields={"bench_timestamp": 1})
        if raw:
            elapsed = time.time() - float(raw["bench_timestamp"])
            storage.get_backend("events").remove({"_id": rk}, safe=True)
            logger.info(" + Done, Delta: %.3f s" % elapsed)
            total = elapsed + duration - 1
            logger.info(" + Est: %.0f Events/sec" % (n / total))
            break

        if time.time() > timeout:
            logger.info(" + Fail, timeout")
            break

        time.sleep(0.001)

    return elapsed
Ejemplo n.º 18
0
	def event(self):
		"""Build the Canopsis event representing this selector's state.

		Besides the per-state metrics, this variant also counts matching
		components and resources (when a filter is defined) and adds them
		as extra perfdata. Caches the event in ``self.last_event``,
		persists the routing key on first computation and returns a
		``(routing_key, event)`` tuple.
		"""
		### Transform Selector to Canopsis Event
		self.logger.debug("To Event:")
		
		# Get state
		(states, state, state_type) = self.getState()
		
		# Cast the per-state counters to int and compute the grand total
		total = 0		
		for s in states:
			states[s] = int(states[s])
			total += states[s]
		
		self.logger.debug(" + state: %s" % state)
		self.logger.debug(" + state_type: %s" % state_type)
		
		perf_data_array = []
		long_output = ""
		output = ""
			
		self.logger.debug(" + total: %s" % total)
		
		# Create perfdata array: one metric per state level (0..3);
		# missing levels count as 0
		output_data = {}
		for i in [0, 1, 2, 3]:
			value = 0
			try:
				value = states[i]
			except:
				pass
			
			metric =  self.sel_metric_name % i
			output_data[metric] = value
			perf_data_array.append({"metric": metric, "value": value, "max": total})
		
		perf_data_array.append({"metric": self.sel_metric_prefix + "total", "value": total})
		
		# Counte components and resources
		mfilter = self.makeMfilter()
		if mfilter:
		
			sel_nb_component = self.storage.count(mfilter={'$and': [ mfilter, {'source_type': 'component'}]}, namespace=self.namespace)
			sel_nb_resource = self.storage.count(mfilter={'$and': [ mfilter, {'source_type': 'resource'}]}, namespace=self.namespace)		
			
			# Only publish the split when it is consistent with the total
			if sel_nb_component + sel_nb_resource == total:
				perf_data_array.append({"metric": self.sel_metric_prefix + "component", "value": sel_nb_component, 'max': total})
				perf_data_array.append({"metric": self.sel_metric_prefix + "resource", "value": sel_nb_resource, 'max': total})
			else:
				self.logger.error("Invalid count: component: %s, resource: %s, total: %s" % (sel_nb_component, sel_nb_resource, total))
		
		output_data['total'] = total
	
		# Fill Output template
		self.logger.debug(" + output TPL: %s" % self.output_tpl)
		output = self.output_tpl
		if output_data:
			for key in output_data:
				output = output.replace("{%s}" % key, str(output_data[key]))
		
		display_name = self.data.get("display_name", None)
		
		# Debug
		self.logger.debug(" + Display Name: %s" % display_name)
		self.logger.debug(" + output: %s" % output)
		self.logger.debug(" + long_output: %s" % long_output)
		self.logger.debug(" + perf_data_array: %s" % perf_data_array)
		
		# Build Event
		event = cevent.forger(
			connector = "selector",
			connector_name = "engine",
			event_type = "selector",
			source_type="component",
			component=self.name,
			#resource=None,	
			state=state,
			state_type=state_type,
			output=output,
			long_output=long_output,
			perf_data=None,
			perf_data_array=perf_data_array,
			display_name=display_name
		)
				
		# Extra field
		event["selector_id"] = self._id
		
		# Build RK
		rk = cevent.get_routingkey(event)
		
		# Persist the routing key the first time it is computed
		if not self.rk:
			self.logger.debug("Set RK to '%s'" % rk)
			self.storage.update(self._id, {'rk': rk})
			self.rk = rk
				
		# Cache event
		self.last_event = event
				
		return (rk, event)
Ejemplo n.º 19
0
	def test_01(self):
		event = cevent.forger(connector='unittest', connector_name='test1', event_type='log')
		rk = cevent.get_routingkey(event)

		print rk
		print event
Ejemplo n.º 20
0
	def beat(self):
		"""Topology refresh pass.

		Runs when a topology was (re)loaded, a refresh was requested via
		``self.doBeat``, or the normal beat interval elapsed: rebuild each
		topology's node index and child links, fetch the current state of
		every referenced event, compute each topology's aggregate state and
		publish it as a 'topology' event with the UI tree attached.
		"""
		loaded_topo = self.topo_load()
		
		if loaded_topo or self.doBeat or int(time.time()) >= (self.lastBeat + self.normal_beat_interval):
			
			self.lastBeat = int(time.time())
			
			if loaded_topo:
				self.ids = []
				
				# Parse topo: index nodes by id and attach state operators
				for topo in self.topos:				
					self.logger.debug("Parse topo '%s': %s Nodes with %s Conns" % (topo['crecord_name'], len(topo['nodes']), len(topo['conns'])))
					
					topo['ids'] = self.topo_extractIds(topo)
			
					topo['nodesById'] = {}
					
					for key in topo['nodes']:
						node = topo['nodes'][key]
						
						_id = node['_id']
						
						if not node.get('calcul_state', None):
							if node.get('event_type', None) == 'operator':
								node['calcul_state'] = self.topo_getOperator_fn(_id)
								# Append a random suffix to the operator id —
								# presumably to keep multiple occurrences of
								# the same operator distinct; TODO confirm
								_id = "%s-%s" % (_id, int(random() * 10000))
								node['_id'] = _id
							else:
								node['calcul_state'] = self.default_Operator_fn
							
						topo['nodesById'][_id] = node
						node['childs'] = []
								
					self.logger.debug("Fill node's childs")
					self.topo_fillChilds(topo)
				
			
			# Get all states of all topos
			self.stateById = {}
			records = self.storage.find(mfilter={'_id': {'$in': self.ids}}, mfields=['state', 'state_type', 'previous_state'], namespace='events')
			for record in records:
				self.stateById[record['_id']] = {
					'state': record['state'],
					'state_type': record.get('state_type', 1),
					'previous_state': record.get('previous_state', record['state'])
				}
			
			# Get state by topo
			for topo in self.topos:
				## Parse tree for calcul state
				self.logger.debug(" + Calcul state:")
				states_info = self.topo_getState(topo)

				self.logger.debug("'%s': State: %s" % (topo['crecord_name'], states_info))
				self.storage.update(topo['_id'], {'state': states_info['state']})
				
				event = cevent.forger(
					connector =			NAME,
					connector_name =	"engine",
					event_type =		"topology",
					source_type =		"component",
					component =			topo['crecord_name'],
					state =				states_info['state'],
					state_type =		states_info['state_type'],
					output =			"",
					long_output =		"",
					#perf_data =			None,
					#perf_data_array =	[],
					display_name =		topo.get('display_name', None)
				)
				
				# Extra fields: full tree serialized for the UI
				event['nestedTree'] = self.topo_dump4Ui(topo)
		
				rk = cevent.get_routingkey(event)
				
				self.logger.debug("Publish event on %s" % rk)
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)
			
			# Forced refresh has been honoured
			self.doBeat = False
Ejemplo n.º 21
0
class engine(cengine):
    """Engine translating collectd plain-text 'PUTVAL' lines received over
    AMQP into Canopsis check events carrying perfdata.
    """

    def __init__(self, *args, **kargs):
        cengine.__init__(self, name=NAME, *args, **kargs)

    def create_amqp_queue(self):
        """Bind this engine's queue to the 'collectd' routing key on the
        amq.topic exchange (non auto-delete so messages survive restarts).
        """
        self.amqp.add_queue(self.amqp_queue, ['collectd'],
                            self.on_collectd_event,
                            "amq.topic",
                            auto_delete=False)

    def on_collectd_event(self, body, msg):
        """Parse one collectd line and publish the resulting check event.

        Expected format:
        ``PUTVAL host/resource/metric options timestamp:value[:value...]``

        :param body: raw collectd line (string)
        :param msg: AMQP message envelope (unused here)
        """
        start = time.time()
        error = False

        collectd_info = body.split(' ')

        if len(collectd_info) > 0:
            self.logger.debug(body)
            action = collectd_info[0]
            self.logger.debug(" + Action: %s" % action)

            if len(collectd_info) == 4 and action == "PUTVAL":
                # Identifier is host/resource/metric
                cnode = collectd_info[1].split("/")
                component = cnode[0]
                resource = cnode[1]
                metric = cnode[2]
                options = collectd_info[2]
                values = collectd_info[3]

                self.logger.debug(" + Options: %s" % options)
                self.logger.debug(" + Component: %s" % component)
                self.logger.debug(" + Resource: %s" % resource)
                self.logger.debug(" + Metric: %s" % metric)
                self.logger.debug(" + Raw Values: %s" % values)

                values = values.split(":")

                perf_data_array = []

                # Resolve the collectd type: exact name first, then the
                # "type-instance" form (e.g. "df-root" -> type "df")
                ctype = None
                try:
                    ## Know metric
                    ctype = types[metric]
                except:
                    # NOTE(review): bare except — consider narrowing to
                    # KeyError
                    try:
                        ctype = types[metric.split('-')[0]]
                        metric = metric.split('-')[1]
                    except Exception, err:
                        self.logger.error("Invalid format '%s' (%s)" %
                                          (body, err))
                        return None

                # First field of the value list is the timestamp
                try:
                    timestamp = int(Str2Number(values[0]))
                    values = values[1:]
                    self.logger.debug("   + Timestamp: %s" % timestamp)
                    self.logger.debug("   + Values: %s" % values)

                except Exception, err:
                    self.logger.error(
                        "Impossible to get timestamp or values (%s)" % err)
                    return None

                self.logger.debug(" + metric: %s" % metric)
                self.logger.debug(" + ctype: %s" % ctype)
                if ctype:
                    # Build one perfdata entry per value, using the type's
                    # per-field name/unit/min/max definitions
                    try:
                        i = 0
                        for value in values:
                            name = ctype[i]['name']
                            unit = ctype[i]['unit']
                            vmin = ctype[i]['min']
                            vmax = ctype[i]['max']

                            # 'U' means unbounded in collectd types
                            if vmin == 'U':
                                vmin = None

                            if vmax == 'U':
                                vmax = None

                            if name == "value":
                                name = metric

                            if metric != name:
                                name = "%s-%s" % (metric, name)

                            data_type = ctype[i]['type']

                            value = Str2Number(value)

                            self.logger.debug("     + %s" % name)
                            self.logger.debug("       -> %s (%s)" %
                                              (value, data_type))
                            i += 1

                            perf_data_array.append({
                                'metric': name,
                                'value': value,
                                'type': data_type,
                                'unit': unit,
                                'min': vmin,
                                'max': vmax
                            })

                    except Exception, err:
                        self.logger.error(
                            "Impossible to parse values '%s' (%s)" %
                            (values, err))

                if perf_data_array:
                    self.logger.debug(' + perf_data_array: %s',
                                      perf_data_array)

                    event = cevent.forger(connector='collectd',
                                          connector_name='collectd2event',
                                          component=component,
                                          resource=resource,
                                          timestamp=None,
                                          source_type='resource',
                                          event_type='check',
                                          state=0,
                                          perf_data_array=perf_data_array)

                    rk = cevent.get_routingkey(event)

                    self.logger.debug("Send Event: %s" % event)

                    ## send event on amqp
                    self.amqp.publish(event, rk,
                                      self.amqp.exchange_name_events)
Ejemplo n.º 22
0
	def event(self):
		"""Build the Canopsis event representing this selector's state.

		Caches the event in ``self.last_event``, persists the routing key
		on first computation, and returns a ``(routing_key, event)`` tuple.
		"""
		### Transform Selector to Canopsis Event
		self.logger.debug("To Event:")

		# Get state
		(states, state, state_type) = self.getState()

		# Cast the per-state counters to int and compute the grand total
		for key in states:
			states[key] = int(states[key])
		total = sum(states.values())

		self.logger.debug(" + state: %s" % state)
		self.logger.debug(" + state_type: %s" % state_type)

		long_output = ""
		output = ""

		self.logger.debug(" + total: %s" % total)

		# One metric per state level (0..3); missing levels count as 0
		output_data = {}
		perf_data_array = []
		for level in range(4):
			metric = self.sel_metric_name % level
			count = states.get(level, 0)
			output_data[metric] = count
			perf_data_array.append({"metric": metric, "value": count, "max": total})

		output_data['total'] = total

		# Fill Output template
		self.logger.debug(" + output TPL: %s" % self.output_tpl)
		output = self.output_tpl
		for key, count in output_data.items():
			output = output.replace("{%s}" % key, str(count))

		# Debug
		self.logger.debug(" + output: %s" % output)
		self.logger.debug(" + long_output: %s" % long_output)
		self.logger.debug(" + perf_data_array: %s" % perf_data_array)

		# Build Event
		event = cevent.forger(
			connector="selector",
			connector_name="engine",
			event_type="selector",
			source_type="component",
			component=self.name,
			#resource=None,
			state=state,
			state_type=state_type,
			output=output,
			long_output=long_output,
			perf_data=None,
			perf_data_array=perf_data_array
		)

		# Extra field
		event["selector_id"] = self._id

		# Build RK
		rk = cevent.get_routingkey(event)

		# Persist the routing key the first time it is computed
		if not self.rk:
			self.logger.debug("Set RK to '%s'" % rk)
			self.storage.update(self._id, {'rk': rk})
			self.rk = rk

		# Cache event
		self.last_event = event

		return (rk, event)
Ejemplo n.º 23
0
    def _beat(self):
        """Heartbeat: once a minute, publish this engine's throughput
        statistics (events/sec and sec/event) as a 'check' event, reset
        the per-interval counters, then run the user-defined beat() hook.
        """
        self.logger.debug("Beat: %s event(s), %s error" %
                          (self.counter_event, self.counter_error))
        now = int(time.time())

        # Publish stats at most once per minute
        if self.last_stat + 60 <= now:
            self.logger.debug(" + Send stats")
            self.last_stat = now

            evt_per_sec = 0
            sec_per_evt = 0

            if self.counter_event:
                evt_per_sec = float(self.counter_event) / self.beat_interval
                self.logger.debug(" + %0.2f event(s)/seconds" % evt_per_sec)

            if self.counter_worktime and self.counter_event:
                sec_per_evt = self.counter_worktime / self.counter_event
                self.logger.debug(" + %0.5f seconds/event" % sec_per_evt)

            ## Submit event
            if self.send_stats_event and self.counter_event != 0:
                # Grade processing time against warning/critical thresholds
                state = 0

                if sec_per_evt > self.thd_warn_sec_per_evt:
                    state = 1

                if sec_per_evt > self.thd_crit_sec_per_evt:
                    state = 2

                perf_data_array = [
                    {
                        'retention': self.perfdata_retention,
                        'metric': 'cps_evt_per_sec',
                        'value': round(evt_per_sec, 2),
                        'unit': 'evt'
                    },
                    {
                        'retention': self.perfdata_retention,
                        'metric': 'cps_sec_per_evt',
                        'value': round(sec_per_evt, 5),
                        'unit': 's',
                        'warn': self.thd_warn_sec_per_evt,
                        'crit': self.thd_crit_sec_per_evt
                    },
                ]

                self.logger.debug(" + State: %s" % state)

                event = cevent.forger(connector="cengine",
                                      connector_name="engine",
                                      event_type="check",
                                      source_type="resource",
                                      resource=self.amqp_queue,
                                      state=state,
                                      state_type=1,
                                      output="%0.2f evt/sec, %0.5f sec/evt" %
                                      (evt_per_sec, sec_per_evt),
                                      perf_data_array=perf_data_array)

                rk = cevent.get_routingkey(event)
                self.amqp.publish(event, rk, self.amqp.exchange_name_events)

            # Start a fresh measurement interval
            self.counter_error = 0
            self.counter_event = 0
            self.counter_worktime = 0

        # User hook: never let its failure kill the heartbeat
        try:
            self.beat()
        except Exception, err:
            self.logger.error("Beat raise exception: %s" % err)
            traceback.print_exc(file=sys.stdout)
Ejemplo n.º 24
0
			component=component,
			resource=resource,
			timestamp=timestamp,
			source_type=source_type,
			event_type='log',
			state=state,
			output=output,
			long_output=long_output)

	event['level'] = gelf['level']
	event['facility'] = gelf['facility']

	logger.debug('Event: %s' % event)
	
	
	key = cevent.get_routingkey(event)						
	myamqp.publish(event, key, myamqp.exchange_name_events)

########################################################
#
#   Main
#
########################################################

def main():
	"""Script entry point: run the GELF handler loop.

	NOTE(review): body appears truncated at the snippet boundary — the
	``global myamqp`` declaration suggests AMQP connection setup followed
	here in the original file; confirm before reuse.
	"""

	handler.run()
	
	# global
	global myamqp
Ejemplo n.º 25
0
	def beat(self):
		"""Periodic consolidation pass.

		For each loaded consolidation record whose aggregation interval has
		elapsed: select matching metrics, aggregate each metric's points over
		the time window ("horizontal" aggregation), fold the per-metric values
		with the configured consolidation method(s) ("vertical" consolidation),
		publish the result as a perf-data event, and persist run statistics
		back to the record.
		"""
		self.logger.debug("Consolidate metrics:")

		now = time.time()
		beat_elapsed = 0

		# Refresh self.records from storage before processing.
		self.load_consolidation()

		for record in self.records.values():
			
			#self.logger.debug("Raw: %s" % record)

			_id = record.get('_id')
			name = record.get('crecord_name')

			aggregation_interval = record.get('aggregation_interval')

			self.logger.debug("'%s':" % name)
			self.logger.debug(" + interval: %s" % aggregation_interval)

			# First run: default to `now` so elapsed == 0, which forces a pass
			# through the `elapsed == 0 or ...` condition below.
			last_run = record.get('consolidation_ts', now)

			elapsed = now - last_run

			self.logger.debug(" + elapsed: %s" % elapsed)

			if elapsed == 0 or elapsed >= aggregation_interval:
				self.logger.debug("Step 1: Select metrics")

				# 'mfilter' is stored as a JSON string; decode to a Mongo query.
				mfilter = json.loads(record.get('mfilter'))
				self.logger.debug(' + mfilter: %s' % mfilter)

				# Exclude internal metrics
				mfilter = {'$and': [mfilter, {'me': {'$nin':internal_metrics}}]}

				metric_list = self.manager.store.find(mfilter=mfilter)

				self.logger.debug(" + %s metrics found" % metric_list.count())

				if not metric_list.count():
					self.storage.update(_id, { 'output_engine': "No metrics, check your filter" })
					continue

				aggregation_method = record.get('aggregation_method')
				self.logger.debug(" + aggregation_method: %s" % aggregation_method)

				# Normalize to a list: the record may store a single method.
				consolidation_methods = record.get('consolidation_method')
				if not isinstance(consolidation_methods, list):
					consolidation_methods = [ consolidation_methods ]

				self.logger.debug(" + consolidation_methods: %s" % consolidation_methods)

				mType = mUnit = mMin = mMax = None

				# Get metrics
				# Collect metric ids and track global min/max/unit across the
				# set. 'mi'/'ma'/'u' presumably are the metric's min/max/unit
				# fields -- TODO confirm against the metric store schema.
				metrics = []
				for index, metric in enumerate(metric_list):
					if  index == 0 :
						#mType = metric.get('t')
						mMin = metric.get('mi')
						mMax = metric.get('ma')
						mUnit = metric.get('u')
						if 'sum' in consolidation_methods:
							maxSum = mMax
					else:
						if  metric.get('mi') < mMin :
							mMin = metric.get('mi')
						if metric.get('ma') > mMax :
							mMax = metric.get('ma')
						if 'sum' in consolidation_methods and mMax:
							maxSum += metric.get('ma')
						if metric.get('u') != mUnit :
							# Mixed units make the consolidated value dubious;
							# warn but keep going.
							self.logger.warning("%s: too many units" % name)
							output_message = "warning : too many units"

					self.logger.debug(' + %s , %s , %s, %s' % (
						metric.get('_id'),
						metric.get('co'),
						metric.get('re',''),
						metric.get('me'))
					)

					metrics.append(metric.get('_id'))

				self.logger.debug(' + mMin: %s' % mMin)
				self.logger.debug(' + mMax: %s' % mMax)
				self.logger.debug(' + mUnit: %s' % mUnit)

				self.logger.debug("Step 2: Aggregate (%s)" % aggregation_method)

				# Set time range
				tstart = last_run

				# First run, or missed more than one interval: clamp the
				# window back to a single interval ending now.
				if elapsed == 0 or last_run < (now - 2 * aggregation_interval):
					tstart = now - aggregation_interval

				self.logger.debug(
					" + From: %s To %s "% 
					(datetime.fromtimestamp(tstart).strftime('%Y-%m-%d %H:%M:%S'),
					datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
				)

				# Horizontal aggregation: one aggregated value per metric over
				# the window; metrics with no points are silently skipped.
				values = []
				for mid in metrics:
					points = self.manager.get_points(tstart=tstart, tstop=now, _id=mid)
					fn = self.get_math_function(aggregation_method)

					pValues = [point[1] for point in points]

					if not len(pValues):
						continue

					values.append(fn(pValues))

				self.logger.debug(" + %s values" % len(values))

				if not len(values):
					self.storage.update(_id, { 'output_engine': "No values, check your interval" })
					continue

				self.logger.debug("Step 3: Consolidate (%s)" % consolidation_methods)

				perf_data_array = []
				
				# Vertical consolidation: fold the per-metric values with each
				# configured method into one perf-data entry per method.
				for consolidation_method in consolidation_methods:
					fn = self.get_math_function(consolidation_method)
					value = fn(values)

					self.logger.debug(" + %s: %s %s" % (consolidation_method, value, mUnit))

					# For 'sum' the theoretical max is the sum of the metric
					# maxima (maxSum); otherwise the largest single max.
					perf_data_array.append({
						'metric' : consolidation_method,
						'value' : roundSignifiantDigit(value,3),
						"unit": mUnit,
						'max': maxSum if consolidation_method == 'sum' else mMax,
						'min': mMin,
						'type': 'GAUGE'
					}) 

				self.logger.debug("Step 4: Send event")

				event = cevent.forger(
					connector ="consolidation",
					connector_name = "engine",
					event_type = "consolidation",
					source_type = "resource",
					component = record['component'],
					resource=record['resource'],
					state=0,
					timestamp=now,
					state_type=1,
					output="Consolidation: '%s' successfully computed" % name,
					long_output="",
					perf_data=None,
					perf_data_array=perf_data_array,
					display_name=name
				)
				rk = cevent.get_routingkey(event)
				self.counter_event += 1
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)

				self.timestamps[_id] = now

				self.logger.debug("Step 5: Update configuration")

				beat_elapsed = time.time() - now

				# Persist the run timestamp so the next beat computes elapsed
				# from it, plus human-readable engine output.
				self.storage.update(_id, {
					'consolidation_ts': int(now),
					'nb_items': len(metrics),
					'output_engine': "Computation done in %.2fs (%s/%s)" % (beat_elapsed, len(values), len(metrics))
				})

			else:
				self.logger.debug("Not the moment")

		if not beat_elapsed:
			beat_elapsed = time.time() - now

		# Accumulated work time is reported elsewhere via stat events.
		self.counter_worktime += beat_elapsed
Ejemplo n.º 26
0
# Verbose root-logger setup so the unit-test output carries timestamps,
# logger names and levels.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s %(name)s %(levelname)s %(message)s',
)

# Reference check event published by the test; perf_data carries a single
# metric in Nagios perf-data syntax (value;warn;crit;min;max).
event = cevent.forger(connector='canopsis',
                      connector_name='unittest',
                      event_type='check',
                      source_type="component",
                      component="test1",
                      state=0,
                      output="Output_1",
                      perf_data="mymetric=1s;10;20;0;30",
                      tags=['check', 'component', 'test1', 'unittest'])
# Routing key of the reference event; used by on_alert() to recognize it.
rk = cevent.get_routingkey(event)

# Module-level handles populated later by the test harness.
myamqp = None
storage = None
event_alert = None  # set by on_alert() when the reference event comes back
perfstore = None


def on_alert(body, message):
    print "Alert: %s" % body
    mrk = message.delivery_info['routing_key']
    if mrk == rk:
        global event_alert
        event_alert = body

Ejemplo n.º 27
0
	def beat(self):
		"""Periodic consolidation pass (record loading + computation).

		Loads newly-enabled consolidation records, drops deleted ones, then
		for each record whose interval has elapsed: aggregates matching
		metric points over the window, consolidates the per-metric values
		with the configured method(s) and publishes the result as an event.
		"""
		beat_start = time.time()

		self.clean_consolidations()

		# Pick up enabled consolidation records not yet marked as loaded.
		non_loaded_records = self.storage.find({ '$and' : [{ 'crecord_type': 'consolidation' },{'enable': True}, {'loaded': { '$ne' : True} } ] }, namespace="object" )

		if len(non_loaded_records) > 0  :
			for item in non_loaded_records :
				self.logger.info("New consolidation found '%s', load" % item.name)
				self.load(item)

		# Drop records deleted from storage since the last beat.
		for _id in self.records.keys() :
			exists = self.storage.find_one({ '_id': _id } )
			if not exists:
				self.logger.info("%s deleted, remove from record list" % self.records[_id]['crecord_name'])
				del(self.records[_id])

		for record in self.records.values():
			consolidation_last_timestamp = self.timestamps[record.get('_id')]

			aggregation_interval = record.get('aggregation_interval', self.default_interval)
			current_interval = int(time.time()) - consolidation_last_timestamp

			self.logger.debug('current interval: %s , consolidation interval: %s' % (current_interval,aggregation_interval))
			if  current_interval >= aggregation_interval:
				self.logger.debug('Compute new consolidation for: %s' % record.get('crecord_name','No name found'))

				output_message = None
				# 'mfilter' is stored as JSON; decode and exclude internal metrics.
				mfilter = json.loads(record.get('mfilter'))
				mfilter = {'$and': [mfilter, {'me': {'$nin':internal_metrics}}]}
				#self.logger.debug('the mongo filter is: %s' % mfilter)
				metric_list = self.manager.store.find(mfilter=mfilter)
				self.logger.debug('length of matching metric list is: %i' % metric_list.count())
				
				aggregation_method = record.get('aggregation_method', False)
				consolidation_methods = record.get('consolidation_method', False)

				# Normalize to a list: the record may store a single method.
				if not isinstance(consolidation_methods, list):
					consolidation_methods = [ consolidation_methods ] 

				mType = mUnit = mMin = mMax = None
				values = []

				# Track global min/max/unit across the metric set while
				# aggregating each metric's points. 'mi'/'ma'/'u' presumably
				# are min/max/unit fields -- TODO confirm against the schema.
				# NOTE(review): maxSum is only bound when the first metric is
				# seen; with an empty metric_list and 'sum' configured, the
				# perf-data build below would raise NameError.
				for index,metric in enumerate(metric_list) :
					if  index == 0 :
						#mType = metric.get('t')
						mMin = metric.get('mi')
						mMax = metric.get('ma')
						mUnit = metric.get('u')
						if 'sum' in consolidation_methods:
							maxSum = mMax
					else:
						if  metric.get('mi') < mMin :
							mMin = metric.get('mi')
						if metric.get('ma') > mMax :
							mMax = metric.get('ma')
						if 'sum' in consolidation_methods:
							maxSum += metric.get('ma')
						if metric.get('u') != mUnit :
							output_message = "warning : too many units"

					self.logger.debug(' + Get points for: %s , %s , %s, %s' % (metric.get('_id'),metric.get('co'),metric.get('re',''),metric.get('me')))

					# Reuse the previous run's timestamp as window start when we
					# are close to on-schedule (60 s slack); otherwise clamp the
					# window to exactly one interval ending now.
					if int(time.time()) - aggregation_interval <= consolidation_last_timestamp + 60:
						tstart = consolidation_last_timestamp
						#self.logger.debug('   +   Use original tstart: %i' % consolidation_last_timestamp)
					else:
						tstart = int(time.time()) - aggregation_interval
						#self.logger.debug('   +   new tstart: %i' % tstart)

					self.logger.debug(
										'   +   from: %s to %s' % 
										(datetime.fromtimestamp(tstart).strftime('%Y-%m-%d %H:%M:%S'),
										datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
									)

					list_points = self.manager.get_points(tstart=tstart,tstop=time.time(), _id=metric.get('_id'))
					self.logger.debug('   +   Values on interval: %s' % ' '.join([str(value[1]) for value in list_points]))

					# Horizontal aggregation: one value per metric. Without an
					# aggregation function, fall back to the last point's value.
					if list_points:
						fn = self.get_math_function(aggregation_method)
						if fn:
							point_value = fn([value[1] for value in list_points])
						else:
							point_value = list_points[len(list_points)-1][1]
						values.append(point_value)

				self.logger.debug('   +   Summary of horizontal aggregation "%s":' % aggregation_method)
				self.logger.debug(values)

				# NOTE(review): the three `return`s below abort the whole beat,
				# skipping every remaining record -- `continue` was probably
				# intended (compare the sibling beat() variant that uses it).
				if not consolidation_methods:
					self.storage.update(record.get('_id'), {'output_engine': "No second aggregation function given"  } )
					return

				if len(values) == 0 :
					self.logger.debug('  +  No values')
					self.storage.update(record.get('_id'), {
															'output_engine': "No input values",
															'consolidation_ts':int(time.time())
															})
					self.timestamps[record.get('_id')] = int(time.time())
					return

				# Vertical consolidation: fold per-metric values with each
				# configured method into one perf-data entry per method.
				list_perf_data = []
				for function_name in consolidation_methods :
					fn = self.get_math_function(function_name)

					if not fn:
						self.logger.debug('No function given for second aggregation')
						self.storage.update(record.get('_id'), {'output_engine': "No function given for second aggregation"})
						return

					# NOTE(review): dead branch -- len(values) == 0 already
					# returned above, so this can never execute.
					if len(values) == 0 :
						if not output_message:
							self.storage.update(record.get('_id'), {'output_engine': "No result"  } )
						else:
							self.storage.update(record.get('_id'), {'output_engine': "there are issues : %s warning : No result" % output_message } )

					value = fn(values)

					self.logger.debug(' + Result of aggregation for "%s": %f' % (function_name,value))

					# For 'sum' the theoretical max is the sum of the metric
					# maxima (maxSum); otherwise the largest single max.
					list_perf_data.append({ 
											'metric' : function_name, 
											'value' : roundSignifiantDigit(value,3), 
											"unit": mUnit, 
											'max': maxSum if function_name == 'sum' else mMax, 
											'min': mMin, 
											'type': 'GAUGE' } ) 

				# Timestamp the point at the middle of the elapsed interval.
				point_timestamp = int(time.time()) - current_interval/2

				event = cevent.forger(
					connector ="consolidation",
					connector_name = "engine",
					event_type = "consolidation",
					source_type = "resource",
					component = record['component'],
					resource=record['resource'],
					state=0,
					timestamp=point_timestamp,
					state_type=1,
					output="Consolidation: '%s' successfully computed" % record.get('crecord_name','No name'),
					long_output="",
					perf_data=None,
					perf_data_array=list_perf_data,
					display_name=record['crecord_name']
				)	
				rk = cevent.get_routingkey(event)
				self.counter_event += 1
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)

				self.logger.debug('The following event was sent:')
				self.logger.debug(event)

				if not output_message:
					engine_output = '%s : Computation done. Next Computation in %s s' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),str(aggregation_interval))
					self.storage.update(record.get('_id'),{'output_engine':engine_output} )
				else:
					engine_output = '%s : Computation done but there are issues : "%s" . Next Computation in %s s' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),output_message,str(aggregation_interval))
					self.storage.update(record.get('_id'), {'output_engine': engine_output} )

				# Persist the run timestamp for the next elapsed computation.
				self.storage.update(record.get('_id'), {'consolidation_ts':int(time.time())})
				self.timestamps[record.get('_id')] = int(time.time())
		
		self.counter_worktime += time.time() - beat_start