Code Example #1
File: event_filter.py Project: jbbqqf/canopsis
    def send_stat_event(self):
        """ Send AMQP Event for drop and pass metrics """

        event = cevent.forger(connector="cengine",
                              connector_name="engine",
                              event_type="check",
                              source_type="resource",
                              resource=self.amqp_queue + '_data',
                              state=0,
                              state_type=1,
                              output="%s event dropped since %s" %
                              (self.drop_event_count, self.beat_interval),
                              perf_data_array=[{
                                  'metric': 'pass_event',
                                  'value': self.pass_event_count,
                                  'type': 'GAUGE'
                              }, {
                                  'metric': 'drop_event',
                                  'value': self.drop_event_count,
                                  'type': 'GAUGE'
                              }])

        self.logger.debug("%s event dropped since %s" %
                          (self.drop_event_count, self.beat_interval))
        self.logger.debug("%s event passed since %s" %
                          (self.pass_event_count, self.beat_interval))

        rk = cevent.get_routingkey(event)
        self.amqp.publish(event, rk, self.amqp.exchange_name_events)

        self.drop_event_count = 0
        self.pass_event_count = 0
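
This and the following examples all share the same three-step pattern: forge the event dictionary with cevent.forger, derive its AMQP routing key with cevent.get_routingkey, and publish it on the events exchange with amqp.publish. Below is a minimal sketch of that pattern, assuming only the cevent calls and the amqp object (with publish() and exchange_name_events) shown in these snippets; the forge_and_publish helper name is hypothetical.

import cevent

def forge_and_publish(amqp, **event_fields):
    # 1. Forge the event dictionary from keyword fields
    #    (connector, connector_name, event_type, source_type, state, ...).
    event = cevent.forger(**event_fields)

    # 2. Derive the AMQP routing key from the event fields.
    rk = cevent.get_routingkey(event)

    # 3. Publish the event on the events exchange.
    amqp.publish(event, rk, amqp.exchange_name_events)

    return rk, event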
Code Example #2
File: event_filter.py Project: EzanLTD/canopsis
	def send_stat_event(self):
		""" Send AMQP Event for drop and pass metrics """

		event = cevent.forger(
			connector = "cengine",
			connector_name = "engine",
			event_type = "check",
			source_type="resource",
			resource=self.amqp_queue + '_data',
			state=0,
			state_type=1,
			output="%s event dropped since %s" % (self.drop_event_count, self.beat_interval),
			perf_data_array=[
								{'metric': 'pass_event' , 'value': self.pass_event_count, 'type': 'GAUGE' },
								{'metric': 'drop_event' , 'value': self.drop_event_count, 'type': 'GAUGE' }
							]
		)

		self.logger.debug("%s event dropped since %s" % (self.drop_event_count, self.beat_interval))
		self.logger.debug("%s event passed since %s" % (self.pass_event_count, self.beat_interval))


		rk = cevent.get_routingkey(event)
		self.amqp.publish(event, rk, self.amqp.exchange_name_events)

		self.drop_event_count = 0				
		self.pass_event_count = 0
Code Example #3
File: cengine.py Project: dmichau/canopsis
	def _beat(self):
		self.logger.debug("Beat: %s event(s), %s error" % (self.counter_event, self.counter_error))
		
		if not self.input_queue.empty():
			size = self.input_queue.qsize()
			if size > 110:
				self.logger.info("%s event(s) in internal queue" % size)
			
		evt_per_sec = 0
		sec_per_evt = 0
		
		if self.counter_event:
			evt_per_sec = float(self.counter_event) / self.beat_interval
			self.logger.debug(" + %0.2f event(s)/seconds" % evt_per_sec)
		
		if self.counter_worktime and self.counter_event:
			sec_per_evt = self.counter_worktime / self.counter_event
			self.logger.debug(" + %0.5f seconds/event" % sec_per_evt)
			
		self.counter_error = 0
		self.counter_event = 0
		self.counter_worktime = 0
		
		## Submit event
		if self.send_stats_event:
			state = 0
			
			if sec_per_evt > self.thd_warn_sec_per_evt:
				state = 1
				
			if sec_per_evt > self.thd_crit_sec_per_evt:
				state = 2
			
			perf_data_array = [
				{'retention': self.perfdata_retention, 'metric': 'cps_queue_size', 'value': self.input_queue.qsize(), 'unit': 'evt' },
				{'retention': self.perfdata_retention, 'metric': 'cps_evt_per_sec', 'value': round(evt_per_sec,2), 'unit': 'evt/sec' },
				{'retention': self.perfdata_retention, 'metric': 'cps_sec_per_evt', 'value': round(sec_per_evt,5), 'unit': 'sec/evt', 'warn': 0.5, 'crit': 0.8 },
			]
			
			event = cevent.forger(
				connector = "cengine",
				connector_name = "engine",
				event_type = "check",
				source_type="resource",
				resource=self.amqp_queue,
				state=state,
				state_type=1,
				output="%0.2f evt/sec, %0.5f sec/evt" % (evt_per_sec, sec_per_evt),
				perf_data_array=perf_data_array
			)
			
			rk = cevent.get_routingkey(event)
			self.amqp.publish(event, rk, self.amqp.exchange_name_events)
		
		try:
			self.beat()
		except Exception, err:
			self.logger.error("Beat raise exception: %s" % err)
			traceback.print_exc(file=sys.stdout)
Code Example #4
File: cevent-Myunittest.py Project: moas/canopsis
    def test_01(self):
        event = cevent.forger(connector='unittest',
                              connector_name='test1',
                              event_type='log')
        rk = cevent.get_routingkey(event)

        print rk
        print event
Code Example #5
File: derogation.py Project: moas/canopsis
    def set_derogation_state(self, derogation, active):
        dactive = derogation.get('active', False)
        name = derogation.get('crecord_name', None)
        notify = False
        state = 0

        if active:
            if not dactive:
                self.logger.info(
                    "%s (%s) is now active" %
                    (derogation['crecord_name'], derogation['_id']))
                self.storage.update(derogation['_id'], {'active': True})
                notify = True
        else:
            if dactive:
                self.logger.info(
                    "%s (%s) is now inactive" %
                    (derogation['crecord_name'], derogation['_id']))
                self.storage.update(derogation['_id'], {'active': False})
                notify = True

        if notify:
            if active:
                output = "Derogation '%s' is now active" % name
                state = 1
            else:
                output = "Derogation '%s' is now inactive" % name

            tags = derogation.get('tags', None)
            self.logger.debug(" + Tags: '%s' (%s)" % (tags, type(tags)))

            if isinstance(tags, str) or isinstance(tags, unicode):
                tags = [tags]

            if not isinstance(tags, list) or tags == "":
                tags = None

            event = cevent.forger(connector="cengine",
                                  connector_name="engine",
                                  event_type="log",
                                  source_type="component",
                                  component=NAME,
                                  state=state,
                                  output=output,
                                  long_output=derogation.get(
                                      'description', None),
                                  tags=tags)
            rk = cevent.get_routingkey(event)

            self.amqp.publish(event, rk, self.amqp.exchange_name_events)
Code Example #6
File: derogation.py Project: EzanLTD/canopsis
	def set_derogation_state(self, derogation, active):
		dactive = derogation.get('active', False)
		name = derogation.get('crecord_name', None)
		notify = False
		state = 0
		
		if active:
			if not dactive:
				self.logger.info("%s (%s) is now active" % (derogation['crecord_name'], derogation['_id']))
				self.storage.update(derogation['_id'], {'active': True})
				notify = True
		else:
			if dactive:
				self.logger.info("%s (%s) is now inactive" % (derogation['crecord_name'], derogation['_id']))
				self.storage.update(derogation['_id'], {'active': False})
				notify = True
				
		if notify:
			if active:
				output = "Derogation '%s' is now active" % name
				state = 1
			else:
				output = "Derogation '%s' is now inactive" % name
			
			
			tags = derogation.get('tags', None)
			self.logger.debug(" + Tags: '%s' (%s)" % (tags, type(tags)))
			
			if isinstance(tags, str) or isinstance(tags, unicode):
				tags = [ tags ]
			
			if not isinstance(tags, list) or tags == "":
				tags = None
				
			event = cevent.forger(
				connector = "cengine",
				connector_name = "engine",
				event_type = "log",
				source_type="component",
				component=NAME,
				state=state,
				output=output,
				long_output=derogation.get('description', None),
				tags=tags
			)
			rk = cevent.get_routingkey(event)
			
			self.amqp.publish(event, rk, self.amqp.exchange_name_events)
Code Example #7
File: consolidation.py Project: linkdd/canopsis
	def beat(self):
		self.logger.debug("Consolidate metrics:")

		now = time.time()
		beat_elapsed = 0

		self.load_consolidation()

		for record in self.records.values():
			
			#self.logger.debug("Raw: %s" % record)

			_id = record.get('_id')
			name = record.get('crecord_name')

			aggregation_interval = record.get('aggregation_interval')

			self.logger.debug("'%s':" % name)
			self.logger.debug(" + interval: %s" % aggregation_interval)

			last_run = record.get('consolidation_ts', now)

			elapsed = now - last_run

			self.logger.debug(" + elapsed: %s" % elapsed)

			if elapsed == 0 or elapsed >= aggregation_interval:
				self.logger.debug("Step 1: Select metrics")

				mfilter = json.loads(record.get('mfilter'))
				self.logger.debug(' + mfilter: %s' % mfilter)

				# Exclude internal metrics
				mfilter = {'$and': [mfilter, {'me': {'$nin':internal_metrics}}]}

				metric_list = self.manager.store.find(mfilter=mfilter)

				self.logger.debug(" + %s metrics found" % metric_list.count())

				if not metric_list.count():
					self.storage.update(_id, { 'output_engine': "No metrics, check your filter" })
					continue

				aggregation_method = record.get('aggregation_method')
				self.logger.debug(" + aggregation_method: %s" % aggregation_method)

				consolidation_methods = record.get('consolidation_method')
				if not isinstance(consolidation_methods, list):
					consolidation_methods = [ consolidation_methods ]

				self.logger.debug(" + consolidation_methods: %s" % consolidation_methods)

				mType = mUnit = mMin = mMax = None

				# Get metrics
				metrics = []
				for index, metric in enumerate(metric_list):
					if  index == 0 :
						#mType = metric.get('t')
						mMin = metric.get('mi')
						mMax = metric.get('ma')
						mUnit = metric.get('u')
						if 'sum' in consolidation_methods:
							maxSum = mMax
					else:
						if  metric.get('mi') < mMin :
							mMin = metric.get('mi')
						if metric.get('ma') > mMax :
							mMax = metric.get('ma')
						if 'sum' in consolidation_methods and mMax:
							maxSum += metric.get('ma')
						if metric.get('u') != mUnit :
							self.logger.warning("%s: too many units" % name)
							output_message = "warning : too many units"

					self.logger.debug(' + %s , %s , %s, %s' % (
						metric.get('_id'),
						metric.get('co'),
						metric.get('re',''),
						metric.get('me'))
					)

					metrics.append(metric.get('_id'))

				self.logger.debug(' + mMin: %s' % mMin)
				self.logger.debug(' + mMax: %s' % mMax)
				self.logger.debug(' + mUnit: %s' % mUnit)

				self.logger.debug("Step 2: Aggregate (%s)" % aggregation_method)

				# Set time range
				tstart = last_run

				if elapsed == 0 or last_run < (now - 2 * aggregation_interval):
					tstart = now - aggregation_interval

				self.logger.debug(
					" + From: %s To %s "% 
					(datetime.fromtimestamp(tstart).strftime('%Y-%m-%d %H:%M:%S'),
					datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
				)

				values = []
				for mid in metrics:
					points = self.manager.get_points(tstart=tstart, tstop=now, _id=mid)
					fn = self.get_math_function(aggregation_method)

					pValues = [point[1] for point in points]

					if not len(pValues):
						continue

					values.append(fn(pValues))

				self.logger.debug(" + %s values" % len(values))

				if not len(values):
					self.storage.update(_id, { 'output_engine': "No values, check your interval" })
					continue

				self.logger.debug("Step 3: Consolidate (%s)" % consolidation_methods)

				perf_data_array = []
				
				for consolidation_method in consolidation_methods:
					fn = self.get_math_function(consolidation_method)
					value = fn(values)

					self.logger.debug(" + %s: %s %s" % (consolidation_method, value, mUnit))

					perf_data_array.append({
						'metric' : consolidation_method,
						'value' : roundSignifiantDigit(value,3),
						"unit": mUnit,
						'max': maxSum if consolidation_method == 'sum' else mMax,
						'min': mMin,
						'type': 'GAUGE'
					}) 

				self.logger.debug("Step 4: Send event")

				event = cevent.forger(
					connector ="consolidation",
					connector_name = "engine",
					event_type = "consolidation",
					source_type = "resource",
					component = record['component'],
					resource=record['resource'],
					state=0,
					timestamp=now,
					state_type=1,
					output="Consolidation: '%s' successfully computed" % name,
					long_output="",
					perf_data=None,
					perf_data_array=perf_data_array,
					display_name=name
				)
				rk = cevent.get_routingkey(event)
				self.counter_event += 1
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)

				self.timestamps[_id] = now

				self.logger.debug("Step 5: Update configuration")

				beat_elapsed = time.time() - now

				self.storage.update(_id, {
					'consolidation_ts': int(now),
					'nb_items': len(metrics),
					'output_engine': "Computation done in %.2fs (%s/%s)" % (beat_elapsed, len(values), len(metrics))
				})

			else:
				self.logger.debug("Not the moment")

		if not beat_elapsed:
			beat_elapsed = time.time() - now

		self.counter_worktime += beat_elapsed
Code Example #8
File: gelf2amqp.py Project: EzanLTD/canopsis
		timestamp = None

	output   = message['output']
	resource = message['resource']

	#component = str(gelf['host'])
	component= message['component']

	source_type='resource'

	event = cevent.forger(
			connector='gelf',
			connector_name=DAEMON_NAME,
			component=component,
			resource=resource,
			timestamp=timestamp,
			source_type=source_type,
			event_type='log',
			state=state,
			output=output,
			long_output=long_output)

	event['level'] = gelf['level']
	event['facility'] = gelf['facility']

	logger.debug('Event: %s' % event)
	
	
	key = cevent.get_routingkey(event)						
	myamqp.publish(event, key, myamqp.exchange_name_events)
Code Example #9
File: aps_to_celery.py Project: dmichau/canopsis
			if success == True:
				status=0
				#result['aps_output'] = 'APS task success'
				#task_output = result
				task_output = ('APS : Task success - Celery : %s - Duration : %is' % (result['celery_output'],execution_time))
			else:
				status=1
				#result['aps_output'] = aps_error
				#task_output = result
				task_output = ('APS : %s - Celery : %s - Duration : %is' % (aps_error,result['celery_output'],execution_time))
				
			event = cevent.forger(
				connector='celery',
				connector_name='task_log',
				event_type='log',
				source_type='resource',
				resource=('task.%s.%s.%s' %  (celery_task_name,account.user,task_name)), 
				output=task_output,
				state=status
				)
			
			#logger.info('Send Event: %s' % event)
			key = cevent.get_routingkey(event)

			amqp.publish(event, key, amqp.exchange_name_events)
						
			logger.info('Amqp event published')

			#--------------------return result-------------------
			return result
			
Code Example #10
File: sla.py Project: nsipieter/canopsis
        if output_data:
            for key in output_data:
                output = output.replace("{%s}" % key, str(output_data[key]))

        self.logger.debug("     + Output:    %s" % output)
        self.logger.debug("     + Perfdata:  %s" % perf_data_array)

        # Send AMQP Event
        event = cevent.forger(
            connector="sla",
            connector_name="engine",
            event_type="sla",
            source_type="resource",
            component=config["name"],
            resource="sla",
            state=state,
            state_type=1,
            output=output,
            long_output="",
            perf_data=None,
            perf_data_array=perf_data_array,
            display_name=config.get("display_name", None),
        )

        # Extra fields
        event["selector_id"] = config["_id"]
        event["selector_rk"] = config["rk"]

        rk = self.get_rk(config["name"])

        self.logger.debug("Publish event on %s" % rk)
Code Example #11
                task_output = (
                    'APS : Task success - Celery : %s - Duration : %is' %
                    (result['celery_output'], execution_time))
            else:
                status = 1
                #result['aps_output'] = aps_error
                #task_output = result
                task_output = (
                    'APS : %s - Celery : %s - Duration : %is' %
                    (aps_error, result['celery_output'], execution_time))

            event = cevent.forger(
                connector='celery',
                connector_name='task_log',
                event_type='log',
                source_type='resource',
                resource=('task.%s.%s.%s' %
                          (celery_task_name, account.user, task_name)),
                output=task_output,
                state=status)

            #logger.info('Send Event: %s' % event)
            key = cevent.get_routingkey(event)

            amqp.publish(event, key, amqp.exchange_name_events)

            logger.info('Amqp event published')

            # Stop AMQP
            amqp.stop()
            amqp.join()
Code Example #12
File: gelf2amqp.py Project: moas/canopsis
    except:
        timestamp = None

    output = message['output']
    resource = message['resource']

    #component = str(gelf['host'])
    component = message['component']

    source_type = 'resource'

    event = cevent.forger(connector='gelf',
                          connector_name=DAEMON_NAME,
                          component=component,
                          resource=resource,
                          timestamp=timestamp,
                          source_type=source_type,
                          event_type='log',
                          state=state,
                          output=output,
                          long_output=long_output)

    event['level'] = gelf['level']
    event['facility'] = gelf['facility']

    logger.debug('Event: %s' % event)

    key = cevent.get_routingkey(event)
    myamqp.publish(event, key, myamqp.exchange_name_events)


########################################################
Code Example #13
File: cps_bench.py Project: J1bz/canopsis
base_component_event = cevent.forger(
    connector="bench",
    connector_name="engine",
    event_type="check",
    source_type="component",
    component="component-",
    state=0,
    state_type=1,
    output="Output",
    long_output="",
    # perf_data =			None,
    perf_data_array=[
        {
            "metric": "metric1",
            "value": 0.25,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
        {
            "metric": "metric2",
            "value": 0.16,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
        {
            "metric": "metric3",
            "value": 0.12,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
        {
            "metric": "metric4",
            "value": 0.12,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
        {
            "metric": "metric5",
            "value": 0.12,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
        {
            "metric": "metric6",
            "value": 0.12,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
        {
            "metric": "metric7",
            "value": 0.12,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
        {
            "metric": "metric8",
            "value": 0.12,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
        {
            "metric": "metric9",
            "value": 0.12,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
        {
            "metric": "metric10",
            "value": 0.12,
            "unit": None,
            "min": None,
            "max": None,
            "warn": None,
            "crit": None,
            "type": "GAUGE",
        },
    ]
    # display_name =		""
)
Code Example #14
File: cselector.py Project: Httqm/canopsis
	def event(self):
		### Transform Selector to Canopsis Event
		self.logger.debug("To Event:")
		
		# Get state
		(states, state, state_type) = self.getState()
		
		# Build output
		total = 0		
		for s in states:
			states[s] = int(states[s])
			total += states[s]
		
		self.logger.debug(" + state: %s" % state)
		self.logger.debug(" + state_type: %s" % state_type)
		
		perf_data_array = []
		long_output = ""
		output = ""
			
		self.logger.debug(" + total: %s" % total)
		
		# Create perfdata array
		output_data = {}
		for i in [0, 1, 2, 3]:
			value = 0
			try:
				value = states[i]
			except:
				pass
			
			metric =  self.sel_metric_name % i
			output_data[metric] = value
			perf_data_array.append({"metric": metric, "value": value, "max": total})
		
		perf_data_array.append({"metric": self.sel_metric_prefix + "total", "value": total})
		
		# Count components and resources
		mfilter = self.makeMfilter()
		if mfilter:
		
			sel_nb_component = self.storage.count(mfilter={'$and': [ mfilter, {'source_type': 'component'}]}, namespace=self.namespace)
			sel_nb_resource = self.storage.count(mfilter={'$and': [ mfilter, {'source_type': 'resource'}]}, namespace=self.namespace)		
			
			if sel_nb_component + sel_nb_resource == total:
				perf_data_array.append({"metric": self.sel_metric_prefix + "component", "value": sel_nb_component, 'max': total})
				perf_data_array.append({"metric": self.sel_metric_prefix + "resource", "value": sel_nb_resource, 'max': total})
			else:
				self.logger.error("Invalid count: component: %s, resource: %s, total: %s" % (sel_nb_component, sel_nb_resource, total))
		
		output_data['total'] = total
	
		# Fill Output template
		self.logger.debug(" + output TPL: %s" % self.output_tpl)
		output = self.output_tpl
		if output_data:
			for key in output_data:
				output = output.replace("{%s}" % key, str(output_data[key]))
		
		display_name = self.data.get("display_name", None)
		
		# Debug
		self.logger.debug(" + Display Name: %s" % display_name)
		self.logger.debug(" + output: %s" % output)
		self.logger.debug(" + long_output: %s" % long_output)
		self.logger.debug(" + perf_data_array: %s" % perf_data_array)
		
		# Build Event
		event = cevent.forger(
			connector = "selector",
			connector_name = "engine",
			event_type = "selector",
			source_type="component",
			component=self.name,
			#resource=None,	
			state=state,
			state_type=state_type,
			output=output,
			long_output=long_output,
			perf_data=None,
			perf_data_array=perf_data_array,
			display_name=display_name
		)
				
		# Extra field
		event["selector_id"] = self._id
		
		# Build RK
		rk = cevent.get_routingkey(event)
		
		# Save RK
		if not self.rk:
			self.logger.debug("Set RK to '%s'" % rk)
			self.storage.update(self._id, {'rk': rk})
			self.rk = rk
				
		# Cache event
		self.last_event = event
				
		return (rk, event)
Code Example #15
File: event.py Project: hesaul/canopsis
        if perf_data_array:
            try:
                perf_data_array = json.loads(perf_data_array)
            except Exception, err:
                logger.error("Impossible to parse 'perf_data_array': %s (%s)" %
                             (perf_data_array, err))

    #------------------------------forging event----------------------------------

    event = cevent.forger(connector=connector,
                          connector_name=connector_name,
                          event_type=event_type,
                          source_type=source_type,
                          component=component,
                          resource=resource,
                          state=int(state),
                          state_type=int(state_type),
                          output=output,
                          long_output=long_output,
                          perf_data=perf_data,
                          perf_data_array=perf_data_array,
                          timestamp=timestamp,
                          display_name=display_name)

    logger.debug(type(perf_data_array))
    logger.debug(perf_data_array)
    logger.debug('The forged event is : ')
    logger.debug(str(event))

    #------------------------------AMQP Part--------------------------------------

    key = cevent.get_routingkey(event)
Code Example #16
File: cevent-Myunittest.py Project: EzanLTD/canopsis
	def test_01(self):
		event = cevent.forger(connector='unittest', connector_name='test1', event_type='log')
		rk = cevent.get_routingkey(event)

		print rk
		print event
Code Example #17
File: cps_bench.py Project: linkdd/canopsis
storage = get_storage(namespace='events', account=caccount(user="******", group="root"))
manager = pyperfstore2.manager(logging_level=logging.INFO)

base_component_event = cevent.forger(
					connector =			'bench',
					connector_name =	"engine",
					event_type =		"check",
					source_type =		"component",
					component =			"component-",
					state =				0,
					state_type =		1,
					output =			"Output",
					long_output =		"",
					#perf_data =			None,
					perf_data_array =	[
						{'metric': 'metric1', 'value': 0.25, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
						{'metric': 'metric2',   'value': 0.16, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
						{'metric': 'metric3',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
						{'metric': 'metric4',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
						{'metric': 'metric5',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
						{'metric': 'metric6',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
						{'metric': 'metric7',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
						{'metric': 'metric8',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
						{'metric': 'metric9',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
						{'metric': 'metric10',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' }
                    ]
					#display_name =		""
				)

base_component_event['latency'] = 0.141
base_component_event['current_attempt'] = 1
Code Example #18
File: event.py Project: vcandeau/canopsis
def send_event(routing_key=None):
	
	account = get_account()
	
	if not check_group_rights(account,group_managing_access):
		return HTTPError(403, 'Insufficient rights')
				
	connector = None
	connector_name = None
	event_type = None
	source_type = None
	component = None
	resource = None
	state = None
	state_type = None
	perf_data = None
	perf_data_array = None
	output = None
	long_output = None
				
	#--------------------explode routing key----------
	if routing_key :
		logger.debug('The routing key is : %s' % str(routing_key))
		
		routing_key = routing_key.split('.')
		if len(routing_key) > 6 or len(routing_key) < 5:
			logger.error('Bad routing key')
			return HTTPError(400, 'Bad routing key')
			
		connector = routing_key[0]
		connector_name = routing_key[1]
		event_type = routing_key[2]
		source_type = routing_key[3]
		component = routing_key[4]
		if len(routing_key) > 5 and routing_key[5]:
			resource = routing_key[5]
	
	
	#-----------------------get params-------------------
	if not connector:
		connector = request.params.get('connector', default=None)
		if not connector :
			logger.error('No connector argument')
			return HTTPError(400, 'Missing connector argument')
			
	if not connector_name:
		connector_name = request.params.get('connector_name', default=None)
		if not connector_name:
			logger.error('No connector name argument')
			return HTTPError(400, 'Missing connector name argument')
			
	if not event_type:
		event_type = request.params.get('event_type', default=None)
		if not event_type:
			logger.error('No event_type argument')
			return HTTPError(400, 'Missing event type argument')
		
	if not source_type:
		source_type = request.params.get('source_type', default=None)
		if not source_type:
			logger.error('No source_type argument')
			return HTTPError(400, 'Missing source type argument')
	
	if not component:
		component = request.params.get('component', default=None)
		if not component:
			logger.error('No component argument')
			return HTTPError(400, 'Missing component argument')
	
	if not resource:
		resource = request.params.get('resource', default=None)
		if not resource:
			logger.error('No resource argument')
			return HTTPError(400, 'Missing resource argument')
		
	if not state:
		state = request.params.get('state', default=None)
		if not state:
			logger.error('No state argument')
			return HTTPError(400, 'Missing state argument')
		
	if not state_type:
		state_type = request.params.get('state_type', default=1)
		
	if not output:
		output = request.params.get('output', default=None)
		
	if not long_output:
		long_output = request.params.get('long_output', default=None)
		
	if not perf_data:
		perf_data = request.params.get('perf_data', default=None)
		
	if not perf_data_array:
		perf_data_array = request.params.get('perf_data_array', default=None)
		#if type(perf_data_array) == 'str':
			#perf_data_array = json.loads(perf_data_array)
		
	#------------------------------forging event----------------------------------

	event = cevent.forger(
				connector = connector,
				connector_name = connector_name,
				event_type = event_type,
				source_type = source_type,
				component = component,
				resource= resource,
				state = int(state),
				state_type = int(state_type),
				output = output,
				long_output = long_output,
				perf_data = perf_data,
				perf_data_array = json.loads(perf_data_array),
			)
	
	logger.debug(type(perf_data_array))
	logger.debug(perf_data_array)
	logger.debug('The forged event is : ')
	logger.debug(str(event))
	
	#------------------------------AMQP Part--------------------------------------
	
	key = cevent.get_routingkey(event)

	amqp.publish(event, key, amqp.exchange_name_events)
		
	logger.debug('Amqp event published')
	
	return {'total':1,'success':True,'data':{'event':event}}
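
The split in the handler above implies the routing key layout connector.connector_name.event_type.source_type.component with an optional trailing resource segment, in the same order the fields are passed to cevent.forger. A small sketch of turning a routing key back into a field mapping, assuming only that layout (the explode_routingkey helper is hypothetical):

def explode_routingkey(rk):
    # Expected layout: connector.connector_name.event_type.source_type.component[.resource]
    parts = rk.split('.')
    if len(parts) < 5 or len(parts) > 6:
        raise ValueError("Bad routing key: %s" % rk)
    fields = ['connector', 'connector_name', 'event_type',
              'source_type', 'component', 'resource']
    # zip() stops at the shorter sequence, so 'resource' is simply absent
    # when the key has only five segments.
    return dict(zip(fields, parts))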
Code Example #19
    def beat(self):
        beat_start = time.time()

        self.clean_consolidations()

        non_loaded_records = self.storage.find(
            {
                '$and': [{
                    'crecord_type': 'consolidation'
                }, {
                    'enable': True
                }, {
                    'loaded': {
                        '$ne': True
                    }
                }]
            },
            namespace="object")

        if len(non_loaded_records) > 0:
            for item in non_loaded_records:
                self.logger.info("New consolidation found '%s', load" %
                                 item.name)
                self.load(item)

        for _id in self.records.keys():
            exists = self.storage.find_one({'_id': _id})
            if not exists:
                self.logger.info("%s deleted, remove from record list" %
                                 self.records[_id]['crecord_name'])
                del (self.records[_id])

        for record in self.records.values():
            consolidation_last_timestamp = self.timestamps[record.get('_id')]

            aggregation_interval = record.get('aggregation_interval',
                                              self.default_interval)
            current_interval = int(time.time()) - consolidation_last_timestamp

            self.logger.debug(
                'current interval: %s , consolidation interval: %s' %
                (current_interval, aggregation_interval))
            if current_interval >= aggregation_interval:
                self.logger.debug('Compute new consolidation for: %s' %
                                  record.get('crecord_name', 'No name found'))

                output_message = None
                mfilter = json.loads(record.get('mfilter'))
                mfilter = {
                    '$and': [mfilter, {
                        'me': {
                            '$nin': internal_metrics
                        }
                    }]
                }
                #self.logger.debug('the mongo filter is: %s' % mfilter)
                metric_list = self.manager.store.find(mfilter=mfilter)
                self.logger.debug('length of matching metric list is: %i' %
                                  metric_list.count())

                aggregation_method = record.get('aggregation_method', False)
                consolidation_methods = record.get('consolidation_method',
                                                   False)

                if not isinstance(consolidation_methods, list):
                    consolidation_methods = [consolidation_methods]

                mType = mUnit = mMin = mMax = None
                values = []

                for index, metric in enumerate(metric_list):
                    if index == 0:
                        #mType = metric.get('t')
                        mMin = metric.get('mi')
                        mMax = metric.get('ma')
                        mUnit = metric.get('u')
                        if 'sum' in consolidation_methods:
                            maxSum = mMax
                    else:
                        if metric.get('mi') < mMin:
                            mMin = metric.get('mi')
                        if metric.get('ma') > mMax:
                            mMax = metric.get('ma')
                        if 'sum' in consolidation_methods:
                            maxSum += metric.get('ma')
                        if metric.get('u') != mUnit:
                            output_message = "warning : too many units"

                    self.logger.debug(' + Get points for: %s , %s , %s, %s' %
                                      (metric.get('_id'), metric.get('co'),
                                       metric.get('re', ''), metric.get('me')))

                    if int(
                            time.time()
                    ) - aggregation_interval <= consolidation_last_timestamp + 60:
                        tstart = consolidation_last_timestamp
                        #self.logger.debug('   +   Use original tstart: %i' % consolidation_last_timestamp)
                    else:
                        tstart = int(time.time()) - aggregation_interval
                        #self.logger.debug('   +   new tstart: %i' % tstart)

                    self.logger.debug(
                        '   +   from: %s to %s' %
                        (datetime.fromtimestamp(tstart).strftime(
                            '%Y-%m-%d %H:%M:%S'),
                         datetime.fromtimestamp(
                             time.time()).strftime('%Y-%m-%d %H:%M:%S')))

                    list_points = self.manager.get_points(
                        tstart=tstart,
                        tstop=time.time(),
                        _id=metric.get('_id'))
                    self.logger.debug(
                        '   +   Values on interval: %s' %
                        ' '.join([str(value[1]) for value in list_points]))

                    if list_points:
                        fn = self.get_math_function(aggregation_method)
                        if fn:
                            point_value = fn(
                                [value[1] for value in list_points])
                        else:
                            point_value = list_points[len(list_points) - 1][1]
                        values.append(point_value)

                self.logger.debug(
                    '   +   Summary of horizontal aggregation "%s":' %
                    aggregation_method)
                self.logger.debug(values)

                if not consolidation_methods:
                    self.storage.update(record.get('_id'), {
                        'output_engine':
                        "No second aggregation function given"
                    })
                    return

                if len(values) == 0:
                    self.logger.debug('  +  No values')
                    self.storage.update(
                        record.get('_id'), {
                            'output_engine': "No input values",
                            'consolidation_ts': int(time.time())
                        })
                    self.timestamps[record.get('_id')] = int(time.time())
                    return

                list_perf_data = []
                for function_name in consolidation_methods:
                    fn = self.get_math_function(function_name)

                    if not fn:
                        self.logger.debug(
                            'No function given for second aggregation')
                        self.storage.update(
                            record.get('_id'), {
                                'output_engine':
                                "No function given for second aggregation"
                            })
                        return

                    if len(values) == 0:
                        if not output_message:
                            self.storage.update(record.get('_id'),
                                                {'output_engine': "No result"})
                        else:
                            self.storage.update(
                                record.get('_id'), {
                                    'output_engine':
                                    "there are issues : %s warning : No result"
                                    % output_message
                                })

                    value = fn(values)

                    self.logger.debug(' + Result of aggregation for "%s": %f' %
                                      (function_name, value))

                    list_perf_data.append({
                        'metric':
                        function_name,
                        'value':
                        roundSignifiantDigit(value, 3),
                        "unit":
                        mUnit,
                        'max':
                        maxSum if function_name == 'sum' else mMax,
                        'min':
                        mMin,
                        'type':
                        'GAUGE'
                    })

                point_timestamp = int(time.time()) - current_interval / 2

                event = cevent.forger(
                    connector="consolidation",
                    connector_name="engine",
                    event_type="consolidation",
                    source_type="resource",
                    component=record['component'],
                    resource=record['resource'],
                    state=0,
                    timestamp=point_timestamp,
                    state_type=1,
                    output="Consolidation: '%s' successfully computed" %
                    record.get('crecord_name', 'No name'),
                    long_output="",
                    perf_data=None,
                    perf_data_array=list_perf_data,
                    display_name=record['crecord_name'])
                rk = cevent.get_routingkey(event)
                self.counter_event += 1
                self.amqp.publish(event, rk, self.amqp.exchange_name_events)

                self.logger.debug('The following event was sent:')
                self.logger.debug(event)

                if not output_message:
                    engine_output = '%s : Computation done. Next Computation in %s s' % (
                        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        str(aggregation_interval))
                    self.storage.update(record.get('_id'),
                                        {'output_engine': engine_output})
                else:
                    engine_output = '%s : Computation done but there are issues : "%s" . Next Computation in %s s' % (
                        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        output_message, str(aggregation_interval))
                    self.storage.update(record.get('_id'),
                                        {'output_engine': engine_output})

                self.storage.update(record.get('_id'),
                                    {'consolidation_ts': int(time.time())})
                self.timestamps[record.get('_id')] = int(time.time())

        self.counter_worktime += time.time() - beat_start
Code Example #20
File: snmp2amqp.py Project: EzanLTD/canopsis
def parse_trap(mib, trap_oid, agent, varBinds):

	notification = mib.get_notification(trap_oid)

	## Parse trap
	if notification:
		try:
			logger.info("[%s][%s] %s-%s: %s (%s)" % (agent, mib.name, notification['SEVERITY'], notification['STATE'], notification['TYPE'], trap_oid))
		except Exception, err:
			logger.error("Impossible to parse notification, check mib conversion ...")
			return None
		
		arguments = notification['ARGUMENTS']
		summary	  = notification['SUMMARY']
		
		nb_string_arg = summary.count('%s')
		
		if varBinds and nb_string_arg:
			for i in range(nb_string_arg):
				logger.debug(" + Get value %s" % i)
				value = None
				oid, components = varBinds[i]
				component = components[0]
				if component != None:
					#value = component._componentValues[0]
					for info in component._componentValues:
						if info:
							value = str(info)

					logger.debug("   + %s" % value)

				if value:
					summary = summary.replace('%s', value, 1)
							

		logger.info(" + Summary: %s" % summary)

		component = agent
		resource = mib.name
		source_type = 'resource'
		state = severity_to_state[notification['SEVERITY']]
		output = notification['TYPE']
		long_output = summary

		## convert trap to event
		event = cevent.forger(
				connector='snmp',
				connector_name=DAEMON_NAME,
				component=component,
				resource=resource,
				timestamp=None,
				source_type=source_type,
				event_type='trap',
				state=state,
				output=output,
				long_output=long_output)

		#own fields
		event['snmp_severity'] = notification['SEVERITY']
		event['snmp_state'] = notification['STATE']
		event['snmp_oid'] = trap_oid

		logger.debug("Event: %s" % event)
		## send event on amqp
		key = cevent.get_routingkey(event)						
		myamqp.publish(event, key, myamqp.exchange_name_events)
Code Example #21
File: snmp2amqp.py Project: moas/canopsis
def parse_trap(mib, trap_oid, agent, varBinds):

    notification = mib.get_notification(trap_oid)

    ## Parse trap
    if notification:
        try:
            logger.info(
                "[%s][%s] %s-%s: %s (%s)" %
                (agent, mib.name, notification['SEVERITY'],
                 notification['STATE'], notification['TYPE'], trap_oid))
        except Exception, err:
            logger.error(
                "Impossible to parse notification, check mib conversion ...")
            return None

        arguments = notification['ARGUMENTS']
        summary = notification['SUMMARY']

        nb_string_arg = summary.count('%s')

        if varBinds and nb_string_arg:
            for i in range(nb_string_arg):
                logger.debug(" + Get value %s" % i)
                value = None
                oid, components = varBinds[i]
                component = components[0]
                if component != None:
                    #value = component._componentValues[0]
                    for info in component._componentValues:
                        if info:
                            value = str(info)

                    logger.debug("   + %s" % value)

                if value:
                    summary = summary.replace('%s', value, 1)

        logger.info(" + Summary: %s" % summary)

        component = agent
        resource = mib.name
        source_type = 'resource'
        state = severity_to_state[notification['SEVERITY']]
        output = notification['TYPE']
        long_output = summary

        ## convert trap to event
        event = cevent.forger(connector='snmp',
                              connector_name=DAEMON_NAME,
                              component=component,
                              resource=resource,
                              timestamp=None,
                              source_type=source_type,
                              event_type='trap',
                              state=state,
                              output=output,
                              long_output=long_output)

        #own fields
        event['snmp_severity'] = notification['SEVERITY']
        event['snmp_state'] = notification['STATE']
        event['snmp_oid'] = trap_oid

        logger.debug("Event: %s" % event)
        ## send event on amqp
        key = cevent.get_routingkey(event)
        myamqp.publish(event, key, myamqp.exchange_name_events)
Code Example #22
    def event(self):

        ### Transform Selector to Canopsis Event
        self.logger.debug("To Event:")

        # Get state
        (states, state, state_type) = self.getState()

        # Build output
        total = 0
        for s in states:
            states[s] = int(states[s])
            total += states[s]

        self.logger.debug(" + state: %s" % state)
        self.logger.debug(" + state_type: %s" % state_type)

        perf_data_array = []
        long_output = ""
        output = ""

        self.logger.debug(" + total: %s" % total)

        # Create perfdata array
        output_data = {}
        for i in [0, 1, 2, 3]:
            value = 0
            try:
                value = states[i]
            except:
                pass

            metric = self.sel_metric_name % i
            output_data[metric] = value
            perf_data_array.append({
                "metric": metric,
                "value": value,
                "max": total
            })

        perf_data_array.append({
            "metric": self.sel_metric_prefix + "total",
            "value": total
        })

        output_data['total'] = total

        # Fill Output template
        self.logger.debug(" + output TPL: %s" % self.output_tpl)
        output = self.output_tpl
        if output_data:
            for key in output_data:
                output = output.replace("{%s}" % key, str(output_data[key]))

        display_name = self.data.get("display_name", None)

        # Debug
        self.logger.debug(" + Display Name: %s" % display_name)
        self.logger.debug(" + output: %s" % output)
        self.logger.debug(" + long_output: %s" % long_output)
        self.logger.debug(" + perf_data_array: %s" % perf_data_array)

        # Build Event
        event = cevent.forger(
            connector="selector",
            connector_name="engine",
            event_type="selector",
            source_type="component",
            component=self.name,
            #resource=None,
            state=state,
            state_type=state_type,
            output=output,
            long_output=long_output,
            perf_data=None,
            perf_data_array=perf_data_array,
            display_name=display_name)

        # Extra field
        event["selector_id"] = str(self._id)

        # Build RK
        rk = cevent.get_routingkey(event)

        # Save RK
        if not self.rk:
            self.logger.debug("Set RK to '%s'" % rk)
            self.storage.update(self._id, {'rk': rk})
            self.rk = rk

        # Cache event
        self.last_event = event

        return (rk, event)
Code Example #23
File: functional-test.py Project: J1bz/canopsis
from crecord import crecord
from caccount import caccount
from cwebservices import cwebservices
from ctools import parse_perfdata
import pyperfstore2

from subprocess import Popen

logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(name)s %(levelname)s %(message)s")

event = cevent.forger(
    connector="canopsis",
    connector_name="unittest",
    event_type="check",
    source_type="component",
    component="test1",
    state=0,
    output="Output_1",
    perf_data="mymetric=1s;10;20;0;30",
    tags=["check", "component", "test1", "unittest"],
)
rk = cevent.get_routingkey(event)

myamqp = None
storage = None
event_alert = None
perfstore = None


def on_alert(body, message):
    print "Alert: %s" % body
Code Example #24
File: topology.py Project: EzanLTD/canopsis
	def beat(self):
		loaded_topo = self.topo_load()
		
		if loaded_topo or self.doBeat or int(time.time()) >= (self.lastBeat + self.normal_beat_interval):
			
			self.lastBeat = int(time.time())
			
			if loaded_topo:
				self.ids = []
				
				# Parse topo
				for topo in self.topos:				
					self.logger.debug("Parse topo '%s': %s Nodes with %s Conns" % (topo['crecord_name'], len(topo['nodes']), len(topo['conns'])))
					
					topo['ids'] = self.topo_extractIds(topo)
			
					topo['nodesById'] = {}
					
					for key in topo['nodes']:
						node = topo['nodes'][key]
						
						_id = node['_id']
						
						if not node.get('calcul_state', None):
							if node.get('event_type', None) == 'operator':
								node['calcul_state'] = self.topo_getOperator_fn(_id)
								_id = "%s-%s" % (_id, int(random() * 10000))
								node['_id'] = _id
							else:
								node['calcul_state'] = self.default_Operator_fn
							
						topo['nodesById'][_id] = node
						node['childs'] = []
								
					self.logger.debug("Fill node's childs")
					self.topo_fillChilds(topo)
				
			
			# Get all states of all topos
			self.stateById = {}
			records = self.storage.find(mfilter={'_id': {'$in': self.ids}}, mfields=['state', 'state_type', 'previous_state'], namespace='events')
			for record in records:
				self.stateById[record['_id']] = {
					'state': record['state'],
					'state_type': record.get('state_type', 1),
					'previous_state': record.get('previous_state', record['state'])
				}
			
			# Get state by topo
			for topo in self.topos:
				## Parse tree for calcul state
				self.logger.debug(" + Calcul state:")
				states_info = self.topo_getState(topo)

				self.logger.debug("'%s': State: %s" % (topo['crecord_name'], states_info))
				self.storage.update(topo['_id'], {'state': states_info['state']})
				
				event = cevent.forger(
					connector =			NAME,
					connector_name =	"engine",
					event_type =		"topology",
					source_type =		"component",
					component =			topo['crecord_name'],
					state =				states_info['state'],
					state_type =		states_info['state_type'],
					output =			"",
					long_output =		"",
					#perf_data =			None,
					#perf_data_array =	[],
					display_name =		topo.get('display_name', None)
				)
				
				# Extra fields			
				event['nestedTree'] = self.topo_dump4Ui(topo)
		
				rk = cevent.get_routingkey(event)
				
				self.logger.debug("Publish event on %s" % rk)
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)
			
			self.doBeat = False
Code Example #25
    def beat(self):
        loaded_topo = self.topo_load()

        if loaded_topo or self.doBeat or int(
                time.time()) >= (self.lastBeat + self.normal_beat_interval):

            self.lastBeat = int(time.time())

            if loaded_topo:
                self.ids = []

                # Parse topo
                for topo in self.topos:
                    self.logger.debug(
                        "Parse topo '%s': %s Nodes with %s Conns" %
                        (topo['crecord_name'], len(
                            topo['nodes']), len(topo['conns'])))

                    topo['ids'] = self.topo_extractIds(topo)

                    topo['nodesById'] = {}

                    for key in topo['nodes']:
                        node = topo['nodes'][key]

                        _id = node['_id']

                        if not node.get('calcul_state', None):
                            if node.get('event_type', None) == 'operator':
                                node[
                                    'calcul_state'] = self.topo_getOperator_fn(
                                        _id)
                                _id = "%s-%s" % (_id, int(random() * 10000))
                                node['_id'] = _id
                            else:
                                node['calcul_state'] = self.default_Operator_fn

                        topo['nodesById'][_id] = node
                        node['childs'] = []

                    self.logger.debug("Fill node's childs")
                    self.topo_fillChilds(topo)

            # Get all states of all topos
            self.stateById = {}
            records = self.storage.find(
                mfilter={'_id': {
                    '$in': self.ids
                }},
                mfields=['state', 'state_type', 'previous_state'],
                namespace='events')
            for record in records:
                self.stateById[record['_id']] = {
                    'state': record['state'],
                    'state_type': record.get('state_type', 1),
                    'previous_state': record.get('previous_state',
                                                 record['state'])
                }

            # Get state by topo
            for topo in self.topos:
                ## Parse tree for calcul state
                self.logger.debug(" + Calcul state:")
                states_info = self.topo_getState(topo)

                self.logger.debug("'%s': State: %s" %
                                  (topo['crecord_name'], states_info))
                self.storage.update(topo['_id'],
                                    {'state': states_info['state']})

                event = cevent.forger(
                    connector=NAME,
                    connector_name="engine",
                    event_type="topology",
                    source_type="component",
                    component=topo['crecord_name'],
                    state=states_info['state'],
                    state_type=states_info['state_type'],
                    output="",
                    long_output="",
                    #perf_data =			None,
                    #perf_data_array =	[],
                    display_name=topo.get('display_name', None))

                # Extra fields
                event['nestedTree'] = self.topo_dump4Ui(topo)

                rk = cevent.get_routingkey(event)

                self.logger.debug("Publish event on %s" % rk)
                self.amqp.publish(event, rk, self.amqp.exchange_name_events)

            self.doBeat = False
Code Example #26
File: event.py Project: EzanLTD/canopsis
				logger.error("Impossible to parse 'perf_data_array': %s (%s)" % (perf_data_array, err))

		if not isinstance(perf_data_array, list):
			perf_data_array = []
		
	#------------------------------forging event----------------------------------

	event = cevent.forger(
				connector = connector,
				connector_name = connector_name,
				event_type = event_type,
				source_type = source_type,
				component = component,
				resource= resource,
				state = int(state),
				state_type = int(state_type),
				output = output,
				long_output = long_output,
				perf_data = perf_data,
				perf_data_array = perf_data_array,
				timestamp = timestamp,
				display_name = display_name,
				tags = tags
			)
	
	logger.debug(type(perf_data_array))
	logger.debug(perf_data_array)
	logger.debug('The forged event is : ')
	logger.debug(str(event))
	
	#------------------------------AMQP Part--------------------------------------
Code Example #27
class engine(cengine):
    def __init__(self, *args, **kargs):
        cengine.__init__(self, name=NAME, *args, **kargs)

    def create_amqp_queue(self):
        self.amqp.add_queue(self.amqp_queue, ['collectd'],
                            self.on_collectd_event,
                            "amq.topic",
                            auto_delete=False)

    def on_collectd_event(self, body, msg):
        start = time.time()
        error = False

        collectd_info = body.split(' ')

        if len(collectd_info) > 0:
            self.logger.debug(body)
            action = collectd_info[0]
            self.logger.debug(" + Action: %s" % action)

            if len(collectd_info) == 4 and action == "PUTVAL":
                cnode = collectd_info[1].split("/")
                component = cnode[0]
                resource = cnode[1]
                metric = cnode[2]
                options = collectd_info[2]
                values = collectd_info[3]

                self.logger.debug(" + Options: %s" % options)
                self.logger.debug(" + Component: %s" % component)
                self.logger.debug(" + Resource: %s" % resource)
                self.logger.debug(" + Metric: %s" % metric)
                self.logger.debug(" + Raw Values: %s" % values)

                values = values.split(":")

                perf_data_array = []

                ctype = None
                try:
                    ## Known metric
                    ctype = types[metric]
                except:
                    try:
                        ctype = types[metric.split('-')[0]]
                        metric = metric.split('-')[1]
                    except Exception, err:
                        self.logger.error("Invalid format '%s' (%s)" %
                                          (body, err))
                        return None

                try:
                    timestamp = int(Str2Number(values[0]))
                    values = values[1:]
                    self.logger.debug("   + Timestamp: %s" % timestamp)
                    self.logger.debug("   + Values: %s" % values)

                except Exception, err:
                    self.logger.error(
                        "Impossible to get timestamp or values (%s)" % err)
                    return None

                self.logger.debug(" + metric: %s" % metric)
                self.logger.debug(" + ctype: %s" % ctype)
                if ctype:
                    try:
                        i = 0
                        for value in values:
                            name = ctype[i]['name']
                            unit = ctype[i]['unit']
                            vmin = ctype[i]['min']
                            vmax = ctype[i]['max']

                            if vmin == 'U':
                                vmin = None

                            if vmax == 'U':
                                vmax = None

                            if name == "value":
                                name = metric

                            if metric != name:
                                name = "%s-%s" % (metric, name)

                            data_type = ctype[i]['type']

                            value = Str2Number(value)

                            self.logger.debug("     + %s" % name)
                            self.logger.debug("       -> %s (%s)" %
                                              (value, data_type))
                            i += 1

                            perf_data_array.append({
                                'metric': name,
                                'value': value,
                                'type': data_type,
                                'unit': unit,
                                'min': vmin,
                                'max': vmax
                            })

                    except Exception, err:
                        self.logger.error(
                            "Impossible to parse values '%s' (%s)" %
                            (values, err))

                if perf_data_array:
                    self.logger.debug(' + perf_data_array: %s',
                                      perf_data_array)

                    event = cevent.forger(connector='collectd',
                                          connector_name='collectd2event',
                                          component=component,
                                          resource=resource,
                                          timestamp=None,
                                          source_type='resource',
                                          event_type='check',
                                          state=0,
                                          perf_data_array=perf_data_array)

                    rk = cevent.get_routingkey(event)

                    self.logger.debug("Send Event: %s" % event)

                    ## send event on amqp
                    self.amqp.publish(event, rk,
                                      self.amqp.exchange_name_events)
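
The `types[metric]` lookup above maps a collectd type name to its data-source definitions (name, unit, min/max and counter type), with 'U' meaning an unbounded limit, as in collectd's types.db. A minimal sketch of such a mapping follows; the entries are assumptions for illustration and may differ from the table actually shipped with Canopsis:

# Illustrative subset of a collectd types.db-style mapping, matching the
# fields read above (ctype[i]['name'], ['unit'], ['min'], ['max'], ['type']).
# Entries are assumptions for the example, not the real Canopsis table.
types = {
    'load': [
        {'name': 'shortterm', 'unit': None, 'min': 0, 'max': 'U', 'type': 'GAUGE'},
        {'name': 'midterm', 'unit': None, 'min': 0, 'max': 'U', 'type': 'GAUGE'},
        {'name': 'longterm', 'unit': None, 'min': 0, 'max': 'U', 'type': 'GAUGE'},
    ],
    'if_octets': [
        {'name': 'rx', 'unit': 'o', 'min': 0, 'max': 'U', 'type': 'DERIVE'},
        {'name': 'tx', 'unit': 'o', 'min': 0, 'max': 'U', 'type': 'DERIVE'},
    ],
}

# With such a table, a collectd line like
#   PUTVAL server1/load/load interval=10 1328965000:0.25:0.16:0.12
# splits into component='server1', resource='load', metric='load',
# timestamp=1328965000 and one perf_data entry per data source above.
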
Code example #28
0
File: cselector.py Project: vcandeau/canopsis
	def event(self):
		### Transform Selector to Canopsis Event
		self.logger.debug("To Event:")
		
		# Get state
		(states, state, state_type) = self.getState()
		
		# Build output
		total = 0		
		for s in states:
			states[s] = int(states[s])
			total += states[s]
		
		self.logger.debug(" + state: %s" % state)
		self.logger.debug(" + state_type: %s" % state_type)
		
		perf_data_array = []
		long_output = ""
		output = ""
			
		self.logger.debug(" + total: %s" % total)
		
		# Create perfdata array
		output_data = {}
		for i in [0, 1, 2, 3]:
			value = 0
			try:
				value = states[i]
			except:
				pass
			
			metric =  self.sel_metric_name % i
			output_data[metric] = value
			perf_data_array.append({"metric": metric, "value": value, "max": total})
			
		output_data['total'] = total
	
		# Fill Output template
		self.logger.debug(" + output TPL: %s" % self.output_tpl)
		output = self.output_tpl
		if output_data:
			for key in output_data:
				output = output.replace("{%s}" % key, str(output_data[key]))
		
		# Debug
		self.logger.debug(" + output: %s" % output)
		self.logger.debug(" + long_output: %s" % long_output)
		self.logger.debug(" + perf_data_array: %s" % perf_data_array)
		
		# Build Event
		event = cevent.forger(
			connector = "selector",
			connector_name = "engine",
			event_type = "selector",
			source_type="component",
			component=self.name,
			#resource=None,	
			state=state,
			state_type=state_type,
			output=output,
			long_output=long_output,
			perf_data=None,
			perf_data_array=perf_data_array
		)
				
		# Extra field
		event["selector_id"] = self._id
		
		# Build RK
		rk = cevent.get_routingkey(event)
		
		# Save RK
		if not self.rk:
			self.logger.debug("Set RK to '%s'" % rk)
			self.storage.update(self._id, {'rk': rk})
			self.rk = rk
				
		# Cache event
		self.last_event = event
				
		return (rk, event)
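
The selector builds its output by plain placeholder substitution: each `{key}` in the configured `output_tpl` is replaced with the corresponding entry of `output_data`, whose keys come from `self.sel_metric_name % i` plus `total`. Below is a small self-contained sketch of that substitution; the template text and the `cps_sel_state_%s` metric-name pattern are assumptions for the example, the real selector reads both from its configuration record:

# Standalone sketch of the output template filling used above.
sel_metric_name = 'cps_sel_state_%s'   # assumed pattern for illustration
states = {0: 7, 2: 1}                  # e.g. 7 sources OK, 1 critical

total = sum(states.values())
output_data = {'total': total}
for i in [0, 1, 2, 3]:
    output_data[sel_metric_name % i] = states.get(i, 0)

output_tpl = ("Ok: {cps_sel_state_0}, Warning: {cps_sel_state_1}, "
              "Critical: {cps_sel_state_2}, Unknown: {cps_sel_state_3} / {total}")

output = output_tpl
for key in output_data:
    output = output.replace("{%s}" % key, str(output_data[key]))

print(output)   # Ok: 7, Warning: 0, Critical: 1, Unknown: 0 / 8
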
Code example #29
0
File: functional-test.py Project: vcandeau/canopsis
from camqp import camqp
from cstorage import cstorage
from crecord import crecord
from caccount import caccount
from pyperfstore import node
from pyperfstore import mongostore
from cwebservices import cwebservices
from ctools import parse_perfdata

from subprocess import Popen

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)s %(levelname)s %(message)s',
                    )

event = cevent.forger(connector='canopsis',
                      connector_name='unittest',
                      event_type='check',
                      source_type="component",
                      component="test1",
                      state=0,
                      output="Output_1",
                      perf_data="mymetric=1s;10;20;0;30",
                      tags=['check', 'component', 'test1', 'unittest'])
rk = cevent.get_routingkey(event)

myamqp = None
storage = None
perfstore = None
event_alert = None

def on_alert(body, message):
	print "Alert: %s" % body
	mrk = message.delivery_info['routing_key']
	if mrk == rk:
		global event_alert
		event_alert = body
	
def clean():
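
The `perf_data` string passed to the forger above ("mymetric=1s;10;20;0;30") follows the Nagios plugin performance-data format, `label=value[unit];warn;crit;min;max`, which is why `parse_perfdata` is imported from ctools. As a rough illustration of that format only, and not the actual ctools implementation, a single token could be decomposed like this:

import re

# Illustrative parser for one Nagios-style perfdata token such as
# "mymetric=1s;10;20;0;30" (label=value[unit];warn;crit;min;max).
# This is a sketch, not the real ctools.parse_perfdata.
def parse_one_perfdata(token):
    label, data = token.split('=', 1)
    fields = data.split(';')
    match = re.match(r'^([-+]?[0-9.]+)(.*)$', fields[0])
    value, unit = float(match.group(1)), match.group(2) or None

    def num(i):
        return float(fields[i]) if len(fields) > i and fields[i] != '' else None

    return {'metric': label, 'value': value, 'unit': unit,
            'warn': num(1), 'crit': num(2), 'min': num(3), 'max': num(4)}

print(parse_one_perfdata("mymetric=1s;10;20;0;30"))
# {'metric': 'mymetric', 'value': 1.0, 'unit': 's',
#  'warn': 10.0, 'crit': 20.0, 'min': 0.0, 'max': 30.0}
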
Code example #30
0
File: cps_bench.py Project: Httqm/canopsis
                    )

logger = logging.getLogger("bench")
amqp = camqp()

storage = get_storage(namespace='events', account=caccount(user="******", group="root"))

base_component_event = cevent.forger(
					connector =			'bench',
					connector_name =	"engine",
					event_type =		"check",
					source_type =		"component",
					component =			"component-",
					state =				0,
					state_type =		1,
					output =			"Output",
					long_output =		"",
					#perf_data =			None,
					#perf_data_array =	[
					#	{'metric': 'shortterm', 'value': 0.25, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
					#	{'metric': 'midterm',   'value': 0.16, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
					#	{'metric': 'longterm',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' }
                    #]
					#display_name =		""
				)

base_resource_event = cevent.forger(
					connector =			'bench',
					connector_name =	"engine",
					event_type =		"check",
					source_type =		"resource",
					component =			"component-",
Code example #31
0
File: cps_bench.py Project: hesaul/canopsis
logger = logging.getLogger("bench")
amqp = camqp()

storage = get_storage(namespace='events',
                      account=caccount(user="******", group="root"))

base_component_event = cevent.forger(
    connector='bench',
    connector_name="engine",
    event_type="check",
    source_type="component",
    component="component-",
    state=0,
    state_type=1,
    output="Output",
    long_output="",
    #perf_data =			None,
    #perf_data_array =	[
    #	{'metric': 'shortterm', 'value': 0.25, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
    #	{'metric': 'midterm',   'value': 0.16, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' },
    #	{'metric': 'longterm',  'value': 0.12, 'unit': None, 'min': None, 'max': None, 'warn': None, 'crit': None, 'type': 'GAUGE' }
    #]
    #display_name =		""
)

base_resource_event = cevent.forger(
    connector='bench',
    connector_name="engine",
    event_type="check",
    source_type="resource",
    component="component-",
Code example #32
0
File: sla.py Project: moas/canopsis
        output = sla_output_tpl
        if output_data:
            for key in output_data:
                output = output.replace("{%s}" % key, str(output_data[key]))

        self.logger.debug("     + Output:    %s" % output)
        self.logger.debug("     + Perfdata:  %s" % perf_data_array)

        # Send AMQP Event
        event = cevent.forger(connector="sla",
                              connector_name="engine",
                              event_type="sla",
                              source_type="resource",
                              component=config['name'],
                              resource="sla",
                              state=state,
                              state_type=1,
                              output=output,
                              long_output="",
                              perf_data=None,
                              perf_data_array=perf_data_array,
                              display_name=config.get('display_name', None))

        # Extra fields
        event['selector_id'] = config['_id']
        event['selector_rk'] = config['rk']

        rk = self.get_rk(config['name'])

        self.logger.debug("Publish event on %s" % rk)
        self.amqp.publish(event, rk, self.amqp.exchange_name_events)
Code example #33
0
File: cengine.py Project: moas/canopsis
    def _beat(self):
        self.logger.debug("Beat: %s event(s), %s error" %
                          (self.counter_event, self.counter_error))
        now = int(time.time())

        if self.last_stat + 60 <= now:
            self.logger.debug(" + Send stats")
            self.last_stat = now

            evt_per_sec = 0
            sec_per_evt = 0

            if self.counter_event:
                evt_per_sec = float(self.counter_event) / self.beat_interval
                self.logger.debug(" + %0.2f event(s)/seconds" % evt_per_sec)

            if self.counter_worktime and self.counter_event:
                sec_per_evt = self.counter_worktime / self.counter_event
                self.logger.debug(" + %0.5f seconds/event" % sec_per_evt)

            ## Submit event
            if self.send_stats_event and self.counter_event != 0:
                state = 0

                if sec_per_evt > self.thd_warn_sec_per_evt:
                    state = 1

                if sec_per_evt > self.thd_crit_sec_per_evt:
                    state = 2

                perf_data_array = [
                    {
                        'retention': self.perfdata_retention,
                        'metric': 'cps_evt_per_sec',
                        'value': round(evt_per_sec, 2),
                        'unit': 'evt'
                    },
                    {
                        'retention': self.perfdata_retention,
                        'metric': 'cps_sec_per_evt',
                        'value': round(sec_per_evt, 5),
                        'unit': 's',
                        'warn': self.thd_warn_sec_per_evt,
                        'crit': self.thd_crit_sec_per_evt
                    },
                ]

                self.logger.debug(" + State: %s" % state)

                event = cevent.forger(connector="cengine",
                                      connector_name="engine",
                                      event_type="check",
                                      source_type="resource",
                                      resource=self.amqp_queue,
                                      state=state,
                                      state_type=1,
                                      output="%0.2f evt/sec, %0.5f sec/evt" %
                                      (evt_per_sec, sec_per_evt),
                                      perf_data_array=perf_data_array)

                rk = cevent.get_routingkey(event)
                self.amqp.publish(event, rk, self.amqp.exchange_name_events)

            self.counter_error = 0
            self.counter_event = 0
            self.counter_worktime = 0

        try:
            self.beat()
        except Exception, err:
            self.logger.error("Beat raise exception: %s" % err)
            traceback.print_exc(file=sys.stdout)
Code example #34
0
File: sla.py Project: vcandeau/canopsis
	def calcul_state_by_timewindow(self, _id, config, slanode):
		rk  = self.get_rk(config['name'])
		
		self.logger.debug("Calcul state by timewindow")
		self.logger.debug(" + Get States of %s (%s)" % (_id, rk))

		sla_timewindow = config.get('sla_timewindow', self.default_sla_timewindow) # 1 day
		
		thd_warn_sla_timewindow = config.get('thd_warn_sla_timewindow', self.thd_warn_sla_timewindow)
		thd_crit_sla_timewindow = config.get('thd_crit_sla_timewindow', self.thd_crit_sla_timewindow)
		
		sla_output_tpl = config.get('sla_output_tpl', self.default_sla_output_tpl)
		
		if sla_output_tpl == "":
			sla_output_tpl = self.default_sla_output_tpl
		
		# Prevent empty string
		if not thd_warn_sla_timewindow:
			thd_warn_sla_timewindow = self.thd_warn_sla_timewindow
		if not thd_crit_sla_timewindow:
			thd_crit_sla_timewindow = self.thd_crit_sla_timewindow
			
		thd_warn_sla_timewindow = float(thd_warn_sla_timewindow)
		thd_crit_sla_timewindow = float(thd_crit_sla_timewindow)
		
		#consider unknown time
		sla_timewindow_doUnknown = config.get('sla_timewindow_doUnknown', True)

		stop = int(time.time())
		start = stop - sla_timewindow
		
		self.logger.debug(" + output TPL:     %s" % sla_output_tpl)
		self.logger.debug(" + Thd Warning:    %s" % thd_warn_sla_timewindow)
		self.logger.debug(" + Thd Critical:   %s" % thd_crit_sla_timewindow)
		self.logger.debug(" + do Unknown:     %s" % sla_timewindow_doUnknown)
		self.logger.debug(" + sla_timewindow: %s" % sla_timewindow)
		self.logger.debug(" + start:          %s" % start)
		self.logger.debug(" + stop:           %s" % stop)
		
		## TODO: Tweaks
		total = 0
		states_sum = states.copy()
		first_timestamp = None
		
		for state in states:
			self.logger.debug("Get %s (%s) time's:" % (states_str[state], state))
			
			points = slanode.metric_get_values(
						dn='cps_time_by_state_%s' % state,
						tstart=start - self.beat_interval,
						tstop=stop,
						aggregate=False
			)
			
			if points:
				first_timestamp = points[0][0]
				
				if first_timestamp < start:
					del points[0]
				
				mysum = sum([point[1] for point in points])				
				states_sum[state] += mysum

				total += states_sum[state]
				
				self.logger.debug(" + %s seconds" % states_sum[state])
		
		if first_timestamp:
			self.logger.debug("Check unknown time:")
			self.logger.debug(" + Start:           %s" % start)
			self.logger.debug(" + First timestamp: %s" % first_timestamp)
			delta = start - first_timestamp
			self.logger.debug(" + Delta: %s seconds" % delta)
			if sla_timewindow_doUnknown and delta < 0:
				self.logger.debug("   + Set unknown time")
				# The first point arrives after the window start: count the
				# gap as unknown time (delta is negative at this point).
				states_sum[3] += -delta
				total += -delta
		
		self.logger.debug("Total: %s seconds" % total)
		
		## Compute percentages by state
		perf_data_array = []
		output_data = {}
		states_pct = states.copy()
		for state in states:
			states_pct[state] = 0
			if states_sum[state] > 0:
				states_pct[state] = round((states_sum[state] * 100) / float(total), 3)
			
			metric = 'cps_pct_by_state_%s' % state
			output_data[metric] = states_pct[state]
			perf_data_array.append({"metric": metric, "value": states_pct[state], "max": 100, "unit": "%"})
		
		# Fill template
		output = sla_output_tpl
		if output_data:
			for key in output_data:
				output = output.replace("{%s}" % key, str(output_data[key]))
				
		self.logger.debug(" + output: %s" % output)
		
		state = 0
		if states_pct[0] < thd_warn_sla_timewindow:
			state = 1
		if states_pct[0] < thd_crit_sla_timewindow:
			state = 2
		
		self.logger.debug(" + State: %s (%s)" % (states_str[state], state))
		
		# Send event
		event = cevent.forger(
			connector = "sla",
			connector_name = "engine",
			event_type = "sla",
			source_type="resource",
			component=config['name'],
			resource="sla",
			state=state,
			state_type=1,
			output=output,
			long_output="",
			perf_data=None,
			perf_data_array=perf_data_array
		)
		event['selector_id'] = config['_id']
		event['selector_rk'] = config['rk']
		self.logger.debug("Publish event on %s" % rk)
		self.amqp.publish(event, rk, self.amqp.exchange_name_events)
		
		self.storage.update(_id, {'sla_timewindow_lastcalcul': stop, 'sla_timewindow_perfdata': perf_data_array, 'sla_state': event['state']})
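
The SLA state above is derived from the share of time spent in state 0 over the window: each state's accumulated seconds become a percentage of the total, and the OK percentage is compared against the warning and critical thresholds. A worked example of that arithmetic with invented numbers for a one-day window:

# Worked example of the percentage/threshold logic above (values invented).
states_sum = {0: 82080, 1: 2592, 2: 864, 3: 864}   # seconds per state over 1 day
total = sum(states_sum.values())                    # 86400 s

states_pct = {}
for state, seconds in states_sum.items():
    states_pct[state] = round((seconds * 100) / float(total), 3)
# -> {0: 95.0, 1: 3.0, 2: 1.0, 3: 1.0}

thd_warn_sla_timewindow = 98.0
thd_crit_sla_timewindow = 95.0

state = 0
if states_pct[0] < thd_warn_sla_timewindow:
    state = 1
if states_pct[0] < thd_crit_sla_timewindow:
    state = 2
print(state)   # 1: availability is below the warning threshold, above critical
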
Code example #35
0
	def beat(self):
		self.logger.debug("Consolidate metrics:")

		now = time.time()
		beat_elapsed = 0

		self.load_consolidation()

		for record in self.records.values():
			
			#self.logger.debug("Raw: %s" % record)

			_id = record.get('_id')
			name = record.get('crecord_name')

			aggregation_interval = record.get('aggregation_interval')

			self.logger.debug("'%s':" % name)
			self.logger.debug(" + interval: %s" % aggregation_interval)

			last_run = record.get('consolidation_ts', now)

			elapsed = now - last_run

			self.logger.debug(" + elapsed: %s" % elapsed)

			if elapsed == 0 or elapsed >= aggregation_interval:
				self.logger.debug("Step 1: Select metrics")

				mfilter = json.loads(record.get('mfilter'))
				self.logger.debug(' + mfilter: %s' % mfilter)

				# Exclude internal metrics
				mfilter = {'$and': [mfilter, {'me': {'$nin':internal_metrics}}]}

				metric_list = self.manager.store.find(mfilter=mfilter)

				self.logger.debug(" + %s metrics found" % metric_list.count())

				if not metric_list.count():
					self.storage.update(_id, { 'output_engine': "No metrics, check your filter" })
					continue

				aggregation_method = record.get('aggregation_method')
				self.logger.debug(" + aggregation_method: %s" % aggregation_method)

				consolidation_methods = record.get('consolidation_method')
				if not isinstance(consolidation_methods, list):
					consolidation_methods = [ consolidation_methods ]

				self.logger.debug(" + consolidation_methods: %s" % consolidation_methods)

				mType = mUnit = mMin = mMax = None

				# Get metrics
				metrics = []
				for index, metric in enumerate(metric_list):
					if  index == 0 :
						#mType = metric.get('t')
						mMin = metric.get('mi')
						mMax = metric.get('ma')
						mUnit = metric.get('u')
						if 'sum' in consolidation_methods:
							maxSum = mMax
					else:
						if  metric.get('mi') < mMin :
							mMin = metric.get('mi')
						if metric.get('ma') > mMax :
							mMax = metric.get('ma')
						if 'sum' in consolidation_methods and mMax:
							maxSum += metric.get('ma')
						if metric.get('u') != mUnit :
							self.logger.warning("%s: too many units" % name)
							output_message = "warning : too many units"

					self.logger.debug(' + %s , %s , %s, %s' % (
						metric.get('_id'),
						metric.get('co'),
						metric.get('re',''),
						metric.get('me'))
					)

					metrics.append(metric.get('_id'))

				self.logger.debug(' + mMin: %s' % mMin)
				self.logger.debug(' + mMax: %s' % mMax)
				self.logger.debug(' + mUnit: %s' % mUnit)

				self.logger.debug("Step 2: Aggregate (%s)" % aggregation_method)

				# Set time range
				tstart = last_run

				if elapsed == 0 or last_run < (now - 2 * aggregation_interval):
					tstart = now - aggregation_interval

				self.logger.debug(
					" + From: %s To %s "% 
					(datetime.fromtimestamp(tstart).strftime('%Y-%m-%d %H:%M:%S'),
					datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
				)

				values = []
				for mid in metrics:
					points = self.manager.get_points(tstart=tstart, tstop=now, _id=mid)
					fn = self.get_math_function(aggregation_method)

					pValues = [point[1] for point in points]

					if not len(pValues):
						continue

					values.append(fn(pValues))

				self.logger.debug(" + %s values" % len(values))

				if not len(values):
					self.storage.update(_id, { 'output_engine': "No values, check your interval" })
					continue

				self.logger.debug("Step 3: Consolidate (%s)" % consolidation_methods)

				perf_data_array = []
				
				for consolidation_method in consolidation_methods:
					fn = self.get_math_function(consolidation_method)
					value = fn(values)

					self.logger.debug(" + %s: %s %s" % (consolidation_method, value, mUnit))

					perf_data_array.append({
						'metric' : consolidation_method,
						'value' : roundSignifiantDigit(value,3),
						"unit": mUnit,
						'max': maxSum if consolidation_method == 'sum' else mMax,
						'min': mMin,
						'type': 'GAUGE'
					}) 

				self.logger.debug("Step 4: Send event")

				event = cevent.forger(
					connector ="consolidation",
					connector_name = "engine",
					event_type = "consolidation",
					source_type = "resource",
					component = record['component'],
					resource=record['resource'],
					state=0,
					timestamp=now,
					state_type=1,
					output="Consolidation: '%s' successfully computed" % name,
					long_output="",
					perf_data=None,
					perf_data_array=perf_data_array,
					display_name=name
				)
				rk = cevent.get_routingkey(event)
				self.counter_event += 1
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)

				self.timestamps[_id] = now

				self.logger.debug("Step 5: Update configuration")

				beat_elapsed = time.time() - now

				self.storage.update(_id, {
					'consolidation_ts': int(now),
					'nb_items': len(metrics),
					'output_engine': "Computation done in %.2fs (%s/%s)" % (beat_elapsed, len(values), len(metrics))
				})

			else:
				self.logger.debug("Not the moment")

		if not beat_elapsed:
			beat_elapsed = time.time() - now

		self.counter_worktime += beat_elapsed
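
Consolidation therefore happens in two stages: each matched metric's points over the interval are first reduced with `aggregation_method` (one value per metric), then those per-metric values are reduced again with every `consolidation_method` to produce the published perf data. A toy illustration of the two stages, assuming simple mean/max/sum reducers; the point series and method names are invented, and the engine itself fetches real points from pyperfstore and resolves method names through `get_math_function`:

# Toy illustration of the two aggregation stages described above.
points_by_metric = {
    'srv1.cpu': [12.0, 18.0, 15.0],
    'srv2.cpu': [40.0, 44.0],
    'srv3.cpu': [7.0, 9.0, 8.0],
}

def mean(values):
    return sum(values) / float(len(values))

aggregation_method = mean                   # stage 1: one value per metric
consolidation_methods = [mean, max, sum]    # stage 2: across metrics

values = [aggregation_method(pts) for pts in points_by_metric.values()]
# e.g. [15.0, 42.0, 8.0]

perf_data_array = [
    {'metric': fn.__name__, 'value': round(fn(values), 3), 'type': 'GAUGE'}
    for fn in consolidation_methods
]
# -> mean 21.667, max 42.0, sum 65.0
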
Code example #36
0
File: cps_bench.py Project: moas/canopsis
base_component_event = cevent.forger(
    connector='bench',
    connector_name="engine",
    event_type="check",
    source_type="component",
    component="component-",
    state=0,
    state_type=1,
    output="Output",
    long_output="",
    #perf_data =			None,
    perf_data_array=[{
        'metric': 'metric1',
        'value': 0.25,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }, {
        'metric': 'metric2',
        'value': 0.16,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }, {
        'metric': 'metric3',
        'value': 0.12,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }, {
        'metric': 'metric4',
        'value': 0.12,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }, {
        'metric': 'metric5',
        'value': 0.12,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }, {
        'metric': 'metric6',
        'value': 0.12,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }, {
        'metric': 'metric7',
        'value': 0.12,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }, {
        'metric': 'metric8',
        'value': 0.12,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }, {
        'metric': 'metric9',
        'value': 0.12,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }, {
        'metric': 'metric10',
        'value': 0.12,
        'unit': None,
        'min': None,
        'max': None,
        'warn': None,
        'crit': None,
        'type': 'GAUGE'
    }]
    #display_name =		""
)
Code example #37
0
File: cengine.py Project: EzanLTD/canopsis
	def _beat(self):

		now = int(time.time())

		if self.last_stat + 60 <= now:
			self.logger.debug(" + Send stats")
			self.last_stat = now

			evt_per_sec = 0
			sec_per_evt = 0
			
			if self.counter_event:
				evt_per_sec = float(self.counter_event) / self.beat_interval
				self.logger.debug(" + %0.2f event(s)/seconds" % evt_per_sec)
			
			if self.counter_worktime and self.counter_event:
				sec_per_evt = self.counter_worktime / self.counter_event
				self.logger.debug(" + %0.5f seconds/event" % sec_per_evt)
			
			## Submit event
			if self.send_stats_event and self.counter_event != 0:
				state = 0
				
				if sec_per_evt > self.thd_warn_sec_per_evt:
					state = 1
					
				if sec_per_evt > self.thd_crit_sec_per_evt:
					state = 2
				
				perf_data_array = [
					{'retention': self.perfdata_retention, 'metric': 'cps_evt_per_sec', 'value': round(evt_per_sec,2), 'unit': 'evt' },
					{'retention': self.perfdata_retention, 'metric': 'cps_sec_per_evt', 'value': round(sec_per_evt,5), 'unit': 's',
						'warn': self.thd_warn_sec_per_evt,
						'crit': self.thd_crit_sec_per_evt
					},
				]

				self.logger.debug(" + State: %s" % state)
				
				event = cevent.forger(
					connector = "cengine",
					connector_name = "engine",
					event_type = "check",
					source_type="resource",
					resource=self.amqp_queue,
					state=state,
					state_type=1,
					output="%0.2f evt/sec, %0.5f sec/evt" % (evt_per_sec, sec_per_evt),
					perf_data_array=perf_data_array
				)
				
				rk = cevent.get_routingkey(event)
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)
			

			self.counter_error = 0
			self.counter_event = 0
			self.counter_worktime = 0

		try:
			self.beat()
		except Exception, err:
			self.logger.error("Beat raise exception: %s" % err)
			traceback.print_exc(file=sys.stdout)
Code example #38
0
File: functional-test.py Project: moas/canopsis
from cwebservices import cwebservices
from ctools import parse_perfdata
import pyperfstore2

from subprocess import Popen

logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s %(name)s %(levelname)s %(message)s',
)

event = cevent.forger(connector='canopsis',
                      connector_name='unittest',
                      event_type='check',
                      source_type="component",
                      component="test1",
                      state=0,
                      output="Output_1",
                      perf_data="mymetric=1s;10;20;0;30",
                      tags=['check', 'component', 'test1', 'unittest'])
rk = cevent.get_routingkey(event)

myamqp = None
storage = None
event_alert = None
perfstore = None


def on_alert(body, message):
    print "Alert: %s" % body
    mrk = message.delivery_info['routing_key']
Code example #39
0
File: consolidation.py Project: Httqm/canopsis
	def beat(self):
		beat_start = time.time()

		self.clean_consolidations()

		non_loaded_records = self.storage.find({ '$and' : [{ 'crecord_type': 'consolidation' },{'enable': True}, {'loaded': { '$ne' : True} } ] }, namespace="object" )

		if len(non_loaded_records) > 0  :
			for item in non_loaded_records :
				self.logger.info("New consolidation found '%s', load" % item.name)
				self.load(item)

		for _id in self.records.keys() :
			exists = self.storage.find_one({ '_id': _id } )
			if not exists:
				self.logger.info("%s deleted, remove from record list" % self.records[_id]['crecord_name'])
				del(self.records[_id])

		for record in self.records.values():
			consolidation_last_timestamp = self.timestamps[record.get('_id')]

			aggregation_interval = record.get('aggregation_interval', self.default_interval)
			current_interval = int(time.time()) - consolidation_last_timestamp

			self.logger.debug('current interval: %s , consolidation interval: %s' % (current_interval,aggregation_interval))
			if  current_interval >= aggregation_interval:
				self.logger.debug('Compute new consolidation for: %s' % record.get('crecord_name','No name found'))

				output_message = None
				mfilter = json.loads(record.get('mfilter'))
				mfilter = {'$and': [mfilter, {'me': {'$nin':internal_metrics}}]}
				#self.logger.debug('the mongo filter is: %s' % mfilter)
				metric_list = self.manager.store.find(mfilter=mfilter)
				self.logger.debug('length of matching metric list is: %i' % metric_list.count())
				
				aggregation_method = record.get('aggregation_method', False)
				consolidation_methods = record.get('consolidation_method', False)

				if not isinstance(consolidation_methods, list):
					consolidation_methods = [ consolidation_methods ] 

				mType = mUnit = mMin = mMax = None
				values = []

				for index,metric in enumerate(metric_list) :
					if  index == 0 :
						#mType = metric.get('t')
						mMin = metric.get('mi')
						mMax = metric.get('ma')
						mUnit = metric.get('u')
						if 'sum' in consolidation_methods:
							maxSum = mMax
					else:
						if  metric.get('mi') < mMin :
							mMin = metric.get('mi')
						if metric.get('ma') > mMax :
							mMax = metric.get('ma')
						if 'sum' in consolidation_methods:
							maxSum += metric.get('ma')
						if metric.get('u') != mUnit :
							output_message = "warning : too many units"

					self.logger.debug(' + Get points for: %s , %s , %s, %s' % (metric.get('_id'),metric.get('co'),metric.get('re',''),metric.get('me')))

					if int(time.time()) - aggregation_interval <= consolidation_last_timestamp + 60:
						tstart = consolidation_last_timestamp
						#self.logger.debug('   +   Use original tstart: %i' % consolidation_last_timestamp)
					else:
						tstart = int(time.time()) - aggregation_interval
						#self.logger.debug('   +   new tstart: %i' % tstart)

					self.logger.debug(
										'   +   from: %s to %s' % 
										(datetime.fromtimestamp(tstart).strftime('%Y-%m-%d %H:%M:%S'),
										datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
									)

					list_points = self.manager.get_points(tstart=tstart,tstop=time.time(), _id=metric.get('_id'))
					self.logger.debug('   +   Values on interval: %s' % ' '.join([str(value[1]) for value in list_points]))

					if list_points:
						fn = self.get_math_function(aggregation_method)
						if fn:
							point_value = fn([value[1] for value in list_points])
						else:
							point_value = list_points[len(list_points)-1][1]
						values.append(point_value)

				self.logger.debug('   +   Summary of horizontal aggregation "%s":' % aggregation_method)
				self.logger.debug(values)

				if not consolidation_methods:
					self.storage.update(record.get('_id'), {'output_engine': "No second aggregation function given"  } )
					return

				if len(values) == 0 :
					self.logger.debug('  +  No values')
					self.storage.update(record.get('_id'), {
															'output_engine': "No input values",
															'consolidation_ts':int(time.time())
															})
					self.timestamps[record.get('_id')] = int(time.time())
					return

				list_perf_data = []
				for function_name in consolidation_methods :
					fn = self.get_math_function(function_name)

					if not fn:
						self.logger.debug('No function given for second aggregation')
						self.storage.update(record.get('_id'), {'output_engine': "No function given for second aggregation"})
						return

					if len(values) == 0 :
						if not output_message:
							self.storage.update(record.get('_id'), {'output_engine': "No result"  } )
						else:
							self.storage.update(record.get('_id'), {'output_engine': "there are issues : %s warning : No result" % output_message } )

					value = fn(values)

					self.logger.debug(' + Result of aggregation for "%s": %f' % (function_name,value))

					list_perf_data.append({ 
											'metric' : function_name, 
											'value' : roundSignifiantDigit(value,3), 
											"unit": mUnit, 
											'max': maxSum if function_name == 'sum' else mMax, 
											'min': mMin, 
											'type': 'GAUGE' } ) 

				point_timestamp = int(time.time()) - current_interval/2

				event = cevent.forger(
					connector ="consolidation",
					connector_name = "engine",
					event_type = "consolidation",
					source_type = "resource",
					component = record['component'],
					resource=record['resource'],
					state=0,
					timestamp=point_timestamp,
					state_type=1,
					output="Consolidation: '%s' successfully computed" % record.get('crecord_name','No name'),
					long_output="",
					perf_data=None,
					perf_data_array=list_perf_data,
					display_name=record['crecord_name']
				)	
				rk = cevent.get_routingkey(event)
				self.counter_event += 1
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)

				self.logger.debug('The following event was sent:')
				self.logger.debug(event)

				if not output_message:
					engine_output = '%s : Computation done. Next Computation in %s s' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),str(aggregation_interval))
					self.storage.update(record.get('_id'),{'output_engine':engine_output} )
				else:
					engine_output = '%s : Computation done but there are issues : "%s" . Next Computation in %s s' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),output_message,str(aggregation_interval))
					self.storage.update(record.get('_id'), {'output_engine': engine_output} )

				self.storage.update(record.get('_id'), {'consolidation_ts':int(time.time())})
				self.timestamps[record.get('_id')] = int(time.time())
		
		self.counter_worktime += time.time() - beat_start