def test_03_Load(self):
		"""Verify a node and its 'load1' metric survive a storage round-trip.

		Dumps the current global node and one metric, destroys the in-memory
		instance, re-loads it from the shared storage under the same id, and
		compares the two dumps (ignoring the volatile 'writetime' field).
		Raises Exception on any mismatch.
		"""
		global mynode

		# Snapshot node + metric before dropping the in-memory instance.
		dump1 = mynode.dump()

		metric1 =  mynode.metric_dump(dn='load1')

		# Delete so the next construction must re-read from storage.
		del mynode

		mynode = node('nagios.Central.check.service.localhost9', storage=storage)	

		dump2 = mynode.dump()
		metric2 =  mynode.metric_dump(dn='load1')

		# 'writetime' changes on every save; exclude it from the comparison.
		del dump1['writetime']
		del dump2['writetime']

		del metric1['writetime']
		del metric2['writetime']

		if dump1 != dump2:
			print "First:"
			print dump1
			print "Second:"
			print dump2
			raise Exception('Invalid load of nodes...')

		if metric1 != metric2:
			print "First:"
			print metric1
			print "Second:"
			print metric2
			raise Exception('Invalid load of metrics ...')
Exemple #2
0
def bench_store(store, interval=60, duration=60*60*24, point_per_dca=None):
	print "Start Bench ..."
	
	msize = store.size()
	
	mynode = node(node_id, storage=store, rotate_plan=rotate_plan, point_per_dca=point_per_dca, dn=[ "BENCH", "FILL" ])

	# 1 value / 5 min = 8928 values/month = 107136 values/year

	bench_start = int(time.time()) - (day*24*60*60)
	timestamp = bench_start
	
	nb = duration / interval
	
	start = time.time()
	for i in range(1,nb+1):
		mynode.metric_push_value(dn='load1', unit=None, value=math.cos((i/50)) * 10 + 15, timestamp=timestamp)
		mynode.metric_push_value(dn='load5', unit=None, value=math.sin((i/50)) * 10 + 15, timestamp=timestamp)

		timestamp += interval

	bench_stop = timestamp

	nb = nb * 2
	elapsed = time.time() - start
	
	print " + WRITE:"
	print "    + %.2f days" % (duration / (60*60*24))
	msize = store.size()
	print "    + %.2f MB (%.2f MB/Year)" % ((msize / 1024.0 / 1024.0), ((msize / float(duration))/ 1024.0 / 1024.0) *  60*60*24*365)
	#nsize = mynode.size()
	#print "    + %.2f MB (%.2f MB/Year)" % ((nsize / 1024.0 / 1024.0), ((nsize / float(duration))/ 1024.0 / 1024.0) *  60*60*24*365)
	#print "    + Delta: %s B" % (msize - nsize)
	print "    + %s values in %s seconds" % ( nb, elapsed)
	print "    + %s values per second" % (int(nb/elapsed))
	print ""

	start = time.time()
	mynode = node('nagios.Central.check.service.localhost', storage=store)
	print "Get values between %s and %s" % (bench_start, bench_stop)
	values = mynode.metric_get_values(dn='load1', tstart=bench_start, tstop=bench_stop)
	nb = len(values)
	elapsed = time.time() - start
	print " + READ:"
	print "    + %s values in %s seconds" % ( nb, elapsed)
	print "    + %s values per second" % (int(nb/elapsed))
	print ""
	def test_01_Init(self):
		"""Prepare the shared fixtures: an in-memory store, a node and a start timestamp."""
		global mynode, storage, timestamp

		# In-memory backend; a filestore(base_path="/tmp/") would also work.
		storage = memstore()

		full_id = 'nagios.Central.check.service.localhost9'
		mynode = node(_id=full_id, dn=full_id, point_per_dca=100, storage=storage)

		timestamp = 1
Exemple #4
0
	def test_5_Check_amqp2mongodb_perfstore(self):
		"""Check that the amqp2mongodb bridge stored our test metric in perfstore."""
		perfnode = node(rk, storage=perfstore)
		perfnode.pretty_print()

		# Read back the values pushed during the last 10 seconds.
		values = perfnode.metric_get_values(dn='mymetric', tstart=int(time.time() - 10), tstop=int(time.time()))

		# Exactly two points are expected, the second one carrying value 1.
		if len(values) != 2:
			raise Exception("Perfsore don't work ...")

		if values[1][1] != 1:
			raise Exception("Perfsore don't work ...")
Exemple #5
0
	def get_states(self, nodeId, metric, start, stop):
		"""Return the raw (non-aggregated) points of *metric* on node *nodeId*
		between timestamps *start* and *stop*."""
		perfnode = node(_id=nodeId, storage=self.perfstorage)
		return perfnode.metric_get_values(dn=metric, tstart=start, tstop=stop, aggregate=False)
Exemple #6
0
	def to_perfstore(self, _id, perf_data, timestamp, dn=None):
		"""Push a parsed perfdata list into the perfstore node *_id*.

		:param _id:       perfstore node id
		:param perf_data: list of dicts like
		                  {'metric': 'rta', 'value': 0.097, 'unit': 'ms', ...};
		                  anything that is not a list is silently ignored
		:param timestamp: timestamp attached to every pushed value
		:param dn:        distinguished name forwarded to node()

		One metric_push_value() call is made per entry; node construction
		failure raises, but per-metric push failures are only logged.
		"""
		
		if isinstance(perf_data, list):
	
			try:
				mynode = node(	_id=_id,
								dn=dn,
								storage=self.storage,
								point_per_dca=self.point_per_dca,
								rotate_plan=self.rotate_plan,
								logging_level=self.logging_level)
								
			except Exception, err:
				raise Exception("Imposible to init node: %s (%s)" % (_id, err))

			#[ {'min': 0.0, 'metric': u'rta', 'value': 0.097, 'warn': 100.0, 'crit': 500.0, 'unit': u'%'}, {'min': 0.0, 'metric': u'pl', 'value': 0.0, 'warn': 20.0, 'crit': 60.0, 'unit': u'%'} ]

			for perf in perf_data:
				
				# 'metric' and 'value' are required; everything else optional.
				dn = perf['metric']
				value = perf['value']
				
				dtype = perf.get('type', None)
				unit = perf.get('unit', None)
				
				if unit:
					unit = str(unit)
					
				vmin =	perf.get('min', None)
				vmax =	perf.get('max', None)
				vwarn =	perf.get('warn', None)
				vcrit =	perf.get('crit', None)
				retention =	perf.get('retention', None)

				# Thresholds may arrive as strings; normalize to numbers.
				# NOTE(review): falsy numeric values (0, 0.0) skip conversion
				# here -- presumably harmless since 0 needs no conversion.
				if vmin:
					vmin = Str2Number(vmin)
				if vmax:
					vmax = Str2Number(vmax)
				if vwarn:
					vwarn = Str2Number(vwarn)
				if vcrit:
					vcrit = Str2Number(vcrit)

				value = Str2Number(value)
					
				self.logger.debug(" + Put metric '%s' (%s %s (%s)) for ts %s ..." % (dn, value, unit, dtype, timestamp))

				# Best effort: a failed push is logged, not raised.
				try:
					mynode.metric_push_value(dn=dn, unit=unit, value=value, timestamp=timestamp, dtype=dtype, min_value=vmin, max_value=vmax, thld_warn_value=vwarn, thld_crit_value=vcrit)
				except Exception, err:
					self.logger.warning('Impossible to put value in perfstore (%s) (metric=%s, unit=%s, value=%s)', err, dn, unit, value)
Exemple #7
0
def perfstore_getMetric(_id):
	"""Return all metric distinguished names of node *_id*.

	:param _id: perfstore node id
	:return: {'total': int, 'success': True,
	          'data': [{'metric': dn, 'node': _id}, ...]}
	"""
	# Routine lookup, not an error condition: the original logged this at
	# ERROR level; use debug like the other perfstore handlers.
	logger.debug("GET metrics of '%s'" % _id)

	mynode = node(_id, storage=perfstore)

	metrics = mynode.metric_get_all_dn()

	# metric_get_all_dn() may return nothing when the node has no metrics.
	if metrics:
		data = [{'metric': metric, 'node': _id} for metric in metrics]
	else:
		data = []

	return {'total': len(data), 'success': True, 'data': data}
Exemple #8
0
def perfstore_get_last_value(_id, metrics):
	"""Return the last recorded point of each requested metric of node *_id*.

	:param _id:     perfstore node id
	:param metrics: list of metric dn's, or ["<all>"] for every metric
	:return: list of dicts, each holding the last point (timestamp converted
	         to milliseconds) plus unit/min/max/threshold metadata
	"""
	output=[]
	logger.debug(" + node:      %s" % _id)
	logger.debug(" + metrics:   %s" % metrics)
		
	mynode = node(_id, storage=perfstore)
	
	if metrics:
		if (metrics[0] == "<all>"):
			metrics = mynode.metric_get_all_dn()
			logger.debug(" + metrics:   %s" % metrics)

		for dn in metrics:
			metric = mynode.metric_get(dn=dn)
			# Copy before scaling: the original multiplied last_point[0] in
			# place, corrupting the cached point (timestamp inflated by 1000
			# on every subsequent call against the same metric object).
			value = list(metric.last_point)
			value[0] = value[0] * 1000  # seconds -> milliseconds for the UI
			
			output.append({'node': _id, 'metric': dn, 'values': [value], 'bunit': metric.bunit, 'min': metric.min_value, 'max': metric.max_value, 'thld_warn': metric.thld_warn_value, 'thld_crit': metric.thld_crit_value})
	
	return output
Exemple #9
0
	def calcul_time_by_state(self, _id, config):
		"""Accumulate, per state, the time spent since the last SLA calculation.

		Reads the 'cps_state' points of config['rk'] over
		[sla_lastcalcul, now], sums the interval spent in each state, pushes
		one 'cps_time_by_state_<state>' metric per state into a dedicated SLA
		perfstore node, and records the new sla_lastcalcul on *_id*.

		:param _id:    storage record id updated with the calculation bookmark
		:param config: dict with 'rk', 'name' and optional 'sla_timewindow',
		               'sla_interval', 'sla_lastcalcul'
		:return: the SLA node when at least 2 points were found, else None
		"""
		rk = config['rk']
		
		self.logger.debug("Calcul time by state")
		self.logger.debug(" + Get States of %s (%s)" % (_id, rk))
			
		sla_timewindow = config.get('sla_timewindow', self.default_sla_timewindow)
			
		sla_interval   = config.get('sla_interval', sla_timewindow)
		sla_lastcalcul = config.get('sla_lastcalcul', int(time.time() - sla_interval))
			
		stop = int(time.time())
		start = sla_lastcalcul
		
		self.logger.debug(" + sla_lastcalcul: %s" % sla_lastcalcul)
		self.logger.debug(" + start:          %s" % start)
		self.logger.debug(" + stop:           %s" % stop)
			
		points = self.get_states(rk, "cps_state", start, stop)
			
		# Need at least two points to measure one interval.
		if len(points) >= 2:
				
			first_point = points[0]
			last_point = points[len(points)-1]
				
			# Get the first state (initial) of serie
			if start == first_point[0]:
				(last_state, state_type, extra) = self.split_state(first_point[1])
				self.logger.debug(" + Set last state to %s (initial)" % last_state)
					
				# Remove first point
				del points[0]
			else:
				# No point exactly at 'start': assume state 0 and begin the
				# window at the first available point.
				last_state = 0
				start = first_point[0]
				self.logger.debug(" + Set last state to default: %s (initial)" % last_state)
				self.logger.debug(" + New start:                 %s" % start)
					
			# Calcul each state's time for period start -> stop
			self.logger.debug(" + Parse Points:")
			states_sum = states.copy()
			total = 0
			last_timestamp = start
			for point in points:
				timestamp = point[0]
				value = point[1]
					
				try:
					(state, state_type, extra) = self.split_state(value)
						
					# The elapsed interval is credited to the PREVIOUS state.
					interval = timestamp - last_timestamp
					states_sum[last_state] += interval
					total += interval
										
					self.logger.debug("   + %s: interval (%s): state: %s, state_type: %s, extra: %s, last_state: %s" % (timestamp, interval, state, state_type, extra, last_state))
						
					last_state = state
					last_timestamp = timestamp
						
				except Exception, err:
					self.logger.error("Error in parsing: %s" % err)
				
			self.logger.debug(" + Total: %s" % total)	
			self.logger.debug(" + States: %s" % states_sum)
				
			# Set last point timestamp
			self.logger.debug(" + Set sla_lastcalcul to: %s" % last_point[0])
			last_timestamp = last_point[0]
			
			# Store result in perfstore
			# Don't submit Canopsis event because data it's just for next calcul
			rk  = self.get_rk(config['name'])
			slanode = node( _id=rk,
							 dn=[ config['name'], "sla" ],
							 storage=self.perfstorage,
							 point_per_dca=300,
							 rotate_plan={'PLAIN': 0, 'TSC': 3,}
			)
			
			for state in states_sum:
				#perf_data_array.append({"metric": 'cps_time_by_state_%s' % state, "value": states_sum[state], "unit": "s"})
				slanode.metric_push_value(	dn = 'cps_time_by_state_%s' % state,
											unit = "s",
											value = states_sum[state],
											timestamp = last_timestamp
				)
				
			# Bookmark where the next calculation should resume.
			self.storage.update(_id, {'sla_lastcalcul': last_timestamp, 'sla_node_id': slanode._id})
			return slanode
Exemple #10
0
def clean():
	"""Remove every trace of the test event: its record, its log entries and its perfstore node."""
	# Main record.
	storage.remove(rk)

	# Associated entries in the events log.
	log_records = storage.find({'rk': rk}, namespace='events_log')
	storage.remove(log_records, namespace='events_log')

	# Perfstore data.
	node(rk, storage=perfstore).remove()
Exemple #11
0
	# NOTE(review): this fragment duplicates the reporting tail of
	# bench_store() above; its enclosing function header is not visible in
	# this chunk, so it is left untouched -- confirm against the full file.
	msize = store.size()
	print "    + %.2f MB (%.2f MB/Year)" % ((msize / 1024.0 / 1024.0), ((msize / float(duration))/ 1024.0 / 1024.0) *  60*60*24*365)
	#nsize = mynode.size()
	#print "    + %.2f MB (%.2f MB/Year)" % ((nsize / 1024.0 / 1024.0), ((nsize / float(duration))/ 1024.0 / 1024.0) *  60*60*24*365)
	#print "    + Delta: %s B" % (msize - nsize)
	print "    + %s values in %s seconds" % ( nb, elapsed)
	print "    + %s values per second" % (int(nb/elapsed))
	print ""

	start = time.time()
	mynode = node('nagios.Central.check.service.localhost', storage=store)
	print "Get values between %s and %s" % (bench_start, bench_stop)
	values = mynode.metric_get_values(dn='load1', tstart=bench_start, tstop=bench_stop)
	nb = len(values)
	elapsed = time.time() - start
	print " + READ:"
	print "    + %s values in %s seconds" % ( nb, elapsed)
	print "    + %s values per second" % (int(nb/elapsed))
	print ""


# Benchmark driver: run the bench against a MongoDB-backed store.
print "Mongo Store"
storage = mongostore(mongo_safe=False)
# Remove any data left over from a previous run before benchmarking.
mynode = node(node_id, storage=storage)
mynode.remove()

bench_store(	storage,
				interval=interval,
				duration=60*60*24*day,
				point_per_dca=point_per_dca)
Exemple #12
0
def perfstore_node_get(_id):
	"""Dump the perfstore node *_id*, wrapped in the standard API envelope."""
	dumps = [ node(_id, storage=perfstore).dump() ]
	return {'total': len(dumps), 'success': True, 'data': dumps}
Exemple #13
0
def perfstore_get_values(_id, metrics, start=None, stop=None, time_interval=None, aggregate_method=None,use_window_ts=None):
	"""Return (optionally aggregated) values of the requested metrics of node *_id*.

	:param _id:              perfstore node id
	:param metrics:          list of metric dn's, or ["<all>"] for every metric
	:param start, stop:      window bounds in MILLISECONDS (converted to
	                         seconds below); defaults: stop=now,
	                         start=stop-86400
	:param time_interval:    aggregation bucket size in seconds (optional)
	:param aggregate_method: aggregation type; falls back to the module-level
	                         pyperfstore_aggregate_method
	:param use_window_ts:    accepted but unused here -- kept for API
	                         compatibility with callers
	:return: list of dicts with 'values' (timestamps back in milliseconds)
	         and unit/min/max/threshold metadata; metrics that fail are
	         logged and skipped
	"""
	
	# Normalize the window: a start without a stop means a single instant.
	if start and not stop:
		stop = start
	
	if stop:
		stop = int(int(stop) / 1000)
	else:
		stop = int(time.time())
		
	if start:
		start = int(int(start) / 1000)
	else:
		start = stop - 86400

	if time_interval:
		time_interval = int(time_interval)
		
	max_points = pyperfstore_aggregate_maxpoints
		
	if not aggregate_method:
		aggregate_method = pyperfstore_aggregate_method
	
	logger.debug(" + node:      %s" % _id)
	logger.debug(" + metrics:   %s" % metrics)
	logger.debug(" + start:     %s" % start)
	logger.debug(" + stop:      %s" % stop)
	logger.debug('Aggregate:')
	logger.debug(' + max_points : %s' % max_points)
	logger.debug(' + interval : %s' % time_interval)
	
	# Align the window on bucket boundaries and size max_points so one
	# point per bucket fits (round up).
	if (time_interval):
		start -= start % time_interval
		stop -= stop % time_interval
		max_points = int( round((stop - start) / time_interval + 0.5) )
	
	mynode = node(_id=_id, storage=perfstore)
	
	output=[]
	
	if metrics:
		if (metrics[0] == "<all>"):
			metrics = mynode.metric_get_all_dn()
			logger.debug(" + metrics:   %s" % metrics)



		for dn in metrics:
			# Best effort per metric: one bad metric must not kill the call.
			try:
				values = mynode.metric_get_values(
					dn=dn,
					tstart=start,
					tstop=stop,
					aggregate=pyperfstore_aggregate,
					atype=aggregate_method,
					max_points=max_points,
					time_interval=time_interval
					)
					
				# Back to milliseconds for the consumer.
				values = [[x[0] * 1000, x[1]] for x in values]

				if len(values) >= 1:
					metric = mynode.metric_get(dn=dn)
					bunit = metric.bunit
					output.append({'node': _id, 'metric': dn, 'values': values, 'bunit': bunit, 'min': metric.min_value, 'max': metric.max_value, 'thld_warn': metric.thld_warn_value, 'thld_crit': metric.thld_crit_value})
						
			except Exception, err:
				logger.error(err)