Example #1
	def beat(self):
		"""Consolidate metrics for every loaded record and publish events.

		For each consolidation record whose aggregation interval has
		elapsed: select the matching metrics, aggregate each metric's
		points over the interval ("horizontal" aggregation), consolidate
		the per-metric values and publish the result as a perf-data event
		on AMQP, then persist the new consolidation timestamp.
		"""
		self.logger.debug("Consolidate metrics:")

		now = time.time()
		beat_elapsed = 0

		# Refresh self.records from storage before processing.
		self.load_consolidation()

		for record in self.records.values():
			_id = record.get('_id')
			name = record.get('crecord_name')

			aggregation_interval = record.get('aggregation_interval')

			self.logger.debug("'%s':" % name)
			self.logger.debug(" + interval: %s" % aggregation_interval)

			# Defaulting to `now` makes elapsed == 0 on a first run, which
			# the test below treats as "due now".
			last_run = record.get('consolidation_ts', now)
			elapsed = now - last_run

			self.logger.debug(" + elapsed: %s" % elapsed)

			if elapsed == 0 or elapsed >= aggregation_interval:
				self.logger.debug("Step 1: Select metrics")

				# mfilter is stored on the record as a JSON string.
				mfilter = json.loads(record.get('mfilter'))
				self.logger.debug(' + mfilter: %s' % mfilter)

				# Exclude internal metrics
				mfilter = {'$and': [mfilter, {'me': {'$nin': internal_metrics}}]}

				metric_list = self.manager.store.find(mfilter=mfilter)

				# Cache the cursor count instead of issuing it twice.
				nb_metrics = metric_list.count()
				self.logger.debug(" + %s metrics found" % nb_metrics)

				if not nb_metrics:
					self.storage.update(_id, { 'output_engine': "No metrics, check your filter" })
					continue

				aggregation_method = record.get('aggregation_method')
				self.logger.debug(" + aggregation_method: %s" % aggregation_method)

				# A single method may be stored bare; normalise to a list.
				consolidation_methods = record.get('consolidation_method')
				if not isinstance(consolidation_methods, list):
					consolidation_methods = [consolidation_methods]

				self.logger.debug(" + consolidation_methods: %s" % consolidation_methods)

				mUnit = mMin = mMax = None
				# Running sum of per-metric maxima, used as the 'max' bound
				# of the 'sum' consolidation entry.
				maxSum = None

				# Track global min/max/unit across the metric set and
				# collect the metric ids for the aggregation step.
				metrics = []
				for index, metric in enumerate(metric_list):
					if index == 0:
						mMin = metric.get('mi')
						mMax = metric.get('ma')
						mUnit = metric.get('u')
						if 'sum' in consolidation_methods:
							maxSum = mMax
					else:
						if metric.get('mi') < mMin:
							mMin = metric.get('mi')
						if metric.get('ma') > mMax:
							mMax = metric.get('ma')
						# Guard on None explicitly: the old truthiness test
						# (`and mMax`) skipped valid zero maxima and still
						# allowed `None += n` when the first 'ma' was None.
						if ('sum' in consolidation_methods
								and maxSum is not None
								and metric.get('ma') is not None):
							maxSum += metric.get('ma')
						if metric.get('u') != mUnit:
							# Mixed units make min/max/unit unreliable.
							self.logger.warning("%s: too many units" % name)

					self.logger.debug(' + %s , %s , %s, %s' % (
						metric.get('_id'),
						metric.get('co'),
						metric.get('re', ''),
						metric.get('me'))
					)

					metrics.append(metric.get('_id'))

				self.logger.debug(' + mMin: %s' % mMin)
				self.logger.debug(' + mMax: %s' % mMax)
				self.logger.debug(' + mUnit: %s' % mUnit)

				self.logger.debug("Step 2: Aggregate (%s)" % aggregation_method)

				# Time range: resume from the last run, unless this is the
				# first run or the record is more than two intervals behind,
				# in which case use exactly one interval back from now.
				tstart = last_run
				if elapsed == 0 or last_run < (now - 2 * aggregation_interval):
					tstart = now - aggregation_interval

				self.logger.debug(
					" + From: %s To %s " %
					(datetime.fromtimestamp(tstart).strftime('%Y-%m-%d %H:%M:%S'),
					datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
				)

				# Horizontal aggregation: one value per metric id. The
				# aggregation function is loop-invariant, so resolve it once.
				fn = self.get_math_function(aggregation_method)
				values = []
				for mid in metrics:
					points = self.manager.get_points(tstart=tstart, tstop=now, _id=mid)
					pValues = [point[1] for point in points]

					# Metrics without points in the window contribute nothing.
					if not pValues:
						continue

					values.append(fn(pValues))

				self.logger.debug(" + %s values" % len(values))

				if not values:
					self.storage.update(_id, { 'output_engine': "No values, check your interval" })
					continue

				self.logger.debug("Step 3: Consolidate (%s)" % consolidation_methods)

				# Vertical consolidation: one perf-data entry per method.
				perf_data_array = []
				for consolidation_method in consolidation_methods:
					fn = self.get_math_function(consolidation_method)
					value = fn(values)

					self.logger.debug(" + %s: %s %s" % (consolidation_method, value, mUnit))

					perf_data_array.append({
						'metric': consolidation_method,
						'value': roundSignifiantDigit(value, 3),
						"unit": mUnit,
						'max': maxSum if consolidation_method == 'sum' else mMax,
						'min': mMin,
						'type': 'GAUGE'
					})

				self.logger.debug("Step 4: Send event")

				event = cevent.forger(
					connector="consolidation",
					connector_name="engine",
					event_type="consolidation",
					source_type="resource",
					component=record['component'],
					resource=record['resource'],
					state=0,
					timestamp=now,
					state_type=1,
					output="Consolidation: '%s' successfully computed" % name,
					long_output="",
					perf_data=None,
					perf_data_array=perf_data_array,
					display_name=name
				)
				rk = cevent.get_routingkey(event)
				self.counter_event += 1
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)

				self.timestamps[_id] = now

				self.logger.debug("Step 5: Update configuration")

				beat_elapsed = time.time() - now

				self.storage.update(_id, {
					'consolidation_ts': int(now),
					'nb_items': len(metrics),
					'output_engine': "Computation done in %.2fs (%s/%s)" % (beat_elapsed, len(values), len(metrics))
				})

			else:
				self.logger.debug("Not the moment")

		# If no record was due, still account for the scan time itself.
		if not beat_elapsed:
			beat_elapsed = time.time() - now

		self.counter_worktime += beat_elapsed
Example #2
    def beat(self):
        """Periodic beat: load new consolidation records, drop deleted
        ones, and recompute every consolidation whose interval elapsed.

        For each due record the matching metrics are aggregated over the
        interval ("horizontal" aggregation), then the per-metric values
        are consolidated per method and published as a perf-data event
        on AMQP; the consolidation timestamp is persisted afterwards.
        """
        beat_start = time.time()

        self.clean_consolidations()

        # Enabled consolidation records not yet loaded into self.records.
        non_loaded_records = self.storage.find(
            {
                '$and': [
                    {'crecord_type': 'consolidation'},
                    {'enable': True},
                    {'loaded': {'$ne': True}},
                ]
            },
            namespace="object")

        for item in non_loaded_records:
            self.logger.info("New consolidation found '%s', load" %
                             item.name)
            self.load(item)

        # Snapshot the keys: deleting from the dict while iterating its
        # live key view raises RuntimeError on Python 3.
        for _id in list(self.records.keys()):
            exists = self.storage.find_one({'_id': _id})
            if not exists:
                self.logger.info("%s deleted, remove from record list" %
                                 self.records[_id]['crecord_name'])
                del self.records[_id]

        for record in self.records.values():
            consolidation_last_timestamp = self.timestamps[record.get('_id')]

            aggregation_interval = record.get('aggregation_interval',
                                              self.default_interval)
            current_interval = int(time.time()) - consolidation_last_timestamp

            self.logger.debug(
                'current interval: %s , consolidation interval: %s' %
                (current_interval, aggregation_interval))

            # Guard clause: not due yet.
            if current_interval < aggregation_interval:
                continue

            self.logger.debug('Compute new consolidation for: %s' %
                              record.get('crecord_name', 'No name found'))

            output_message = None
            # mfilter is stored as a JSON string; exclude internal metrics.
            mfilter = json.loads(record.get('mfilter'))
            mfilter = {
                '$and': [mfilter, {
                    'me': {
                        '$nin': internal_metrics
                    }
                }]
            }
            metric_list = self.manager.store.find(mfilter=mfilter)
            self.logger.debug('length of matching metric list is: %i' %
                              metric_list.count())

            aggregation_method = record.get('aggregation_method', False)
            # A single method may be stored bare; normalise to a list.
            consolidation_methods = record.get('consolidation_method',
                                               False)
            if not isinstance(consolidation_methods, list):
                consolidation_methods = [consolidation_methods]

            mUnit = mMin = mMax = None
            # Sum of per-metric maxima: 'max' bound of the 'sum' entry.
            maxSum = None
            values = []

            # Track global min/max/unit and aggregate each metric's
            # points over the interval.
            for index, metric in enumerate(metric_list):
                if index == 0:
                    mMin = metric.get('mi')
                    mMax = metric.get('ma')
                    mUnit = metric.get('u')
                    if 'sum' in consolidation_methods:
                        maxSum = mMax
                else:
                    if metric.get('mi') < mMin:
                        mMin = metric.get('mi')
                    if metric.get('ma') > mMax:
                        mMax = metric.get('ma')
                    # Guard: if the first metric had no 'ma', maxSum is
                    # None and the old unguarded += raised TypeError.
                    if ('sum' in consolidation_methods
                            and maxSum is not None
                            and metric.get('ma') is not None):
                        maxSum += metric.get('ma')
                    if metric.get('u') != mUnit:
                        output_message = "warning : too many units"

                self.logger.debug(' + Get points for: %s , %s , %s, %s' %
                                  (metric.get('_id'), metric.get('co'),
                                   metric.get('re', ''), metric.get('me')))

                # Resume from the previous timestamp when it is recent
                # enough (within one interval plus 60s slack); otherwise
                # take exactly one interval back from now.
                if (int(time.time()) - aggregation_interval
                        <= consolidation_last_timestamp + 60):
                    tstart = consolidation_last_timestamp
                else:
                    tstart = int(time.time()) - aggregation_interval

                self.logger.debug(
                    '   +   from: %s to %s' %
                    (datetime.fromtimestamp(tstart).strftime(
                        '%Y-%m-%d %H:%M:%S'),
                     datetime.fromtimestamp(
                         time.time()).strftime('%Y-%m-%d %H:%M:%S')))

                list_points = self.manager.get_points(
                    tstart=tstart,
                    tstop=time.time(),
                    _id=metric.get('_id'))
                self.logger.debug(
                    '   +   Values on interval: %s' %
                    ' '.join([str(value[1]) for value in list_points]))

                if list_points:
                    fn = self.get_math_function(aggregation_method)
                    if fn:
                        point_value = fn(
                            [value[1] for value in list_points])
                    else:
                        # No aggregation function: keep the last point.
                        point_value = list_points[-1][1]
                    values.append(point_value)

            self.logger.debug(
                '   +   Summary of horizontal aggregation "%s":' %
                aggregation_method)
            self.logger.debug(values)

            # These used to `return`, aborting the whole beat and silently
            # skipping every remaining record (and the worktime counter);
            # only the current record should be skipped.
            if not consolidation_methods:
                self.storage.update(record.get('_id'), {
                    'output_engine':
                    "No second aggregation function given"
                })
                continue

            if not values:
                self.logger.debug('  +  No values')
                self.storage.update(
                    record.get('_id'), {
                        'output_engine': "No input values",
                        'consolidation_ts': int(time.time())
                    })
                self.timestamps[record.get('_id')] = int(time.time())
                continue

            # Vertical consolidation: one perf-data entry per method.
            list_perf_data = []
            skip_record = False
            for function_name in consolidation_methods:
                fn = self.get_math_function(function_name)

                if not fn:
                    self.logger.debug(
                        'No function given for second aggregation')
                    self.storage.update(
                        record.get('_id'), {
                            'output_engine':
                            "No function given for second aggregation"
                        })
                    # Stop processing this record only (was `return`).
                    skip_record = True
                    break

                value = fn(values)

                self.logger.debug(' + Result of aggregation for "%s": %f' %
                                  (function_name, value))

                list_perf_data.append({
                    'metric': function_name,
                    'value': roundSignifiantDigit(value, 3),
                    "unit": mUnit,
                    'max': maxSum if function_name == 'sum' else mMax,
                    'min': mMin,
                    'type': 'GAUGE'
                })

            if skip_record:
                continue

            # Stamp the event in the middle of the computed interval.
            # Floor division keeps the Python 2 integer semantics.
            point_timestamp = int(time.time()) - current_interval // 2

            event = cevent.forger(
                connector="consolidation",
                connector_name="engine",
                event_type="consolidation",
                source_type="resource",
                component=record['component'],
                resource=record['resource'],
                state=0,
                timestamp=point_timestamp,
                state_type=1,
                output="Consolidation: '%s' successfully computed" %
                record.get('crecord_name', 'No name'),
                long_output="",
                perf_data=None,
                perf_data_array=list_perf_data,
                display_name=record['crecord_name'])
            rk = cevent.get_routingkey(event)
            self.counter_event += 1
            self.amqp.publish(event, rk, self.amqp.exchange_name_events)

            self.logger.debug('The following event was sent:')
            self.logger.debug(event)

            if not output_message:
                engine_output = '%s : Computation done. Next Computation in %s s' % (
                    datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    str(aggregation_interval))
            else:
                engine_output = '%s : Computation done but there are issues : "%s" . Next Computation in %s s' % (
                    datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    output_message, str(aggregation_interval))
            self.storage.update(record.get('_id'),
                                {'output_engine': engine_output})

            self.storage.update(record.get('_id'),
                                {'consolidation_ts': int(time.time())})
            self.timestamps[record.get('_id')] = int(time.time())

        self.counter_worktime += time.time() - beat_start
Example #3
	def beat(self):
		"""Consolidate metrics for every loaded record and publish events.

		For each consolidation record whose aggregation interval has
		elapsed: select the matching metrics, aggregate each metric's
		points over the interval ("horizontal" aggregation), consolidate
		the per-metric values and publish the result as a perf-data event
		on AMQP, then persist the new consolidation timestamp.
		"""
		self.logger.debug("Consolidate metrics:")

		now = time.time()
		beat_elapsed = 0

		# Refresh self.records from storage before processing.
		self.load_consolidation()

		for record in self.records.values():

			#self.logger.debug("Raw: %s" % record)

			_id = record.get('_id')
			name = record.get('crecord_name')

			aggregation_interval = record.get('aggregation_interval')

			self.logger.debug("'%s':" % name)
			self.logger.debug(" + interval: %s" % aggregation_interval)

			# Defaulting to `now` makes elapsed == 0 on a first run, which
			# the test below treats as "due now".
			last_run = record.get('consolidation_ts', now)

			elapsed = now - last_run

			self.logger.debug(" + elapsed: %s" % elapsed)

			if elapsed == 0 or elapsed >= aggregation_interval:
				self.logger.debug("Step 1: Select metrics")

				# mfilter is stored on the record as a JSON string.
				mfilter = json.loads(record.get('mfilter'))
				self.logger.debug(' + mfilter: %s' % mfilter)

				# Exclude internal metrics
				mfilter = {'$and': [mfilter, {'me': {'$nin':internal_metrics}}]}

				metric_list = self.manager.store.find(mfilter=mfilter)

				self.logger.debug(" + %s metrics found" % metric_list.count())

				if not metric_list.count():
					self.storage.update(_id, { 'output_engine': "No metrics, check your filter" })
					continue

				aggregation_method = record.get('aggregation_method')
				self.logger.debug(" + aggregation_method: %s" % aggregation_method)

				# A single method may be stored bare; normalise to a list.
				consolidation_methods = record.get('consolidation_method')
				if not isinstance(consolidation_methods, list):
					consolidation_methods = [ consolidation_methods ]

				self.logger.debug(" + consolidation_methods: %s" % consolidation_methods)

				mType = mUnit = mMin = mMax = None

				# Get metrics: track global min/max/unit across the set and
				# collect metric ids for the aggregation step.
				metrics = []
				for index, metric in enumerate(metric_list):
					if  index == 0 :
						#mType = metric.get('t')
						mMin = metric.get('mi')
						mMax = metric.get('ma')
						mUnit = metric.get('u')
						# maxSum: sum of per-metric maxima, used as the
						# 'max' bound of the 'sum' consolidation entry.
						if 'sum' in consolidation_methods:
							maxSum = mMax
					else:
						if  metric.get('mi') < mMin :
							mMin = metric.get('mi')
						if metric.get('ma') > mMax :
							mMax = metric.get('ma')
						# NOTE(review): truthiness guard skips mMax == 0, and
						# if the first metric's 'ma' was None, maxSum is None
						# here and += would raise — confirm 'ma' is always
						# numeric.
						if 'sum' in consolidation_methods and mMax:
							maxSum += metric.get('ma')
						if metric.get('u') != mUnit :
							self.logger.warning("%s: too many units" % name)
							# NOTE(review): output_message is never read in
							# this method — dead assignment?
							output_message = "warning : too many units"

					self.logger.debug(' + %s , %s , %s, %s' % (
						metric.get('_id'),
						metric.get('co'),
						metric.get('re',''),
						metric.get('me'))
					)

					metrics.append(metric.get('_id'))

				self.logger.debug(' + mMin: %s' % mMin)
				self.logger.debug(' + mMax: %s' % mMax)
				self.logger.debug(' + mUnit: %s' % mUnit)

				self.logger.debug("Step 2: Aggregate (%s)" % aggregation_method)

				# Set time range: resume from the last run, unless this is
				# the first run or the record is more than two intervals
				# behind, then use exactly one interval back from now.
				tstart = last_run

				if elapsed == 0 or last_run < (now - 2 * aggregation_interval):
					tstart = now - aggregation_interval

				self.logger.debug(
					" + From: %s To %s "% 
					(datetime.fromtimestamp(tstart).strftime('%Y-%m-%d %H:%M:%S'),
					datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
				)

				# Horizontal aggregation: one value per metric id.
				values = []
				for mid in metrics:
					points = self.manager.get_points(tstart=tstart, tstop=now, _id=mid)
					fn = self.get_math_function(aggregation_method)

					pValues = [point[1] for point in points]

					# Metrics without points in the window contribute nothing.
					if not len(pValues):
						continue

					values.append(fn(pValues))

				self.logger.debug(" + %s values" % len(values))

				if not len(values):
					self.storage.update(_id, { 'output_engine': "No values, check your interval" })
					continue

				self.logger.debug("Step 3: Consolidate (%s)" % consolidation_methods)

				# Vertical consolidation: one perf-data entry per method.
				perf_data_array = []

				for consolidation_method in consolidation_methods:
					fn = self.get_math_function(consolidation_method)
					value = fn(values)

					self.logger.debug(" + %s: %s %s" % (consolidation_method, value, mUnit))

					perf_data_array.append({
						'metric' : consolidation_method,
						'value' : roundSignifiantDigit(value,3),
						"unit": mUnit,
						'max': maxSum if consolidation_method == 'sum' else mMax,
						'min': mMin,
						'type': 'GAUGE'
					}) 

				self.logger.debug("Step 4: Send event")

				event = cevent.forger(
					connector ="consolidation",
					connector_name = "engine",
					event_type = "consolidation",
					source_type = "resource",
					component = record['component'],
					resource=record['resource'],
					state=0,
					timestamp=now,
					state_type=1,
					output="Consolidation: '%s' successfully computed" % name,
					long_output="",
					perf_data=None,
					perf_data_array=perf_data_array,
					display_name=name
				)
				rk = cevent.get_routingkey(event)
				self.counter_event += 1
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)

				self.timestamps[_id] = now

				self.logger.debug("Step 5: Update configuration")

				beat_elapsed = time.time() - now

				self.storage.update(_id, {
					'consolidation_ts': int(now),
					'nb_items': len(metrics),
					'output_engine': "Computation done in %.2fs (%s/%s)" % (beat_elapsed, len(values), len(metrics))
				})

			else:
				self.logger.debug("Not the moment")

		# If no record was due, still account for the scan time itself.
		if not beat_elapsed:
			beat_elapsed = time.time() - now

		self.counter_worktime += beat_elapsed
Example #4
	def beat(self):
		"""Periodic beat: load new consolidation records, drop deleted
		ones, and recompute every consolidation whose interval elapsed.

		For each due record the matching metrics are aggregated over the
		interval ("horizontal" aggregation), then the per-metric values
		are consolidated per method and published as a perf-data event
		on AMQP; the consolidation timestamp is persisted afterwards.
		"""
		beat_start = time.time()

		self.clean_consolidations()

		# Enabled consolidation records not yet loaded into self.records.
		non_loaded_records = self.storage.find({ '$and' : [{ 'crecord_type': 'consolidation' },{'enable': True}, {'loaded': { '$ne' : True} } ] }, namespace="object" )

		if len(non_loaded_records) > 0  :
			for item in non_loaded_records :
				self.logger.info("New consolidation found '%s', load" % item.name)
				self.load(item)

		# NOTE(review): deleting from self.records while iterating its
		# keys() is only safe on Python 2 (keys() returns a list copy);
		# on Python 3 this raises RuntimeError — iterate
		# list(self.records.keys()) instead.
		for _id in self.records.keys() :
			exists = self.storage.find_one({ '_id': _id } )
			if not exists:
				self.logger.info("%s deleted, remove from record list" % self.records[_id]['crecord_name'])
				del(self.records[_id])

		for record in self.records.values():
			consolidation_last_timestamp = self.timestamps[record.get('_id')]

			aggregation_interval = record.get('aggregation_interval', self.default_interval)
			current_interval = int(time.time()) - consolidation_last_timestamp

			self.logger.debug('current interval: %s , consolidation interval: %s' % (current_interval,aggregation_interval))
			if  current_interval >= aggregation_interval:
				self.logger.debug('Compute new consolidation for: %s' % record.get('crecord_name','No name found'))

				output_message = None
				# mfilter is stored as a JSON string; exclude internal
				# metrics from the selection.
				mfilter = json.loads(record.get('mfilter'))
				mfilter = {'$and': [mfilter, {'me': {'$nin':internal_metrics}}]}
				#self.logger.debug('the mongo filter is: %s' % mfilter)
				metric_list = self.manager.store.find(mfilter=mfilter)
				self.logger.debug('length of matching metric list is: %i' % metric_list.count())

				aggregation_method = record.get('aggregation_method', False)
				consolidation_methods = record.get('consolidation_method', False)

				# A single method may be stored bare; normalise to a list.
				if not isinstance(consolidation_methods, list):
					consolidation_methods = [ consolidation_methods ] 

				mType = mUnit = mMin = mMax = None
				values = []

				# Track global min/max/unit and aggregate each metric's
				# points over the interval ("horizontal" aggregation).
				for index,metric in enumerate(metric_list) :
					if  index == 0 :
						#mType = metric.get('t')
						mMin = metric.get('mi')
						mMax = metric.get('ma')
						mUnit = metric.get('u')
						# maxSum: sum of per-metric maxima, used as the
						# 'max' bound of the 'sum' consolidation entry.
						if 'sum' in consolidation_methods:
							maxSum = mMax
					else:
						if  metric.get('mi') < mMin :
							mMin = metric.get('mi')
						if metric.get('ma') > mMax :
							mMax = metric.get('ma')
						# NOTE(review): if the first metric's 'ma' was None,
						# maxSum is None here and += raises TypeError —
						# confirm 'ma' is always numeric.
						if 'sum' in consolidation_methods:
							maxSum += metric.get('ma')
						if metric.get('u') != mUnit :
							output_message = "warning : too many units"

					self.logger.debug(' + Get points for: %s , %s , %s, %s' % (metric.get('_id'),metric.get('co'),metric.get('re',''),metric.get('me')))

					# Resume from the previous timestamp when it is recent
					# enough (within one interval plus 60s slack); otherwise
					# take exactly one interval back from now.
					if int(time.time()) - aggregation_interval <= consolidation_last_timestamp + 60:
						tstart = consolidation_last_timestamp
						#self.logger.debug('   +   Use original tstart: %i' % consolidation_last_timestamp)
					else:
						tstart = int(time.time()) - aggregation_interval
						#self.logger.debug('   +   new tstart: %i' % tstart)

					self.logger.debug(
										'   +   from: %s to %s' % 
										(datetime.fromtimestamp(tstart).strftime('%Y-%m-%d %H:%M:%S'),
										datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
									)

					list_points = self.manager.get_points(tstart=tstart,tstop=time.time(), _id=metric.get('_id'))
					self.logger.debug('   +   Values on interval: %s' % ' '.join([str(value[1]) for value in list_points]))

					if list_points:
						fn = self.get_math_function(aggregation_method)
						if fn:
							point_value = fn([value[1] for value in list_points])
						else:
							# No aggregation function: keep the last point.
							point_value = list_points[len(list_points)-1][1]
						values.append(point_value)

				self.logger.debug('   +   Summary of horizontal aggregation "%s":' % aggregation_method)
				self.logger.debug(values)

				# NOTE(review): the early `return`s below abort the whole
				# beat and skip every remaining record (and the worktime
				# counter) — `continue` was probably intended.
				if not consolidation_methods:
					self.storage.update(record.get('_id'), {'output_engine': "No second aggregation function given"  } )
					return

				if len(values) == 0 :
					self.logger.debug('  +  No values')
					self.storage.update(record.get('_id'), {
															'output_engine': "No input values",
															'consolidation_ts':int(time.time())
															})
					self.timestamps[record.get('_id')] = int(time.time())
					return

				# Vertical consolidation: one perf-data entry per method.
				list_perf_data = []
				for function_name in consolidation_methods :
					fn = self.get_math_function(function_name)

					if not fn:
						self.logger.debug('No function given for second aggregation')
						self.storage.update(record.get('_id'), {'output_engine': "No function given for second aggregation"})
						return

					# NOTE(review): unreachable — the empty-values case
					# already returned above.
					if len(values) == 0 :
						if not output_message:
							self.storage.update(record.get('_id'), {'output_engine': "No result"  } )
						else:
							self.storage.update(record.get('_id'), {'output_engine': "there are issues : %s warning : No result" % output_message } )

					value = fn(values)

					self.logger.debug(' + Result of aggregation for "%s": %f' % (function_name,value))

					list_perf_data.append({ 
											'metric' : function_name, 
											'value' : roundSignifiantDigit(value,3), 
											"unit": mUnit, 
											'max': maxSum if function_name == 'sum' else mMax, 
											'min': mMin, 
											'type': 'GAUGE' } ) 

				# Stamp the event in the middle of the computed interval.
				point_timestamp = int(time.time()) - current_interval/2

				event = cevent.forger(
					connector ="consolidation",
					connector_name = "engine",
					event_type = "consolidation",
					source_type = "resource",
					component = record['component'],
					resource=record['resource'],
					state=0,
					timestamp=point_timestamp,
					state_type=1,
					output="Consolidation: '%s' successfully computed" % record.get('crecord_name','No name'),
					long_output="",
					perf_data=None,
					perf_data_array=list_perf_data,
					display_name=record['crecord_name']
				)	
				rk = cevent.get_routingkey(event)
				self.counter_event += 1
				self.amqp.publish(event, rk, self.amqp.exchange_name_events)

				self.logger.debug('The following event was sent:')
				self.logger.debug(event)

				if not output_message:
					engine_output = '%s : Computation done. Next Computation in %s s' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),str(aggregation_interval))
					self.storage.update(record.get('_id'),{'output_engine':engine_output} )
				else:
					engine_output = '%s : Computation done but there are issues : "%s" . Next Computation in %s s' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),output_message,str(aggregation_interval))
					self.storage.update(record.get('_id'), {'output_engine': engine_output} )

				self.storage.update(record.get('_id'), {'consolidation_ts':int(time.time())})
				self.timestamps[record.get('_id')] = int(time.time())

		self.counter_worktime += time.time() - beat_start