# NOTE: import paths assumed from the Canopsis code base.
from canopsis.migration.manager import MigrationModule
from canopsis.perfdata.manager import PerfData


class PerfdataModule(MigrationModule):
    def __init__(self, *args, **kwargs):
        super(PerfdataModule, self).__init__(*args, **kwargs)

        self.manager = PerfData()

    def init(self):
        pass

    def update(self):
        if self.get_version('perfdata') < 1:
            self.logger.info('Migrating to version 1')
            self.update_to_version_1()
            self.set_version('perfdata', 1)

    def update_to_version_1(self):
        storage = self.manager[PerfData.PERFDATA_STORAGE]

        nan = float('nan')
        oneweek = 3600 * 24 * 7  # one week, in seconds

        for document in storage.find_elements():
            metric_id = document['i']
            values = document['v']  # relative offset -> value
            t = document['t']  # base timestamp of the document

            # Re-emit every value as an absolute-timestamp point,
            # mapping missing values to NaN.
            points = [
                (t + int(ts), nan if values[ts] is None else values[ts])
                for ts in values
            ]

            # Keep only offsets below one week in the stored document.
            rightvalues = {
                key: values[key]
                for key in values
                if int(key) < oneweek
            }

            document['v'] = rightvalues

            storage.put_element(element=document, cache=False)

            self.manager.put(metric_id=metric_id, points=points, cache=False)
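# A minimal, self-contained sketch of the version-1 transformation above,
# using plain dicts in place of the storage backend. The 'i'/'t'/'v'
# document layout is taken from the migration code; the helper name and
# sample values are illustrative only.
def _version_1_transform(document, oneweek=3600 * 24 * 7):
    nan = float('nan')
    t = document['t']
    values = document['v']

    # Absolute-timestamp points, with None mapped to NaN.
    points = [
        (t + int(ts), nan if values[ts] is None else values[ts])
        for ts in values
    ]

    # Offsets at or beyond one week are dropped from the stored document.
    document['v'] = {
        key: values[key] for key in values if int(key) < oneweek
    }

    return document, points


doc, pts = _version_1_transform({'i': 'metric', 't': 1000, 'v': {'0': 1.5, '604800': None}})
# doc['v'] == {'0': 1.5}; pts contain (1000, 1.5) and (605800, nan)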
# NOTE: import paths assumed from the Canopsis code base.
from unittest import TestCase

from canopsis.perfdata.manager import PerfData
from canopsis.timeserie.timewindow import TimeWindow, get_offset_timewindow


class PerfDataTest(TestCase):
    def setUp(self):
        self.perfdata = PerfData(data_scope='test')

    def test_put_get_data(self):
        timewindow = TimeWindow()
        metric_id = 'test_manager'

        # Start from a clean slate.
        self.perfdata.remove(metric_id=metric_id, with_meta=True)
        count = self.perfdata.count(metric_id=metric_id)
        self.assertEqual(count, 0)

        tv0 = (int(timewindow.start()), None)
        tv1 = (int(timewindow.start() + 1), 0)
        tv2 = (int(timewindow.stop()), 2)
        tv3 = (int(timewindow.stop() + 1000000), 3)  # outside the window

        # Set values with timestamps out of order.
        points = [tv0, tv2, tv1, tv3]

        meta = {'plop': None}

        self.perfdata.put(metric_id=metric_id, points=points, meta=meta)

        data, _meta = self.perfdata.get(
            metric_id=metric_id, timewindow=timewindow, with_meta=True
        )

        self.assertEqual(meta, _meta[0][PerfData.META_VALUE])
        # Points come back sorted and window-filtered: tv3 is excluded.
        self.assertEqual([tv0, tv1, tv2], data)

        # Remove one point at the window's stop timestamp.
        _timewindow = get_offset_timewindow(timewindow.stop())

        self.perfdata.remove(metric_id=metric_id, timewindow=_timewindow)

        data, _meta = self.perfdata.get(
            metric_id=metric_id, timewindow=timewindow, with_meta=True
        )

        self.assertEqual(meta, _meta[0][PerfData.META_VALUE])
        self.assertEqual(data, [tv0, tv1])

        # Get data on the timewindow.
        data, _meta = self.perfdata.get(
            metric_id=metric_id, timewindow=timewindow, with_meta=True
        )
        self.assertEqual(meta, _meta[0][PerfData.META_VALUE])

        # Get all data (no timewindow): tv3 is included again.
        data, _meta = self.perfdata.get(metric_id=metric_id, with_meta=True)
        self.assertEqual(meta, _meta[0][PerfData.META_VALUE])
        self.assertEqual(len(data), 3)

        # Remove all data.
        self.perfdata.remove(metric_id=metric_id, with_meta=True)

        data, _meta = self.perfdata.get(metric_id=metric_id, with_meta=True)

        self.assertIsNone(_meta)
        self.assertEqual(len(data), 0)
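# The window semantics the test above relies on, sketched with plain tuples
# (the real manager filters inside the storage layer; this helper is only
# illustrative): get() returns points sorted by timestamp and keeps a point
# only if start <= ts <= stop, which is why tv3 is excluded.
def _in_window(points, start, stop):
    return [(ts, v) for ts, v in sorted(points) if start <= ts <= stop]


pts = [(0, None), (10, 2), (1, 0), (1000000, 3)]
assert _in_window(pts, start=0, stop=10) == [(0, None), (1, 0), (10, 2)]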
# NOTE: import paths assumed from the Canopsis code base.
from canopsis.migration.manager import MigrationModule
from canopsis.mongo.core import MongoStorage
from canopsis.perfdata.manager import PerfData


class PerfdataModule(MigrationModule):
    def __init__(self, *args, **kwargs):
        super(PerfdataModule, self).__init__(*args, **kwargs)

        self.manager = PerfData()

    def init(self):
        pass

    def update_mongo2influxdb(self):
        """Convert mongo data to influxdb data."""
        mongostorage = MongoStorage(table='periodic_perfdata')

        count = mongostorage._backend.count()

        if count:  # there is data left to migrate
            for document in mongostorage._backend.find():
                data_id = document['i']
                timestamp = int(document['t'])
                values = document['v']  # relative offset -> value

                # Convert to absolute-timestamp points.
                points = [(timestamp + int(ts), values[ts]) for ts in values]

                self.manager.put(metric_id=data_id, points=points)

                mongostorage.remove_elements(ids=document['_id'])

    def update(self):
        # FIXME: the version 1 migration is disabled for now
        # (the "and False" short-circuits the condition).
        if self.get_version('perfdata') < 1 and False:
            self.logger.info(u'Migrating to version 1')
            self.update_to_version_1()
            self.set_version('perfdata', 1)

        if self.get_version('perfdata') < 2:
            self.logger.info(u'Migrating to version 2')
            self.update_mongo2influxdb()
            self.set_version('perfdata', 2)

    def update_to_version_1(self):
        storage = self.manager[PerfData.PERFDATA_STORAGE]

        nan = float('nan')
        oneweek = 3600 * 24 * 7  # one week, in seconds

        for document in storage.find_elements():
            metric_id = document['i']
            values = document['v']
            t = document['t']

            points = [
                (t + int(ts), nan if values[ts] is None else values[ts])
                for ts in values
            ]

            rightvalues = {
                key: values[key]
                for key in values
                if int(key) < oneweek
            }

            document['v'] = rightvalues

            storage.put_element(element=document, cache=False)

            self.manager.put(metric_id=metric_id, points=points, cache=False)
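# What update_mongo2influxdb does to a single 'periodic_perfdata' document,
# as a standalone sketch (the 't'/'v' layout is taken from the code above;
# the helper name and sample values are illustrative):
def _doc_to_points(document):
    timestamp = int(document['t'])
    values = document['v']
    return [(timestamp + int(ts), values[ts]) for ts in values]


assert sorted(_doc_to_points({'t': 100, 'v': {'0': 1, '60': 2}})) == [(100, 1), (160, 2)]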
# NOTE: import paths assumed from the Canopsis code base; PerfDataUtils and
# Formulas come from elsewhere in the project (imports not shown here).
import hashlib

from time import gmtime

from canopsis.engines.core import Engine, publish
from canopsis.event import forger
from canopsis.old.account import Account
from canopsis.old.storage import get_storage
from canopsis.perfdata.manager import PerfData


class engine(Engine):
    etype = 'consolidation'

    def __init__(self, *args, **kargs):
        super(engine, self).__init__(*args, **kargs)

        self.storage = get_storage(
            namespace='events',
            account=Account(user="******", group="root")
        )
        self.manager = PerfData()
        self.perf_data = PerfDataUtils()

    def pre_run(self):
        self.storage = get_storage(
            namespace='object',
            account=Account(user="******", group="root")
        )
        self.manager = PerfData()

    def fetch(self, serie, _from, _to):
        self.logger.debug("*Start fetch*")

        t_serie = serie.copy()

        timewindow = {'start': _from, 'stop': _to, 'timezone': gmtime()}

        # A serie with several metrics and no aggregation method cannot be
        # combined point by point: force a 60s average aggregation.
        if (len(t_serie['metrics']) > 1
                and t_serie['aggregate_method'].lower() == 'none'):
            self.logger.debug(
                'More than one metric in serie, performing an aggregation'
            )
            self.logger.debug('serie: {0}'.format(t_serie))
            self.logger.debug('aggregation: average - 60s')

            t_serie['aggregate_method'] = 'average'
            t_serie['aggregate_interval'] = 60

        self.logger.debug('serie: {0}'.format(t_serie))

        if t_serie['aggregate_method'].lower() == 'none':
            timeserie = {'aggregation': 'NONE'}

        else:
            timeserie = {
                'aggregation': t_serie['aggregate_method'],
                'period': {'second': t_serie['aggregate_interval']}
            }

        results = self.perf_data.perfdata(
            metric_id=t_serie['metrics'],
            timewindow=timewindow,
            timeserie=timeserie
        )

        formula = t_serie['formula']
        finalserie = self.metric_raw(results, formula)

        self.logger.debug("*End fetch*")

        return finalserie

    def metric_raw(self, results, formula):
        metrics, _ = results

        # Build the points dictionary.
        points = {}
        length = None

        for m in metrics:
            cid = m['meta']['data_id']
            mid = 'metric_' + hashlib.md5(cid).hexdigest()
            mname = self.retrieve_metric_name(cid)

            # Replace the metric name in the formula by a unique,
            # identifier-safe token.
            formula = formula.replace(mname, mid)

            self.logger.debug("Metric {0} - {1}".format(mname, mid))

            points[mid] = m['points']

            # Make sure we treat the same amount of points by selecting
            # the metric with the fewest points.
            if length is None or len(m['points']) < length:
                length = len(m['points'])

        self.logger.debug('formula: {0}'.format(formula))

        mids = list(points.keys())
        finalSerie = []

        # Now loop over all points to compute the final serie.
        for i in range(length):
            data = {}
            ts = 0

            for mid in mids:
                # Get the point value at index "i" for metric "mid".
                data[mid] = points[mid][i][1]

                # Set the timestamp.
                ts = points[mid][i][0]

            # Import the data into the math context.
            math = Formulas(data)

            # Evaluate the mathematical formula.
            pointval = math.evaluate(formula)

            # Add the computed point to the serie.
            finalSerie.append([ts * 1000, pointval])

            # Remove variable values from the math context.
            math.reset()

        self.logger.debug('finalserie: {0}'.format(finalSerie))

        return finalSerie, points[mid]

    def retrieve_metric_name(self, name):
        '''
        Extract the metric name from a full metric identifier by dropping
        its first four path components.

        TODO: improve this method with the Context ID.
        '''
        if name is None:
            return None

        li = name.split('/')

        for i in range(4):
            li.pop(0)

        name = '/' + '/'.join(li)

        return name

    def consume_dispatcher(self, event, *args, **kargs):
        self.logger.debug("Start metrics consolidation")

        t_serie = event.copy()

        self.logger.debug('serie: {0}'.format(t_serie))

        if not t_serie:
            self.logger.error('No record found.')
            return

        # Test settings: hard-coded time bounds.
        _from = 1425394522
        _to = 1425402296

        perf_data_array = []

        _, points = self.fetch(t_serie, _from, _to)

        # Store the computed points for this serie's metric.
        self.manager.put(metric_id=t_serie['_id'], points=points)

        # Publish the consolidation metrics, one event per point.
        for t, v in points:
            perf_data_array.append({
                'metric': t_serie['_id'],
                'value': v,
                'unit': t_serie['_id'],
                'min': None,
                'max': None,
                'warn': None,
                'crit': None,
                'type': 'GAUGE'
            })

            conso_event = forger(
                timestamp=t,
                component='conso',
                connector='Engine',
                connector_name='consolidation',
                event_type='perf',
                source_type='component',
                perf_data_array=perf_data_array
            )

            self.logger.debug('Publishing {0}'.format(conso_event))
            publish(publisher=self.amqp, event=conso_event)

            perf_data_array = []  # reset for the next point

        # Update crecords information.
        event_id = t_serie['_id']
        self.crecord_task_complete(event_id)
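# How metric_raw combines aligned points, as a standalone sketch. The real
# engine delegates evaluation to the Formulas helper; here eval() with an
# empty builtins namespace stands in for it, and the md5-derived tokens
# mirror the 'metric_' + md5(context_id) scheme above. Names and sample
# series are illustrative.
import hashlib


def _evaluate_serie(formula, series):
    points = {}
    for name, pts in series.items():
        # Metric names may contain characters that are not valid in an
        # expression; a hex digest is always a valid identifier suffix.
        mid = 'metric_' + hashlib.md5(name.encode('utf-8')).hexdigest()
        formula = formula.replace(name, mid)
        points[mid] = pts

    # Align on the metric with the fewest points.
    length = min(len(pts) for pts in points.values())

    out = []
    for i in range(length):
        data = {mid: pts[i][1] for mid, pts in points.items()}
        ts = next(iter(points.values()))[i][0]
        out.append([ts * 1000, eval(formula, {'__builtins__': {}}, data)])
    return out


series = {'cpu': [(10, 1.0), (20, 3.0)], 'mem': [(10, 2.0), (20, 5.0)]}
assert _evaluate_serie('cpu + mem', series) == [[10000, 3.0], [20000, 8.0]]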
# NOTE: import paths assumed from the Canopsis code base.
from copy import deepcopy

from canopsis.context.manager import Context
from canopsis.engines.core import Engine
from canopsis.perfdata.manager import PerfData


class engine(Engine):
    etype = 'perfdata'

    def __init__(self, *args, **kargs):
        super(engine, self).__init__(*args, **kargs)

        self.perfdata = PerfData()

    def work(self, event, *args, **kargs):
        # Get perfdata.
        perf_data = event.get('perf_data')
        perf_data_array = event.get('perf_data_array', [])

        if perf_data_array is None:
            perf_data_array = []

        # Parse perfdata.
        if perf_data:
            self.logger.debug(' + perf_data: {0}'.format(perf_data))

            try:
                perf_data_array += self.perfdata.parse_perfdata(perf_data)

            except Exception as err:
                self.logger.error(
                    "Impossible to parse perfdata from: {0} ({1})".format(
                        event, err
                    )
                )

        self.logger.debug(' + perf_data_array: {0}'.format(perf_data_array))

        # Add status information.
        event_type = event.get('event_type')
        handled_event_types = ['check', 'selector', 'sla']

        if event_type is not None and event_type in handled_event_types:
            self.logger.debug('Add status information')

            state = int(event.get('state', 0))
            state_type = int(event.get('state_type', 0))
            state_extra = 0

            # Multiplex the state fields into a single metric value.
            cps_state = state * 100 + state_type * 10 + state_extra

            perf_data_array.append({
                "metric": "cps_state",
                "value": cps_state
            })

        event['perf_data_array'] = perf_data_array

        # Remove keys whose values are None from each perf data entry.
        event['perf_data_array'] = [
            {
                name: value
                for name, value in perf_data.items()
                if value is not None
            }
            for perf_data in event['perf_data_array']
        ]

        self.logger.debug('perf_data_array: {0}'.format(perf_data_array))

        event = deepcopy(event)

        # Metrology: store one point per metric at the event timestamp.
        timestamp = event.get('timestamp')

        if timestamp is not None:
            perf_data_array = event.get('perf_data_array', [])

            for perf_data in perf_data_array:
                perf_data = perf_data.copy()

                # Resolve the metric entity id from the event context.
                event_with_metric = deepcopy(event)
                event_with_metric['type'] = 'metric'
                event_with_metric[Context.NAME] = perf_data.pop('metric')
                metric_id = self.perfdata.context.get_entity_id(
                    event_with_metric
                )

                value = perf_data.pop('value', None)

                # Remaining perf data fields are stored as meta data.
                self.perfdata.put(
                    metric_id=metric_id,
                    points=[(timestamp, value)],
                    meta=perf_data,
                    cache=True
                )

        return event
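# The cps_state multiplexing used above, with its inverse for reference.
# The encoding packs state, state_type and state_extra into decimal digits;
# state_extra is always 0 in the engine and the decode helper is only shown
# here for illustration.
def encode_cps_state(state, state_type, state_extra=0):
    return state * 100 + state_type * 10 + state_extra


def decode_cps_state(cps_state):
    return cps_state // 100, (cps_state // 10) % 10, cps_state % 10


assert encode_cps_state(2, 1) == 210
assert decode_cps_state(210) == (2, 1, 0)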