def process(metric, datapoint):
    """Run one datapoint through rewrite rules and the configured aggregation method.

    Depending on settings.AGGREGATOR_RULE_METHOD, either matches the metric
    against RuleManager rules ("rules") or sums everything under a fixed
    ``.sum_all.hosts`` target ("sumall").  Unmatched metrics are forwarded
    unchanged when FORWARD_ALL is enabled.
    """
    increment('datapointsReceived')

    for pre_rule in RewriteRuleManager.preRules:
        metric = pre_rule.apply(metric)

    aggregate_metrics = []

    if settings.AGGREGATOR_RULE_METHOD == "rules":
        for rule in RuleManager.rules:
            target = rule.get_aggregate_metric(metric)
            if target is None:
                continue
            aggregate_metrics.append(target)
            values_buffer = BufferManager.get_buffer(target)
            if not values_buffer.configured:
                values_buffer.configure_aggregation(rule.frequency, rule.aggregation_func)
            values_buffer.input(datapoint)

    # Custom rule to sum metrics
    elif settings.AGGREGATOR_RULE_METHOD == "sumall":
        sum_index = metric.find(".sum.")
        metric_namespace = metric[:metric.find(".")]
        sumall_rules = settings["aggregation-sumall-rules"]
        if sum_index != -1:
            target = metric[:sum_index] + ".sum_all.hosts"
            aggregate_metrics.append(target)
            values_buffer = BufferManager.get_buffer(target)
            # Per-namespace override of the default 60s interval.
            interval = 60
            if metric_namespace in sumall_rules:
                interval = int(sumall_rules[metric_namespace])
            if not values_buffer.configured:
                values_buffer.configure_aggregation(interval, sum)
            values_buffer.input(datapoint)

    for post_rule in RewriteRuleManager.postRules:
        metric = post_rule.apply(metric)

    if settings['FORWARD_ALL'] and metric not in aggregate_metrics:
        # log.msg("Couldn't match metric %s with any aggregation rule. Passing on un-aggregated." % metric)
        events.metricGenerated(metric, datapoint)
def process(self, metric, datapoint):
    """Apply rewrite rules, feed every matching aggregation buffer, and
    yield the (possibly rewritten) metric when no rule claimed it.

    Logs a miss only when LOG_AGGREGATOR_MISSES is on and nothing matched.
    """
    increment('datapointsReceived')

    for rewrite_rule in RewriteRuleManager.rules(PRE):
        metric = rewrite_rule.apply(metric)

    matched = set()
    for rule in RuleManager.rules:
        target = rule.get_aggregate_metric(metric)
        if target is None:
            continue
        matched.add(target)
        buf = BufferManager.get_buffer(target)
        if not buf.configured:
            buf.configure_aggregation(rule.frequency, rule.aggregation_func)
        buf.input(datapoint)

    for rewrite_rule in RewriteRuleManager.rules(POST):
        metric = rewrite_rule.apply(metric)

    if metric not in matched:
        if settings.LOG_AGGREGATOR_MISSES and len(matched) == 0:
            log.msg(
                "Couldn't match metric %s with any aggregation rule. Passing on un-aggregated." % metric)
        yield (metric, datapoint)
def _bench_aggregator(name):
    """Benchmark AggregationProcessor.process over growing call counts.

    For each count ``n`` a fresh processor is driven through ``n`` datapoints
    (one per second of synthetic time) and the elapsed time is reported via
    print_stats.  The aggregation buffer is flushed every FREQUENCY calls so
    interval bookkeeping does not grow without bound.
    """
    # print() form: behaves identically under Python 2 and keeps this twin
    # consistent with the other _bench_aggregator variant in this file.
    print("== %s ==" % name)
    max_intervals = settings['MAX_AGGREGATION_INTERVALS']
    # Start far enough in the past that produced intervals are already complete.
    now = time.time() - (max_intervals * FREQUENCY)
    buf = None
    for n in [1, 1000, 10000, 100000, 1000000, 10000000]:
        processor = AggregationProcessor()
        # Prime the processor so the aggregate buffer exists before timing.
        processor.process(METRIC, (now, 1))

        def _process():
            processor.process(METRIC, (now + _process.i, 1))
            # Flush computed values once per aggregation interval.
            if (_process.i % FREQUENCY) == 0 and buf is not None:
                buf.compute_values()
            _process.i += 1

        _process.i = 0  # per-run call counter, stored on the callable for timeit
        if buf is None:
            buf = BufferManager.get_buffer(METRIC_AGGR, 1, None)
        t = timeit.timeit(_process, number=n)
        buf.close()
        print_stats(n, t)
        print("")
def process(self, metric, datapoint):
    """Apply rewrite rules, feed every matching aggregation buffer, and
    yield the (possibly rewritten) metric when no rule claimed it."""
    increment('datapointsReceived')

    for rewrite_rule in RewriteRuleManager.rules(PRE):
        metric = rewrite_rule.apply(metric)

    matched = set()
    for rule in RuleManager.rules:
        target = rule.get_aggregate_metric(metric)
        if target is None:
            continue
        matched.add(target)
        buf = BufferManager.get_buffer(target)
        if not buf.configured:
            buf.configure_aggregation(rule.frequency, rule.aggregation_func)
        buf.input(datapoint)

    for rewrite_rule in RewriteRuleManager.rules(POST):
        metric = rewrite_rule.apply(metric)

    if metric not in matched:
        yield (metric, datapoint)
def process(self, metric, datapoint):
    """Feed every matching aggregation buffer; when FORWARD_ALL is enabled,
    also yield metrics that no aggregation rule claimed (optionally logging
    the miss when LOG_AGGREGATOR_MISSES is on and nothing matched)."""
    increment('datapointsReceived')

    matched = set()
    for rule in RuleManager.rules:
        target = rule.get_aggregate_metric(metric)
        if target is None:
            continue
        matched.add(target)
        buf = BufferManager.get_buffer(target)
        if not buf.configured:
            buf.configure_aggregation(rule.frequency, rule.aggregation_func)
        buf.input(datapoint)

    if settings.FORWARD_ALL and metric not in matched:
        if settings.LOG_AGGREGATOR_MISSES and len(matched) == 0:
            log.msg(
                "Couldn't match metric %s with any aggregation rule. Passing on un-aggregated." % metric)
        yield (metric, datapoint)
def process(metric, datapoint):
    """Apply rewrite rules, feed every matching aggregation buffer, and
    emit a metricGenerated event for metrics no rule claimed."""
    increment('datapointsReceived')

    for pre_rule in RewriteRuleManager.preRules:
        metric = pre_rule.apply(metric)

    matched = []
    for rule in RuleManager.rules:
        target = rule.get_aggregate_metric(metric)
        if target is None:
            continue
        matched.append(target)
        buf = BufferManager.get_buffer(target)
        if not buf.configured:
            buf.configure_aggregation(rule.frequency, rule.aggregation_func)
        buf.input(datapoint)

    for post_rule in RewriteRuleManager.postRules:
        metric = post_rule.apply(metric)

    if metric not in matched:
        events.metricGenerated(metric, datapoint)
def _bench_aggregator(name):
    """Time AggregationProcessor.process over geometrically growing call counts."""
    print("== %s ==" % name)
    max_intervals = settings['MAX_AGGREGATION_INTERVALS']
    # Start in the past so every produced interval is already complete.
    start = time.time() - (max_intervals * FREQUENCY)
    values_buffer = None
    for iterations in [1, 1000, 10000, 100000, 1000000, 10000000]:
        processor = AggregationProcessor()
        # Prime the processor before timing begins.
        processor.process(METRIC, (start, 1))

        def _tick():
            processor.process(METRIC, (start + _tick.i, 1))
            # Flush computed values once per aggregation interval.
            if (_tick.i % FREQUENCY) == 0 and values_buffer is not None:
                values_buffer.compute_values()
            _tick.i += 1

        _tick.i = 0  # per-run counter carried on the callable for timeit
        if values_buffer is None:
            values_buffer = BufferManager.get_buffer(METRIC_AGGR, 1, None)
        elapsed = timeit.timeit(_tick, number=iterations)
        values_buffer.close()
        print_stats(iterations, elapsed)
        print("")
def process(metric, datapoint):
    """Apply rewrite rules, feed matching aggregation buffers, forward
    unclaimed metrics, and log when nothing matched at all."""
    increment('datapointsReceived')

    for pre_rule in RewriteRuleManager.preRules:
        metric = pre_rule.apply(metric)

    matched = []
    for rule in RuleManager.rules:
        target = rule.get_aggregate_metric(metric)
        if target is None:
            continue
        matched.append(target)
        buf = BufferManager.get_buffer(target)
        if not buf.configured:
            buf.configure_aggregation(rule.frequency, rule.aggregation_func)
        buf.input(datapoint)

    for post_rule in RewriteRuleManager.postRules:
        metric = post_rule.apply(metric)

    if metric not in matched:
        events.metricGenerated(metric, datapoint)

    if len(matched) == 0:
        log.msg("Couldn't match metric %s with any aggregation rule. Passing on un-aggregated." % metric)
def process(metric, datapoint):
    """Apply rewrite rules, feed matching aggregation buffers, forward
    unclaimed metrics, and log when nothing matched at all."""
    increment('datapointsReceived')

    for pre_rule in RewriteRuleManager.preRules:
        metric = pre_rule.apply(metric)

    matched = []
    for rule in RuleManager.rules:
        target = rule.get_aggregate_metric(metric)
        if target is None:
            continue
        matched.append(target)
        buf = BufferManager.get_buffer(target)
        if not buf.configured:
            buf.configure_aggregation(rule.frequency, rule.aggregation_func)
        buf.input(datapoint)

    for post_rule in RewriteRuleManager.postRules:
        metric = post_rule.apply(metric)

    if metric not in matched:
        events.metricGenerated(metric, datapoint)

    if len(matched) == 0:
        log.msg(
            "Couldn't match metric %s with any aggregation rule. Passing on un-aggregated." % metric)
def test_new_buffer_configured(self):
    """A buffer created for a freshly matched metric inherits the rule's settings."""
    RuleManager.rules = [self.sample_aggregation_rule]
    # Drain the generator so processing actually runs.
    list(self.processor.process('carbon.foo', (0, 0)))
    buf = BufferManager.get_buffer('carbon.foo.sum')
    self.assertTrue(buf.configured)
    self.assertEqual(1, buf.aggregation_frequency)
    self.assertEqual(sum, buf.aggregation_func)
def test_new_buffer_configured(self):
    """Processing a matching metric configures its aggregation buffer."""
    RuleManager.rules = [self.sample_aggregation_rule]
    # list() drains the generator so the datapoint is actually processed.
    list(self.processor.process('carbon.foo', (0, 0)))
    configured_buffer = BufferManager.get_buffer('carbon.foo.sum')
    self.assertTrue(configured_buffer.configured)
    self.assertEqual(1, configured_buffer.aggregation_frequency)
    self.assertEqual(sum, configured_buffer.aggregation_func)
def process(metric, datapoint):
    """Apply rewrite rules, feed every matching aggregation buffer, and
    always emit a metricGenerated event for the (rewritten) metric."""
    increment('datapointsReceived')

    for pre_rule in RewriteRuleManager.preRules:
        metric = pre_rule.apply(metric)

    for rule in RuleManager.rules:
        target = rule.get_aggregate_metric(metric)
        if target is None:
            continue
        buf = BufferManager.get_buffer(target)
        if not buf.configured:
            buf.configure_aggregation(rule.frequency, rule.aggregation_func)
        buf.input(datapoint)

    for post_rule in RewriteRuleManager.postRules:
        metric = post_rule.apply(metric)

    events.metricGenerated(metric, datapoint)
def test_clear_closes_buffers(self):
    """Clearing the manager closes each buffer it was holding."""
    buf = BufferManager.get_buffer("carbon.foo")
    BufferManager.clear()
    buf.close.assert_called_once_with()
def test_get_nonexistent_buffer_creates_new(self, metric_buffer_mock):
    """Requesting an unknown metric constructs a buffer for it."""
    metric_name = "carbon.foo"
    BufferManager.get_buffer(metric_name)
    metric_buffer_mock.assert_called_once_with(metric_name)
def test_clear_closes_buffers(self):
    """BufferManager.clear() must close outstanding buffers."""
    tracked_buffer = BufferManager.get_buffer("carbon.foo")
    BufferManager.clear()
    tracked_buffer.close.assert_called_once_with()
def test_get_nonexistent_buffer_creates_and_saves_it(self):
    """A buffer created on first request is cached and returned on the second."""
    first = BufferManager.get_buffer("carbon.foo")
    second = BufferManager.get_buffer("carbon.foo")
    self.assertTrue(first is second)
def test_buffer_receives_value(self):
    """A processed datapoint lands in the aggregate metric's interval buffer."""
    RuleManager.rules = [self.sample_aggregation_rule]
    # Drain the generator so processing actually runs.
    list(self.processor.process('carbon.foo', (0, 0)))
    buf = BufferManager.get_buffer('carbon.foo.sum')
    self.assertEqual([0], buf.interval_buffers[0].values)
def test_get_nonexistent_buffer_creates_new(self, metric_buffer_mock):
    """An unseen metric name triggers construction of a fresh buffer."""
    BufferManager.get_buffer("carbon.foo")
    # The patched MetricBuffer class must have been instantiated exactly once.
    metric_buffer_mock.assert_called_once_with("carbon.foo")
def test_buffer_receives_value(self):
    """The datapoint's value is recorded in the first interval buffer."""
    RuleManager.rules = [self.sample_aggregation_rule]
    # list() drains the generator so the datapoint is actually processed.
    list(self.processor.process('carbon.foo', (0, 0)))
    values_buffer = BufferManager.get_buffer('carbon.foo.sum')
    first_interval = values_buffer.interval_buffers[0]
    self.assertEqual([0], first_interval.values)
def test_get_nonexistent_buffer_creates_and_saves_it(self):
    """Two lookups of the same metric name yield the very same buffer object."""
    created = BufferManager.get_buffer("carbon.foo")
    fetched = BufferManager.get_buffer("carbon.foo")
    self.assertTrue(fetched is created)