示例#1
0
    def read_rules(self):
        """Reload aggregation rules from ``self.rules_file`` when it changed.

        If the file no longer exists, all rules are cleared. Otherwise the
        file is re-read only when its mtime is newer than the last successful
        read; blank lines and '#'-comment lines are skipped, each remaining
        line is parsed via ``self.parse_definition``, and the aggregation
        buffers are reset before the new rules take effect.
        """
        if not exists(self.rules_file):
            self.clear()
            return

        # Only read if the rules file has been modified
        try:
            mtime = getmtime(self.rules_file)
        except OSError:
            log.err("Failed to get mtime of %s" % self.rules_file)
            return
        if mtime <= self.rules_last_read:
            return

        # Read new rules
        log.aggregator("reading new aggregation rules from %s" %
                       self.rules_file)
        new_rules = []
        # Use a context manager so the file handle is always closed
        # (the bare open() here previously leaked the handle).
        with open(self.rules_file) as rules_fh:
            for line in rules_fh:
                line = line.strip()
                if line.startswith('#') or not line:
                    continue
                new_rules.append(self.parse_definition(line))

        log.aggregator("clearing aggregation buffers")
        BufferManager.clear()
        self.rules = new_rules
        self.rules_last_read = mtime
示例#2
0
File: rules.py  Project: NixM0nk3y/carbon
  def read_rules(self):
    """Reload aggregation rules from ``self.rules_file`` when it changed.

    If the file no longer exists, all rules are cleared. Otherwise the file
    is re-read only when its mtime is newer than the last successful read;
    blank lines and '#'-comment lines are skipped, each remaining line is
    parsed via ``self.parse_definition``, and the aggregation buffers are
    reset before the new rules take effect.
    """
    if not exists(self.rules_file):
      self.clear()
      return

    # Only read if the rules file has been modified
    try:
      mtime = getmtime(self.rules_file)
    except OSError:
      log.err("Failed to get mtime of %s" % self.rules_file)
      return
    if mtime <= self.rules_last_read:
      return

    # Read new rules
    log.aggregator("reading new aggregation rules from %s" % self.rules_file)
    new_rules = []
    # Use a context manager so the file handle is always closed
    # (the bare open() here previously leaked the handle).
    with open(self.rules_file) as rules_fh:
      for line in rules_fh:
        line = line.strip()
        if line.startswith('#') or not line:
          continue
        new_rules.append(self.parse_definition(line))

    log.aggregator("clearing aggregation buffers")
    BufferManager.clear()
    self.rules = new_rules
    self.rules_last_read = mtime
示例#3
0
File: receiver.py  Project: zhpn/carbon
def process(metric, datapoint):
  """Route one datapoint through rewrite rules and aggregation buffers.

  The metric name is rewritten by the pre-rules, fed into aggregation
  buffers according to AGGREGATOR_RULE_METHOD ("rules" or "sumall"),
  rewritten by the post-rules, and finally forwarded via
  events.metricGenerated when FORWARD_ALL is set and the (rewritten)
  name is not itself one of the aggregate names just produced.
  """
  increment('datapointsReceived')

  for pre_rule in RewriteRuleManager.preRules:
    metric = pre_rule.apply(metric)

  aggregate_metrics = []

  if settings.AGGREGATOR_RULE_METHOD == "rules":
    for rule in RuleManager.rules:
      aggregate_metric = rule.get_aggregate_metric(metric)
      if aggregate_metric is None:
        continue
      aggregate_metrics.append(aggregate_metric)

      buf = BufferManager.get_buffer(aggregate_metric)
      # A freshly created buffer is configured lazily on first use.
      if not buf.configured:
        buf.configure_aggregation(rule.frequency, rule.aggregation_func)
      buf.input(datapoint)

  # Custom rule: sum every ".sum." metric across hosts.
  elif settings.AGGREGATOR_RULE_METHOD == "sumall":
    sum_pos = metric.find(".sum.")
    namespace = metric[:metric.find(".")]
    interval_overrides = settings["aggregation-sumall-rules"]
    if sum_pos != -1:
      aggregate_metric = metric[:sum_pos] + ".sum_all.hosts"
      aggregate_metrics.append(aggregate_metric)
      buf = BufferManager.get_buffer(aggregate_metric)
      # Default interval is 60s unless the namespace has an override.
      if namespace in interval_overrides:
        interval = int(interval_overrides[namespace])
      else:
        interval = 60
      if not buf.configured:
        buf.configure_aggregation(interval, sum)
      buf.input(datapoint)

  for post_rule in RewriteRuleManager.postRules:
    metric = post_rule.apply(metric)

  if settings['FORWARD_ALL'] and metric not in aggregate_metrics:
    events.metricGenerated(metric, datapoint)
示例#4
0
def process(metric, datapoint):
  """Apply rewrite rules, feed every matching aggregation buffer, and
  pass the datapoint through unchanged when the (rewritten) metric name
  is not itself one of the aggregate names just produced."""
  increment('datapointsReceived')

  for pre_rule in RewriteRuleManager.preRules:
    metric = pre_rule.apply(metric)

  aggregate_metrics = []
  for rule in RuleManager.rules:
    aggregate_metric = rule.get_aggregate_metric(metric)
    if aggregate_metric is None:
      continue
    aggregate_metrics.append(aggregate_metric)

    buf = BufferManager.get_buffer(aggregate_metric)
    # A freshly created buffer is configured lazily on first use.
    if not buf.configured:
      buf.configure_aggregation(rule.frequency, rule.aggregation_func)
    buf.input(datapoint)

  for post_rule in RewriteRuleManager.postRules:
    metric = post_rule.apply(metric)

  if metric not in aggregate_metrics:
    events.metricGenerated(metric, datapoint)
示例#5
0
def _bench_aggregator(name):
    """Benchmark AggregationProcessor.process for a range of call counts.

    For each n, a fresh processor is primed with one datapoint and then
    driven n times by timeit; every FREQUENCY-th call also invokes
    buf.compute_values() on the shared aggregate buffer. Per-n timing is
    reported via print_stats.
    """
    print("== %s ==" % name)
    max_intervals = settings['MAX_AGGREGATION_INTERVALS']
    # Base timestamp: max_intervals aggregation periods in the past.
    now = time.time() - (max_intervals * FREQUENCY)

    buf = None
    for n in [1, 1000, 10000, 100000, 1000000, 10000000]:
        processor = AggregationProcessor()
        processor.process(METRIC, (now, 1))

        def _process():
            # _process.i counts calls within the current timeit run; the
            # closure reads the enclosing `buf`, created below on first pass.
            processor.process(METRIC, (now + _process.i, 1))
            if (_process.i % FREQUENCY) == 0 and buf is not None:
                buf.compute_values()
            _process.i += 1

        _process.i = 0

        # The aggregate buffer is created once and reused for every n.
        if buf is None:
            buf = BufferManager.get_buffer(METRIC_AGGR, 1, None)

        t = timeit.timeit(_process, number=n)
        buf.close()
        print_stats(n, t)
    print("")
示例#6
0
def process(metric, datapoint):
  """Apply rewrite rules, feed every matching aggregation buffer, pass
  unmatched metrics through unchanged, and log when no aggregation rule
  matched at all."""
  increment('datapointsReceived')

  for pre_rule in RewriteRuleManager.preRules:
    metric = pre_rule.apply(metric)

  aggregate_metrics = []
  for rule in RuleManager.rules:
    aggregate_metric = rule.get_aggregate_metric(metric)
    if aggregate_metric is None:
      continue
    aggregate_metrics.append(aggregate_metric)

    buf = BufferManager.get_buffer(aggregate_metric)
    # A freshly created buffer is configured lazily on first use.
    if not buf.configured:
      buf.configure_aggregation(rule.frequency, rule.aggregation_func)
    buf.input(datapoint)

  for post_rule in RewriteRuleManager.postRules:
    metric = post_rule.apply(metric)

  if metric not in aggregate_metrics:
    events.metricGenerated(metric, datapoint)

  if not aggregate_metrics:
    log.msg("Couldn't match metric %s with any aggregation rule. Passing on un-aggregated." % metric)
示例#7
0
  def process(self, metric, datapoint):
    """Feed the datapoint into every matching aggregation buffer and,
    when FORWARD_ALL is on, yield unmatched metrics unchanged (logging
    the miss if LOG_AGGREGATOR_MISSES is set and no rule matched)."""
    increment('datapointsReceived')

    aggregate_metrics = set()
    for rule in RuleManager.rules:
      aggregate_metric = rule.get_aggregate_metric(metric)
      if aggregate_metric is None:
        continue
      aggregate_metrics.add(aggregate_metric)

      buf = BufferManager.get_buffer(aggregate_metric)
      # A freshly created buffer is configured lazily on first use.
      if not buf.configured:
        buf.configure_aggregation(rule.frequency, rule.aggregation_func)
      buf.input(datapoint)

    if settings.FORWARD_ALL and metric not in aggregate_metrics:
      if settings.LOG_AGGREGATOR_MISSES and not aggregate_metrics:
        log.msg(
          "Couldn't match metric %s with any aggregation rule. Passing on un-aggregated." % metric)
      yield (metric, datapoint)
示例#8
0
  def process(self, metric, datapoint):
    """Rewrite the metric (PRE rules), update every matching aggregation
    buffer, rewrite again (POST rules), and yield the metric when it is
    not itself one of the aggregate names just produced."""
    increment('datapointsReceived')

    for pre_rule in RewriteRuleManager.rules(PRE):
      metric = pre_rule.apply(metric)

    aggregate_metrics = set()
    for rule in RuleManager.rules:
      aggregate_metric = rule.get_aggregate_metric(metric)
      if aggregate_metric is None:
        continue
      aggregate_metrics.add(aggregate_metric)

      buf = BufferManager.get_buffer(aggregate_metric)
      # A freshly created buffer is configured lazily on first use.
      if not buf.configured:
        buf.configure_aggregation(rule.frequency, rule.aggregation_func)
      buf.input(datapoint)

    for post_rule in RewriteRuleManager.rules(POST):
      metric = post_rule.apply(metric)

    if metric not in aggregate_metrics:
      yield (metric, datapoint)
示例#9
0
def process(metric, datapoint):
    """Apply rewrite rules, feed every matching aggregation buffer, and
    pass the datapoint through unchanged when the (rewritten) metric name
    is not itself one of the aggregate names just produced."""
    increment('datapointsReceived')

    for pre_rule in RewriteRuleManager.preRules:
        metric = pre_rule.apply(metric)

    aggregate_metrics = []
    for rule in RuleManager.rules:
        aggregate_metric = rule.get_aggregate_metric(metric)
        if aggregate_metric is None:
            continue
        aggregate_metrics.append(aggregate_metric)

        buf = BufferManager.get_buffer(aggregate_metric)
        # A freshly created buffer is configured lazily on first use.
        if not buf.configured:
            buf.configure_aggregation(rule.frequency, rule.aggregation_func)
        buf.input(datapoint)

    for post_rule in RewriteRuleManager.postRules:
        metric = post_rule.apply(metric)

    if metric not in aggregate_metrics:
        events.metricGenerated(metric, datapoint)
示例#10
0
def process(metric, datapoint):
    """Apply rewrite rules, feed every matching aggregation buffer, pass
    unmatched metrics through unchanged, and (when LOG_AGGREGATOR_MISSES
    is set) log metrics that matched no aggregation rule at all."""
    increment('datapointsReceived')

    for pre_rule in RewriteRuleManager.preRules:
        metric = pre_rule.apply(metric)

    aggregate_metrics = []
    for rule in RuleManager.rules:
        aggregate_metric = rule.get_aggregate_metric(metric)
        if aggregate_metric is None:
            continue
        aggregate_metrics.append(aggregate_metric)

        buf = BufferManager.get_buffer(aggregate_metric)
        # A freshly created buffer is configured lazily on first use.
        if not buf.configured:
            buf.configure_aggregation(rule.frequency, rule.aggregation_func)
        buf.input(datapoint)

    for post_rule in RewriteRuleManager.postRules:
        metric = post_rule.apply(metric)

    if metric not in aggregate_metrics:
        events.metricGenerated(metric, datapoint)

    if settings.LOG_AGGREGATOR_MISSES and not aggregate_metrics:
        log.msg(
            "Couldn't match metric %s with any aggregation rule. Passing on un-aggregated."
            % metric)
示例#11
0
    def process(self, metric, datapoint):
        """Rewrite the metric (PRE rules), update every matching
        aggregation buffer, rewrite again (POST rules), and yield the
        metric when it is not one of the aggregate names just produced."""
        increment('datapointsReceived')

        for pre_rule in RewriteRuleManager.rules(PRE):
            metric = pre_rule.apply(metric)

        aggregate_metrics = set()
        for rule in RuleManager.rules:
            aggregate_metric = rule.get_aggregate_metric(metric)
            if aggregate_metric is None:
                continue
            aggregate_metrics.add(aggregate_metric)

            buf = BufferManager.get_buffer(aggregate_metric)
            # A freshly created buffer is configured lazily on first use.
            if not buf.configured:
                buf.configure_aggregation(rule.frequency, rule.aggregation_func)
            buf.input(datapoint)

        for post_rule in RewriteRuleManager.rules(POST):
            metric = post_rule.apply(metric)

        if metric not in aggregate_metrics:
            yield (metric, datapoint)
示例#12
0
def _bench_aggregator(name):
    print "== %s ==" % name
    max_intervals = settings['MAX_AGGREGATION_INTERVALS']
    now = time.time() - (max_intervals * FREQUENCY)

    buf = None
    for n in [1, 1000, 10000, 100000, 1000000, 10000000]:
        count = 0
        processor = AggregationProcessor()
        processor.process(METRIC, (now, 1))

        def _process():
            processor.process(METRIC, (now + _process.i, 1))
            if (_process.i % FREQUENCY) == 0 and buf is not None:
                buf.compute_values()
            _process.i += 1
        _process.i = 0

        if buf is None:
            buf = BufferManager.get_buffer(METRIC_AGGR, 1, None)

        t = timeit.timeit(_process, number=n)
        buf.close()
        print_stats(n, t)
    print ""
示例#13
0
    def process(self, metric, datapoint):
        """Rewrite the metric (PRE rules), update every matching
        aggregation buffer, rewrite again (POST rules), then yield the
        metric when it is not one of the aggregate names just produced,
        logging the miss if LOG_AGGREGATOR_MISSES is set and no rule
        matched at all."""
        increment('datapointsReceived')

        for pre_rule in RewriteRuleManager.rules(PRE):
            metric = pre_rule.apply(metric)

        aggregate_metrics = set()
        for rule in RuleManager.rules:
            aggregate_metric = rule.get_aggregate_metric(metric)
            if aggregate_metric is None:
                continue
            aggregate_metrics.add(aggregate_metric)

            buf = BufferManager.get_buffer(aggregate_metric)
            # A freshly created buffer is configured lazily on first use.
            if not buf.configured:
                buf.configure_aggregation(rule.frequency,
                                          rule.aggregation_func)
            buf.input(datapoint)

        for post_rule in RewriteRuleManager.rules(POST):
            metric = post_rule.apply(metric)

        if metric not in aggregate_metrics:
            if settings.LOG_AGGREGATOR_MISSES and not aggregate_metrics:
                log.msg(
                    "Couldn't match metric %s with any aggregation rule. Passing on un-aggregated."
                    % metric)
            yield (metric, datapoint)
示例#14
0
    def test_new_buffer_configured(self):
        """A buffer created during processing adopts the matching rule's
        frequency and aggregation function."""
        RuleManager.rules = [self.sample_aggregation_rule]
        list(self.processor.process('carbon.foo', (0, 0)))

        buf = BufferManager.get_buffer('carbon.foo.sum')
        self.assertTrue(buf.configured)
        self.assertEqual(1, buf.aggregation_frequency)
        self.assertEqual(sum, buf.aggregation_func)
  def test_new_buffer_configured(self):
    """A buffer created during processing adopts the matching rule's
    frequency and aggregation function."""
    RuleManager.rules = [self.sample_aggregation_rule]
    list(self.processor.process('carbon.foo', (0, 0)))

    buf = BufferManager.get_buffer('carbon.foo.sum')
    self.assertTrue(buf.configured)
    self.assertEqual(1, buf.aggregation_frequency)
    self.assertEqual(sum, buf.aggregation_func)
示例#16
0
 def tearDown(self):
     """Reset shared aggregator state (stats, buffers, rules) between tests."""
     instrumentation.stats.clear()
     BufferManager.clear()
     RuleManager.clear()
 def test_get_nonexistent_buffer_creates_and_saves_it(self):
   """get_buffer caches: a second lookup returns the very same object."""
   new_buffer = BufferManager.get_buffer("carbon.foo")
   existing_buffer = BufferManager.get_buffer("carbon.foo")
   self.assertTrue(new_buffer is existing_buffer)
 def test_clear_closes_buffers(self):
   """BufferManager.clear() closes each buffer it evicts.

   Relies on get_buffer returning a mock here — presumably patched in
   setUp, outside this view; verify against the test class.
   """
   metric_buffer_mock = BufferManager.get_buffer("carbon.foo")
   BufferManager.clear()
   metric_buffer_mock.close.assert_called_once_with()
  def test_buffer_receives_value(self):
    """Processing a datapoint lands its value in the aggregate buffer's
    first interval buffer."""
    RuleManager.rules = [self.sample_aggregation_rule]
    list(self.processor.process('carbon.foo', (0, 0)))

    buf = BufferManager.get_buffer('carbon.foo.sum')
    self.assertEqual([0], buf.interval_buffers[0].values)
示例#20
0
 def test_get_nonexistent_buffer_creates_new(self, metric_buffer_mock):
     """First lookup of an unknown metric constructs a buffer for it.

     metric_buffer_mock is injected by a patch decorator outside this view.
     """
     BufferManager.get_buffer("carbon.foo")
     metric_buffer_mock.assert_called_once_with("carbon.foo")
 def tearDown(self):
   """Reset shared aggregator state (stats, buffers, rules) between tests."""
   instrumentation.stats.clear()
   BufferManager.clear()
   RuleManager.clear()
 def test_get_nonexistent_buffer_creates_new(self, metric_buffer_mock):
   """First lookup of an unknown metric constructs a buffer for it.

   metric_buffer_mock is injected by a patch decorator outside this view.
   """
   BufferManager.get_buffer("carbon.foo")
   metric_buffer_mock.assert_called_once_with("carbon.foo")
示例#23
0
 def test_clear_closes_buffers(self):
     """BufferManager.clear() closes each buffer it evicts.

     Relies on get_buffer returning a mock here — presumably patched in
     setUp, outside this view; verify against the test class.
     """
     metric_buffer_mock = BufferManager.get_buffer("carbon.foo")
     BufferManager.clear()
     metric_buffer_mock.close.assert_called_once_with()
示例#24
0
 def test_get_nonexistent_buffer_creates_and_saves_it(self):
     """get_buffer caches: a second lookup returns the very same object."""
     new_buffer = BufferManager.get_buffer("carbon.foo")
     existing_buffer = BufferManager.get_buffer("carbon.foo")
     self.assertTrue(new_buffer is existing_buffer)
示例#25
0
    def test_buffer_receives_value(self):
        """Processing a datapoint lands its value in the aggregate
        buffer's first interval buffer."""
        RuleManager.rules = [self.sample_aggregation_rule]
        list(self.processor.process('carbon.foo', (0, 0)))

        buf = BufferManager.get_buffer('carbon.foo.sum')
        self.assertEqual([0], buf.interval_buffers[0].values)
 def tearDown(self):
   """Release all aggregation buffers after each test."""
   BufferManager.clear()
示例#27
0
 def tearDown(self):
     """Release all aggregation buffers after each test."""
     BufferManager.clear()