Example #1
def optimalWriteOrder():
  "Generates metrics with the most cached values first and applies a soft rate limit on new metrics"
  global lastCreateInterval
  global createCount
  metrics = [ (metric, len(datapoints)) for metric,datapoints in MetricCache.items() ]

  t = time.time()
  metrics.sort(key=lambda item: item[1], reverse=True) # by queue size, descending
  log.msg("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  for metric, queueSize in metrics:
    dbFilePath = getFilesystemPath(metric)
    dbFileExists = exists(dbFilePath)

    if not dbFileExists:
      createCount += 1
      now = time.time()

      if now - lastCreateInterval >= 60:
        lastCreateInterval = now
        createCount = 1

      elif createCount >= settings.MAX_CREATES_PER_MINUTE:
        continue

    try: # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue # we simply move on to the next metric when this race condition occurs

    yield (metric, datapoints, dbFilePath, dbFileExists)
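For context, a minimal sketch of how such a generator is typically consumed by a writer loop; createWhisperFile and writeWhisperFile are hypothetical helpers here (with the same signatures as those used in the _flush example below), not part of the example above:

def writeCachedDataPoints():
  # Illustrative consumer: create missing files first, then flush the cached points.
  for metric, datapoints, dbFilePath, dbFileExists in optimalWriteOrder():
    if not createWhisperFile(metric, dbFilePath, dbFileExists):
      continue
    writeWhisperFile(dbFilePath, datapoints)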
Example #2
def _flush(prefix=None):
    """ Write/create whisped files at maximal speed """
    assert(prefix==None or hasattr(prefix, 'startswith'))
    log.msg("flush started (prefix: %s)" % prefix)
    started = time.time()
    metrics = MetricCache.counts()
    updates = 0
    write_lock.acquire()
    try:
        for metric, queueSize in metrics:
            if prefix and not metric.startswith(prefix):
                continue
            dbFilePath = getFilesystemPath(metric)
            dbFileExists = exists(dbFilePath)
            try:
                datapoints = MetricCache.pop(metric)
            except KeyError:
                continue
            if not createWhisperFile(metric, dbFilePath, dbFileExists):
                continue
            if not writeWhisperFile(dbFilePath, datapoints):
                continue
            updates += 1
    finally:
        write_lock.release()
    log.msg('flush finished (updates: %d, time: %.5f sec)' % (updates, time.time()-started))
    return updates
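A short usage sketch of _flush; the prefix value is hypothetical, and only metrics whose names start with it are written:

flushed = _flush(prefix='stats.')  # write out only the 'stats.' subtree of the namespace
log.msg('flushed %d metrics' % flushed)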
Example #3
def optimalWriteOrder():
  "Generates metrics with the most cached values first and applies a soft rate limit on new metrics"
  global lastCreateInterval
  global createCount
  metrics = MetricCache.counts()

  t = time.time()
  #metrics.sort(key=lambda item: item[1], reverse=True) # by queue size, descending
  log.msg("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
    events.cacheSpaceAvailable()

  for metric, queueSize in metrics:
    # Handle special characters in metric names
    metric_sanit = list(metric)
    for i in range(len(metric_sanit)):
      if metric_sanit[i] < '\x20' or metric_sanit[i] > '\x7e':
        metric_sanit[i] = '_'
    metric_sanit = "".join(metric_sanit)

    try:
      dbFilePath = getFilesystemPath(metric_sanit)
      dbFileExists = exists(dbFilePath)
    except Exception:
      log.err()
      log.msg("failed to resolve dbFilePath for metric %s" % metric_sanit)
      continue

    if not dbFileExists:
      createCount += 1
      now = time.time()

      if now - lastCreateInterval >= 60:
        lastCreateInterval = now
        createCount = 1

      elif createCount >= settings.MAX_CREATES_PER_MINUTE:
        # dropping queued up datapoints for new metrics prevents filling up the entire cache
        # when a bunch of new metrics are received.
        try:
          MetricCache.pop(metric)
        except KeyError:
          pass

        continue

    try: # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue # we simply move on to the next metric when this race condition occurs

    yield (metric_sanit, datapoints, dbFilePath, dbFileExists)
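The inline loop above maps every character outside the printable ASCII range (0x20-0x7e) to an underscore. The same idea as a small standalone helper, shown only as an illustrative sketch:

import re

_NON_PRINTABLE = re.compile(r'[^\x20-\x7e]')

def sanitize_metric_name(metric):
  # Replace any character outside printable ASCII with '_',
  # mirroring the character-by-character loop in the example above.
  return _NON_PRINTABLE.sub('_', metric)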
Example #4
def setMetadata(metric, key, value):
  if key != 'aggregationMethod':
    return dict(error="Unsupported metadata key \"%s\"" % key)

  wsp_path = getFilesystemPath(metric)
  try:
    old_value = whisper.setAggregationMethod(wsp_path, value)
    return dict(old_value=old_value, new_value=value)
  except Exception:
    log.err()
    return dict(error=traceback.format_exc())
Example #5
def getMetadata(metric, key):
  if key != 'aggregationMethod':
    return dict(error="Unsupported metadata key \"%s\"" % key)

  wsp_path = getFilesystemPath(metric)
  try:
    value = whisper.info(wsp_path)['aggregationMethod']
    return dict(value=value)
  except Exception:
    log.err()
    return dict(error=traceback.format_exc())
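A brief usage sketch of the two helpers above; the metric name and aggregation method are hypothetical:

result = setMetadata('stats.example.counts', 'aggregationMethod', 'max')
if 'error' in result:
  print(result['error'])
else:
  print('aggregationMethod changed: %s -> %s' % (result['old_value'], result['new_value']))

print(getMetadata('stats.example.counts', 'aggregationMethod'))  # e.g. {'value': 'max'}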
Example #6
def setMetadata(metric, key, value):
    if key != 'aggregationMethod':
        return dict(error="Unsupported metadata key \"%s\"" % key)

    wsp_path = getFilesystemPath(metric)
    try:
        old_value = whisper.setAggregationMethod(wsp_path, value)
        return dict(old_value=old_value, new_value=value)
    except Exception:
        log.err()
        return dict(error=traceback.format_exc())
Example #7
def getMetadata(metric, key):
    if key != 'aggregationMethod':
        return dict(error="Unsupported metadata key \"%s\"" % key)

    wsp_path = getFilesystemPath(metric)
    try:
        value = whisper.info(wsp_path)['aggregationMethod']
        return dict(value=value)
    except Exception:
        log.err()
        return dict(error=traceback.format_exc())
Example #8
def optimalWriteOrder():
    """Generates metrics with the most cached values first and applies a soft
  rate limit on new metrics"""
    global lastCreateInterval
    global createCount
    metrics = MetricCache.counts()

    t = time.time()
    metrics.sort(key=lambda item: item[1],
                 reverse=True)  # by queue size, descending
    log.debug("Sorted %d cache queues in %.6f seconds" %
              (len(metrics), time.time() - t))

    for metric, queueSize in metrics:
        if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
            events.cacheSpaceAvailable()

        dbFilePath = getFilesystemPath(metric)
        dbFileExists = exists(dbFilePath)

        if not dbFileExists:
            createCount += 1
            now = time.time()

            if now - lastCreateInterval >= 60:
                lastCreateInterval = now
                createCount = 1

            elif createCount >= settings.MAX_CREATES_PER_MINUTE:
                # dropping queued up datapoints for new metrics prevents filling up the entire cache
                # when a bunch of new metrics are received.
                try:
                    MetricCache.pop(metric)
                except KeyError:
                    pass

                continue

        try:  # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
            datapoints = MetricCache.pop(metric)
        except KeyError:
            log.msg("MetricCache contention, skipping %s update for now" %
                    metric)
            continue  # we simply move on to the next metric when this race condition occurs

        yield (metric, datapoints, dbFilePath, dbFileExists)
Example #9
def optimalWriteOrder():
  """Generates metrics with the most cached values first and applies a soft
  rate limit on new metrics"""
  while MetricCache:
    (metric, datapoints) = MetricCache.drain_metric()
    dbFilePath = getFilesystemPath(metric)
    dbFileExists = state.database.exists(metric)

    if not dbFileExists and CREATE_BUCKET:
      # If our token bucket has enough tokens available to create a new metric
      # file then yield the metric data to complete that operation. Otherwise
      # we'll just drop the metric on the ground and move on to the next
      # metric.
      # XXX This behavior should probably be configurable to not drop metrics
      # when rate limiting unless our cache is too big or some other legit
      # reason.
      if CREATE_BUCKET.drain(1):
        yield (metric, datapoints, dbFilePath, dbFileExists)
      continue

    yield (metric, datapoints, dbFilePath, dbFileExists)
Example #10
def optimalWriteOrder():
    """Generates metrics with the most cached values first and applies a soft
  rate limit on new metrics"""
    while MetricCache:
        (metric, datapoints) = MetricCache.drain_metric()
        dbFilePath = getFilesystemPath(metric)
        dbFileExists = state.database.exists(metric)

        if not dbFileExists and CREATE_BUCKET:
            # If our token bucket has enough tokens available to create a new metric
            # file then yield the metric data to complete that operation. Otherwise
            # we'll just drop the metric on the ground and move on to the next
            # metric.
            # XXX This behavior should probably be configurable to not drop metrics
            # when rate limiting unless our cache is too big or some other legit
            # reason.
            if CREATE_BUCKET.drain(1):
                yield (metric, datapoints, dbFilePath, dbFileExists)
            continue

        yield (metric, datapoints, dbFilePath, dbFileExists)
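The two examples above gate file creation on a CREATE_BUCKET token bucket whose drain(1) call succeeds only when a creation token is available. Carbon ships its own implementation; the following is only a minimal illustrative sketch of the drain() semantics, with all names assumed:

import time

class SimpleTokenBucket(object):
    """Illustrative stand-in for CREATE_BUCKET; only the drain() behavior matters here."""

    def __init__(self, capacity, fill_rate):
        self.capacity = float(capacity)    # maximum number of creation tokens
        self.fill_rate = float(fill_rate)  # tokens added per second
        self._tokens = float(capacity)
        self._last = time.time()

    def drain(self, cost):
        # Refill proportionally to elapsed time, then try to take `cost` tokens.
        now = time.time()
        self._tokens = min(self.capacity,
                           self._tokens + (now - self._last) * self.fill_rate)
        self._last = now
        if self._tokens >= cost:
            self._tokens -= cost
            return True
        return False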
Example #11
 def fetch(cls, path, start, end):
   wsp_path = getFilesystemPath(path)
   return whisper.fetch(wsp_path, start, end)
Example #12
 def test_getFilesystemPath(self):
     from carbon.storage import getFilesystemPath
     result = getFilesystemPath('stats.example.counts')
     self.assertEquals(result, '/tmp/stats/example/counts.wsp')
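The test above implies how getFilesystemPath maps a dotted metric name onto a whisper file path; a hypothetical equivalent, assuming the storage directory is configurable:

import os

def metric_to_wsp_path(metric, data_dir='/tmp'):
  # 'stats.example.counts' with data_dir='/tmp' -> '/tmp/stats/example/counts.wsp'
  return os.path.join(data_dir, *metric.split('.')) + '.wsp'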
Example #13
 def create(cls, path, archiveList, xFilesFactor=None, aggregationMethod=None):
   wsp_path = getFilesystemPath(path)
   return whisper.create(wsp_path, archiveList, xFilesFactor, aggregationMethod)
Example #14
 def test_getFilesystemPath(self):
     from carbon.storage import getFilesystemPath
     result = getFilesystemPath('stats.example.counts')
     self.assertEquals(result, '/tmp/stats/example/counts.wsp')
Example #15
 def delete(cls, path):
   wsp_path = getFilesystemPath(path)
   return os.remove(wsp_path)
Example #16
 def info(cls, path):
   wsp_path = getFilesystemPath(path)
   return whisper.info(wsp_path)
 def get_dbinfo(self, metric):
   dbFilePath = getFilesystemPath(metric)
   return (dbFilePath, exists(dbFilePath))
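A short usage sketch tying the wrapper methods above together; db stands for the (unshown) class that holds them, and the metric name and archive layout are hypothetical:

import time

archives = [(60, 1440)]                       # 60-second points, one day of history
db.create('stats.example.counts', archives)   # creates the .wsp file under the data dir
print(db.info('stats.example.counts'))        # whisper header of the new file
print(db.fetch('stats.example.counts', time.time() - 3600, time.time()))
db.delete('stats.example.counts')             # removes the .wsp file again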