Example #1
def fetchDataLocal(requestContext, pathExpr):
  seriesList = []
  startTime = requestContext['startTime']
  endTime = requestContext['endTime']

  if requestContext['localOnly']:
    store = LOCAL_STORE
  else:
    store = STORE

  for dbFile in store.find(pathExpr):
    log.metric_access(dbFile.metric_path)
    dbResults = dbFile.fetch( timestamp(startTime), timestamp(endTime) )
    try:
      cachedResults = CarbonLink.query(dbFile.real_metric)
      results = mergeResults(dbResults, cachedResults)
    except:
      log.exception()
      results = dbResults

    if not results:
      continue

    (timeInfo,values) = results
    (start,end,step) = timeInfo
    series = TimeSeries(dbFile.metric_path, start, end, step, values)
    series.pathExpression = pathExpr #hack to pass expressions through to render functions
    seriesList.append(series)

  return seriesList
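
The requestContext these functions receive is a plain dict; only the startTime, endTime and localOnly keys are read here. A minimal caller sketch, assuming naive datetimes and a made-up metric pattern (real request contexts in graphite-web carry more keys):

from datetime import datetime, timedelta

requestContext = {
    'startTime': datetime.now() - timedelta(hours=24),  # naive datetime, converted via timestamp() above
    'endTime': datetime.now(),
    'localOnly': True,  # restrict the lookup to LOCAL_STORE
}

# 'servers.web*.cpu.load' is a hypothetical path expression, used only for illustration
for series in fetchDataLocal(requestContext, 'servers.web*.cpu.load'):
    print(series.name, series.start, series.end, series.step)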
Example #2
def fetchData(requestContext, pathExpr):
    if pathExpr.lower().startswith('graphite.'):
        pathExpr = pathExpr[9:]

    seriesList = []
    startTime = requestContext['startTime']
    endTime = requestContext['endTime']

    if requestContext['localOnly']:
        store = LOCAL_STORE
    else:
        store = STORE

    for dbFile in store.find(pathExpr):
        log.metric_access(dbFile.metric_path)
        getCacheResults = CarbonLink.sendRequest(dbFile.real_metric)
        dbResults = dbFile.fetch(timestamp(startTime), timestamp(endTime))
        results = mergeResults(dbResults, getCacheResults())

        if not results:
            continue

        (timeInfo, values) = results
        (start, end, step) = timeInfo
        series = TimeSeries(dbFile.metric_path, start, end, step, values)
        series.pathExpression = pathExpr  #hack to pass expressions through to render functions
        seriesList.append(series)

    return seriesList
Example #3
def fetchData(requestContext, pathExpr):
    seriesList = []
    startTime = requestContext['startTime']
    endTime = requestContext['endTime']

    if requestContext['localOnly']:
        store = LOCAL_STORE
    else:
        store = STORE

    for dbFile in store.find(pathExpr):
        log.metric_access(dbFile.metric_path)
        dbResults = dbFile.fetch(timestamp(startTime), timestamp(endTime))
        try:
            cachedResults = CarbonLink.query(dbFile.real_metric)
            results = mergeResults(dbResults, cachedResults)
        except:
            log.exception()
            results = dbResults

        if not results:
            continue

        (timeInfo, values) = results
        (start, end, step) = timeInfo
        series = TimeSeries(dbFile.metric_path, start, end, step, values)
        series.pathExpression = pathExpr  #hack to pass expressions through to render functions
        seriesList.append(series)

    return seriesList
Example #4
def evaluateTokens(tokens, timeInterval, originalTime = None):
  if tokens.expression:
    if tokens[0][0][0] == "timeShift":
      delta = timeInterval[0] - parseATTime(tokens[0][0][1][1]['string'].strip('"'))
      delta += timeInterval[1] - timeInterval[0]
      originalTime = timeInterval
      timeInterval = (timeInterval[0] - delta, timeInterval[1] - delta)
    return evaluateTokens(tokens.expression, timeInterval, originalTime)

  elif tokens.pathExpression:
    pathExpr = tokens.pathExpression
    if pathExpr.lower().startswith('graphite.'):
      pathExpr = pathExpr[9:]

    seriesList = []
    (startTime,endTime) = originalTime or timeInterval

    for dbFile in settings.STORE.find(pathExpr):
      log.metric_access(dbFile.metric_path)
      getCacheResults = CarbonLink.sendRequest(dbFile.real_metric)
      dbResults = dbFile.fetch( timestamp(startTime), timestamp(endTime) )
      results = mergeResults(dbResults, getCacheResults())

      if not results:
        continue

      (timeInfo,values) = results
      (start,end,step) = timeInfo
      series = TimeSeries(dbFile.metric_path, start, end, step, values)
      series.pathExpression = pathExpr #hack to pass expressions through to render functions
      series.start = time.mktime(startTime.timetuple())
      series.end = time.mktime(endTime.timetuple())
      seriesList.append(series)

    return seriesList

  elif tokens.call:
    func = SeriesFunctions[tokens.call.func]
    args = [evaluateTokens(arg, timeInterval, originalTime) for arg in tokens.call.args]
    return func(*args)

  elif tokens.number:
    if tokens.number.integer:
      return int(tokens.number.integer)

    elif tokens.number.float:
      return float(tokens.number.float)

  elif tokens.string:
    return str(tokens.string)[1:-1]
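
The timeShift branch above effectively re-anchors the render window so that it ends at the parsed "at" time while keeping its length. A worked sketch of that delta arithmetic, using made-up datetimes in place of parseATTime and the token lookup:

from datetime import datetime, timedelta

now = datetime(2015, 6, 1, 12, 0)
timeInterval = (now - timedelta(hours=1), now)   # original render window
shiftedTo = now - timedelta(hours=24)            # what parseATTime("-24h") might return

delta = timeInterval[0] - shiftedTo              # start minus the shifted reference
delta += timeInterval[1] - timeInterval[0]       # plus the window length, i.e. end minus the shifted reference
shifted = (timeInterval[0] - delta, timeInterval[1] - delta)

# Same one-hour window, now ending at the shifted reference time.
assert shifted == (now - timedelta(hours=25), now - timedelta(hours=24))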
Example #5
def evaluateTokens(tokens, timeInterval):
  if tokens.expression:
    return evaluateTokens(tokens.expression, timeInterval)

  elif tokens.pathExpression:
    pathExpr = tokens.pathExpression

    if pathExpr.lower().startswith('graphite.'):
      pathExpr = pathExpr[9:]

    seriesList = []
    (startTime,endTime) = timeInterval

    for dbFile in settings.STORE.find(pathExpr):
      log.metric_access(dbFile.metric_path)
      getCacheResults = CarbonLink.sendRequest(dbFile.real_metric)
      dbResults = dbFile.fetch( timestamp(startTime), timestamp(endTime) )
      results = mergeResults(dbResults, getCacheResults())

      if not results:
        continue

      (timeInfo,values) = results
      (start,end,step) = timeInfo
      series = TimeSeries(dbFile.metric_path, start, end, step, values)
      series.pathExpression = pathExpr #hack to pass expressions through to render functions
      seriesList.append(series)

    return seriesList

  elif tokens.call:
    func = SeriesFunctions[tokens.call.func]
    args = [evaluateTokens(arg, timeInterval) for arg in tokens.call.args]
    try:
      return func(*args)
    except:
      return []

  elif tokens.number:
    if tokens.number.integer:
      return int(tokens.number.integer)

    elif tokens.number.float:
      return float(tokens.number.float)

  elif tokens.string:
    return str(tokens.string)[1:-1]
Example #6
def fetchData(requestContext, pathExpr):
  seriesList = []
  startTime = requestContext['startTime']
  endTime = requestContext['endTime']

  if requestContext['localOnly']:
    store = LOCAL_STORE
  else:
    store = STORE

  for dbFile in store.find(pathExpr):
    log.metric_access(dbFile.metric_path)
    dbResults = dbFile.fetch( timestamp(startTime), timestamp(endTime) )
    try:
      # kenshin format
      (timeInfo,values,min_step) = dbResults
      dbResults = (timeInfo,values)
    except:
      # whisper format
      min_step = None

    STEP_IDX = 2
    if (not min_step) or (timeInfo[STEP_IDX] == min_step):
      # Only merge in cached data when min_step is unset, or when we are fetching the highest-resolution data.
      try:
        cachedResults = CarbonLink.query(dbFile.real_metric)
        results = mergeResults(dbResults, cachedResults)
      except:
        log.exception()
        results = dbResults
    else:
      results = dbResults

    if not results:
      continue

    (timeInfo,values) = results
    (start,end,step) = timeInfo
    series = TimeSeries(dbFile.metric_path, start, end, step, values)
    series.pathExpression = pathExpr #hack to pass expressions through to render functions
    seriesList.append(series)

  return seriesList
Example #7
def test_metric_log(self):
    """ Test writing to a logger that is not configured. """
    message = 'Test Info Message'
    log.metric_access(message)
    file_name = os.path.join(settings.LOG_DIR, 'metricaccess.log')
    self.assertFalse(os.path.exists(file_name))
Example #9
def fetchData(requestContext, pathExpr):
  seriesList = {}
  (startTime, endTime, now) = _timebounds(requestContext)

  dbFiles = [dbFile for dbFile in LOCAL_STORE.find(pathExpr)]

  if settings.CARBONLINK_QUERY_BULK:
    cacheResultsByMetric = CarbonLink.query_bulk([dbFile.real_metric for dbFile in dbFiles])

  for dbFile in dbFiles:
    log.metric_access(dbFile.metric_path)
    dbResults = dbFile.fetch(startTime, endTime, now)

    if dbFile.isLocal():
      try:
        if settings.CARBONLINK_QUERY_BULK:
          cachedResults = cacheResultsByMetric.get(dbFile.real_metric,[])
        else:
          cachedResults = CarbonLink.query(dbFile.real_metric)
        if cachedResults:
          meta_info = dbFile.getInfo()
          lowest_step = min([i['secondsPerPoint'] for i in meta_info['archives']])
          dbResults = mergeResults(dbResults, cachedResults, lowest_step)
      except:
        log.exception("Failed CarbonLink query '%s'" % dbFile.real_metric)

    if not dbResults:
      continue

    (timeInfo,values) = dbResults
    (start,end,step) = timeInfo
    series = TimeSeries(dbFile.metric_path, start, end, step, values)
    series.pathExpression = pathExpr #hack to pass expressions through to render functions
    seriesList[series.name] = series

  if not requestContext['localOnly']:
    result_queue = fetchRemoteData(requestContext, pathExpr)

    # Used as a cache to avoid recounting series None values below.
    series_best_nones = {}

    # Once we've waited for the threads to return, process the results. We could theoretically
    # start processing results right away, but that's a relatively minor optimization compared
    # to not waiting for remote hosts sequentially.
    while not result_queue.empty():
      try:
        (node, results) = result_queue.get(False)
      except:
        log.exception("result_queue not empty, but unable to retrieve results")
        break  # stop processing; avoid reusing stale or undefined results below

      for series in results:
        ts = TimeSeries(series['name'], series['start'], series['end'], series['step'], series['values'])
        ts.pathExpression = pathExpr # hack as above

        if ts.name in seriesList:
          # This counts the Nones in each series, and is unfortunately O(n) for each
          # series, which may be worth further optimization. The value of doing this
          # at all is to avoid the "flipping" effect of loading a graph multiple times
          # and having inconsistent data returned if one of the backing stores has
          # inconsistent data. This is imperfect as a validity test, but in practice
          # nicely keeps us using the "most complete" dataset available. Think of it
          # as a very weak CRDT resolver.
          candidate_nones = 0
          if not settings.REMOTE_STORE_MERGE_RESULTS:
            candidate_nones = len([val for val in series['values'] if val is None])

          known = seriesList[ts.name]
          # To avoid repeatedly recounting the 'Nones' in series we've already seen,
          # cache the best known count so far in a dict.
          if known.name in series_best_nones:
            known_nones = series_best_nones[known.name]
          else:
            known_nones = len([val for val in known if val is None])

          if known_nones > candidate_nones:
            if settings.REMOTE_STORE_MERGE_RESULTS:
              # This series has potential data that might be missing from
              # earlier series.  Attempt to merge in useful data and update
              # the cache count.
              log.info("Merging multiple TimeSeries for %s" % known.name)
              for i, j in enumerate(known):
                if j is None and ts[i] is not None:
                  known[i] = ts[i]
                  known_nones -= 1
              # Store known_nones in our cache
              series_best_nones[known.name] = known_nones
            else:
              # Not merging data -
              # we've found a series better than what we've already seen. Update
              # the count cache and replace the given series in the array.
              series_best_nones[known.name] = candidate_nones
              seriesList[known.name] = ts
          else:
            # If we are merging data: the existing series has no gaps, so there
            # is nothing to merge together.
            #
            # OR, if we are picking the best series:
            #
            # We already have this series in the seriesList, and the candidate
            # is 'worse' than what we already have, so we don't need to compare
            # anything else.
            continue

        # If we looked at this series above, and it matched a 'known'
        # series already, then it's already in the series list (or ignored).
        # If not, append it here.
        else:
          seriesList[ts.name] = ts

  # Stabilize the order of the results by ordering the resulting series by name.
  # This returns the result ordering to the behavior observed pre PR#1010.
  return [ seriesList[k] for k in sorted(seriesList) ]
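
The None-counting resolution in Example #9 can be summarized on its own. A minimal sketch, assuming both series cover the same interval and step; plain lists stand in for TimeSeries objects and the function name is hypothetical:

def resolve(known, candidate, merge_results=False):
    # With merging enabled, fill gaps in the known series from the candidate.
    if merge_results:
        for i, val in enumerate(known):
            if val is None and candidate[i] is not None:
                known[i] = candidate[i]
        return known
    # Otherwise keep whichever series has fewer missing points.
    known_nones = sum(1 for v in known if v is None)
    candidate_nones = sum(1 for v in candidate if v is None)
    return candidate if candidate_nones < known_nones else known

# e.g. resolve([1, None, 3], [1, 2, None], merge_results=True) -> [1, 2, 3]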