Example #1
def context_view(request):
  if request.method == 'GET':
    contexts = []

    if 'metric' not in request.GET:
      return HttpResponse('{ "error" : "missing required parameter \"metric\"" }', mimetype='text/json')

    for metric in request.GET.getlist('metric'):
      try:
        context = STORE.get(metric).context
      except:
        contexts.append({ 'metric' : metric, 'error' : 'failed to retrieve context', 'traceback' : traceback.format_exc() })
      else:
        contexts.append({ 'metric' : metric, 'context' : context })

    content = json.dumps( { 'contexts' : contexts } )
    return HttpResponse(content, mimetype='text/json')

  elif request.method == 'POST':

    if 'metric' not in request.POST:
      return HttpResponse('{ "error" : "missing required parameter \"metric\"" }', mimetype='text/json')

    newContext = dict( item for item in request.POST.items() if item[0] != 'metric' )

    for metric in request.POST.getlist('metric'):
      STORE.get(metric).updateContext(newContext)

    return HttpResponse('{ "success" : true }', mimetype='text/json')

  else:
    return HttpResponseBadRequest("invalid method, must be GET or POST")
Example #2
def context_view(request):
  if request.method == 'GET':
    contexts = []

    if 'metric' not in request.GET:
      return HttpResponse('{ "error" : "missing required parameter \"metric\"" }', content_type='application/json')

    for metric in request.GET.getlist('metric'):
      try:
        context = STORE.get(metric).context
      except:
        contexts.append({ 'metric' : metric, 'error' : 'failed to retrieve context', 'traceback' : traceback.format_exc() })
      else:
        contexts.append({ 'metric' : metric, 'context' : context })

    return json_response_for(request, { 'contexts' : contexts })

  elif request.method == 'POST':

    if 'metric' not in request.POST:
      return HttpResponse('{ "error" : "missing required parameter \"metric\"" }', content_type='application/json')

    newContext = dict( item for item in request.POST.items() if item[0] != 'metric' )

    for metric in request.POST.getlist('metric'):
      STORE.get(metric).updateContext(newContext)

    return HttpResponse('{ "success" : true }', content_type='application/json')

  else:
    return HttpResponseBadRequest("invalid method, must be GET or POST")
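The two context_view variants above accept repeated 'metric' parameters on GET and treat every non-'metric' POST field as new context data. A minimal client-side sketch of that contract, assuming the view is routed at /metrics/context/ (the URL and host are illustrative assumptions):

# Hypothetical client for the context_view endpoint shown above.
# The /metrics/context/ URL is an assumption; adjust to the actual URLconf.
import requests

BASE = "http://graphite.example.com/metrics/context/"

# GET: fetch contexts for one or more metrics (repeated 'metric' parameters).
resp = requests.get(BASE, params=[("metric", "carbon.agents.host-a.cpuUsage"),
                                  ("metric", "carbon.agents.host-a.memUsage")])
print(resp.json()["contexts"])

# POST: every field other than 'metric' becomes part of the new context.
resp = requests.post(BASE, data={"metric": "carbon.agents.host-a.cpuUsage",
                                 "owner": "ops-team"})
print(resp.json())  # {"success": true}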
Example #3
  def _fetchData(pathExpr, startTime, endTime, requestContext, seriesList):
    matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])
    fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]

    for node, results in fetches:
      if isinstance(results, FetchInProgress):
        results = results.waitForResults()

      if not results:
        log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
        continue

      try:
          (timeInfo, values) = results
      except ValueError as e:
          raise Exception("could not parse timeInfo/values from metric '%s': %s" % (node.path, e))
      (start, end, step) = timeInfo

      series = TimeSeries(node.path, start, end, step, values)
      series.pathExpression = pathExpr #hack to pass expressions through to render functions
      seriesList.append(series)

    # Prune empty series with duplicate metric paths to avoid showing empty graph elements for old whisper data
    names = set([ s.name for s in seriesList ])
    for name in names:
      series_with_duplicate_names = [ s for s in seriesList if s.name == name ]
      empty_duplicates = [ s for s in series_with_duplicate_names if not nonempty(s) ]

      if series_with_duplicate_names == empty_duplicates and len(empty_duplicates) > 0: # if they're all empty
        empty_duplicates.pop() # make sure we leave one in seriesList

      for series in empty_duplicates:
        seriesList.remove(series)

    return seriesList
Example #4
def expand_view(request):
    "View for expanding a pattern into matching metric paths"
    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    local_only = int(queryParams.get('local', 0))
    group_by_expr = int(queryParams.get('groupByExpr', 0))
    leaves_only = int(queryParams.get('leavesOnly', 0))
    jsonp = queryParams.get('jsonp', False)

    results = {}
    for query in queryParams.getlist('query'):
        results[query] = set()
        for node in STORE.find(query, local=local_only):
            if node.is_leaf or not leaves_only:
                results[query].add(node.path)

    # Convert our results to sorted lists because sets aren't json-friendly
    if group_by_expr:
        for query, matches in results.items():
            results[query] = sorted(matches)
    else:
        results = sorted(reduce(set.union, results.values(), set()))

    result = {'results': results}

    response = json_response_for(request, result, jsonp=jsonp)
    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response
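expand_view resolves each repeated 'query' pattern into matching metric paths and either groups the results per query (groupByExpr=1) or returns one flat sorted list. A short usage sketch, assuming the view is served at /metrics/expand/ (the URL is an assumption):

# Hypothetical call against the expand_view endpoint above.
import requests

resp = requests.get("http://graphite.example.com/metrics/expand/",
                    params={"query": "carbon.agents.*.cpuUsage",
                            "leavesOnly": 1,    # only leaf nodes
                            "groupByExpr": 0})  # flat sorted list instead of per-query dict
print(resp.json()["results"])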
Example #5
def expand_view(request):
    "View for expanding a pattern into matching metric paths"
    local_only = int(request.REQUEST.get('local', 0))
    group_by_expr = int(request.REQUEST.get('groupByExpr', 0))
    leaves_only = int(request.REQUEST.get('leavesOnly', 0))

    results = {}
    for query in request.REQUEST.getlist('query'):
        results[query] = set()
        for node in STORE.find(query, local=local_only):
            if node.is_leaf or not leaves_only:
                results[query].add(node.metric_path)

    # Convert our results to sorted lists because sets aren't json-friendly
    if group_by_expr:
        for query, matches in results.items():
            results[query] = sorted(matches)
    else:
        results = sorted(reduce(set.union, results.values(), set()))

    result = {'results': results}

    response = HttpResponse(json.dumps(result), mimetype='application/json')
    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response
Example #6
def prefetchData(requestContext, pathExpressions):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by fetchData.
  """
  if not pathExpressions:
    return

  start = time.time()
  log.debug("Fetching data for [%s]" % (', '.join(pathExpressions)))

  (startTime, endTime, now) = timebounds(requestContext)

  prefetched = collections.defaultdict(list)

  for result in STORE.fetch(pathExpressions, startTime, endTime, now, requestContext):
    if result is None:
      continue

    prefetched[result['pathExpression']].append((
      result['name'],
      (
        result['time_info'],
        result['values'],
      ),
    ))

  if not requestContext.get('prefetched'):
    requestContext['prefetched'] = {}

  requestContext['prefetched'][(startTime, endTime, now)] = prefetched

  log.rendering("Fetched data for [%s] in %fs" % (', '.join(pathExpressions), time.time() - start))
Example #7
def expand_view(request):
    "View for expanding a pattern into matching metric paths"
    local_only = int(request.REQUEST.get("local", 0))
    group_by_expr = int(request.REQUEST.get("groupByExpr", 0))
    leaves_only = int(request.REQUEST.get("leavesOnly", 0))

    results = {}
    for query in request.REQUEST.getlist("query"):
        results[query] = set()
        for node in STORE.find(query, tenant, local=local_only):
            if node.is_leaf or not leaves_only:
                results[query].add(node.path)

    # Convert our results to sorted lists because sets aren't json-friendly
    if group_by_expr:
        for query, matches in results.items():
            results[query] = sorted(matches)
    else:
        results = sorted(reduce(set.union, results.values(), set()))

    result = {"results": results}

    response = json_response_for(request, result)
    response["Pragma"] = "no-cache"
    response["Cache-Control"] = "no-cache"
    return response
Example #8
def prefetchData(requestContext, pathExpressions):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by fetchData.
  """
  if not pathExpressions:
    return

  start = time.time()
  log.debug("Fetching data for [%s]" % (', '.join(pathExpressions)))

  (startTime, endTime, now) = timebounds(requestContext)

  prefetched = collections.defaultdict(list)

  for result in STORE.fetch(pathExpressions, startTime, endTime, now, requestContext):
    if result is None:
      continue

    prefetched[result['pathExpression']].append((
      result['name'],
      (
        result['time_info'],
        result['values'],
      ),
    ))

  if not requestContext.get('prefetched'):
    requestContext['prefetched'] = {}

  requestContext['prefetched'][(startTime, endTime, now)] = prefetched

  log.rendering("Fetched data for [%s] in %fs" % (', '.join(pathExpressions), time.time() - start))
Example #9
    def _fetchData(pathExpr, startTime, endTime, requestContext, seriesList):
        matching_nodes = STORE.find(pathExpr,
                                    startTime,
                                    endTime,
                                    local=requestContext['localOnly'])
        fetches = [(node, node.fetch(startTime, endTime))
                   for node in matching_nodes if node.is_leaf]

        for node, results in fetches:
            if isinstance(results, FetchInProgress):
                results = results.waitForResults()

            if not results:
                log.info(
                    "render.datalib.fetchData :: no results for %s.fetch(%s, %s)"
                    % (node, startTime, endTime))
                continue

            try:
                (timeInfo, values) = results
            except ValueError as e:
                raise Exception(
                    "could not parse timeInfo/values from metric '%s': %s" %
                    (node.path, e))
            (start, end, step) = timeInfo

            series = TimeSeries(node.path, start, end, step, values)
            series.pathExpression = pathExpr  #hack to pass expressions through to render functions
            seriesList.append(series)
Example #10
def fetchData(requestContext, pathExpr):

  seriesList = []
  startTime = int( time.mktime( requestContext['startTime'].timetuple() ) )
  endTime   = int( time.mktime( requestContext['endTime'].timetuple() ) )

  matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])
  fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]

  for node, results in fetches:
    if isinstance(results, FetchInProgress):
      results = results.waitForResults()

    if not results:
      log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
      continue

    try:
        (timeInfo, values) = results
    except ValueError as e:
        raise Exception("could not parse timeInfo/values from metric '%s': %s" % (node.path, e))
    (start, end, step) = timeInfo

    series = TimeSeries(node.path, start, end, step, values)
    series.pathExpression = pathExpr #hack to pass expressions through to render functions
    seriesList.append(series)
Example #11
def _fetchData(pathExpr, startTime, endTime, now, requestContext, seriesList):
  result_queue = []
  remote_done = False

  if settings.REMOTE_PREFETCH_DATA:
    prefetched = requestContext['prefetched'].get((startTime, endTime, now), None)
    if prefetched is not None:
      for result in prefetched[pathExpr]:
        result_queue.append(result)
      # Since we pre-fetched remote data only, now we can get local data only.
      remote_done = True

  local = remote_done or requestContext['localOnly']
  matching_nodes = STORE.find(
    pathExpr, startTime, endTime,
    local=local,
    headers=requestContext['forwardHeaders'],
    leaves_only=True,
  )

  for node in matching_nodes:
    result_queue.append(
      (node.path, node.fetch(startTime, endTime, now, requestContext)))

  return _merge_results(pathExpr, startTime, endTime, result_queue, seriesList)
Example #12
def _fetchData(pathExpr, startTime, endTime, now, requestContext, seriesList):
  result_queue = []
  remote_done = False

  if settings.REMOTE_PREFETCH_DATA:
    prefetched = requestContext['prefetched'].get((startTime, endTime, now), None)
    if prefetched is not None:
      for result in prefetched[pathExpr]:
        result_queue.append(result)
      # Since we pre-fetched remote data only, now we can get local data only.
      remote_done = True

  local = remote_done or requestContext['localOnly']
  matching_nodes = STORE.find(
    pathExpr, startTime, endTime,
    local=local,
    headers=requestContext['forwardHeaders'],
    leaves_only=True,
  )

  for node in matching_nodes:
    result_queue.append(
      (node.path, node.fetch(startTime, endTime, now, requestContext)))

  return _merge_results(pathExpr, startTime, endTime, result_queue, seriesList)
Example #13
def expand_view(request):
  "View for expanding a pattern into matching metric paths"
  local_only    = int( request.REQUEST.get('local', 0) )
  group_by_expr = int( request.REQUEST.get('groupByExpr', 0) )
  leaves_only   = int( request.REQUEST.get('leavesOnly', 0) )
  jsonp = request.REQUEST.get('jsonp', False)

  results = {}
  for query in request.REQUEST.getlist('query'):
    results[query] = set()
    for node in STORE.find(query, local=local_only):
      if node.is_leaf or not leaves_only:
        results[query].add( node.path )

  # Convert our results to sorted lists because sets aren't json-friendly
  if group_by_expr:
    for query, matches in results.items():
      results[query] = sorted(matches)
  else:
    results = sorted( reduce(set.union, results.values(), set()) )

  result = {
    'results' : results
  }

  response = json_response_for(request, result, jsonp=jsonp)
  response['Pragma'] = 'no-cache'
  response['Cache-Control'] = 'no-cache'
  return response
Example #14
    def _fetchData(pathExpr, startTime, endTime, requestContext, seriesList):
        matching_nodes = STORE.find(pathExpr,
                                    startTime,
                                    endTime,
                                    local=requestContext['localOnly'],
                                    reqkey=requestContext['request_key'])
        matching_nodes = list(matching_nodes)
        if len(matching_nodes) > 1:
            request_hash = md5("%s_%s_%s" %
                               (pathExpr, startTime, endTime)).hexdigest()
            cached_result = cache.get(request_hash)
            if cached_result:
                log.info(
                    "DEBUG:fetchData: got result from cache for %s_%s_%s" %
                    (pathExpr, startTime, endTime))
                fetches = cached_result
            else:
                log.info("DEBUG:fetchData: no cache for %s_%s_%s" %
                         (pathExpr, startTime, endTime))
                fetches = MultiReader(
                    matching_nodes,
                    reqkey=requestContext['request_key']).fetch(
                        startTime, endTime)
                try:
                    cache.add(request_hash, fetches)
                except Exception as err:
                    log.exception("Failed save data in memcached: %s" %
                                  str(err))
        elif len(matching_nodes) == 1:
            fetches = [(matching_nodes[0],
                        matching_nodes[0].fetch(startTime, endTime))]
        else:
            fetches = []

        for node, results in fetches:
            if isinstance(results, FetchInProgress):
                results = results.waitForResults()

            if not results:
                log.info(
                    "render.datalib.fetchData :: no results for %s.fetch(%s, %s)"
                    % (node, startTime, endTime))
                continue

            try:
                (timeInfo, values) = results
            except ValueError as e:
                raise Exception(
                    "could not parse timeInfo/values from metric '%s': %s" %
                    (node.path, e))
            (start, end, step) = timeInfo

            series = TimeSeries(node.path, start, end, step, values)
            series.pathExpression = pathExpr  #hack to pass expressions through to render functions
            seriesList.append(series)
Example #15
def find_metric(request):
    """Autocomplete helper on metric names."""
    try:
        query = str(request.REQUEST["q"])
    except:
        return HttpResponseBadRequest(content="Missing required parameter 'q'", content_type="text/plain")

    matches = list(STORE.find(query + "*"))
    content = "\n".join([node.path for node in matches])
    response = HttpResponse(content, content_type="text/plain")

    return response
Example #16
def find_metric(request):
    """Autocomplete helper on metric names."""
    try:
        query = str( request.REQUEST['q'] )
    except:
        return HttpResponseBadRequest(
            content="Missing required parameter 'q'", mimetype="text/plain")

    matches = list( STORE.find(query+"*") )
    content = "\n".join([node.metric_path for node in matches ])
    response = HttpResponse(content, mimetype='text/plain')

    return response
Example #17
def prefetchRemoteData(requestContext, targets):
    """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by datalib.
  """
    log.rendering("Prefetching remote data")
    pathExpressions = extractPathExpressions(targets)
    results = STORE.fetch_remote(pathExpressions, requestContext)

    # TODO: instead of doing that it would be wait better to use
    # the shared cache to cache pathExpr instead of full queries.
    requestContext['prefetched'] = PrefetchedData(results)
Example #18
def prefetchData(requestContext, pathExpressions):
    """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by fetchData.
  """
    if not pathExpressions:
        return

    start = time.time()
    log.debug("Fetching data for [%s]" % (', '.join(pathExpressions)))

    (startTime, endTime, now) = timebounds(requestContext)

    prefetched = collections.defaultdict(list)

    for result in STORE.fetch(pathExpressions, startTime, endTime, now,
                              requestContext):
        if result is None:
            continue

        prefetched[result['pathExpression']].append((
            result['name'],
            (
                result['time_info'],
                result['values'],
            ),
        ))

    # Several third-party readers including rrdtool and biggraphite return values in a
    # generator which can only be iterated on once. These must be converted to a list.
    for pathExpression, items in prefetched.items():
        for i, (name, (time_info, values)) in enumerate(items):
            if isinstance(values, types.GeneratorType):
                prefetched[pathExpression][i] = (name, (time_info,
                                                        list(values)))

    if not requestContext.get('prefetched'):
        requestContext['prefetched'] = {}

    if (startTime, endTime, now) in requestContext['prefetched']:
        requestContext['prefetched'][(startTime, endTime,
                                      now)].update(prefetched)
    else:
        requestContext['prefetched'][(startTime, endTime, now)] = prefetched

    log.rendering("Fetched data for [%s] in %fs" %
                  (', '.join(pathExpressions), time.time() - start))
Example #19
def prefetchRemoteData(requestContext, targets):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by datalib.
  """
  pathExpressions = extractPathExpressions(targets)
  log.rendering("Prefetching remote data for [%s]" % (', '.join(pathExpressions)))

  (startTime, endTime, now) = timebounds(requestContext)

  results = STORE.fetch_remote(pathExpressions, startTime, endTime, now, requestContext)

  requestContext['prefetched'][(startTime, endTime, now)] = PrefetchedData(results)
Example #20
def prefetchRemoteData(requestContext, targets):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by datalib.
  """
  log.rendering("Prefetching remote data")
  pathExpressions = extractPathExpressions(targets)

  (startTime, endTime, now) = timebounds(requestContext)

  results = STORE.fetch_remote(pathExpressions, startTime, endTime, now, requestContext)

  requestContext['prefetched'][(startTime, endTime, now)] = PrefetchedData(results)
Example #21
def autoCompleteTags(request, queryParams):
    if request.method not in ['GET', 'POST']:
        return HttpResponse(status=405)

    exprs = []
    # Normal format: ?expr=tag1=value1&expr=tag2=value2
    if len(queryParams.getlist('expr')) > 0:
        exprs = queryParams.getlist('expr')
    # Rails/PHP/jQuery common practice format: ?expr[]=tag1=value1&expr[]=tag2=value2
    elif len(queryParams.getlist('expr[]')) > 0:
        exprs = queryParams.getlist('expr[]')

    return STORE.tagdb_auto_complete_tags(
        exprs,
        tagPrefix=queryParams.get('tagPrefix'),
        limit=queryParams.get('limit'),
        requestContext=_requestContext(request, queryParams))
Example #22
def index_json(request):
    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    try:
        jsonp = queryParams.get('jsonp', False)

        requestContext = {
            'localOnly': int(queryParams.get('local', 0)),
            'forwardHeaders': extractForwardHeaders(request),
        }

        matches = STORE.get_index(requestContext)
    except Exception:
        log.exception()
        return json_response_for(request, [], jsonp=jsonp, status=500)

    return json_response_for(request, matches, jsonp=jsonp)
Example #23
def autoCompleteTags(request, queryParams):
  if request.method not in ['GET', 'POST']:
    return HttpResponse(status=405)

  exprs = []
  # Normal format: ?expr=tag1=value1&expr=tag2=value2
  if len(queryParams.getlist('expr')) > 0:
    exprs = queryParams.getlist('expr')
  # Rails/PHP/jQuery common practice format: ?expr[]=tag1=value1&expr[]=tag2=value2
  elif len(queryParams.getlist('expr[]')) > 0:
    exprs = queryParams.getlist('expr[]')

  return STORE.tagdb_auto_complete_tags(
    exprs,
    tagPrefix=queryParams.get('tagPrefix'),
    limit=queryParams.get('limit'),
    requestContext=_requestContext(request)
  )
Example #24
def index_json(request):
  queryParams = request.GET.copy()
  queryParams.update(request.POST)

  try:
    jsonp = queryParams.get('jsonp', False)

    requestContext = {
      'localOnly': int( queryParams.get('local', 0) ),
      'forwardHeaders': extractForwardHeaders(request),
    }

    matches = STORE.get_index(requestContext)
  except Exception:
    log.exception()
    return json_response_for(request, [], jsonp=jsonp, status=500)

  return json_response_for(request, matches, jsonp=jsonp)
Example #25
def prefetchData(requestContext, pathExpressions):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by fetchData.
  """
  if not pathExpressions:
    return

  start = time.time()
  log.debug("Fetching data for [%s]" % (', '.join(pathExpressions)))

  (startTime, endTime, now) = timebounds(requestContext)

  prefetched = collections.defaultdict(list)

  for result in STORE.fetch(pathExpressions, startTime, endTime, now, requestContext):
    if result is None:
      continue

    prefetched[result['pathExpression']].append((
      result['name'],
      (
        result['time_info'],
        result['values'],
      ),
    ))

  # Several third-party readers including rrdtool and biggraphite return values in a
  # generator which can only be iterated on once. These must be converted to a list.
  for pathExpression, items in prefetched.items():
    for i, (name, (time_info, values)) in enumerate(items):
      if isinstance(values, types.GeneratorType):
        prefetched[pathExpression][i] = (name, (time_info, list(values)))

  if not requestContext.get('prefetched'):
    requestContext['prefetched'] = {}

  requestContext['prefetched'][(startTime, endTime, now)] = prefetched

  log.rendering("Fetched data for [%s] in %fs" % (', '.join(pathExpressions), time.time() - start))
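The prefetchData/prefetchRemoteData variants above all store results keyed first by the (startTime, endTime, now) timebounds tuple and then by path expression, which is how _fetchData (Examples #11/#12) looks them up again. A self-contained sketch of that layout, with made-up values for illustration:

# Illustrative only: the shape of requestContext['prefetched'] built by prefetchData().
import collections

startTime, endTime, now = 1500000000, 1500003600, 1500003600

prefetched = collections.defaultdict(list)
prefetched['carbon.agents.*.cpuUsage'].append((
    'carbon.agents.host-a.cpuUsage',      # result['name']
    ((startTime, endTime, 60),            # result['time_info'] -> (start, end, step)
     [0.1, 0.2, None, 0.4]),              # result['values']
))

requestContext = {'prefetched': {(startTime, endTime, now): prefetched}}

# _fetchData() later retrieves the batch by the same timebounds key and path expression:
for name, (time_info, values) in requestContext['prefetched'][(startTime, endTime, now)]['carbon.agents.*.cpuUsage']:
    start, end, step = time_info
    print(name, start, end, step, values)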
Example #26
def completePath(path, shortnames=False):
    # Have to extract the path expression from the command
    for prefix in ('draw ', 'add ', 'remove '):
        if path.startswith(prefix):
            path = path[len(prefix):]
            break

    pattern = re.sub(r'\w+\(', '', path).replace(')', '') + '*'

    results = []

    for match in STORE.find(pattern):
        if shortnames:
            results.append(match.name)
        else:
            results.append(match.metric_path)

    list_items = ["<li>%s</li>" % r for r in results]
    list_element = "<ul>" + '\n'.join(list_items) + "</ul>"
    return list_element
Example #27
def prefetchRemoteData(requestContext, targets):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by datalib.
  """
  # only prefetch if there is at least one active remote finder
  # this is to avoid the overhead of tagdb lookups in extractPathExpressions
  if len([finder for finder in STORE.finders if not getattr(finder, 'local', True) and not getattr(finder, 'disabled', False)]) < 1:
    return

  pathExpressions = extractPathExpressions(targets)
  log.rendering("Prefetching remote data for [%s]" % (', '.join(pathExpressions)))

  (startTime, endTime, now) = timebounds(requestContext)

  results = STORE.fetch_remote(pathExpressions, startTime, endTime, now, requestContext)

  requestContext['prefetched'][(startTime, endTime, now)] = PrefetchedData(results)
Example #28
def completePath(path, shortnames=False):
  # Have to extract the path expression from the command
  for prefix in ('draw ','add ','remove '):
    if path.startswith(prefix):
      path = path[len(prefix):]
      break

  pattern = re.sub(r'\w+\(', '', path).replace(')', '') + '*'

  results = []
  
  for match in STORE.find(pattern):
    if shortnames:
      results.append(match.name)
    else:
      results.append(match.metric_path)

  list_items = ["<li>%s</li>" % r for r in results]
  list_element = "<ul>" + '\n'.join(list_items) + "</ul>"
  return list_element
Example #29
def fetchData(requestContext, pathExpr):

  seriesList = []
  startTime = int( time.mktime( requestContext['startTime'].timetuple() ) )
  endTime   = int( time.mktime( requestContext['endTime'].timetuple() ) )

  matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])
  fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]

  for node, results in fetches:
    if isinstance(results, FetchInProgress):
      results = results.waitForResults()

    if not results:
      log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
      continue

    (timeInfo, values) = results
    (start, end, step) = timeInfo

    values = [value if value else 0 for value in values] # hack to turn nulls into 0s

    series = TimeSeries(node.path, start, end, step, values)
    series.pathExpression = pathExpr #hack to pass expressions through to render functions
    seriesList.append(series)

  # Prune empty series with duplicate metric paths to avoid showing empty graph elements for old whisper data
  names = set([ series.name for series in seriesList ])
  for name in names:
    series_with_duplicate_names = [ series for series in seriesList if series.name == name ]
    empty_duplicates = [ series for series in series_with_duplicate_names if not nonempty(series) ]

    if series_with_duplicate_names == empty_duplicates and len(empty_duplicates) > 0: # if they're all empty
      empty_duplicates.pop() # make sure we leave one in seriesList

    for series in empty_duplicates:
      seriesList.remove(series)

  return seriesList
Example #30
def fetchData(requestContext, pathExpr):

    seriesList = []
    startTime = int(time.mktime(requestContext["startTime"].timetuple()))
    endTime = int(time.mktime(requestContext["endTime"].timetuple()))

    matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext["localOnly"])
    fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]

    for node, results in fetches:
        if isinstance(results, FetchInProgress):
            results = results.waitForResults()

        if not results:
            log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
            continue

        (timeInfo, values) = results
        (start, end, step) = timeInfo

        values = [value if value else 0 for value in values]  # hack to turn nulls into 0s

        series = TimeSeries(node.path, start, end, step, values)
        series.pathExpression = pathExpr  # hack to pass expressions through to render functions
        seriesList.append(series)

    # Prune empty series with duplicate metric paths to avoid showing empty graph elements for old whisper data
    names = set([series.name for series in seriesList])
    for name in names:
        series_with_duplicate_names = [series for series in seriesList if series.name == name]
        empty_duplicates = [series for series in series_with_duplicate_names if not nonempty(series)]

        if series_with_duplicate_names == empty_duplicates and len(empty_duplicates) > 0:  # if they're all empty
            empty_duplicates.pop()  # make sure we leave one in seriesList

        for series in empty_duplicates:
            seriesList.remove(series)

    return seriesList
Example #31
  def _fetchData(pathExpr, startTime, endTime, requestContext, seriesList):
    matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'], reqkey=requestContext['request_key'])
    matching_nodes = list(matching_nodes)
    if len(matching_nodes) > 1:
        request_hash = md5("%s_%s_%s" % (pathExpr, startTime, endTime)).hexdigest()
        cached_result = cache.get(request_hash)
        if cached_result:
            log.info("DEBUG:fetchData: got result from cache for %s_%s_%s" % (pathExpr, startTime, endTime))
            fetches = cached_result
        else:
            log.info("DEBUG:fetchData: no cache for %s_%s_%s" % (pathExpr, startTime, endTime))
            fetches = MultiReader(matching_nodes, reqkey=requestContext['request_key']).fetch(startTime, endTime)
            try:
                cache.add(request_hash, fetches)
            except Exception as err:
                log.exception("Failed save data in memcached: %s" % str(err))
    elif len(matching_nodes) == 1:
        fetches = [(matching_nodes[0], matching_nodes[0].fetch(startTime, endTime))]
    else:
        fetches = []

    for node, results in fetches:
      if isinstance(results, FetchInProgress):
        results = results.waitForResults()

      if not results:
        log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
        continue

      try:
          (timeInfo, values) = results
      except ValueError as e:
          raise Exception("could not parse timeInfo/values from metric '%s': %s" % (node.path, e))
      (start, end, step) = timeInfo

      series = TimeSeries(node.path, start, end, step, values)
      series.pathExpression = pathExpr #hack to pass expressions through to render functions
      seriesList.append(series)
Example #32
def autoCompleteValues(request, queryParams):
    if request.method not in ['GET', 'POST']:
        return HttpResponse(status=405)

    exprs = []
    # Normal format: ?expr=tag1=value1&expr=tag2=value2
    if len(queryParams.getlist('expr')) > 0:
        exprs = queryParams.getlist('expr')
    # Rails/PHP/jQuery common practice format: ?expr[]=tag1=value1&expr[]=tag2=value2
    elif len(queryParams.getlist('expr[]')) > 0:
        exprs = queryParams.getlist('expr[]')

    tag = queryParams.get('tag')
    if not tag:
        raise HttpError('no tag specified', status=400)

    return STORE.tagdb_auto_complete_values(
        exprs,
        tag,
        valuePrefix=queryParams.get('valuePrefix'),
        limit=queryParams.get('limit'),
        requestContext=_requestContext(request))
Example #33
def autoCompleteValues(request, queryParams):
  if request.method not in ['GET', 'POST']:
    return HttpResponse(status=405)

  exprs = []
  # Normal format: ?expr=tag1=value1&expr=tag2=value2
  if len(queryParams.getlist('expr')) > 0:
    exprs = queryParams.getlist('expr')
  # Rails/PHP/jQuery common practice format: ?expr[]=tag1=value1&expr[]=tag2=value2
  elif len(queryParams.getlist('expr[]')) > 0:
    exprs = queryParams.getlist('expr[]')

  tag = queryParams.get('tag')
  if not tag:
    raise HttpError('no tag specified', status=400)

  return STORE.tagdb_auto_complete_values(
    exprs,
    tag,
    valuePrefix=queryParams.get('valuePrefix'),
    limit=queryParams.get('limit'),
    requestContext=_requestContext(request, queryParams)
  )
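autoCompleteTags and autoCompleteValues above both accept filter expressions as repeated 'expr' (or Rails-style 'expr[]') parameters; autoCompleteValues additionally requires 'tag'. A usage sketch, assuming the usual /tags/autoComplete/... routing (the URLs and host are assumptions):

# Hypothetical requests against the tag auto-complete views above.
import requests

# Complete tag names, optionally constrained by existing expressions and a prefix.
requests.get("http://graphite.example.com/tags/autoComplete/tags",
             params=[("expr", "datacenter=dc1"), ("tagPrefix", "ho"), ("limit", 50)])

# Complete values for one tag; the 'tag' parameter is required (error 400 otherwise).
requests.get("http://graphite.example.com/tags/autoComplete/values",
             params=[("expr", "datacenter=dc1"), ("tag", "host"),
                     ("valuePrefix", "web"), ("limit", 50)])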
Example #34
def find_view(request):
    "View for finding metrics matching a given pattern"
    profile = getProfile(request)

    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    format = queryParams.get('format', 'treejson')
    local_only = int(queryParams.get('local', 0))
    wildcards = int(queryParams.get('wildcards', 0))
    fromTime = int(queryParams.get('from', -1))
    untilTime = int(queryParams.get('until', -1))
    nodePosition = int(queryParams.get('position', -1))
    jsonp = queryParams.get('jsonp', False)

    if fromTime == -1:
        fromTime = None
    if untilTime == -1:
        untilTime = None

    automatic_variants = int(queryParams.get('automatic_variants', 0))

    try:
        query = str(queryParams['query'])
    except:
        return HttpResponseBadRequest(
            content="Missing required parameter 'query'",
            content_type='text/plain')

    if '.' in query:
        base_path = query.rsplit('.', 1)[0] + '.'
    else:
        base_path = ''

    if format == 'completer':
        query = query.replace('..', '*.')
        if not query.endswith('*'):
            query += '*'

        if automatic_variants:
            query_parts = query.split('.')
            for i, part in enumerate(query_parts):
                if ',' in part and '{' not in part:
                    query_parts[i] = '{%s}' % part
            query = '.'.join(query_parts)

    try:
        matches = list(STORE.find(query, fromTime, untilTime,
                                  local=local_only))
    except:
        log.exception()
        raise

    log.info('find_view query=%s local_only=%s matches=%d' %
             (query, local_only, len(matches)))
    matches.sort(key=lambda node: node.name)
    log.info(
        "received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d"
        % (query, fromTime, untilTime, local_only, format, len(matches)))

    if format == 'treejson':
        content = tree_json(matches,
                            base_path,
                            wildcards=profile.advancedUI or wildcards)
        response = json_response_for(request, content, jsonp=jsonp)

    elif format == 'nodelist':
        content = nodes_by_position(matches, nodePosition)
        response = json_response_for(request, content, jsonp=jsonp)

    elif format == 'pickle':
        content = pickle_nodes(matches)
        response = HttpResponse(content, content_type='application/pickle')

    elif format == 'completer':
        results = []
        for node in matches:
            node_info = dict(path=node.path,
                             name=node.name,
                             is_leaf=str(int(node.is_leaf)))
            if not node.is_leaf:
                node_info['path'] += '.'
            results.append(node_info)

        if len(results) > 1 and wildcards:
            wildcardNode = {'name': '*'}
            results.append(wildcardNode)

        response = json_response_for(request, {'metrics': results},
                                     jsonp=jsonp)

    else:
        return HttpResponseBadRequest(
            content="Invalid value for 'format' parameter",
            content_type='text/plain')

    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response
Example #35
def find_view(request):
    "View for finding metrics matching a given pattern"
    profile = getProfile(request)
    format = request.REQUEST.get("format", "treejson")
    local_only = int(request.REQUEST.get("local", 0))
    wildcards = int(request.REQUEST.get("wildcards", 0))
    fromTime = int(request.REQUEST.get("from", -1))
    untilTime = int(request.REQUEST.get("until", -1))

    if fromTime == -1:
        fromTime = None
    if untilTime == -1:
        untilTime = None

    automatic_variants = int(request.REQUEST.get("automatic_variants", 0))

    try:
        query = str(request.REQUEST["query"])
    except:
        return HttpResponseBadRequest(content="Missing required parameter 'query'", content_type="text/plain")

    if "." in query:
        base_path = query.rsplit(".", 1)[0] + "."
    else:
        base_path = ""

    if format == "completer":
        query = query.replace("..", "*.")
        if not query.endswith("*"):
            query += "*"

        if automatic_variants:
            query_parts = query.split(".")
            for i, part in enumerate(query_parts):
                if "," in part and "{" not in part:
                    query_parts[i] = "{%s}" % part
            query = ".".join(query_parts)
    tenant = request.session["tenant"]
    try:
        matches = list(STORE.find(query, tenant, fromTime, untilTime, local=local_only))
    except:
        log.exception()
        raise

    log.info("find_view query=%s local_only=%s matches=%d" % (query, local_only, len(matches)))
    matches.sort(key=lambda node: node.name)
    log.info(
        "received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d"
        % (query, fromTime, untilTime, local_only, format, len(matches))
    )

    if format == "treejson":
        content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
        response = json_response_for(request, content)

    elif format == "pickle":
        content = pickle_nodes(matches)
        response = HttpResponse(content, content_type="application/pickle")

    elif format == "completer":
        results = []
        for node in matches:
            node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
            if not node.is_leaf:
                node_info["path"] += "."
            results.append(node_info)

        if len(results) > 1 and wildcards:
            wildcardNode = {"name": "*"}
            results.append(wildcardNode)

        response = json_response_for(request, {"metrics": results})

    else:
        return HttpResponseBadRequest(content="Invalid value for 'format' parameter", content_type="text/plain")

    response["Pragma"] = "no-cache"
    response["Cache-Control"] = "no-cache"
    return response
Example #36
def find_view(request):
  "View for finding metrics matching a given pattern"

  queryParams = request.GET.copy()
  queryParams.update(request.POST)

  format = queryParams.get('format', 'treejson')
  leaves_only = int( queryParams.get('leavesOnly', 0) )
  local_only = int( queryParams.get('local', 0) )
  wildcards = int( queryParams.get('wildcards', 0) )

  tzinfo = pytz.timezone(settings.TIME_ZONE)
  if 'tz' in queryParams:
    try:
      tzinfo = pytz.timezone(queryParams['tz'])
    except pytz.UnknownTimeZoneError:
      pass

  if 'now' in queryParams:
    now = parseATTime(queryParams['now'], tzinfo)
  else:
    now = datetime.now(tzinfo)

  if 'from' in queryParams and str(queryParams['from']) != '-1':
    fromTime = int(epoch(parseATTime(queryParams['from'], tzinfo, now)))
  else:
    fromTime = -1

  if 'until' in queryParams and str(queryParams['until']) != '-1':
    untilTime = int(epoch(parseATTime(queryParams['until'], tzinfo, now)))
  else:
    untilTime = -1

  nodePosition = int( queryParams.get('position', -1) )
  jsonp = queryParams.get('jsonp', False)
  forward_headers = extractForwardHeaders(request)

  if fromTime == -1:
    fromTime = None
  if untilTime == -1:
    untilTime = None

  automatic_variants = int( queryParams.get('automatic_variants', 0) )

  try:
    query = str(queryParams['query'])
  except KeyError:
    return HttpResponseBadRequest(content="Missing required parameter 'query'",
                                  content_type='text/plain')

  if query == '':
    return HttpResponseBadRequest(content="Required parameter 'query' is empty",
                                  content_type='text/plain')

  if '.' in query:
    base_path = query.rsplit('.', 1)[0] + '.'
  else:
    base_path = ''

  if format == 'completer':
    query = query.replace('..', '*.')
    if not query.endswith('*'):
      query += '*'

    if automatic_variants:
      query_parts = query.split('.')
      for i,part in enumerate(query_parts):
        if ',' in part and '{' not in part:
          query_parts[i] = '{%s}' % part
      query = '.'.join(query_parts)

  try:
    matches = list(STORE.find(
      query, fromTime, untilTime,
      local=local_only,
      headers=forward_headers,
      leaves_only=leaves_only,
    ))
  except Exception:
    log.exception()
    raise

  log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
  matches.sort(key=lambda node: node.name)
  log.info("received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d" % (query, fromTime, untilTime, local_only, format, len(matches)))

  if format == 'treejson':
    profile = getProfile(request)
    content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
    response = json_response_for(request, content, jsonp=jsonp)

  elif format == 'nodelist':
    content = nodes_by_position(matches, nodePosition)
    response = json_response_for(request, content, jsonp=jsonp)

  elif format == 'pickle':
    content = pickle_nodes(matches)
    response = HttpResponse(content, content_type='application/pickle')

  elif format == 'msgpack':
    content = msgpack_nodes(matches)
    response = HttpResponse(content, content_type='application/x-msgpack')

  elif format == 'json':
    content = json_nodes(matches)
    response = json_response_for(request, content, jsonp=jsonp)

  elif format == 'completer':
    results = []
    for node in matches:
      node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
      if not node.is_leaf:
        node_info['path'] += '.'
      results.append(node_info)

    if len(results) > 1 and wildcards:
      wildcardNode = {'name' : '*'}
      results.append(wildcardNode)

    response = json_response_for(request, { 'metrics' : results }, jsonp=jsonp)

  else:
    return HttpResponseBadRequest(
        content="Invalid value for 'format' parameter",
        content_type='text/plain')

  response['Pragma'] = 'no-cache'
  response['Cache-Control'] = 'no-cache'
  return response
Example #37
def find_view(request):
    "View for finding metrics matching a given pattern"

    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    format = queryParams.get('format', 'treejson')
    leaves_only = queryParamAsInt(queryParams, 'leavesOnly', 0)
    local_only = queryParamAsInt(queryParams, 'local', 0)
    wildcards = queryParamAsInt(queryParams, 'wildcards', 0)

    tzinfo = pytz.timezone(settings.TIME_ZONE)
    if 'tz' in queryParams:
        try:
            value = queryParams['tz']
            tzinfo = pytz.timezone(value)
        except pytz.UnknownTimeZoneError:
            pass
        except Exception as e:
            raise InputParameterError(
                'Invalid value {value} for param tz: {err}'.format(
                    value=repr(value), err=str(e)))

    if 'now' in queryParams:
        try:
            value = queryParams['now']
            now = parseATTime(value, tzinfo)
        except Exception as e:
            raise InputParameterError(
                'Invalid value {value} for param now: {err}'.format(
                    value=repr(value), err=str(e)))
    else:
        now = datetime.now(tzinfo)

    if 'from' in queryParams and str(queryParams['from']) != '-1':
        try:
            value = queryParams['from']
            fromTime = int(epoch(parseATTime(value, tzinfo, now)))
        except Exception as e:
            raise InputParameterError(
                'Invalid value {value} for param from: {err}'.format(
                    value=repr(value), err=str(e)))
    else:
        fromTime = -1

    if 'until' in queryParams and str(queryParams['until']) != '-1':
        try:
            value = queryParams['until']
            untilTime = int(epoch(parseATTime(value, tzinfo, now)))
        except Exception as e:
            raise InputParameterError(
                'Invalid value {value} for param until: {err}'.format(
                    value=repr(value), err=str(e)))
    else:
        untilTime = -1

    nodePosition = queryParamAsInt(queryParams, 'position', -1)
    jsonp = queryParams.get('jsonp', False)
    forward_headers = extractForwardHeaders(request)

    if fromTime == -1:
        fromTime = None
    if untilTime == -1:
        untilTime = None

    automatic_variants = queryParamAsInt(queryParams, 'automatic_variants', 0)

    try:
        query = str(queryParams['query'])
    except KeyError:
        raise InputParameterError('Missing required parameter \'query\'')

    if query == '':
        raise InputParameterError('Required parameter \'query\' is empty')

    if '.' in query:
        base_path = query.rsplit('.', 1)[0] + '.'
    else:
        base_path = ''

    if format == 'completer':
        query = query.replace('..', '*.')
        if not query.endswith('*'):
            query += '*'

        if automatic_variants:
            query_parts = query.split('.')
            for i, part in enumerate(query_parts):
                if ',' in part and '{' not in part:
                    query_parts[i] = '{%s}' % part
            query = '.'.join(query_parts)

    try:
        matches = list(
            STORE.find(
                query,
                fromTime,
                untilTime,
                local=local_only,
                headers=forward_headers,
                leaves_only=leaves_only,
            ))
    except Exception:
        log.exception()
        raise

    log.info('find_view query=%s local_only=%s matches=%d' %
             (query, local_only, len(matches)))
    matches.sort(key=lambda node: node.name)
    log.info(
        "received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d"
        % (query, fromTime, untilTime, local_only, format, len(matches)))

    if format == 'treejson':
        profile = getProfile(request)
        content = tree_json(matches,
                            base_path,
                            wildcards=profile.advancedUI or wildcards)
        response = json_response_for(request, content, jsonp=jsonp)

    elif format == 'nodelist':
        content = nodes_by_position(matches, nodePosition)
        response = json_response_for(request, content, jsonp=jsonp)

    elif format == 'pickle':
        content = pickle_nodes(matches)
        response = HttpResponse(content, content_type='application/pickle')

    elif format == 'msgpack':
        content = msgpack_nodes(matches)
        response = HttpResponse(content, content_type='application/x-msgpack')

    elif format == 'json':
        content = json_nodes(matches)
        response = json_response_for(request, content, jsonp=jsonp)

    elif format == 'completer':
        results = []
        for node in matches:
            node_info = dict(path=node.path,
                             name=node.name,
                             is_leaf=str(int(node.is_leaf)))
            if not node.is_leaf:
                node_info['path'] += '.'
            results.append(node_info)

        if len(results) > 1 and wildcards:
            wildcardNode = {'name': '*'}
            results.append(wildcardNode)

        response = json_response_for(request, {'metrics': results},
                                     jsonp=jsonp)

    else:
        return HttpResponseBadRequest(
            content="Invalid value for 'format' parameter",
            content_type='text/plain')

    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response
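find_view supports several output formats (treejson, nodelist, pickle, msgpack, json, completer); in completer mode the query has '..' expanded to '*.' and a trailing '*' appended before matching. A minimal client sketch, assuming the view is mounted at /metrics/find/ (the URL and host are assumptions):

# Hypothetical client calls for the find_view endpoint above.
import requests

# Default treejson format, as used by the tree browser UI.
requests.get("http://graphite.example.com/metrics/find/",
             params={"query": "carbon.agents.*", "format": "treejson"})

# Completer format for autocompletion; the trailing '*' is added server-side.
resp = requests.get("http://graphite.example.com/metrics/find/",
                    params={"query": "carbon.agen", "format": "completer", "wildcards": 1})
print(resp.json()["metrics"])  # [{'path': ..., 'name': ..., 'is_leaf': '0' or '1'}, ...]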
Example #38
def _fetchData(pathExpr, startTime, endTime, now, requestContext, seriesList):
    matching_nodes = STORE.find(pathExpr,
                                startTime,
                                endTime,
                                local=requestContext['localOnly'],
                                headers=requestContext.get('forwardHeaders'))
    fetches = [(node.path, node.fetch(startTime, endTime, now, requestContext))
               for node in matching_nodes if node.is_leaf]

    for path, results in fetches:
        if isinstance(results, FetchInProgress):
            results = results.waitForResults()

        if not results:
            log.info(
                "render.datalib.fetchData :: no results for %s.fetch(%s, %s)" %
                (path, startTime, endTime))
            continue

        try:
            (timeInfo, values) = results
        except ValueError as e:
            raise Exception(
                "could not parse timeInfo/values from metric '%s': %s" %
                (path, e))
        (start, end, step) = timeInfo

        series = TimeSeries(path, start, end, step, values)

        # hack to pass expressions through to render functions
        series.pathExpression = pathExpr

        # Used as a cache to avoid recounting series None values below.
        series_best_nones = {}

        if series.name in seriesList:
            # This counts the Nones in each series, and is unfortunately O(n) for each
            # series, which may be worth further optimization. The value of doing this
            # at all is to avoid the "flipping" effect of loading a graph multiple times
            # and having inconsistent data returned if one of the backing stores has
            # inconsistent data. This is imperfect as a validity test, but in practice
            # nicely keeps us using the "most complete" dataset available. Think of it
            # as a very weak CRDT resolver.
            candidate_nones = 0
            if not settings.REMOTE_STORE_MERGE_RESULTS:
                candidate_nones = len([val for val in values if val is None])

            known = seriesList[series.name]
            # To avoid repeatedly recounting the 'Nones' in series we've already seen,
            # cache the best known count so far in a dict.
            if known.name in series_best_nones:
                known_nones = series_best_nones[known.name]
            else:
                known_nones = len([val for val in known if val is None])

            if known_nones > candidate_nones:
                if settings.REMOTE_STORE_MERGE_RESULTS:
                    # This series has potential data that might be missing from
                    # earlier series.  Attempt to merge in useful data and update
                    # the cache count.
                    log.info("Merging multiple TimeSeries for %s" % known.name)
                    for i, j in enumerate(known):
                        if j is None and series[i] is not None:
                            known[i] = series[i]
                            known_nones -= 1
                    # Store known_nones in our cache
                    series_best_nones[known.name] = known_nones
                else:
                    # Not merging data -
                    # we've found a series better than what we've already seen. Update
                    # the count cache and replace the given series in the array.
                    series_best_nones[known.name] = candidate_nones
                    seriesList[known.name] = series
            else:
                # In case if we are merging data - the existing series has no gaps and
                # there is nothing to merge together.  Save ourselves some work here.
                #
                # OR - if we picking best serie:
                #
                # We already have this series in the seriesList, and the
                # candidate is 'worse' than what we already have, we don't need
                # to compare anything else. Save ourselves some work here.
                break

                # If we looked at this series above, and it matched a 'known'
                # series already, then it's already in the series list (or ignored).
                # If not, append it here.
        else:
            seriesList[series.name] = series

    # Stabilize the order of the results by ordering the resulting series by name.
    # This returns the result ordering to the behavior observed pre PR#1010.
    return [seriesList[k] for k in sorted(seriesList)]
Example #39
  def _fetchData(pathExpr, startTime, endTime, requestContext, seriesList):
    matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])
    fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]

    for node, results in fetches:
      if isinstance(results, FetchInProgress):
        results = results.waitForResults()

      if not results:
        log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
        continue

      try:
          (timeInfo, values) = results
      except ValueError as e:
          raise Exception("could not parse timeInfo/values from metric '%s': %s" % (node.path, e))
      (start, end, step) = timeInfo

      series = TimeSeries(node.path, start, end, step, values)
      series.pathExpression = pathExpr #hack to pass expressions through to render functions

      # Used as a cache to avoid recounting series None values below.
      series_best_nones = {}

      if series.name in seriesList:
        # This counts the Nones in each series, and is unfortunately O(n) for each
        # series, which may be worth further optimization. The value of doing this
        # at all is to avoid the "flipping" effect of loading a graph multiple times
        # and having inconsistent data returned if one of the backing stores has
        # inconsistent data. This is imperfect as a validity test, but in practice
        # nicely keeps us using the "most complete" dataset available. Think of it
        # as a very weak CRDT resolver.
        candidate_nones = 0
        if not settings.REMOTE_STORE_MERGE_RESULTS:
          candidate_nones = len(
            [val for val in values if val is None])

        known = seriesList[series.name]
        # To avoid repeatedly recounting the 'Nones' in series we've already seen,
        # cache the best known count so far in a dict.
        if known.name in series_best_nones:
          known_nones = series_best_nones[known.name]
        else:
          known_nones = len([val for val in known if val is None])

        if known_nones > candidate_nones:
          if settings.REMOTE_STORE_MERGE_RESULTS:
            # This series has potential data that might be missing from
            # earlier series.  Attempt to merge in useful data and update
            # the cache count.
            log.info("Merging multiple TimeSeries for %s" % known.name)
            for i, j in enumerate(known):
              if j is None and series[i] is not None:
                known[i] = series[i]
                known_nones -= 1
            # Store known_nones in our cache
            series_best_nones[known.name] = known_nones
          else:
            # Not merging data: we've found a series better than what we've
            # already seen.  Update the count cache and replace the existing
            # series.
            series_best_nones[known.name] = candidate_nones
            seriesList[known.name] = series
        else:
          # If we are merging data: the existing series has no gaps, so there
          # is nothing to merge together.  Save ourselves some work here.
          #
          # OR, if we are picking the best series: we already have this series
          # in the seriesList and the candidate is 'worse' than what we
          # already have, so we don't need to compare anything else.  Save
          # ourselves some work here.
          break

      else:
        # If we looked at this series above, and it matched a 'known'
        # series already, then it's already in the series list (or ignored).
        # If not, append it here.
        seriesList[series.name] = series

    # Stabilize the order of the results by ordering the resulting series by name.
    # This returns the result ordering to the behavior observed pre PR#1010.
    return [seriesList[k] for k in sorted(seriesList)]
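
The None-counting logic in Example #39 keeps the most complete copy of a series, or, when REMOTE_STORE_MERGE_RESULTS is enabled, back-fills gaps in the copy it already has. Below is a minimal standalone sketch of that behavior; plain lists stand in for TimeSeries objects, and pick_or_merge is a hypothetical helper, not part of graphite-web:

def pick_or_merge(known, candidate, merge=False):
  # Count the gaps (None values) in the copy we already have.
  known_nones = sum(1 for v in known if v is None)

  if merge:
    if known_nones == 0:
      # The existing copy has no gaps; nothing to merge.
      return known
    # Back-fill gaps in the known copy from the candidate.
    return [c if k is None and c is not None else k
            for k, c in zip(known, candidate)]

  # Not merging: keep whichever copy has fewer gaps.
  candidate_nones = sum(1 for v in candidate if v is None)
  return candidate if candidate_nones < known_nones else known

# Two partially-filled copies of the same series:
a = [1, None, 3, None]
b = [None, 2, None, None]
assert pick_or_merge(a, b, merge=True) == [1, 2, 3, None]
assert pick_or_merge(a, b) == a  # a has fewer gaps, so it wins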
Example #40
0
def find_view(request):
  "View for finding metrics matching a given pattern"
  profile = getProfile(request)
  format = request.REQUEST.get('format', 'treejson')
  local_only = int( request.REQUEST.get('local', 0) )
  wildcards = int( request.REQUEST.get('wildcards', 0) )
  fromTime = int( request.REQUEST.get('from', -1) )
  untilTime = int( request.REQUEST.get('until', -1) )
  jsonp = request.REQUEST.get('jsonp', False)

  if fromTime == -1:
    fromTime = None
  if untilTime == -1:
    untilTime = None

  automatic_variants = int( request.REQUEST.get('automatic_variants', 0) )

  try:
    query = str( request.REQUEST['query'] )
  except:
    return HttpResponseBadRequest(content="Missing required parameter 'query'",
                                  content_type="text/plain")

  if '.' in query:
    base_path = query.rsplit('.', 1)[0] + '.'
  else:
    base_path = ''

  if format == 'completer':
    query = query.replace('..', '*.')
    if not query.endswith('*'):
      query += '*'

    if automatic_variants:
      query_parts = query.split('.')
      for i,part in enumerate(query_parts):
        if ',' in part and '{' not in part:
          query_parts[i] = '{%s}' % part
      query = '.'.join(query_parts)

  try:
    matches = list( STORE.find(query, fromTime, untilTime, local=local_only) )
  except:
    log.exception()
    raise

  log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
  matches.sort(key=lambda node: node.name)
  log.info("received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d" % (query, fromTime, untilTime, local_only, format, len(matches)))

  if format == 'treejson':
    content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
    response = json_response_for(request, content, jsonp=jsonp)

  elif format == 'pickle':
    content = pickle_nodes(matches)
    response = HttpResponse(content, content_type='application/pickle')

  elif format == 'completer':
    results = []
    for node in matches:
      node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
      if not node.is_leaf:
        node_info['path'] += '.'
      results.append(node_info)

    if len(results) > 1 and wildcards:
      wildcardNode = {'name' : '*'}
      results.append(wildcardNode)

    response = json_response_for(request, { 'metrics' : results }, jsonp=jsonp)

  else:
    return HttpResponseBadRequest(
        content="Invalid value for 'format' parameter",
        content_type="text/plain")

  response['Pragma'] = 'no-cache'
  response['Cache-Control'] = 'no-cache'
  return response
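
For the 'completer' format, find_view first normalizes the raw query into a glob pattern: '..' becomes '*.', a trailing '*' is appended, and, with automatic_variants enabled, bare comma lists inside a path segment are wrapped in braces. A small sketch of that normalization as a standalone function (normalize_completer_query is a hypothetical name, not part of graphite-web):

def normalize_completer_query(query, automatic_variants=False):
  # '..' means "any single path component here".
  query = query.replace('..', '*.')
  if not query.endswith('*'):
    query += '*'

  if automatic_variants:
    # Turn bare comma lists into brace alternatives, e.g. 'a,b' -> '{a,b}'.
    parts = query.split('.')
    for i, part in enumerate(parts):
      if ',' in part and '{' not in part:
        parts[i] = '{%s}' % part
    query = '.'.join(parts)

  return query

# e.g.:
assert normalize_completer_query('carbon..cpuUsage') == 'carbon.*.cpuUsage*'
assert normalize_completer_query('carbon.agents,relays.', True) == 'carbon.{agents,relays}.*'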
Example #41
0
def _fetchData(pathExpr, startTime, endTime, now, requestContext, seriesList):
  if settings.REMOTE_PREFETCH_DATA:
    matching_nodes = [node for node in STORE.find(pathExpr, startTime, endTime, local=True)]

    # inflight_requests is only present if at least one remote store
    # has been queried
    if 'inflight_requests' in requestContext:
      fetches = requestContext['inflight_requests']
    else:
      fetches = {}

    def result_queue_generator():
      for node in matching_nodes:
        if node.is_leaf:
          yield (node.path, node.fetch(startTime, endTime, now, requestContext))

      log.info(
        'render.datalib.fetchData:: result_queue_generator got {count} fetches'
        .format(count=len(fetches)),
      )
      for key, fetch in fetches.iteritems():
        log.info(
          'render.datalib.fetchData:: getting results of {host}'
          .format(host=key),
        )

        if isinstance(fetch, FetchInProgress):
          fetch = fetch.waitForResults()

        if fetch is None:
          log.info('render.datalib.fetchData:: fetch is None')
          continue

        for result in fetch:
          if result['pathExpression'] == pathExpr:
            yield (
              result['path'],
              (
                (result['start'], result['end'], result['step']),
                result['values'],
              ),
            )

    result_queue = result_queue_generator()
  else:
    matching_nodes = [node for node in STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])]
    result_queue = [
      (node.path, node.fetch(startTime, endTime, now, requestContext))
      for node in matching_nodes
      if node.is_leaf
    ]

  log.info("render.datalib.fetchData :: starting to merge")
  for path, results in result_queue:
    if isinstance(results, FetchInProgress):
      results = results.waitForResults()

    if not results:
      log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (path, startTime, endTime))
      continue

    try:
      (timeInfo, values) = results
    except ValueError as e:
      raise Exception("could not parse timeInfo/values from metric '%s': %s" % (path, e))
    (start, end, step) = timeInfo

    series = TimeSeries(path, start, end, step, values)

    # hack to pass expressions through to render functions
    series.pathExpression = pathExpr

    # Used as a cache to avoid recounting series None values below.
    series_best_nones = {}

    if series.name in seriesList:
      # This counts the Nones in each series, and is unfortunately O(n) for each
      # series, which may be worth further optimization. The value of doing this
      # at all is to avoid the "flipping" effect of loading a graph multiple times
      # and having inconsistent data returned if one of the backing stores has
      # inconsistent data. This is imperfect as a validity test, but in practice
      # nicely keeps us using the "most complete" dataset available. Think of it
      # as a very weak CRDT resolver.
      candidate_nones = 0
      if not settings.REMOTE_STORE_MERGE_RESULTS:
        candidate_nones = len(
          [val for val in values if val is None])

      known = seriesList[series.name]
      # To avoid repeatedly recounting the 'Nones' in series we've already seen,
      # cache the best known count so far in a dict.
      if known.name in series_best_nones:
        known_nones = series_best_nones[known.name]
      else:
        known_nones = len([val for val in known if val is None])

      if known_nones > candidate_nones:
        if settings.REMOTE_STORE_MERGE_RESULTS:
          # This series has potential data that might be missing from
          # earlier series.  Attempt to merge in useful data and update
          # the cache count.
          log.info("Merging multiple TimeSeries for %s" % known.name)
          for i, j in enumerate(known):
            if j is None and series[i] is not None:
              known[i] = series[i]
              known_nones -= 1
          # Store known_nones in our cache
          series_best_nones[known.name] = known_nones
        else:
          # Not merging data: we've found a series better than what we've
          # already seen.  Update the count cache and replace the existing
          # series.
          series_best_nones[known.name] = candidate_nones
          seriesList[known.name] = series
      else:
        if settings.REMOTE_PREFETCH_DATA:
          # if we're using REMOTE_PREFETCH_DATA we can save some time by skipping
          # find, but that means we don't know how many nodes to expect so we
          # have to iterate over all returned results
          continue

        # If we are merging data: the existing series has no gaps, so there
        # is nothing to merge together.  Save ourselves some work here.
        #
        # OR, if we are picking the best series: we already have this series
        # in the seriesList and the candidate is 'worse' than what we already
        # have, so we don't need to compare anything else.  Save ourselves
        # some work here.
        break

    else:
      # If we looked at this series above, and it matched a 'known'
      # series already, then it's already in the series list (or ignored).
      # If not, append it here.
      seriesList[series.name] = series

  # Stabilize the order of the results by ordering the resulting series by name.
  # This returns the result ordering to the behavior observed pre PR#1010.
  return [seriesList[k] for k in sorted(seriesList)]
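
When REMOTE_PREFETCH_DATA is enabled, Example #41 skips a blocking remote find and instead lazily chains local fetches with the remote requests that are already in flight. A minimal sketch of that generator pattern, using simple stand-in types ((path, fetch_callable) pairs for local nodes and a plain dict of already-fetched remote result lists); this is not the graphite-web API:

def iter_results(local_nodes, inflight, path_expr):
  # Yield local fetches first; nothing blocks on the remote stores here.
  for path, fetch in local_nodes:
    yield path, fetch()

  # Then drain the results the remote stores were already asked for, keeping
  # only the ones that answer the current path expression.
  for host, results in inflight.items():
    for r in results:
      if r['pathExpression'] == path_expr:
        yield r['path'], ((r['start'], r['end'], r['step']), r['values'])

# e.g. with trivial stand-ins:
local = [('carbon.local.cpu', lambda: ((0, 60, 60), [1.0]))]
remote = {'host-a': [{'pathExpression': 'carbon.*.cpu', 'path': 'carbon.remote.cpu',
                      'start': 0, 'end': 60, 'step': 60, 'values': [2.0]}]}
for path, (timeInfo, values) in iter_results(local, remote, 'carbon.*.cpu'):
  print(path, timeInfo, values)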