Example #1
def delegateRenderIOW(graphOptions,graphType,tenant):
  start = time()
  post_data = {'target': [], 'from': '-24hours'}
  if 'data' in graphOptions:
    log.rendering(graphOptions['data'])
    for series in graphOptions['data']:
      log.rendering(series.name)
      post_data['target'].append(series.name)
      post_data['from'] = series.start
      post_data['until'] = series.end
  post_data['tenant'] = tenant
  post_data['graphType'] = graphType
  servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
  shuffle(servers)
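  # Try each rendering host in random order; the first successful render is returned.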
  for server in servers:
    start2 = time()
    try: 
      response = requests.post("%s/render/" % server, data=post_data)
      assert response.status_code == 200, "Bad response code %d from %s" % (response.status_code,server)
      contentType = response.headers['Content-Type']
      imageData = response.content
      assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
      assert imageData, "Received empty response from %s" % server
      # Wrap things up
      log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
      log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
      return imageData
    except Exception:
      log.exception("Exception while attempting remote rendering request on %s" % server)
      log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
      continue
Example #2
def prefetchData(requestContext, pathExpressions):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by fetchData.
  """
  if not pathExpressions:
    return

  start = time.time()
  log.debug("Fetching data for [%s]" % (', '.join(pathExpressions)))

  (startTime, endTime, now) = timebounds(requestContext)

  prefetched = collections.defaultdict(list)

  for result in STORE.fetch(pathExpressions, startTime, endTime, now, requestContext):
    if result is None:
      continue

    prefetched[result['pathExpression']].append((
      result['name'],
      (
        result['time_info'],
        result['values'],
      ),
    ))

  if not requestContext.get('prefetched'):
    requestContext['prefetched'] = {}

  requestContext['prefetched'][(startTime, endTime, now)] = prefetched

  log.rendering("Fetched data for [%s] in %fs" % (', '.join(pathExpressions), time.time() - start))
Example #3
    def find_nodes(self, query):
        """Find nodes matching a query."""
        # TODO: we should probably consider query.startTime and query.endTime
        #  to filter out metrics that had no points in this interval.

        cache_key = "find:%s" % (hashing.compactHash(query.pattern))
        results = self.django_cache().get(cache_key)
        if results:
            cache_hit = True
        else:
            find_start = time.time()
            results = glob_utils.graphite_glob(self.accessor(), query.pattern)
            log.rendering(
                'find(%s) - %f secs' % (query.pattern, time.time() - find_start))
            cache_hit = False

        metric_names, directories = results

        for metric_name in metric_names:
            reader = Reader(
                self.accessor(), self.cache(), self.carbonlink(), metric_name)
            yield node.LeafNode(metric_name, reader)

        for directory in directories:
            yield node.BranchNode(directory)

        # find_nodes() is a generator, so this cache write only happens once the caller has consumed every node.
        if not cache_hit:
            self.django_cache().set(cache_key, results, self._cache_timeout)
Example #4
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  post_data = {
    'from' : int((requestOptions['startTime']-datetime(1970,1,1,tzinfo=requestOptions['tzinfo'])).total_seconds()),
    'until' : int((requestOptions['endTime']-datetime(1970,1,1,tzinfo=requestOptions['tzinfo'])).total_seconds()),
    'tenant' : requestOptions['tenant'],
    'format' : requestOptions.get('format'),
    'target': [t for t in requestOptions['targets'] if t.strip()],
  }
  post_data.update(graphOptions)
  log.rendering(post_data)
  servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
  shuffle(servers)
  imageData = None
  for server in servers:
    start2 = time()
    try:
      response = requests.post("%s/render/" % server, data=post_data)
      assert response.status_code == 200, "Bad response code %d from %s" % (response.status_code,server)
      contentType = response.headers['Content-Type']
      imageData = response.content
      assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
      assert imageData, "Received empty response from %s" % server
      # Wrap things up
      log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
      log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
      break
    except Exception:
      log.exception("Exception while attempting remote rendering request on %s" % server)
      log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
      continue
  
  if imageData is None:  # every rendering host failed
    return HttpResponseServerError()
  response = buildResponse(imageData, 'image/png')
  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  return response
Example #6
        def read_points():
            read_start = time.time()

            cached_datapoints = self.__get_cached_datapoints(stage)

            # TODO: Consider wrapping an array (using NaN for None) for
            # speed&memory efficiency
            points = [None] * points_num
            for ts, point in ts_and_points:
                index = stage.step(ts) - start_step
                points[index] = point

            if cached_datapoints:
                points = self._merge_cached_points(stage,
                                                   start_step,
                                                   step,
                                                   aggregation_method,
                                                   points,
                                                   cached_datapoints,
                                                   raw_step=raw_step)

            now = time.time()
            log.rendering(
                'fetch(%s, %d, %d) - %d points - read: %f secs - total: %f secs'
                % (self._metric_name, start_time, end_time, len(points),
                   now - read_start, now - fetch_start))
            return (start_time, end_time, stage.precision), points
Example #7
    def fetch(self, start_time, end_time, now=None):
        """Fetch point for a given interval as per the Graphite API.

        Args:
          start_time: Timestamp to fetch points from, will be constrained by retention policy.
          end_time: Timestamp to fetch points until, will be constrained by retention policy.
          now: Current timestamp as a float, defaults to time.time(), for tests.

        Returns:
          A tuple made of (rounded start time, rounded end time, stage precision), points
          Points is a list for which missing points are set to None.
        """
        self.__refresh_metric()

        fetch_start = time.time()
        log.rendering('fetch(%s, %d, %d) - start' %
                      (self._metric_name, start_time, end_time))

        if now is None:
            now = time.time()

        start_time, end_time, stage = self.__get_time_info(
            start_time, end_time, now)
        start_step = stage.step(start_time)
        points_num = stage.step(end_time) - start_step

        if not self._metric:
            # The metric doesn't exist, let's fail gracefully.
            ts_and_points = []
        else:
            # This returns a generator which we can iterate on later.
            ts_and_points = self._accessor.fetch_points(
                self._metric, start_time, end_time, stage)

        cached_datapoints = self.__get_cached_datapoints(stage)

        def read_points():
            read_start = time.time()
            # TODO: Consider wrapping an array (using NaN for None) for
            # speed&memory efficiency
            points = [None] * points_num
            for ts, point in ts_and_points:
                index = stage.step(ts) - start_step
                points[index] = point

            if cached_datapoints:
                points = self._merge_cached_points(stage, start_step, points,
                                                   cached_datapoints)

            now = time.time()
            log.rendering(
                'fetch(%s, %d, %d) - %d points - read: %f secs - total: %f secs'
                % (self._metric_name, start_time, end_time, len(points),
                   now - read_start, now - fetch_start))
            return (start_time, end_time, stage.precision), points

        log.rendering('fetch(%s, %d, %d) - started' %
                      (self._metric_name, start_time, end_time))
        return FetchInProgress(read_points)
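A minimal sketch of the deferred-read contract implied above (this stand-in is an assumption, not graphite's actual FetchInProgress implementation):

class FetchInProgress(object):
    # Simplified stand-in: hold a zero-argument callable and defer it.
    def __init__(self, read_fn):
        self._read_fn = read_fn

    def waitForResults(self):
        # The consumer calls this when it actually needs the points;
        # only then does read_points() above execute.
        return self._read_fn()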
Example #8
def doImageRender(graphClass, graphOptions):
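  # StringIO holds the binary PNG bytes on Python 2; Python 3 code would use io.BytesIO.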
  pngData = StringIO()
  t = time()
  img = graphClass(**graphOptions)
  img.output(pngData)
  log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
  imageData = pngData.getvalue()
  pngData.close()
  return imageData
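A hedged usage sketch (LineGraph, seriesList, and the option names are illustrative, not from the source):

# Hypothetical call; graphClass would be one of graphite's glyph classes.
imageData = doImageRender(LineGraph, {'data': seriesList, 'width': 330, 'height': 250})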
Example #9
def doImageRender(graphClass, graphOptions):
    pngData = StringIO()
    t = time()
    img = graphClass(**graphOptions)
    img.output(pngData)
    log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
    imageData = pngData.getvalue()
    pngData.close()
    return imageData
Example #10
    def find_nodes(self, query):
        """Find nodes matching a query."""
        leaves_only = hasattr(query, "leaves_only") and query.leaves_only
        cache_key = "find_nodes:%s" % self._hash(query)
        cached = self.django_cache().get(cache_key)
        if cached is not None:
            cache_hit = True
            success, results = cached
        else:
            find_start = time.time()
            try:
                if query.startTime is None:
                    start_time = None
                else:
                    start_time = datetime.fromtimestamp(query.startTime)

                if query.endTime is None:
                    end_time = None
                else:
                    end_time = datetime.fromtimestamp(query.endTime)

                results = glob_utils.graphite_glob(
                    self.accessor(),
                    query.pattern,
                    metrics=True,
                    directories=not leaves_only,
                    start_time=start_time,
                    end_time=end_time,
                )
                success = True
            except bg_accessor.Error as e:
                success = False
                results = e

            log.rendering(
                "find(%s) - %f secs" % (query.pattern, time.time() - find_start)
            )
            cache_hit = False

        if not cache_hit:
            self.django_cache().set(cache_key, (success, results), self._cache_timeout)

        # Failures are cached as well, so a cached error is re-raised just like a fresh one.
        if not success:
            raise results

        metric_names, directories = results

        for metric_name in metric_names:
            reader = Reader(
                self.accessor(), self.cache(), self.carbonlink(), metric_name
            )
            yield node.LeafNode(metric_name, reader)

        for directory in directories:
            yield node.BranchNode(directory)
Example #11
def prefetchRemoteData(requestContext, targets):
    """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching that doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by datalib.
  """
    log.rendering("Prefetching remote data")
    pathExpressions = extractPathExpressions(targets)
    results = STORE.fetch_remote(pathExpressions, requestContext)

    # TODO: instead of doing that it would be wait better to use
    # the shared cache to cache pathExpr instead of full queries.
    requestContext['prefetched'] = PrefetchedData(results)
Example #12
def prefetchData(requestContext, pathExpressions):
    """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by fetchData.
  """
    if not pathExpressions:
        return

    start = time.time()
    log.debug("Fetching data for [%s]" % (', '.join(pathExpressions)))

    (startTime, endTime, now) = timebounds(requestContext)

    prefetched = collections.defaultdict(list)

    for result in STORE.fetch(pathExpressions, startTime, endTime, now,
                              requestContext):
        if result is None:
            continue

        prefetched[result['pathExpression']].append((
            result['name'],
            (
                result['time_info'],
                result['values'],
            ),
        ))

    # Several third-party readers including rrdtool and biggraphite return values in a
    # generator which can only be iterated on once. These must be converted to a list.
    for pathExpression, items in prefetched.items():
        for i, (name, (time_info, values)) in enumerate(items):
            if isinstance(values, types.GeneratorType):
                prefetched[pathExpression][i] = (name, (time_info,
                                                        list(values)))

    if not requestContext.get('prefetched'):
        requestContext['prefetched'] = {}

    if (startTime, endTime, now) in requestContext['prefetched']:
        requestContext['prefetched'][(startTime, endTime,
                                      now)].update(prefetched)
    else:
        requestContext['prefetched'][(startTime, endTime, now)] = prefetched

    log.rendering("Fetched data for [%s] in %fs" %
                  (', '.join(pathExpressions), time.time() - start))
Example #13
def renderLocalView(request):
  try:
    start = time()
    reqParams = StringIO(request.raw_post_data)
    graphType = reqParams.readline().strip()
    optionsPickle = reqParams.read()
    reqParams.close()
    graphClass = GraphTypes[graphType]
    options = unpickle.loads(optionsPickle)
    image = doImageRender(graphClass, options)
    log.rendering("Delegated rendering request took %.6f seconds" % (time() -  start))
    return buildResponse(image)
  except Exception:
    log.exception("Exception in graphite.render.views.rawrender")
    return HttpResponseServerError()
Example #14
def prefetchRemoteData(requestContext, targets):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching that doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by datalib.
  """
  log.rendering("Prefetching remote data")
  pathExpressions = extractPathExpressions(targets)

  (startTime, endTime, now) = timebounds(requestContext)

  results = STORE.fetch_remote(pathExpressions, startTime, endTime, now, requestContext)

  # Assumes the caller already initialized requestContext['prefetched'] as a dict
  # (see the requestContext setup in Example #30).
  requestContext['prefetched'][(startTime, endTime, now)] = PrefetchedData(results)
Example #15
def renderLocalView(request):
  try:
    start = time()
    reqParams = StringIO(request.body)
    graphType = reqParams.readline().strip()
    optionsPickle = reqParams.read()
    reqParams.close()
    graphClass = GraphTypes[graphType]
    options = unpickle.loads(optionsPickle)
    image = doImageRender(graphClass, options)
    log.rendering("Delegated rendering request took %.6f seconds" % (time() -  start))
    return buildResponse(image)
  except Exception:
    log.exception("Exception in graphite.render.views.rawrender")
    return HttpResponseServerError()
Example #16
def prefetchRemoteData(requestContext, targets):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching that doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by datalib.
  """
  pathExpressions = extractPathExpressions(targets)
  log.rendering("Prefetching remote data for [%s]" % (', '.join(pathExpressions)))

  (startTime, endTime, now) = timebounds(requestContext)

  results = STORE.fetch_remote(pathExpressions, startTime, endTime, now, requestContext)

  requestContext['prefetched'][(startTime, endTime, now)] = PrefetchedData(results)
Example #17
def renderLocalView(request):
  try:
    start = time()
    reqParams = BytesIO(request.body)
    graphType = reqParams.readline().strip()
    optionsPickle = reqParams.read()
    reqParams.close()
    graphClass = GraphTypes[graphType]
    options = unpickle.loads(optionsPickle)
    image = doImageRender(graphClass, options)
    log.rendering("Delegated rendering request took %.6f seconds" % (time() -  start))
    response = buildResponse(image)
    add_never_cache_headers(response)
    return response
  except Exception:
    log.exception("Exception in graphite.render.views.rawrender")
    return HttpResponseServerError()
Example #18
    def find_nodes(self, query):
        """Find nodes matching a query."""
        # TODO: we should probably consider query.startTime and query.endTime
        #  to filter out metrics that had no points in this interval.

        leaves_only = hasattr(query, 'leaves_only') and query.leaves_only
        cache_key = "find_nodes:%s" % (hashing.compactHash(query.pattern))
        cached = self.django_cache().get(cache_key)
        if cached:
            cache_hit = True
            success, results = cached
        else:
            find_start = time.time()
            try:
                results = glob_utils.graphite_glob(self.accessor(),
                                                   query.pattern,
                                                   metrics=True,
                                                   directories=not leaves_only)
                success = True
            except bg_accessor.Error as e:
                success = False
                results = e

            log.rendering('find(%s) - %f secs' %
                          (query.pattern, time.time() - find_start))
            cache_hit = False

        if not cache_hit:
            self.django_cache().set(cache_key, (success, results),
                                    self._cache_timeout)

        if not success:
            raise results

        metric_names, directories = results

        for metric_name in metric_names:
            reader = Reader(self.accessor(), self.cache(), self.carbonlink(),
                            metric_name)
            yield node.LeafNode(metric_name, reader)

        for directory in directories:
            yield node.BranchNode(directory)
Example #19
def prefetchData(requestContext, pathExpressions):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching than doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by fetchData.
  """
  if not pathExpressions:
    return

  start = time.time()
  log.debug("Fetching data for [%s]" % (', '.join(pathExpressions)))

  (startTime, endTime, now) = timebounds(requestContext)

  prefetched = collections.defaultdict(list)

  for result in STORE.fetch(pathExpressions, startTime, endTime, now, requestContext):
    if result is None:
      continue

    prefetched[result['pathExpression']].append((
      result['name'],
      (
        result['time_info'],
        result['values'],
      ),
    ))

  # Several third-party readers including rrdtool and biggraphite return values in a
  # generator which can only be iterated on once. These must be converted to a list.
  for pathExpression, items in prefetched.items():
    for i, (name, (time_info, values)) in enumerate(items):
      if isinstance(values, types.GeneratorType):
        prefetched[pathExpression][i] = (name, (time_info, list(values)))

  if not requestContext.get('prefetched'):
    requestContext['prefetched'] = {}

  requestContext['prefetched'][(startTime, endTime, now)] = prefetched

  log.rendering("Fetched data for [%s] in %fs" % (', '.join(pathExpressions), time.time() - start))
Example #20
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  post_data = {
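    # NOTE: strftime('%s') is platform-specific (a glibc extension) and ignores tzinfo;
    # Example #4 shows the portable epoch arithmetic.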
    'from' : int(requestOptions['startTime'].strftime('%s')),
    'until' : int(requestOptions['endTime'].strftime('%s')),
    'tenant' : requestOptions['tenant'],
    'format' : requestOptions.get('format'),
    'target': [t for t in requestOptions['targets'] if t.strip()],
    'tz': str(requestOptions['tzinfo']),
    'graphType': str(requestOptions['graphType']),
    'pieMode': str(requestOptions['pieMode']),
  }

  if 'maxDataPoints' in requestOptions:
    post_data['maxDataPoints'] = str(requestOptions['maxDataPoints'])

  post_data.update(graphOptions)
  log.rendering(post_data)
  servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely

  shuffle(servers)
  start2 = time()
  try:
    response = requests.post("%s/render/" % servers[0], data=post_data)
    if response.status_code != 200:
      return HttpResponse(bytes(response.content), status=response.status_code)
    contentType = response.headers['Content-Type']
    imageData = response.content
    # This assert is not needed - it's perfectly OK to get empty response when there is no data
    #assert imageData, "Received empty response from %s" % servers[0]
    # Wrap things up
    log.rendering('Remotely rendered image on %s in %.6f seconds' % (servers[0],time() - start2))
    log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
  except Exception as e:
    log.exception("Exception while attempting remote rendering request on %s" % servers[0])
    log.rendering('Exception while remotely rendering on %s wasted %.6f' % (servers[0],time() - start2))
    return HttpResponse(str(e), status=500)

  response = buildResponse(imageData, contentType)
  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  return response
Example #21
def prefetchRemoteData(requestContext, targets):
  """Prefetch a bunch of path expressions and stores them in the context.

  The idea is that this will allow more batching that doing a query
  each time evaluateTarget() needs to fetch a path. All the prefetched
  data is stored in the requestContext, to be accessed later by datalib.
  """
  # only prefetch if there is at least one active remote finder
  # this is to avoid the overhead of tagdb lookups in extractPathExpressions
  if not any(not getattr(finder, 'local', True) and not getattr(finder, 'disabled', False)
             for finder in STORE.finders):
    return

  pathExpressions = extractPathExpressions(targets)
  log.rendering("Prefetching remote data for [%s]" % (', '.join(pathExpressions)))

  (startTime, endTime, now) = timebounds(requestContext)

  results = STORE.fetch_remote(pathExpressions, startTime, endTime, now, requestContext)

  requestContext['prefetched'][(startTime, endTime, now)] = PrefetchedData(results)
Example #22
        def read_points():
            read_start = time.time()

            cached_datapoints = self.__get_cached_datapoints(stage)
            tracing.add_attr_to_trace('metric.name', self._metric_name)
            tracing.add_attr_to_trace('points.num', points_num)

            # TODO: Consider wrapping an array (using NaN for None) for
            # speed&memory efficiency
            points = [None] * points_num
            for ts, point in ts_and_points:
                index = stage.step(ts) - start_step
                points[index] = point

            if cached_datapoints:
                points = self._merge_cached_points(
                    stage,
                    start_time,
                    step,
                    aggregation_method,
                    points,
                    cached_datapoints,
                    raw_step=raw_step,
                )

            now = time.time()
            log.rendering(
                "fetch(%s, %d, %d) - %d points - read: %f secs - total: %f secs"
                % (
                    self._metric_name,
                    start_time,
                    end_time,
                    len(points),
                    now - read_start,
                    now - fetch_start,
                )
            )
            return (start_time, end_time, stage.precision), points
Example #23
def delegateRendering(graphType, graphOptions, headers=None):
    if headers is None:
        headers = {}
    start = time()
    postData = graphType + '\n' + pickle.dumps(graphOptions)
    servers = settings.RENDERING_HOSTS[:]  #make a copy so we can shuffle it safely
    shuffle(servers)
    connector_class = connector_class_selector(settings.INTRACLUSTER_HTTPS)
    for server in servers:
        start2 = time()
        try:
            # Get a connection
            try:
                pool = connectionPools[server]
            except KeyError:  #happens the first time
                pool = connectionPools[server] = set()
            try:
                connection = pool.pop()
            except KeyError:  #No available connections, have to make a new one
                connection = connector_class(server)
                connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
            # Send the request
            try:
                connection.request('POST', '/render/local/', postData, headers)
            except httplib.CannotSendRequest:
                connection = connector_class(server)  #retry once
                connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
                connection.request('POST', '/render/local/', postData, headers)
            # Read the response
            try:  # Python 2.7+, use buffering of HTTP responses
                response = connection.getresponse(buffering=True)
            except TypeError:  # Python 2.6 and older
                response = connection.getresponse()
            assert response.status == 200, "Bad response code %d from %s" % (
                response.status, server)
            contentType = response.getheader('Content-Type')
            imageData = response.read()
            assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (
                contentType, server)
            assert imageData, "Received empty response from %s" % server
            # Wrap things up
            log.rendering('Remotely rendered image on %s in %.6f seconds' %
                          (server, time() - start2))
            log.rendering(
                'Spent a total of %.6f seconds doing remote rendering work' %
                (time() - start))
            pool.add(connection)
            return imageData
        except Exception:
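            # NOTE: the failed connection is dropped here; it is neither closed nor returned to the pool.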
            log.exception(
                "Exception while attempting remote rendering request on %s" %
                server)
            log.rendering(
                'Exception while remotely rendering on %s wasted %.6f' %
                (server, time() - start2))
            continue
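The per-server pooling idiom above, distilled into a sketch (checkout is illustrative, not part of the source):

connectionPools = {}  # server -> set of idle connections

def checkout(server, connector_class, timeout):
    # Reuse an idle connection when one is available; otherwise open
    # a fresh one with the configured connect timeout.
    pool = connectionPools.setdefault(server, set())
    try:
        return pool.pop()
    except KeyError:
        connection = connector_class(server)
        connection.timeout = timeout
        return connection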
Example #24
def delegateRendering(graphType, graphOptions):
    start = time()
    postData = graphType + '\n' + pickle.dumps(graphOptions)
    servers = settings.RENDERING_HOSTS[:]  #make a copy so we can shuffle it safely
    shuffle(servers)
    connector_class = connector_class_selector(settings.INTRACLUSTER_HTTPS)
    for server in servers:
        start2 = time()
        try:
            # Get a connection
            try:
                pool = connectionPools[server]
            except KeyError:  #happens the first time
                pool = connectionPools[server] = set()
            try:
                connection = pool.pop()
            except KeyError:  #No available connections, have to make a new one
                connection = connector_class(server)
                connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
            # Send the request
            try:
                connection.request('POST', '/render/local/', postData)
            except CannotSendRequest:
                connection = connector_class(server)  #retry once
                connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
                connection.request('POST', '/render/local/', postData)
            # Read the response
            response = connection.getresponse()
            assert response.status == 200, "Bad response code %d from %s" % (
                response.status, server)
            contentType = response.getheader('Content-Type')
            imageData = response.read()
            assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (
                contentType, server)
            assert imageData, "Received empty response from %s" % server
            # Wrap things up
            log.rendering('Remotely rendered image on %s in %.6f seconds' %
                          (server, time() - start2))
            log.rendering(
                'Spent a total of %.6f seconds doing remote rendering work' %
                (time() - start))
            pool.add(connection)
            return imageData
        except Exception:
            log.exception(
                "Exception while attempting remote rendering request on %s" %
                server)
            log.rendering(
                'Exception while remotely rendering on %s wasted %.6f' %
                (server, time() - start2))
            continue
Example #25
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  useCache = 'noCache' not in requestOptions
  cacheTimeout = requestOptions['cacheTimeout']
  requestContext = {
    'startTime' : requestOptions['startTime'],
    'endTime' : requestOptions['endTime'],
    'localOnly' : requestOptions['localOnly'],
    'data' : []
  }
  data = requestContext['data']

  # First we check the request cache
  if useCache:
    requestKey = hashRequest(request)
    cachedResponse = cache.get(requestKey)
    if cachedResponse:
      log.cache('Request-Cache hit [%s]' % requestKey)
      log.rendering('Returned cached response in %.6f' % (time() - start))
      return cachedResponse
    else:
      log.cache('Request-Cache miss [%s]' % requestKey)

  # Now we prepare the requested data
  if requestOptions['graphType'] == 'pie':
    for target in requestOptions['targets']:
      if target.find(':') >= 0:
        try:
          name,value = target.split(':',1)
          value = float(value)
        except ValueError:
          raise ValueError, "Invalid target '%s'" % target
        data.append( (name,value) )
      else:
        seriesList = evaluateTarget(requestContext, target)

        for series in seriesList:
          func = PieFunctions[requestOptions['pieMode']]
          data.append( (series.name, func(requestContext, series) or 0 ))

  elif requestOptions['graphType'] == 'line':
    # Let's see if at least our data is cached
    if useCache:
      targets = requestOptions['targets']
      startTime = requestOptions['startTime']
      endTime = requestOptions['endTime']
      dataKey = hashData(targets, startTime, endTime)
      cachedData = cache.get(dataKey)
      if cachedData:
        log.cache("Data-Cache hit [%s]" % dataKey)
      else:
        log.cache("Data-Cache miss [%s]" % dataKey)
    else:
      cachedData = None

    if cachedData is not None:
      requestContext['data'] = data = cachedData
    else: # Have to actually retrieve the data now
      for target in requestOptions['targets']:
        t = time()
        seriesList = evaluateTarget(requestContext, target)
        log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
        data.extend(seriesList)

    if useCache:
      cache.set(dataKey, data, cacheTimeout)

    format = requestOptions.get('format')
    if format == 'csv':
      response = HttpResponse(mimetype='text/csv')
      writer = csv.writer(response, dialect='excel')

      for series in data:
        for i, value in enumerate(series):
          timestamp = localtime( series.start + (i * series.step) )
          writer.writerow( (series.name, strftime("%Y-%m-%d %H:%M:%S", timestamp), value) )

      return response

    if format == 'json':
      series_data = []
      for series in data:
        timestamps = range(series.start, series.end, series.step)
        datapoints = zip(series, timestamps)
        series_data.append( dict(target=series.name, datapoints=datapoints) )

      if 'jsonp' in requestOptions:
        response = HttpResponse(
          content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
          mimetype='text/javascript')
      else:
        response = HttpResponse(content=json.dumps(series_data), mimetype='application/json')

      response['Pragma'] = 'no-cache'
      response['Cache-Control'] = 'no-cache'
      return response

    if format == 'raw':
      response = HttpResponse(mimetype='text/plain')
      for series in data:
        response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
        response.write( ','.join(map(str,series)) )
        response.write('\n')

      log.rendering('Total rawData rendering time %.6f' % (time() - start))
      return response

    if format == 'svg':
      graphOptions['outputFormat'] = 'svg'

    if format == 'pickle':
      response = HttpResponse(mimetype='application/pickle')
      seriesInfo = [series.getInfo() for series in data]
      pickle.dump(seriesInfo, response, protocol=-1)

      log.rendering('Total pickle rendering time %.6f' % (time() - start))
      return response


  # We've got the data, now to render it
  graphOptions['data'] = data
  if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
    image = delegateRendering(requestOptions['graphType'], graphOptions)
  else:
    image = doImageRender(requestOptions['graphClass'], graphOptions)

  useSVG = graphOptions.get('outputFormat') == 'svg'
  if useSVG and 'jsonp' in requestOptions:
    response = HttpResponse(
      content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
      mimetype='text/javascript')
  else:
    response = buildResponse(image, useSVG and 'image/svg+xml' or 'image/png')

  if useCache:
    cache.set(requestKey, response, cacheTimeout)

  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  return response
Example #26
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  useCache = 'noCache' not in requestOptions

  # First we check the request cache
  if useCache:
    requestKey = hashRequest(request)
    cachedResponse = cache.get(requestKey)
    if cachedResponse:
      log.cache('Request-Cache hit [%s]' % requestKey)
      log.rendering('Returned cached response in %.6f' % (time() - start))
      return cachedResponse
    else:
      log.cache('Request-Cache miss [%s]' % requestKey)

  # Now we prepare the requested data
  if requestOptions['graphType'] == 'pie':
    data = []
    for target in requestOptions['targets']:
      try:
        name,value = target.split(':',1)
        value = float(value)
      except ValueError:
        raise ValueError, "Invalid target '%s'" % target
      data.append( (name,value) )

  elif requestOptions['graphType'] == 'line':
    # Let's see if at least our data is cached
    if useCache:
      targets = requestOptions['targets']
      startTime = requestOptions['startTime']
      endTime = requestOptions['endTime']
      dataKey = hashData(targets, startTime, endTime)
      cachedData = cache.get(dataKey)
      if cachedData:
        log.cache("Data-Cache hit [%s]" % dataKey)
      else:
        log.cache("Data-Cache miss [%s]" % dataKey)
    else:
      cachedData = None

    if cachedData is not None:
      data = cachedData
    else: # Have to actually retrieve the data now
      data = []
      timeInterval = (requestOptions['startTime'], requestOptions['endTime'])
      for target in requestOptions['targets']:
        t = time()
        seriesList = evaluateTarget(target, timeInterval)
        data.extend(seriesList)
        log.rendering('Retrieval of %s took %.6f' % (target,time() - t))

    if useCache:
      cache.set(dataKey, data)

    # If data is all we needed, we're done
    if 'pickle' in requestOptions:
      response = HttpResponse(mimetype='application/pickle')
      seriesInfo = [series.getInfo() for series in data]
      pickle.dump(seriesInfo, response, protocol=-1)

      log.rendering('Total pickle rendering time %.6f' % (time() - start))
      return response

    if 'rawData' in requestOptions:
      response = HttpResponse(mimetype='text/plain')
      for series in data:
        response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
        response.write( ','.join(map(str,series)) )
        response.write('\n')

      log.rendering('Total rawData rendering time %.6f' % (time() - start))
      return response

  # We've got the data, now to render it
  graphOptions['data'] = data
  if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
    image = delegateRendering(requestOptions['graphType'], graphOptions)
  else:
    image = doImageRender(requestOptions['graphClass'], graphOptions)

  response = buildResponse(image)

  if useCache:
    cache.set(requestKey, response)

  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  return response
Example #27
def renderView(request):
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = "noCache" not in requestOptions
    cacheTimeout = requestOptions["cacheTimeout"]
    requestContext = {
        "startTime": requestOptions["startTime"],
        "endTime": requestOptions["endTime"],
        "localOnly": requestOptions["localOnly"],
        "template": requestOptions["template"],
        "data": [],
    }
    data = requestContext["data"]

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache("Request-Cache hit [%s]" % requestKey)
            log.rendering("Returned cached response in %.6f" % (time() - start))
            return cachedResponse
        else:
            log.cache("Request-Cache miss [%s]" % requestKey)

    # Now we prepare the requested data
    if requestOptions["graphType"] == "pie":
        for target in requestOptions["targets"]:
            if target.find(":") >= 0:
                try:
                    name, value = target.split(":", 1)
                    value = float(value)
                except ValueError:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                seriesList = evaluateTarget(requestContext, target)

                for series in seriesList:
                    func = PieFunctions[requestOptions["pieMode"]]
                    data.append((series.name, func(requestContext, series) or 0))

    elif requestOptions["graphType"] == "line":
        # Let's see if at least our data is cached
        if useCache:
            targets = requestOptions["targets"]
            startTime = requestOptions["startTime"]
            endTime = requestOptions["endTime"]
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None

        if cachedData is not None:
            requestContext["data"] = data = cachedData
        else:  # Have to actually retrieve the data now
            for target in requestOptions["targets"]:
                if not target.strip():
                    continue
                t = time()
                seriesList = evaluateTarget(requestContext, target)
                log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
                data.extend(seriesList)

            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        # If data is all we needed, we're done
        format = requestOptions.get("format")
        if format == "csv":
            response = HttpResponse(content_type="text/csv")
            writer = csv.writer(response, dialect="excel")

            for series in data:
                for i, value in enumerate(series):
                    timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions["tzinfo"])
                    writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))

            return response

        if format == "json":
            series_data = []
            if "maxDataPoints" in requestOptions and any(data):
                startTime = min([series.start for series in data])
                endTime = max([series.end for series in data])
                timeRange = endTime - startTime
                maxDataPoints = requestOptions["maxDataPoints"]
                for series in data:
                    numberOfDataPoints = timeRange / series.step
                    if maxDataPoints < numberOfDataPoints:
                        valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
                        secondsPerPoint = int(valuesPerPoint * series.step)
                        # Nudge start over a little bit so that the consolidation bands align with each call
                        # removing 'jitter' seen when refreshing.
                        nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
                        series.start = series.start + nudge
                        valuesToLose = int(nudge / series.step)
                        for r in range(1, valuesToLose):
                            del series[0]
                        series.consolidate(valuesPerPoint)
                        timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
                    else:
                        timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
                    datapoints = zip(series, timestamps)
                    series_data.append(dict(target=series.name, datapoints=datapoints))
            else:
                for series in data:
                    timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
                    datapoints = zip(series, timestamps)
                    series_data.append(dict(target=series.name, datapoints=datapoints))

            if "jsonp" in requestOptions:
                response = HttpResponse(
                    content="%s(%s)" % (requestOptions["jsonp"], json.dumps(series_data)),
                    content_type="text/javascript",
                )
            else:
                response = HttpResponse(content=json.dumps(series_data), content_type="application/json")

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            return response

        if format == "raw":
            response = HttpResponse(content_type="text/plain")
            for series in data:
                response.write("%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step))
                response.write(",".join(map(str, series)))
                response.write("\n")

            log.rendering("Total rawData rendering time %.6f" % (time() - start))
            return response

        if format == "svg":
            graphOptions["outputFormat"] = "svg"
        elif format == "pdf":
            graphOptions["outputFormat"] = "pdf"

        if format == "pickle":
            response = HttpResponse(content_type="application/pickle")
            seriesInfo = [series.getInfo() for series in data]
            pickle.dump(seriesInfo, response, protocol=-1)

            log.rendering("Total pickle rendering time %.6f" % (time() - start))
            return response

    # We've got the data, now to render it
    graphOptions["data"] = data
    if settings.REMOTE_RENDERING:  # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions["graphType"], graphOptions)
    else:
        image = doImageRender(requestOptions["graphClass"], graphOptions)

    useSVG = graphOptions.get("outputFormat") == "svg"
    if useSVG and "jsonp" in requestOptions:
        response = HttpResponse(
            content="%s(%s)" % (requestOptions["jsonp"], json.dumps(image)), content_type="text/javascript"
        )
    elif graphOptions.get("outputFormat") == "pdf":
        response = buildResponse(image, "application/x-pdf")
    else:
        response = buildResponse(image, "image/svg+xml" if useSVG else "image/png")

    if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
        add_never_cache_headers(response)

    log.rendering("Total rendering time %.6f seconds" % (time() - start))
    return response
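A small worked illustration of the maxDataPoints consolidation arithmetic above (the numbers are hypothetical):

import math

timeRange = 3600       # one hour of data
step = 10              # 10-second raw points
maxDataPoints = 60     # cap requested by the client

numberOfDataPoints = timeRange / step  # 360 raw points
valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))  # 6 raw values per output point
secondsPerPoint = int(valuesPerPoint * step)  # one consolidated point every 60 seconds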
Example #28
def renderView(request):
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' %
                          (time() - start))
            return cachedResponse
        else:
            log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        data = []
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                except ValueError:
                    raise ValueError, "Invalid target '%s'" % target
                data.append((name, value))
            else:
                timeInterval = (requestOptions['startTime'],
                                requestOptions['endTime'])
                seriesList = evaluateTarget(target, timeInterval)

                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(series) or 0))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None

        if cachedData is not None:
            data = cachedData
        else:  # Have to actually retrieve the data now
            data = []
            timeInterval = (requestOptions['startTime'],
                            requestOptions['endTime'])

            for timer in requestOptions['timers']:

                colors = ['#FFDDDD', '#FF9999', '#99FF99', '#DDFFDD']
                if timer.startswith('^'):
                    timer = timer[1:]
                    colors.reverse()

                series_90 = evaluateTarget(
                    "averageSeries(" + timer + ".percent_90)", timeInterval)[0]
                series_50 = evaluateTarget(
                    "averageSeries(" + timer + ".percent_50)", timeInterval)[0]
                series_10 = evaluateTarget(
                    "averageSeries(" + timer + ".percent_10)", timeInterval)[0]
                series_lo = evaluateTarget("averageSeries(" + timer + ".min)",
                                           timeInterval)[0]

                series_90.options['lowBound'] = series_50
                series_90.options['areaFill'] = True
                series_90.options['fixedColor'] = colors[1]
                series_90.options['noLegend'] = True
                data.append(series_90)

                series_50.options['lowBound'] = series_10
                series_50.options['areaFill'] = True
                series_50.options['fixedColor'] = colors[2]
                series_50.options['noLegend'] = True
                data.append(series_50)

                series_10.options['lowBound'] = series_lo
                series_10.options['areaFill'] = True
                series_10.options['fixedColor'] = colors[3]
                series_10.options['noLegend'] = True
                data.append(series_10)

                series_lo.options['noDraw'] = True
                series_lo.options['noLegend'] = True
                data.append(series_lo)

                series_mid = evaluateTarget(
                    "averageSeries(" + timer + ".percent_50)", timeInterval)[0]
                series_mid.options['fixedColor'] = '#474747'
                series_mid.name = 'timer: ' + timer
                data.append(series_mid)

            for target in requestOptions['targets']:
                t = time()
                seriesList = evaluateTarget(target, timeInterval)
                log.rendering("Retrieval of %s took %.6f" %
                              (target, time() - t))
                data.extend(seriesList)

        if useCache:
            cache.set(dataKey, data)

        # If data is all we needed, we're done
        if 'pickle' in requestOptions:
            response = HttpResponse(mimetype='application/pickle')
            seriesInfo = [series.getInfo() for series in data]
            pickle.dump(seriesInfo, response, protocol=-1)

            log.rendering('Total pickle rendering time %.6f' %
                          (time() - start))
            return response

        if 'rawData' in requestOptions:
            response = HttpResponse(mimetype='text/plain')
            for series in data:
                response.write(
                    "%s,%d,%d,%d|" %
                    (series.name, series.start, series.end, series.step))
                response.write(','.join(map(str, series)))
                response.write('\n')

            log.rendering('Total rawData rendering time %.6f' %
                          (time() - start))
            return response

    # We've got the data, now to render it
    graphOptions['data'] = data
    if settings.REMOTE_RENDERING:  # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions['graphType'], graphOptions)
    else:
        image = doImageRender(requestOptions['graphClass'], graphOptions)

    response = buildResponse(image)

    if useCache:
        cache.set(requestKey, response)

    log.rendering('Total rendering time %.6f seconds' % (time() - start))
    return response
Example #29
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  useCache = 'noCache' not in requestOptions
  cacheTimeout = requestOptions['cacheTimeout']
  requestContext = {
    'startTime' : requestOptions['startTime'],
    'endTime' : requestOptions['endTime'],
    'localOnly' : requestOptions['localOnly'],
    'data' : []
  }
  data = requestContext['data']

  # First we check the request cache
  if useCache:
    requestKey = hashRequest(request)
    cachedResponse = cache.get(requestKey)
    if cachedResponse:
      log.cache('Request-Cache hit [%s]' % requestKey)
      log.rendering('Returned cached response in %.6f' % (time() - start))
      return cachedResponse
    else:
      log.cache('Request-Cache miss [%s]' % requestKey)

  # Now we prepare the requested data
  if requestOptions['graphType'] == 'pie':
    for target in requestOptions['targets']:
      if target.find(':') >= 0:
        try:
          name,value = target.split(':',1)
          value = float(value)
        except ValueError:
          raise ValueError, "Invalid target '%s'" % target
        data.append( (name,value) )
      else:
        seriesList = evaluateTarget(requestContext, target)

        for series in seriesList:
          func = PieFunctions[requestOptions['pieMode']]
          data.append( (series.name, func(requestContext, series) or 0 ))

  elif requestOptions['graphType'] == 'line':
    # Let's see if at least our data is cached
    if useCache:
      targets = requestOptions['targets']
      startTime = requestOptions['startTime']
      endTime = requestOptions['endTime']
      dataKey = hashData(targets, startTime, endTime)
      cachedData = cache.get(dataKey)
      if cachedData:
        log.cache("Data-Cache hit [%s]" % dataKey)
      else:
        log.cache("Data-Cache miss [%s]" % dataKey)
    else:
      cachedData = None

    if cachedData is not None:
      requestContext['data'] = data = cachedData
    else: # Have to actually retrieve the data now
      for target in requestOptions['targets']:
        t = time()
        seriesList = evaluateTarget(requestContext, target)
        log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
        data.extend(seriesList)

    if useCache:
      cache.set(dataKey, data, cacheTimeout)

    # If data is all we needed, we're done
    if 'pickle' in requestOptions:
      response = HttpResponse(mimetype='application/pickle')
      seriesInfo = [series.getInfo() for series in data]
      pickle.dump(seriesInfo, response, protocol=-1)

      log.rendering('Total pickle rendering time %.6f' % (time() - start))
      return response

    format = requestOptions.get('format')
    if format == 'csv':
      response = HttpResponse(mimetype='text/csv')
      writer = csv.writer(response, dialect='excel')

      for series in data:
        for i, value in enumerate(series):
          timestamp = localtime( series.start + (i * series.step) )
          writer.writerow( (series.name, strftime("%Y-%m-%d %H:%M:%S", timestamp), value) )

      return response

    if format == 'json':
      # Render graph to obtain yStep, yUnitSystem info
      graphOptions['data'] = data
      graph = requestOptions['graphClass'](**graphOptions)
      # Prepare series data for JSON
      series_data = []
      for series in data:
        timestamps = range(series.start, series.end, series.step)
        datapoints = zip(series, timestamps)
        cleaned = filter(lambda x: x is not None, series)
        info = {
            'target': series.name,
            'datapoints': datapoints,
        }
        raw_stats = {
            'min': min(cleaned) if cleaned else None,
            'max': max(cleaned) if cleaned else None,
            'mean': (sum(cleaned) / len(cleaned)) if cleaned else None
        }
        formatted_stats = {}
        for key, value in raw_stats.items():
            label = graph.makeLabel(value, ignore_step=True) if value is not None else None
            formatted_stats[key] = label
        info['stats'] = {'raw': raw_stats, 'formatted': formatted_stats}
        series_data.append(info)
      if 'jsonp' in requestOptions:
        response = HttpResponse(
          content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
          mimetype='text/javascript')
      else:
        response = HttpResponse(content=json.dumps(series_data), mimetype='application/json')
      response['Pragma'] = 'no-cache'
      response['Cache-Control'] = 'no-cache'
      return response

    if format == 'raw':
      response = HttpResponse(mimetype='text/plain')
      for series in data:
        response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
        response.write( ','.join(map(str,series)) )
        response.write('\n')

      log.rendering('Total rawData rendering time %.6f' % (time() - start))
      return response
  # We've got the data, now to render it
  graphOptions['data'] = data
  if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
    image = delegateRendering(requestOptions['graphType'], graphOptions)
  else:
    image = doImageRender(requestOptions['graphClass'], graphOptions)

  response = buildResponse(image)

  if useCache:
    cache.set(requestKey, response, cacheTimeout)

  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  return response
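Note: PieFunctions is referenced above but never defined in these examples. A minimal sketch of what such a mapping could look like, assuming each entry reduces one series to a single number (the helper and the exact entries are hypothetical):

def safe_avg(series):
    # Average over the non-None points; None if the series is empty or all-None.
    values = [v for v in series if v is not None]
    return sum(values) / len(values) if values else None

PieFunctions = {
    'average': lambda requestContext, series: safe_avg(series),
    'maximum': lambda requestContext, series: max((v for v in series if v is not None), default=None),
    'minimum': lambda requestContext, series: min((v for v in series if v is not None), default=None),
}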
Example #30
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  useCache = 'noCache' not in requestOptions
  cacheTimeout = requestOptions['cacheTimeout']
  # TODO: Make that a namedtuple or a class.
  requestContext = {
    'startTime' : requestOptions['startTime'],
    'endTime' : requestOptions['endTime'],
    'now': requestOptions['now'],
    'localOnly' : requestOptions['localOnly'],
    'template' : requestOptions['template'],
    'tzinfo' : requestOptions['tzinfo'],
    'forwardHeaders': requestOptions['forwardHeaders'],
    'data' : [],
    'prefetched' : {},
    'xFilesFactor' : requestOptions['xFilesFactor'],
  }
  data = requestContext['data']

  response = None

  # First we check the request cache
  if useCache:
    requestKey = hashRequest(request)
    response = cache.get(requestKey)
    if response:
      log.cache('Request-Cache hit [%s]' % requestKey)
      log.rendering('Returned cached response in %.6f' % (time() - start))
      return response

    log.cache('Request-Cache miss [%s]' % requestKey)

  # Now we prepare the requested data
  if requestOptions['graphType'] == 'pie':
    for target in requestOptions['targets']:
      if target.find(':') >= 0:
        try:
          name,value = target.split(':',1)
          value = float(value)
        except:
          raise ValueError("Invalid target '%s'" % target)
        data.append( (name,value) )
      else:
        seriesList = evaluateTarget(requestContext, target)

        for series in seriesList:
          func = PieFunctions[requestOptions['pieMode']]
          data.append( (series.name, func(requestContext, series) or 0 ))

  elif requestOptions['graphType'] == 'line':
    # Let's see if at least our data is cached
    cachedData = None
    if useCache:
      targets = requestOptions['targets']
      startTime = requestOptions['startTime']
      endTime = requestOptions['endTime']
      dataKey = hashData(targets, startTime, endTime, requestOptions['xFilesFactor'])
      cachedData = cache.get(dataKey)
      if cachedData:
        log.cache("Data-Cache hit [%s]" % dataKey)
      else:
        log.cache("Data-Cache miss [%s]" % dataKey)

    if cachedData is not None:
      requestContext['data'] = data = cachedData
    else: # Have to actually retrieve the data now
      targets = requestOptions['targets']

      data.extend(evaluateTarget(requestContext, targets))

      if useCache:
        cache.add(dataKey, data, cacheTimeout)

    renderStart = time()

    format = requestOptions.get('format')
    if format == 'csv':
      response = renderViewCsv(requestOptions, data)
    elif format == 'json':
      response = renderViewJson(requestOptions, data)
    elif format == 'dygraph':
      response = renderViewDygraph(requestOptions, data)
    elif format == 'rickshaw':
      response = renderViewRickshaw(requestOptions, data)
    elif format == 'raw':
      response = renderViewRaw(requestOptions, data)
    elif format == 'pickle':
      response = renderViewPickle(requestOptions, data)
    elif format == 'msgpack':
      response = renderViewMsgPack(requestOptions, data)

  # if response wasn't generated above, render a graph image
  if not response:
    format = 'image'
    renderStart = time()
    response = renderViewGraph(graphOptions, requestOptions, data)

  if useCache:
    cache.add(requestKey, response, cacheTimeout)
    patch_response_headers(response, cache_timeout=cacheTimeout)
  else:
    add_never_cache_headers(response)

  log.rendering('%s rendering time %.6f' % (format, time() - renderStart))
  log.rendering('Total request processing time %.6f' % (time() - start))

  return response
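Note: the format dispatch above delegates to renderView* helpers that are not shown here. A plausible sketch of renderViewRaw, assuming it simply mirrors the inline 'raw' branch from the previous example (only the name and signature come from the dispatch table):

def renderViewRaw(requestOptions, data):
    response = HttpResponse(content_type='text/plain')
    for series in data:
        # Header: name,start,end,step| followed by the comma-separated values.
        response.write("%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step))
        response.write(','.join(map(str, series)))
        response.write('\n')
    return response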
Example #31
File: views.py Project: r3cgm/graphite-web
def renderView(request):
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    # TODO: Make that a namedtuple or a class.
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'now': requestOptions['now'],
        'localOnly': requestOptions['localOnly'],
        'template': requestOptions['template'],
        'tzinfo': requestOptions['tzinfo'],
        'forwardHeaders': requestOptions['forwardHeaders'],
        'data': [],
        'prefetched': {},
        'xFilesFactor': requestOptions['xFilesFactor'],
    }
    data = requestContext['data']

    response = None

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        response = cache.get(requestKey)
        if response:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' %
                          (time() - start))
            return response

        log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                except:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                seriesList = evaluateTarget(requestContext, target)

                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(requestContext, series)
                                 or 0))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        cachedData = None
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime,
                               requestOptions['xFilesFactor'])
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)

        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else:  # Have to actually retrieve the data now
            targets = requestOptions['targets']

            data.extend(evaluateTarget(requestContext, targets))

            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        renderStart = time()

        format = requestOptions.get('format')
        if format == 'csv':
            response = renderViewCsv(requestOptions, data)
        elif format == 'json':
            response = renderViewJson(requestOptions, data)
        elif format == 'dygraph':
            response = renderViewDygraph(requestOptions, data)
        elif format == 'rickshaw':
            response = renderViewRickshaw(requestOptions, data)
        elif format == 'raw':
            response = renderViewRaw(requestOptions, data)
        elif format == 'pickle':
            response = renderViewPickle(requestOptions, data)

    # if response wasn't generated above, render a graph image
    if not response:
        format = 'image'
        renderStart = time()
        response = renderViewGraph(graphOptions, requestOptions, data)

    if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
        add_never_cache_headers(response)

    log.rendering('%s rendering time %.6f' % (format, time() - renderStart))
    log.rendering('Total request processing time %.6f' % (time() - start))

    return response
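Note: hashData (and the similar hashRequest) are assumed to normalize a query into a stable cache key. A minimal md5-based sketch, not the project's actual implementation:

from hashlib import md5

def hashData(targets, startTime, endTime, xFilesFactor=None):
    # Sort targets so the key is independent of parameter order.
    key = '%s@%s:%s:%s' % (','.join(sorted(targets)), startTime, endTime, xFilesFactor)
    return md5(key.encode('utf-8')).hexdigest()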
Example #32
def renderView(request):
    start = time()

    # Fall back to 60 seconds if RENDER_DURATION_TIMEOUT isn't configured.
    global_timeout_duration = getattr(settings, 'RENDER_DURATION_TIMEOUT', 60)

    if request.REQUEST.has_key('json_request'):
        (graphOptions,
         requestOptions) = parseDataOptions(request.REQUEST['json_request'])
    elif request.is_ajax() and request.method == 'POST':
        (graphOptions,
         requestOptions) = parseDataOptions(request.raw_post_data)
    else:
        (graphOptions, requestOptions) = parseOptions(request)

    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'localOnly': requestOptions['localOnly'],
        'data': []
    }
    data = requestContext['data']

    # add template to graphOptions
    try:
        user_profile = getProfile(request, allowDefault=False)
        graphOptions['defaultTemplate'] = user_profile.defaultTemplate
    except:
        graphOptions['defaultTemplate'] = "default"

    if request.method == 'GET':
        cache_request_obj = request.GET.copy()
    else:
        cache_request_obj = request.POST.copy()

    # hack request object to add defaultTemplate param
    cache_request_obj.appendlist("template", graphOptions['defaultTemplate'])

    # First we check the request cache
    requestKey = hashRequest(cache_request_obj)
    requestHash = hashRequestWTime(cache_request_obj)
    requestContext['request_key'] = requestHash
    request_data = ""
    if request.method == "POST":
        for k, v in request.POST.items():
            request_data += "%s=%s&" % (k.replace("\t", ""), v.replace(
                "\t", ""))
    else:
        request_data = request.META['QUERY_STRING']
    log.info("DEBUG:Request_meta:[%s]\t%s\t%s\t%s\t\"%s\"" %\
            (requestHash,\
              request.META['REMOTE_ADDR'],\
              request.META['REQUEST_METHOD'],\
              request_data,\
              request.META['HTTP_USER_AGENT']))
    if useCache:
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache('Request-Cache hit [%s]' % requestHash)
            log.rendering('[%s] Returned cached response in %.6f' %
                          (requestHash, (time() - start)))
            log.info("RENDER:[%s]:Timings:Cached %.5f" %
                     (requestHash, time() - start))
            return cachedResponse
        else:
            log.cache('Request-Cache miss [%s]' % requestHash)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                except:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                q = Queue(maxsize=1)
                p = Process(target=evaluateWithQueue,
                            args=(q, requestContext, target))
                p.start()

                seriesList = None
                try:
                    seriesList = q.get(True, global_timeout_duration)
                    p.join()
                except Exception as e:
                    log.info(
                        "DEBUG:[%s] got an exception on trying to get seriesList from queue, error: %s"
                        % (requestHash, e))
                    p.terminate()
                    return errorPage("Failed to fetch data")

                if seriesList is None:
                    log.info("DEBUG:[%s] request timed out" % requestHash)
                    p.terminate()
                    return errorPage("Request timed out")

                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(requestContext, series)
                                 or 0))
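Note: evaluateWithQueue is used as the subprocess entry point above but is never shown. A minimal sketch consistent with its call site (queue first, then the evaluateTarget arguments):

def evaluateWithQueue(queue, requestContext, target):
    # Runs in the child process: evaluate the target and hand the result back
    # through the queue so the parent can enforce a timeout via q.get().
    result = None
    try:
        result = evaluateTarget(requestContext, target)
    except Exception:
        log.exception("evaluateWithQueue failed for %s" % target)
    queue.put(result)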
Example #33
def renderView(request):
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'localOnly': requestOptions['localOnly'],
        'data': []
    }
    data = requestContext['data']

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' %
                          (time() - start))
            return cachedResponse
        else:
            log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                except:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                seriesList = evaluateTarget(requestContext, target)

                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(requestContext, series)
                                 or 0))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None

        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else:  # Have to actually retrieve the data now
            for target in requestOptions['targets']:
                t = time()
                seriesList = evaluateTarget(requestContext, target)
                log.rendering("Retrieval of %s took %.6f" %
                              (target, time() - t))
                data.extend(seriesList)

        if useCache:
            cache.set(dataKey, data, cacheTimeout)

        # If data is all we needed, we're done
        if 'pickle' in requestOptions:
            response = HttpResponse(mimetype='application/pickle')
            seriesInfo = [series.getInfo() for series in data]
            pickle.dump(seriesInfo, response, protocol=-1)

            log.rendering('Total pickle rendering time %.6f' %
                          (time() - start))
            return response

        format = requestOptions.get('format')
        if format == 'csv':
            response = HttpResponse(mimetype='text/csv')
            writer = csv.writer(response, dialect='excel')

            for series in data:
                for i, value in enumerate(series):
                    timestamp = localtime(series.start + (i * series.step))
                    writer.writerow(
                        (series.name, strftime("%Y-%m-%d %H:%M:%S",
                                               timestamp), value))

            return response

        if format == 'json':
            series_data = []
            for series in data:
                timestamps = range(series.start, series.end, series.step)
                datapoints = zip(series, timestamps)
                series_data.append(
                    dict(target=series.name, datapoints=datapoints))

            if 'jsonp' in requestOptions:
                response = HttpResponse(
                    content="%s(%s)" %
                    (requestOptions['jsonp'], json.dumps(series_data)),
                    mimetype='text/javascript')
            else:
                response = HttpResponse(content=json.dumps(series_data),
                                        mimetype='application/json')

            response['Pragma'] = 'no-cache'
            response['Cache-Control'] = 'no-cache'
            return response

        if format == 'raw':
            response = HttpResponse(mimetype='text/plain')
            for series in data:
                response.write(
                    "%s,%d,%d,%d|" %
                    (series.name, series.start, series.end, series.step))
                response.write(','.join(map(str, series)))
                response.write('\n')

            log.rendering('Total rawData rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'svg':
            graphOptions['outputFormat'] = 'svg'

    # We've got the data, now to render it
    graphOptions['data'] = data
    if settings.REMOTE_RENDERING:  # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions['graphType'], graphOptions)
    else:
        image = doImageRender(requestOptions['graphClass'], graphOptions)

    useSVG = graphOptions.get('outputFormat') == 'svg'
    if useSVG and 'jsonp' in requestOptions:
        response = HttpResponse(content="%s(%s)" %
                                (requestOptions['jsonp'], json.dumps(image)),
                                mimetype='text/javascript')
    else:
        response = buildResponse(image, useSVG and 'image/svg+xml'
                                 or 'image/png')

    if useCache:
        cache.set(requestKey, response, cacheTimeout)

    log.rendering('Total rendering time %.6f seconds' % (time() - start))
    return response
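Note: buildResponse is called both with and without an explicit content type in these examples. A minimal sketch consistent with both call sites (the default is an assumption):

def buildResponse(imageData, content_type='image/png'):
    # Wrap the rendered image bytes in an HTTP response; caching headers are
    # left to the caller.
    return HttpResponse(imageData, content_type=content_type)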
Example #34
def renderView(request):
    start = time()

    if request.REQUEST.has_key('json_request'):
        (graphOptions,
         requestOptions) = parseDataOptions(request.REQUEST['json_request'])
    elif request.is_ajax() and request.method == 'POST':
        (graphOptions,
         requestOptions) = parseDataOptions(request.raw_post_data)
    else:
        (graphOptions, requestOptions) = parseOptions(request)

    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'localOnly': requestOptions['localOnly'],
        'data': []
    }
    data = requestContext['data']

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' %
                          (time() - start))
            return cachedResponse
        else:
            log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                except:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                seriesList = evaluateTarget(requestContext, target)

                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(requestContext, series)
                                 or 0))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None

        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else:  # Have to actually retrieve the data now
            for target in requestOptions['targets']:
                if not target.strip():
                    continue
                t = time()
                seriesList = evaluateTarget(requestContext, target)
                log.rendering("Retrieval of %s took %.6f" %
                              (target, time() - t))
                data.extend(seriesList)

            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        # If data is all we needed, we're done
        format = requestOptions.get('format')
        if format == 'csv':
            response = HttpResponse(content_type='text/csv')
            writer = csv.writer(response, dialect='excel')

            for series in data:
                for i, value in enumerate(series):
                    timestamp = datetime.fromtimestamp(
                        series.start + (i * series.step),
                        requestOptions['tzinfo'])
                    writer.writerow(
                        (series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"),
                         value))

            return response

        if format == 'json':
            series_data = []
            if 'maxDataPoints' in requestOptions and any(data):
                startTime = min([series.start for series in data])
                endTime = max([series.end for series in data])
                timeRange = endTime - startTime
                maxDataPoints = requestOptions['maxDataPoints']
                for series in data:
                    numberOfDataPoints = timeRange / series.step
                    if maxDataPoints < numberOfDataPoints:
                        valuesPerPoint = math.ceil(
                            float(numberOfDataPoints) / float(maxDataPoints))
                        secondsPerPoint = int(valuesPerPoint * series.step)
                        # Nudge start over a little bit so that the consolidation bands align with each call
                        # removing 'jitter' seen when refreshing.
                        nudge = secondsPerPoint + (
                            series.start % series.step) - (series.start %
                                                           secondsPerPoint)
                        series.start = series.start + nudge
                        valuesToLose = int(nudge / series.step)
                        for r in range(1, valuesToLose):
                            del series[0]
                        series.consolidate(valuesPerPoint)
                        timestamps = range(series.start, series.end,
                                           secondsPerPoint)
                    else:
                        timestamps = range(series.start, series.end,
                                           series.step)
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name, datapoints=datapoints))
            else:
                for series in data:
                    timestamps = range(series.start, series.end, series.step)
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name, datapoints=datapoints))

            if 'jsonp' in requestOptions:
                response = HttpResponse(
                    content="%s(%s)" %
                    (requestOptions['jsonp'], json.dumps(series_data)),
                    content_type='text/javascript')
            else:
                response = HttpResponse(content=json.dumps(series_data),
                                        content_type='application/json')

            response['Pragma'] = 'no-cache'
            response['Cache-Control'] = 'no-cache'
            return response

        if format == 'raw':
            response = HttpResponse(content_type='text/plain')
            for series in data:
                response.write(
                    "%s,%d,%d,%d|" %
                    (series.name, series.start, series.end, series.step))
                response.write(','.join(map(str, series)))
                response.write('\n')

            log.rendering('Total rawData rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'svg':
            graphOptions['outputFormat'] = 'svg'

        if format == 'pickle':
            response = HttpResponse(content_type='application/pickle')
            seriesInfo = [series.getInfo() for series in data]
            pickle.dump(seriesInfo, response, protocol=-1)

            log.rendering('Total pickle rendering time %.6f' %
                          (time() - start))
            return response

    # add template to graphOptions
    try:
        user_profile = getProfile(request, allowDefault=False)
        graphOptions['defaultTemplate'] = user_profile.defaultTemplate
    except:
        graphOptions['defaultTemplate'] = "default"

    # We've got the data, now to render it
    graphOptions['data'] = data
    if settings.REMOTE_RENDERING:  # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions['graphType'], graphOptions)
    else:
        image = doImageRender(requestOptions['graphClass'], graphOptions)

    useSVG = graphOptions.get('outputFormat') == 'svg'
    if useSVG and 'jsonp' in requestOptions:
        response = HttpResponse(content="%s(%s)" %
                                (requestOptions['jsonp'], json.dumps(image)),
                                content_type='text/javascript')
    else:
        response = buildResponse(image,
                                 'image/svg+xml' if useSVG else 'image/png')

    if useCache:
        cache.set(requestKey, response, cacheTimeout)

    log.rendering('Total rendering time %.6f seconds' % (time() - start))
    return response
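Note: the maxDataPoints branch above nudges series.start so that consolidation bands stay aligned from one request to the next. A standalone helper (hypothetical, for illustration only) that reproduces the arithmetic:

def consolidation_nudge(start, step, values_per_point):
    # Returns (new_start, seconds_per_point, values_to_lose).
    seconds_per_point = int(values_per_point * step)
    nudge = seconds_per_point + (start % step) - (start % seconds_per_point)
    return start + nudge, seconds_per_point, int(nudge / step)

# Example: consolidation_nudge(103, 10, 3) -> (123, 30, 2). The new start stays
# congruent to the old one modulo the step, so consolidated bands land in the
# same places on every refresh instead of jittering.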
Example #35
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None

        if cachedData is not None:
            requestContext['data'] = data = cachedData
            log.rendering("[%s] got data cache Retrieval" % requestHash)
        else:  # Have to actually retrieve the data now
            # best place for multiprocessing
            log.info("DEBUG:render:[%s] targets [ %s ]" %
                     (requestHash, requestOptions['targets']))
            start_t = time()
            for target in requestOptions['targets']:
                if not target.strip():
                    continue
                t = time()

                q = Queue(maxsize=1)
                p = Process(target=evaluateWithQueue,
                            args=(q, requestContext, target))
                p.start()
Example #36
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  useCache = 'noCache' not in requestOptions

  # First we check the request cache
  if useCache:
    requestKey = hashRequest(request)
    cachedResponse = cache.get(requestKey)
    if cachedResponse:
      log.cache('Request-Cache hit [%s]' % requestKey)
      log.rendering('Returned cached response in %.6f' % (time() - start))
      return cachedResponse
    else:
      log.cache('Request-Cache miss [%s]' % requestKey)

  # Now we prepare the requested data
  if requestOptions['graphType'] == 'pie':
    data = []
    for target in requestOptions['targets']:
      if target.find(':') >= 0:
        try:
          name,value = target.split(':',1)
          value = float(value)
        except:
          raise ValueError("Invalid target '%s'" % target)
        data.append( (name,value) )
      else:
        timeInterval = (requestOptions['startTime'], requestOptions['endTime'])
        seriesList = evaluateTarget(target, timeInterval)

        for series in seriesList:
          func = PieFunctions[requestOptions['pieMode']]
          data.append( (series.name, func(series) or 0 ))

  elif requestOptions['graphType'] == 'line':
    # Let's see if at least our data is cached
    if useCache:
      targets = requestOptions['targets']
      startTime = requestOptions['startTime']
      endTime = requestOptions['endTime']
      dataKey = hashData(targets, startTime, endTime)
      cachedData = cache.get(dataKey)
      if cachedData:
        log.cache("Data-Cache hit [%s]" % dataKey)
      else:
        log.cache("Data-Cache miss [%s]" % dataKey)
    else:
      cachedData = None

    if cachedData is not None:
      data = cachedData
    else: # Have to actually retrieve the data now
      data = []
      timeInterval = (requestOptions['startTime'], requestOptions['endTime'])

      for timer in requestOptions['timers']:

        colors = ['#FFDDDD', '#FF9999', '#99FF99', '#DDFFDD']
        if timer.startswith('^'):
          timer = timer[1:]
          colors.reverse()

        series_90 = evaluateTarget("averageSeries("+timer+".percent_90)", timeInterval)[0]
        series_50 = evaluateTarget("averageSeries("+timer+".percent_50)", timeInterval)[0]
        series_10 = evaluateTarget("averageSeries("+timer+".percent_10)", timeInterval)[0]
        series_lo = evaluateTarget("averageSeries("+timer+".min)", timeInterval)[0]

        series_90.options['lowBound'] = series_50
        series_90.options['areaFill'] = True
        series_90.options['fixedColor'] = colors[1]
        series_90.options['noLegend'] = True
        data.append(series_90)

        series_50.options['lowBound'] = series_10
        series_50.options['areaFill'] = True
        series_50.options['fixedColor'] = colors[2]
        series_50.options['noLegend'] = True
        data.append(series_50)

        series_10.options['lowBound'] = series_lo
        series_10.options['areaFill'] = True
        series_10.options['fixedColor'] = colors[3]
        series_10.options['noLegend'] = True
        data.append(series_10)

        series_lo.options['noDraw'] = True
        series_lo.options['noLegend'] = True
        data.append(series_lo)

        series_mid = evaluateTarget("averageSeries("+timer+".percent_50)", timeInterval)[0]
        series_mid.options['fixedColor'] = '#474747'
        series_mid.name = 'timer: '+timer
        data.append(series_mid)

      for target in requestOptions['targets']:
        t = time()
        seriesList = evaluateTarget(target, timeInterval)
        log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
        data.extend(seriesList)

    if useCache:
      cache.set(dataKey, data)

    # If data is all we needed, we're done
    if 'pickle' in requestOptions:
      response = HttpResponse(mimetype='application/pickle')
      seriesInfo = [series.getInfo() for series in data]
      pickle.dump(seriesInfo, response, protocol=-1)

      log.rendering('Total pickle rendering time %.6f' % (time() - start))
      return response

    if 'rawData' in requestOptions:
      response = HttpResponse(mimetype='text/plain')
      for series in data:
        response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
        response.write( ','.join(map(str,series)) )
        response.write('\n')

      log.rendering('Total rawData rendering time %.6f' % (time() - start))
      return response

  # We've got the data, now to render it
  graphOptions['data'] = data
  if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
    image = delegateRendering(requestOptions['graphType'], graphOptions)
  else:
    image = doImageRender(requestOptions['graphClass'], graphOptions)

  response = buildResponse(image)

  if useCache:
    cache.set(requestKey, response)

  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  return response
Example #37
 def run(self):
   t = time()
   self.seriesList = evaluateTarget(self.requestContext, self.target)
   log.rendering("Retrieval of %s took %.6f" % (self.target, time() - t))
Example #38
    def fetch_async(self, start_time, end_time, now=None, requestContext=None):
        """Fetch point for a given interval as per the Graphite API.

        Args:
          start_time: Timestamp to fetch points from, will be constrained by retention policy.
          end_time: Timestamp to fetch points until, will be constrained by retention policy.
          now: Current timestamp as a float, defaults to time.time(), for tests.

        Returns:
          A callable that returns ((rounded start time, rounded end time,
          stage precision), points), where points is a list in which
          missing points are set to None.
        """
        fetch_start = time.time()
        log.rendering(
            "fetch(%s, %d, %d) - start" % (self._metric_name, start_time, end_time)
        )

        self.__refresh_metric()
        if now is None:
            now = time.time()

        metadata = self.__get_metadata()
        start_time, end_time, stage = self.__get_time_info(start_time, end_time, now)
        start_step = stage.step(start_time)
        points_num = stage.step(end_time) - start_step
        step = stage.precision
        aggregation_method = metadata.aggregator.carbon_name
        raw_step = metadata.retention.stage0.precision

        if not self._metric:
            # The metric doesn't exist, let's fail gracefully.
            ts_and_points = []
        else:
            # This returns a generator which we can iterate on later.
            ts_and_points = self._accessor.fetch_points(
                self._metric, start_time, end_time, stage
            )

        def read_points():
            read_start = time.time()

            cached_datapoints = self.__get_cached_datapoints(stage)

            # TODO: Consider wrapping an array (using NaN for None) for
            # speed&memory efficiency
            points = [None] * points_num
            for ts, point in ts_and_points:
                index = stage.step(ts) - start_step
                points[index] = point

            if cached_datapoints:
                points = self._merge_cached_points(
                    stage,
                    start_step,
                    step,
                    aggregation_method,
                    points,
                    cached_datapoints,
                    raw_step=raw_step,
                )

            now = time.time()
            log.rendering(
                "fetch(%s, %d, %d) - %d points - read: %f secs - total: %f secs"
                % (
                    self._metric_name,
                    start_time,
                    end_time,
                    len(points),
                    now - read_start,
                    now - fetch_start,
                )
            )
            return (start_time, end_time, stage.precision), points

        log.rendering(
            "fetch(%s, %d, %d) - started" % (self._metric_name, start_time, end_time)
        )

        return read_points
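Note: because fetch_async returns a callable rather than the points themselves, callers can kick off many fetches before blocking on any of them. A hypothetical usage sketch (readers, start_time, and end_time are placeholders):

# Start all reads first...
fetchers = [reader.fetch_async(start_time, end_time) for reader in readers]
# ...then materialize them; each call returns ((start, end, step), points).
results = [fetch() for fetch in fetchers]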
Example #39
def renderView(request):
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    # TODO: Make that a namedtuple or a class.
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'now': requestOptions['now'],
        'localOnly': requestOptions['localOnly'],
        'template': requestOptions['template'],
        'tzinfo': requestOptions['tzinfo'],
        'forwardHeaders': extractForwardHeaders(request),
        'data': [],
        'prefetched': {},
    }
    data = requestContext['data']

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' %
                          (time() - start))
            return cachedResponse
        else:
            log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                except:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                seriesList = evaluateTarget(requestContext, target)

                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(requestContext, series)
                                 or 0))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None

        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else:  # Have to actually retrieve the data now
            targets = requestOptions['targets']
            if settings.REMOTE_PREFETCH_DATA and not requestOptions.get(
                    'localOnly'):
                prefetchRemoteData(requestContext, targets)

            for target in targets:
                if not target.strip():
                    continue
                t = time()
                seriesList = evaluateTarget(requestContext, target)
                log.rendering("Retrieval of %s took %.6f" %
                              (target, time() - t))
                data.extend(seriesList)

            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        # If data is all we needed, we're done
        format = requestOptions.get('format')
        if format == 'csv':
            response = HttpResponse(content_type='text/csv')
            writer = csv.writer(response, dialect='excel')

            for series in data:
                for i, value in enumerate(series):
                    timestamp = datetime.fromtimestamp(
                        series.start + (i * series.step),
                        requestOptions['tzinfo'])
                    writer.writerow(
                        (series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"),
                         value))

            return response

        if format == 'json':
            jsonStart = time()

            series_data = []
            if 'maxDataPoints' in requestOptions and any(data):
                startTime = min([series.start for series in data])
                endTime = max([series.end for series in data])
                timeRange = endTime - startTime
                maxDataPoints = requestOptions['maxDataPoints']
                for series in data:
                    numberOfDataPoints = timeRange / series.step
                    if maxDataPoints < numberOfDataPoints:
                        valuesPerPoint = math.ceil(
                            float(numberOfDataPoints) / float(maxDataPoints))
                        secondsPerPoint = int(valuesPerPoint * series.step)
                        # Nudge start over a little bit so that the consolidation bands align with each call
                        # removing 'jitter' seen when refreshing.
                        nudge = secondsPerPoint + (
                            series.start % series.step) - (series.start %
                                                           secondsPerPoint)
                        series.start = series.start + nudge
                        valuesToLose = int(nudge / series.step)
                        for r in range(1, valuesToLose):
                            del series[0]
                        series.consolidate(valuesPerPoint)
                        timestamps = range(int(series.start),
                                           int(series.end) + 1,
                                           int(secondsPerPoint))
                    else:
                        timestamps = range(int(series.start),
                                           int(series.end) + 1,
                                           int(series.step))
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name,
                             tags=series.tags,
                             datapoints=datapoints))
            elif 'noNullPoints' in requestOptions and any(data):
                for series in data:
                    values = []
                    for (index, v) in enumerate(series):
                        if v is not None:
                            timestamp = series.start + (index * series.step)
                            values.append((v, timestamp))
                    if len(values) > 0:
                        series_data.append(
                            dict(target=series.name,
                                 tags=series.tags,
                                 datapoints=values))
            else:
                for series in data:
                    timestamps = range(int(series.start),
                                       int(series.end) + 1, int(series.step))
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name,
                             tags=series.tags,
                             datapoints=datapoints))

            output = json.dumps(
                series_data,
                indent=(2 if requestOptions['pretty'] else None)).replace(
                    'None,',
                    'null,').replace('NaN,',
                                     'null,').replace('Infinity,', '1e9999,')
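            # Note (added): json.dumps emits the non-JSON tokens 'NaN' and
            # 'Infinity' for float('nan') / float('inf'); the chained
            # .replace() calls above rewrite them into parseable values
            # ('1e9999' overflows back to Infinity in most JSON parsers).
            # The 'None,' replacement is defensive: json.dumps already
            # serializes None as null.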

            if 'jsonp' in requestOptions:
                response = HttpResponse(content="%s(%s)" %
                                        (requestOptions['jsonp'], output),
                                        content_type='text/javascript')
            else:
                response = HttpResponse(content=output,
                                        content_type='application/json')

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            log.rendering('JSON rendering time %6f' % (time() - jsonStart))
            log.rendering('Total request processing time %6f' %
                          (time() - start))
            return response

        if format == 'dygraph':
            labels = ['Time']
            result = '{}'
            if data:
                datapoints = [[
                    ts
                ] for ts in range(data[0].start, data[0].end, data[0].step)]
                for series in data:
                    labels.append(series.name)
                    for i, point in enumerate(series):
                        if point is None:
                            point = 'null'
                        elif point == float('inf'):
                            point = 'Infinity'
                        elif point == float('-inf'):
                            point = '-Infinity'
                        elif math.isnan(point):
                            point = 'null'
                        datapoints[i].append(point)
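                # Note (added): '%%s000' renders as '%s000', so each
                # epoch-seconds timestamp gets '000' appended, i.e. it is
                # converted to the milliseconds that dygraph expects.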
                line_template = '[%%s000%s]' % ''.join([', %s'] * len(data))
                lines = [
                    line_template % tuple(points) for points in datapoints
                ]
                result = '{"labels" : %s, "data" : [%s]}' % (
                    json.dumps(labels), ', '.join(lines))
            response = HttpResponse(content=result,
                                    content_type='application/json')

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            log.rendering('Total dygraph rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'rickshaw':
            series_data = []
            for series in data:
                timestamps = range(series.start, series.end, series.step)
                datapoints = [{
                    'x': x,
                    'y': y
                } for x, y in zip(timestamps, series)]
                series_data.append(
                    dict(target=series.name, datapoints=datapoints))
            if 'jsonp' in requestOptions:
                response = HttpResponse(
                    content="%s(%s)" %
                    (requestOptions['jsonp'], json.dumps(series_data)),
                    content_type='text/javascript')
            else:
                response = HttpResponse(content=json.dumps(series_data),
                                        content_type='application/json')

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            log.rendering('Total rickshaw rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'raw':
            response = HttpResponse(content_type='text/plain')
            for series in data:
                response.write(
                    "%s,%d,%d,%d|" %
                    (series.name, series.start, series.end, series.step))
                response.write(','.join(map(repr, series)))
                response.write('\n')

            log.rendering('Total rawData rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'svg':
            graphOptions['outputFormat'] = 'svg'
        elif format == 'pdf':
            graphOptions['outputFormat'] = 'pdf'

        if format == 'pickle':
            response = HttpResponse(content_type='application/pickle')
            seriesInfo = [series.getInfo() for series in data]
            pickle.dump(seriesInfo, response, protocol=-1)

            log.rendering('Total pickle rendering time %.6f' %
                          (time() - start))
            return response

    # We've got the data, now to render it
    graphOptions['data'] = data
    if settings.REMOTE_RENDERING:  # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions['graphType'], graphOptions,
                                  requestContext['forwardHeaders'])
    else:
        image = doImageRender(requestOptions['graphClass'], graphOptions)

    useSVG = graphOptions.get('outputFormat') == 'svg'
    if useSVG and 'jsonp' in requestOptions:
        response = HttpResponse(content="%s(%s)" %
                                (requestOptions['jsonp'], json.dumps(image)),
                                content_type='text/javascript')
    elif graphOptions.get('outputFormat') == 'pdf':
        response = buildResponse(image, 'application/x-pdf')
    else:
        response = buildResponse(image,
                                 'image/svg+xml' if useSVG else 'image/png')

    if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
        add_never_cache_headers(response)

    log.rendering('Total rendering time %.6f seconds' % (time() - start))
    return response
Example #40
    def fetch_async(self, start_time, end_time, now=None, requestContext=None):
        """Fetch point for a given interval as per the Graphite API.

        Args:
          start_time: Timestamp to fetch points from, will be constrained by retention policy.
          end_time: Timestamp to fetch points until, will be constrained by retention policy.
          now: Current timestamp as a float, defaults to time.time(), for tests.

        Returns:
          A callable that returns ((rounded start time, rounded end time,
          stage precision), points), where points is a list in which
          missing points are set to None.
        """
        tracing.add_attr_to_trace('metric.name', self._metric_name)
        fetch_start = time.time()
        log.rendering("fetch(%s, %d, %d) - start" %
                      (self._metric_name, start_time, end_time))

        self.__refresh_metric()
        if now is None:
            now = time.time()

        metadata = self.__get_metadata()
        start_time, end_time, stage = self.__get_time_info(
            start_time, end_time, now)
        start_step = stage.step(start_time)
        points_num = stage.step(end_time) - start_step
        step = stage.precision
        aggregation_method = metadata.aggregator.carbon_name
        raw_step = metadata.retention.stage0.precision

        if not self._metric:
            # The metric doesn't exist, let's fail gracefully.
            ts_and_points = []
        else:
            # This returns a generator which we can iterate on later.
            ts_and_points = self._accessor.fetch_points(
                self._metric, start_time, end_time, stage)

        def read_points():
            read_start = time.time()

            cached_datapoints = self.__get_cached_datapoints(stage)
            tracing.add_attr_to_trace('metric.name', self._metric_name)
            tracing.add_attr_to_trace('points.num', points_num)

            # TODO: Consider wrapping an array (using NaN for None) for
            # speed&memory efficiency
            points = [None] * points_num
            for ts, point in ts_and_points:
                index = stage.step(ts) - start_step
                points[index] = point

            if cached_datapoints:
                points = self._merge_cached_points(
                    stage,
                    start_time,
                    step,
                    aggregation_method,
                    points,
                    cached_datapoints,
                    raw_step=raw_step,
                )

            now = time.time()
            log.rendering(
                "fetch(%s, %d, %d) - %d points - read: %f secs - total: %f secs"
                % (
                    self._metric_name,
                    start_time,
                    end_time,
                    len(points),
                    now - read_start,
                    now - fetch_start,
                ))
            return (start_time, end_time, stage.precision), points

        log.rendering("fetch(%s, %d, %d) - started" %
                      (self._metric_name, start_time, end_time))

        return tracing.trace_simple(read_points)
Example #41
    if useCache:
      targets = requestOptions['targets']
      startTime = requestOptions['startTime']
      endTime = requestOptions['endTime']
      dataKey = hashData(targets, startTime, endTime)
      cachedData = cache.get(dataKey)
      if cachedData:
        log.cache("Data-Cache hit [%s]" % dataKey)
      else:
        log.cache("Data-Cache miss [%s]" % dataKey)
    else:
      cachedData = None

    if cachedData is not None:
      requestContext['data'] = data = cachedData
      log.rendering("[%s] got data cache Retrieval" % requestHash)
    else: # Have to actually retrieve the data now
      # best place for multiprocessing
      log.info("DEBUG:render:[%s] targets [ %s ]" % (requestHash, requestOptions['targets']))
      start_t = time()
      for target in requestOptions['targets']:
        if not target.strip():
          continue
        t = time()

        q = Queue(maxsize=1)
        p = Process(target=evaluateWithQueue, args=(q, requestContext, target))
        p.start()

        seriesList = None
        try:
Example #42
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  useCache = 'noCache' not in requestOptions
  cacheTimeout = requestOptions['cacheTimeout']
  # TODO: Make that a namedtuple or a class.
  requestContext = {
    'startTime' : requestOptions['startTime'],
    'endTime' : requestOptions['endTime'],
    'now': requestOptions['now'],
    'localOnly' : requestOptions['localOnly'],
    'template' : requestOptions['template'],
    'tzinfo' : requestOptions['tzinfo'],
    'forwardHeaders': extractForwardHeaders(request),
    'data' : [],
    'prefetched' : {},
  }
  data = requestContext['data']

  # First we check the request cache
  if useCache:
    requestKey = hashRequest(request)
    cachedResponse = cache.get(requestKey)
    if cachedResponse:
      log.cache('Request-Cache hit [%s]' % requestKey)
      log.rendering('Returned cached response in %.6f' % (time() - start))
      return cachedResponse
    else:
      log.cache('Request-Cache miss [%s]' % requestKey)

  # Now we prepare the requested data
  if requestOptions['graphType'] == 'pie':
    for target in requestOptions['targets']:
      if target.find(':') >= 0:
        try:
          name,value = target.split(':',1)
          value = float(value)
        except:
          raise ValueError("Invalid target '%s'" % target)
        data.append( (name,value) )
      else:
        seriesList = evaluateTarget(requestContext, target)

        for series in seriesList:
          func = PieFunctions[requestOptions['pieMode']]
          data.append( (series.name, func(requestContext, series) or 0 ))

  elif requestOptions['graphType'] == 'line':
    # Let's see if at least our data is cached
    if useCache:
      targets = requestOptions['targets']
      startTime = requestOptions['startTime']
      endTime = requestOptions['endTime']
      dataKey = hashData(targets, startTime, endTime)
      cachedData = cache.get(dataKey)
      if cachedData:
        log.cache("Data-Cache hit [%s]" % dataKey)
      else:
        log.cache("Data-Cache miss [%s]" % dataKey)
    else:
      cachedData = None

    if cachedData is not None:
      requestContext['data'] = data = cachedData
    else: # Have to actually retrieve the data now
      targets = requestOptions['targets']
      if settings.REMOTE_PREFETCH_DATA and not requestOptions.get('localOnly'):
        prefetchRemoteData(requestContext, targets)

      for target in targets:
        if not target.strip():
          continue
        t = time()
        seriesList = evaluateTarget(requestContext, target)
        log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
        data.extend(seriesList)

      if useCache:
        cache.add(dataKey, data, cacheTimeout)

    # If data is all we needed, we're done
    format = requestOptions.get('format')
    if format == 'csv':
      response = HttpResponse(content_type='text/csv')
      writer = csv.writer(response, dialect='excel')

      for series in data:
        for i, value in enumerate(series):
          timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions['tzinfo'])
          writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))

      return response

    if format == 'json':
      jsonStart = time()

      series_data = []
      if 'maxDataPoints' in requestOptions and any(data):
        startTime = min([series.start for series in data])
        endTime = max([series.end for series in data])
        timeRange = endTime - startTime
        maxDataPoints = requestOptions['maxDataPoints']
        for series in data:
          numberOfDataPoints = timeRange/series.step
          if maxDataPoints < numberOfDataPoints:
            valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
            secondsPerPoint = int(valuesPerPoint * series.step)
            # Nudge start over a little bit so that the consolidation bands align with each call
            # removing 'jitter' seen when refreshing.
            nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
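            # Worked example (illustrative): step=60, secondsPerPoint=300,
            # start=1000 gives nudge = 300 + 40 - 100 = 240, so the new
            # start (1240) keeps the raw phase: 1240 % 300 == 1000 % 60.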
            series.start = series.start + nudge
            valuesToLose = int(nudge/series.step)
            for r in range(1, valuesToLose):
              del series[0]
            series.consolidate(valuesPerPoint)
            timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
          else:
            timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
          datapoints = zip(series, timestamps)
          series_data.append(dict(target=series.name, tags=series.tags, datapoints=datapoints))
      elif 'noNullPoints' in requestOptions and any(data):
        for series in data:
          values = []
          for (index,v) in enumerate(series):
            if v is not None:
              timestamp = series.start + (index * series.step)
              values.append((v,timestamp))
          if len(values) > 0:
            series_data.append(dict(target=series.name, tags=series.tags, datapoints=values))
      else:
        for series in data:
          timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
          datapoints = zip(series, timestamps)
          series_data.append(dict(target=series.name, tags=series.tags, datapoints=datapoints))
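
      # json.dumps emits NaN and Infinity for non-finite floats, which are
      # not valid JSON; the replaces below normalize those tokens (and any
      # stray 'None') so strict parsers accept the output.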

      output = json.dumps(series_data, indent=(2 if requestOptions['pretty'] else None)).replace('None,', 'null,').replace('NaN,', 'null,').replace('Infinity,', '1e9999,')

      if 'jsonp' in requestOptions:
        response = HttpResponse(
          content="%s(%s)" % (requestOptions['jsonp'], output),
          content_type='text/javascript')
      else:
        response = HttpResponse(
          content=output,
          content_type='application/json')

      if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
      else:
        add_never_cache_headers(response)
      log.rendering('JSON rendering time %.6f' % (time() - jsonStart))
      log.rendering('Total request processing time %.6f' % (time() - start))
      return response

    if format == 'dygraph':
      labels = ['Time']
      result = '{}'
      if data:
        datapoints = [[ts] for ts in range(data[0].start, data[0].end, data[0].step)]
        for series in data:
          labels.append(series.name)
          for i, point in enumerate(series):
            if point is None:
              point = 'null'
            elif point == float('inf'):
              point = 'Infinity'
            elif point == float('-inf'):
              point = '-Infinity'
            elif math.isnan(point):
              point = 'null'
            datapoints[i].append(point)
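        # '%s000' appends three literal zeros to the epoch seconds, i.e.
        # converts them to the milliseconds dygraph expects; each series
        # contributes one ', %s' slot per row.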
        line_template = '[%%s000%s]' % ''.join([', %s'] * len(data))
        lines = [line_template % tuple(points) for points in datapoints]
        result = '{"labels" : %s, "data" : [%s]}' % (json.dumps(labels), ', '.join(lines))
      response = HttpResponse(content=result, content_type='application/json')

      if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
      else:
        add_never_cache_headers(response)
      log.rendering('Total dygraph rendering time %.6f' % (time() - start))
      return response

    if format == 'rickshaw':
      series_data = []
      for series in data:
        timestamps = range(series.start, series.end, series.step)
        datapoints = [{'x' : x, 'y' : y} for x, y in zip(timestamps, series)]
        series_data.append( dict(target=series.name, datapoints=datapoints) )
      if 'jsonp' in requestOptions:
        response = HttpResponse(
          content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
          content_type='text/javascript')
      else:
        response = HttpResponse(content=json.dumps(series_data),
                                content_type='application/json')

      if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
      else:
        add_never_cache_headers(response)
      log.rendering('Total rickshaw rendering time %.6f' % (time() - start))
      return response

    if format == 'raw':
      response = HttpResponse(content_type='text/plain')
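      # Raw wire format: one line per series,
      # "name,start,end,step|v1,v2,..." with values in storage order.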
      for series in data:
        response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
        response.write( ','.join(map(repr,series)) )
        response.write('\n')

      log.rendering('Total rawData rendering time %.6f' % (time() - start))
      return response

    if format == 'svg':
      graphOptions['outputFormat'] = 'svg'
    elif format == 'pdf':
      graphOptions['outputFormat'] = 'pdf'

    if format == 'pickle':
      response = HttpResponse(content_type='application/pickle')
      seriesInfo = [series.getInfo() for series in data]
      pickle.dump(seriesInfo, response, protocol=-1)

      log.rendering('Total pickle rendering time %.6f' % (time() - start))
      return response


  # We've got the data, now to render it
  graphOptions['data'] = data
  if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
    image = delegateRendering(requestOptions['graphType'], graphOptions, requestContext['forwardHeaders'])
  else:
    image = doImageRender(requestOptions['graphClass'], graphOptions)

  useSVG = graphOptions.get('outputFormat') == 'svg'
  if useSVG and 'jsonp' in requestOptions:
    response = HttpResponse(
      content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
      content_type='text/javascript')
  elif graphOptions.get('outputFormat') == 'pdf':
    response = buildResponse(image, 'application/x-pdf')
  else:
    response = buildResponse(image, 'image/svg+xml' if useSVG else 'image/png')

  if useCache:
    cache.add(requestKey, response, cacheTimeout)
    patch_response_headers(response, cache_timeout=cacheTimeout)
  else:
    add_never_cache_headers(response)

  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  return response
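Both render views above key their caches with hashRequest and hashData. A minimal sketch of what such a data key can look like, assuming an md5-style compactHash digest; the real Graphite helpers may differ in detail:

import hashlib

def compactHash(string):
  # Assumed digest helper: short, stable, cache-key safe.
  return hashlib.md5(string.encode('utf-8')).hexdigest()

def hashData(targets, startTime, endTime):
  # Key the data cache on the sorted target list plus the time bounds, so
  # identical queries issued by different dashboards share one entry.
  return compactHash(','.join(sorted(targets)) + '@%s:%s' % (startTime, endTime))

Sorting the targets makes the key order-insensitive, so "a,b" and "b,a" hit the same cache entry.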
Example #43
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  useCache = 'noCache' not in requestOptions
  cacheTimeout = requestOptions['cacheTimeout']
  requestContext = {
    'startTime' : requestOptions['startTime'],
    'endTime' : requestOptions['endTime'],
    'localOnly' : requestOptions['localOnly'],
    'template' : requestOptions['template'],
    'targets': [],
    'data' : []
  }
  data = requestContext['data']

  # First we check the request cache
  if useCache:
    requestKey = hashRequest(request)
    cachedResponse = cache.get(requestKey)
    if cachedResponse:
      log.cache('Request-Cache hit [%s]' % requestKey)
      log.rendering('Returned cached response in %.6f' % (time() - start))
      requestContext['cachedResponse'] = True
      requestContext['targets'].append((requestOptions['targets'], time() - start))
      log_query(request, requestOptions, requestContext, time() - start)
      return cachedResponse
    else:
      log.cache('Request-Cache miss [%s]' % requestKey)

  # Now we prepare the requested data
  if requestOptions['graphType'] == 'pie':
    for target in requestOptions['targets']:
      if target.find(':') >= 0:
        try:
          name,value = target.split(':',1)
          value = float(value)
        except ValueError:
          raise ValueError("Invalid target '%s'" % target)
        data.append( (name,value) )
      else:
        t = time()
        seriesList = evaluateTarget(requestContext, target)

        for series in seriesList:
          func = PieFunctions[requestOptions['pieMode']]
          data.append( (series.name, func(requestContext, series) or 0 ))
        requestContext['targets'].append((target, time() - t))

  elif requestOptions['graphType'] == 'line':
    # Let's see if at least our data is cached
    if useCache:
      t = time()
      targets = requestOptions['targets']
      startTime = requestOptions['startTime']
      endTime = requestOptions['endTime']
      dataKey = hashData(targets, startTime, endTime)
      cachedData = cache.get(dataKey)
      if cachedData:
        log.cache("Data-Cache hit [%s]" % dataKey)
        requestContext['cachedData'] = True
        requestContext['targets'].append((targets, time() - t))
      else:
        log.cache("Data-Cache miss [%s]" % dataKey)
    else:
      cachedData = None

    if cachedData is not None:
      requestContext['data'] = data = cachedData
    else: # Have to actually retrieve the data now
      for target in requestOptions['targets']:
        if not target.strip():
          continue
        t = time()
        seriesList = evaluateTarget(requestContext, target)
        log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
        requestContext['targets'].append((target, time() - t))
        data.extend(seriesList)

      if useCache:
        cache.add(dataKey, data, cacheTimeout)

    # If data is all we needed, we're done
    format = requestOptions.get('format')
    if format == 'csv':
      response = HttpResponse(content_type='text/csv')
      writer = csv.writer(response, dialect='excel')

      for series in data:
        for i, value in enumerate(series):
          timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions['tzinfo'])
          writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))

      log_query(request, requestOptions, requestContext, time() - start)
      return response

    if format == 'json':
      series_data = []
      if 'maxDataPoints' in requestOptions and any(data):
        startTime = min([series.start for series in data])
        endTime = max([series.end for series in data])
        timeRange = endTime - startTime
        maxDataPoints = requestOptions['maxDataPoints']
        for series in data:
          numberOfDataPoints = timeRange/series.step
          if maxDataPoints < numberOfDataPoints:
            valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
            secondsPerPoint = int(valuesPerPoint * series.step)
            # Nudge start over a little bit so that the consolidation bands align with each call
            # removing 'jitter' seen when refreshing.
            nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
            series.start = series.start + nudge
            valuesToLose = int(nudge/series.step)
            for r in range(1, valuesToLose):
              del series[0]
            series.consolidate(valuesPerPoint)
            timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
          else:
            timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
          datapoints = zip(series, timestamps)
          series_data.append(dict(target=series.name, datapoints=datapoints))
      else:
        for series in data:
          timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
          datapoints = zip(series, timestamps)
          series_data.append(dict(target=series.name, datapoints=datapoints))

      if 'jsonp' in requestOptions:
        response = HttpResponse(
          content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
          content_type='text/javascript')
      else:
        response = HttpResponse(content=json.dumps(series_data),
                                content_type='application/json')

      if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
      else:
        add_never_cache_headers(response)
      log_query(request, requestOptions, requestContext, time() - start)
      return response

    if format == 'raw':
      response = HttpResponse(content_type='text/plain')
      for series in data:
        response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
        response.write( ','.join(map(str,series)) )
        response.write('\n')

      log.rendering('Total rawData rendering time %.6f' % (time() - start))
      log_query(request, requestOptions, requestContext, time() - start)
      return response

    if format == 'svg':
      graphOptions['outputFormat'] = 'svg'

    if format == 'pickle':
      response = HttpResponse(content_type='application/pickle')
      seriesInfo = [series.getInfo() for series in data]
      pickle.dump(seriesInfo, response, protocol=-1)

      log.rendering('Total pickle rendering time %.6f' % (time() - start))
      log_query(request, requestOptions, requestContext, time() - start)
      return response


  # We've got the data, now to render it
  graphOptions['data'] = data
  if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
    image = delegateRendering(requestOptions['graphType'], graphOptions)
  else:
    image = doImageRender(requestOptions['graphClass'], graphOptions)

  useSVG = graphOptions.get('outputFormat') == 'svg'
  if useSVG and 'jsonp' in requestOptions:
    response = HttpResponse(
      content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
      content_type='text/javascript')
  else:
    response = buildResponse(image, 'image/svg+xml' if useSVG else 'image/png')

  if useCache:
    cache.add(requestKey, response, cacheTimeout)
    patch_response_headers(response, cache_timeout=cacheTimeout)
  else:
    add_never_cache_headers(response)

  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  log_query(request, requestOptions, requestContext, time() - start)
  return response
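Example #43 funnels every exit path through a log_query helper that the excerpt does not include. A hypothetical sketch of its shape, inferred only from the call sites above:

def log_query(request, requestOptions, requestContext, duration):
  # Hypothetical: emit one summary line per request, noting cache hits and
  # the per-target timings accumulated in requestContext['targets'].
  log.info("QUERY [%s] duration=%.6f cachedResponse=%s targets=%s" % (
    request.META.get('REMOTE_ADDR', '-'),
    duration,
    requestContext.get('cachedResponse', False),
    requestContext.get('targets')))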
Example #44
def renderView(request):
  start = time()

  global_timeout_duration = getattr(settings, 'RENDER_DURATION_TIMEOUT', 60)

  if 'json_request' in request.REQUEST:
    (graphOptions, requestOptions) = parseDataOptions(request.REQUEST['json_request'])
  elif request.is_ajax() and request.method == 'POST':
    (graphOptions, requestOptions) = parseDataOptions(request.raw_post_data)
  else:
    (graphOptions, requestOptions) = parseOptions(request)

  useCache = 'noCache' not in requestOptions
  cacheTimeout = requestOptions['cacheTimeout']
  requestContext = {
    'startTime' : requestOptions['startTime'],
    'endTime' : requestOptions['endTime'],
    'localOnly' : requestOptions['localOnly'],
    'data' : []
  }
  data = requestContext['data']

  # add template to graphOptions
  try:
    user_profile = getProfile(request, allowDefault=False)
    graphOptions['defaultTemplate'] = user_profile.defaultTemplate
  except Exception:
    graphOptions['defaultTemplate'] = "default"

  if request.method == 'GET':
    cache_request_obj = request.GET.copy()
  else:
    cache_request_obj = request.POST.copy()

  # hack request object to add defaultTemplate param
  cache_request_obj.appendlist("template", graphOptions['defaultTemplate'])

  # First we check the request cache
  requestKey = hashRequest(cache_request_obj)
  requestHash = hashRequestWTime(cache_request_obj)
  requestContext['request_key'] = requestHash
  request_data = ""
  if request.method == "POST":
    for k,v in request.POST.items():
      request_data += "%s=%s&" % (k.replace("\t", ""), v.replace("\t", ""))
  else:
    request_data = request.META['QUERY_STRING']
  log.info("DEBUG:Request_meta:[%s]\t%s\t%s\t%s\t\"%s\"" %\
          (requestHash,\
            request.META['REMOTE_ADDR'],\
            request.META['REQUEST_METHOD'],\
            request_data,\
            request.META['HTTP_USER_AGENT']))
  if useCache:
    cachedResponse = cache.get(requestKey)
    if cachedResponse:
      log.cache('Request-Cache hit [%s]' % requestHash)
      log.rendering('[%s] Returned cached response in %.6f' % (requestHash, (time() - start)))
      log.info("RENDER:[%s]:Timings:Cached %.5f" % (requestHash, time() - start))
      return cachedResponse
    else:
      log.cache('Request-Cache miss [%s]' % requestHash)

  # Now we prepare the requested data
  if requestOptions['graphType'] == 'pie':
    for target in requestOptions['targets']:
      if target.find(':') >= 0:
        try:
          name,value = target.split(':',1)
          value = float(value)
        except ValueError:
          raise ValueError("Invalid target '%s'" % target)
        data.append( (name,value) )
      else:
        q = Queue(maxsize=1)
        p = Process(target=evaluateWithQueue, args=(q, requestContext, target))
        p.start()

        seriesList = None
        try:
          seriesList = q.get(True, global_timeout_duration)
          p.join()
        except Exception, e:
          log.info("DEBUG:[%s] got an exception on trying to get seriesList from queue, error: %s" % (requestHash, e))
          p.terminate()
          return errorPage("Failed to fetch data")

        if seriesList is None:
          log.info("DEBUG:[%s] request timed out" % requestHash)
          p.terminate()
          return errorPage("Request timed out")

        for series in seriesList:
          func = PieFunctions[requestOptions['pieMode']]
          data.append((series.name, func(requestContext, series) or 0))