Example #1
    def deserialize(self, result):
        """
        Based on configuration, either stream-deserialize a response in settings.REMOTE_BUFFER_SIZE chunks,
        or read the entire payload and use inline deserialization.
        :param result: an http response object
        :return: deserialized response payload from cluster server
        """
        start = time.time()
        try:
            should_buffer = settings.REMOTE_BUFFER_SIZE > 0
            measured_reader = MeasuredReader(result)

            if should_buffer:
                log.debug("Using streaming deserializer.")
                reader = BufferedHTTPReader(measured_reader, settings.REMOTE_BUFFER_SIZE)
                return self._deserialize_stream(reader, result.getheader('content-type'))

            log.debug("Using inline deserializer for small payload")
            return self._deserialize_buffer(measured_reader.read(), result.getheader('content-type'))
        except Exception as err:
            self.fail()
            log.exception(
                "RemoteFinder[%s] Error decoding response from %s: %s" %
                (self.host, result.url_full, err))
            raise Exception("Error decoding response from %s: %s" % (result.url_full, err))
        finally:
            log.debug("Processed %d bytes in %f seconds." % (measured_reader.bytes_read, time.time() - start))
            result.release_conn()
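
A minimal, runnable sketch of the wrapping-reader pattern used above, assuming a hypothetical SimpleMeasuredReader in place of graphite-web's MeasuredReader/BufferedHTTPReader; only the byte-counting and chunked-read idea is taken from the example.

# Sketch only: count bytes while optionally reading a payload in fixed-size
# chunks. SimpleMeasuredReader is a stand-in, not graphite-web's MeasuredReader.
import io

class SimpleMeasuredReader(object):
    def __init__(self, reader):
        self.reader = reader
        self.bytes_read = 0

    def read(self, size=-1):
        data = self.reader.read(size)
        self.bytes_read += len(data)
        return data

payload = io.BytesIO(b'x' * 10000)
measured = SimpleMeasuredReader(payload)

BUFFER_SIZE = 4096  # analogous to settings.REMOTE_BUFFER_SIZE; 0 would mean "read inline"
if BUFFER_SIZE > 0:
    while True:
        chunk = measured.read(BUFFER_SIZE)
        if not chunk:
            break  # a streaming deserializer would feed each chunk to a parser here
else:
    body = measured.read()  # inline: whole payload at once

print(measured.bytes_read)  # 10000 either way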
Example #2
def set_metadata_view(request):
  results = {}

  if request.method == 'GET':
    metric = request.GET['metric']
    key = request.GET['key']
    value = request.GET['value']
    try:
      results[metric] = CarbonLink.set_metadata(metric, key, value)
    except:
      log.exception()
      results[metric] = dict(error="Unexpected error occurred in CarbonLink.set_metadata(%s, %s)" % (metric, key))

  elif request.method == 'POST':
    if request.META.get('CONTENT_TYPE') == 'application/json':
      operations = json.loads( request.body )
    else:
      operations = json.loads( request.POST['operations'] )

    for op in operations:
      metric = None
      try:
        metric, key, value = op['metric'], op['key'], op['value']
        results[metric] = CarbonLink.set_metadata(metric, key, value)
      except:
        log.exception()
        if metric:
          results[metric] = dict(error="Unexpected error occurred in bulk CarbonLink.set_metadata(%s)" % metric)

  else:
    results = dict(error="Invalid request method")

  return json_response_for(request, results)
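
For reference, a hedged sketch of the bulk payload the POST branch above consumes; only the per-operation keys (metric, key, value) and the application/json content type come from the view, while the metric names and the URL in the comment are placeholders.

# Sketch: building the JSON body read by the POST branch above.
import json

operations = [
    {'metric': 'carbon.agents.host-a.cpuUsage', 'key': 'owner', 'value': 'metrics-team'},
    {'metric': 'carbon.agents.host-b.cpuUsage', 'key': 'owner', 'value': 'metrics-team'},
]
body = json.dumps(operations)
headers = {'Content-Type': 'application/json'}  # selects the request.body parsing path
# e.g. requests.post('http://graphite-web.example/set-metadata/', data=body, headers=headers)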
Example #3
  def get_results(self):
    if self.failed:
      return

    if self.cachedResult is not None:
      results = self.cachedResult
    else:
      if self.connection is None:
        self.send()

      try:
        response = self.connection.getresponse()
        assert response.status == 200, "received error response %s - %s" % (response.status, response.reason)
        result_data = response.read()
        results = unpickle.loads(result_data)

      except:
        log.exception("FindRequest.get_results(host=%s, query=%s) exception processing response" % (self.store.host, self.query))
        self.store.fail()
        return

      cache.set(self.cacheKey, results, settings.FIND_CACHE_DURATION)

    for node_info in results:
      if node_info.get('is_leaf'):
        reader = RemoteReader(self.store, node_info, bulk_query=self.query.pattern)
        node = LeafNode(node_info['path'], reader)
      else:
        node = BranchNode(node_info['path'])

      node.local = False
      yield node
Example #4
def _dosave(request,viewName):
  profile = getProfile(request)
  #First find our View
  log.info("Saving view '%s' under profile '%s'" % (viewName,profile.user.username))
  try:
    view = profile.view_set.get(name=viewName)
  except ObjectDoesNotExist:
    view = View(profile=profile,name=viewName)
    view.save()
  #Now re-associate the view with the correct Windows
  view.window_set.all().delete()
  for windowName,encodedString in request.GET.items():
    try:
      if windowName in ('_','commandInput'): continue
      paramString = urllib.unquote_plus(encodedString)
      queryParams = cgi.parse_qs(paramString)
      modelParams = {}
      for key,value in queryParams.items(): #Clean up the window params
        key = str(key)
        value = str(value[0])
        if key in ('top','left'):
          value = int(float( value.replace('px','') ))
        if key in ('width','height','interval'):
          value = int(float(value))
        modelParams[key] = value
      if 'interval' not in modelParams:
        modelParams['interval'] = None
      win = Window(view=view,name=windowName,**modelParams)
      win.save()
    except:
      log.exception("Failed to process parameters for window '%s'" % windowName)
  return stdout('Saved view %s' % viewName)
Example #5
  def fetch(self, startTime, endTime):
    data = whisper.fetch(self.fs_path, startTime, endTime)
    if not data:
      return None

    time_info, values = data
    (start,end,step) = time_info

    # Merge in data from carbon's cache
    try:
      cached_datapoints = CarbonLink.query(self.real_metric_path)
    except:
      log.exception("Failed CarbonLink query '%s'" % self.real_metric_path)
      cached_datapoints = []

    for (timestamp, value) in cached_datapoints:
      interval = timestamp - (timestamp % step)

      try:
        i = int(interval - start) / step
        values[i] = value
      except:
        pass

    return (time_info, values)
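
A standalone sketch of the cache-merge arithmetic above: each cached timestamp is snapped onto the step grid and mapped to a slot index. It uses explicit integer division (//) and a bounds check where the snippet above relies on Python 2 int division and a bare try/except.

# Sketch: merge (timestamp, value) pairs from a cache into a step-aligned series.
def merge_cached(values, start, step, cached_datapoints):
    values = list(values)
    for timestamp, value in cached_datapoints:
        interval = timestamp - (timestamp % step)   # snap to the step grid
        i = (interval - start) // step              # slot index within the series
        if 0 <= i < len(values):                    # the snippet above uses try/except instead
            values[i] = value
    return values

# start=100, step=10 -> slots cover timestamps 100, 110, 120, 130
print(merge_cached([None, None, None, None], 100, 10, [(113, 1.5), (139, 2.0)]))
# [None, 1.5, None, 2.0]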
Example #6
def evaluateTokens(requestContext, tokens):
  if tokens.expression:
    return evaluateTokens(requestContext, tokens.expression)

  elif tokens.pathExpression:
    return fetchData(requestContext, tokens.pathExpression)

  elif tokens.call:
    try:
      func = SeriesFunctions[tokens.call.func]
      args = [evaluateTokens(requestContext, arg) for arg in tokens.call.args]
      return func(requestContext, *args)
    except ValueError:
      log.exception('ValueError when rendering')
      return []

  elif tokens.number:
    if tokens.number.integer:
      return int(tokens.number.integer)
    elif tokens.number.float:
      return float(tokens.number.float)
    elif tokens.number.scientific:
      return float(tokens.number.scientific[0])

  elif tokens.string:
    return tokens.string[1:-1]

  elif tokens.boolean:
    return tokens.boolean[0] == 'true'
Example #7
  def fetch(self, startTime, endTime, now=None, requestContext=None):
    # Start the fetch on each node
    fetches = []

    for n in self.nodes:
      try:
        fetches.append(n.fetch(startTime, endTime, now, requestContext))
      except:
        log.exception("Failed to initiate subfetch for %s" % str(n))

    def merge_results():
      results = {}

      # Wait for any asynchronous operations to complete
      for i, result in enumerate(fetches):
        if isinstance(result, FetchInProgress):
          try:
            results[i] = result.waitForResults()
          except:
            log.exception("Failed to complete subfetch")
            results[i] = None
        else:
          results[i] = result

      results = [r for r in results.values() if r is not None]
      if not results:
        raise Exception("All sub-fetches failed")

      return reduce(self.merge, results)

    return FetchInProgress(merge_results)
Example #8
    def request_series():
      if request_lock.acquire(False): # the FetchInProgress that gets waited on waits for the actual completion
        try:
          log.info("RemoteReader.request_data :: requesting %s" % url)
          connection = HTTPConnectionWithTimeout(self.store.host)
          connection.timeout = settings.REMOTE_FETCH_TIMEOUT
          connection.request('GET', urlpath)
          response = connection.getresponse()
          if response.status != 200:
            raise Exception("Error response %d %s from %s" % (response.status, response.reason, url))
          pickled_response = response.read()
          results = unpickle.loads(pickled_response)
          self.cache_lock.acquire()
          self.request_cache[url] = results
          self.cache_lock.release()
          completion_event.set()
          return results
        except:
          completion_event.set()
          self.store.fail()
          log.exception("Error requesting %s" % url)
          raise

      else: # otherwise we just wait on the completion_event
        completion_event.wait(settings.REMOTE_FETCH_TIMEOUT)
        cached_results = self.request_cache.get(url)
        if cached_results is None:
          raise Exception("Passive remote fetch failed to find cached results")
        else:
          return cached_results
Example #9
    def fetch(self, startTime, endTime):
        data = whisper.fetch(self.fs_path, startTime, endTime)
        if not data:
            return None

        time_info, values = data
        (start, end, step) = time_info

        meta_info = whisper.info(self.fs_path)
        lowest_step = min([i['secondsPerPoint'] for i in meta_info['archives']])
        # Merge in data from carbon's cache
        cached_datapoints = []
        try:
            if step == lowest_step:
                cached_datapoints = CarbonLink.query(self.real_metric_path)
        except:
            log.exception("Failed CarbonLink query '%s'" % self.real_metric_path)
            cached_datapoints = []

        if isinstance(cached_datapoints, dict):
            cached_datapoints = cached_datapoints.items()

        for (timestamp, value) in cached_datapoints:
            interval = timestamp - (timestamp % step)

            try:
                i = int(interval - start) / step
                values[i] = value
            except:
                pass

        return (time_info, values)
Example #10
    def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
        """
        Return auto-complete suggestions for tags and values based on the matches for the specified expressions, optionally filtered by tag and/or value prefix
        """
        if limit is None:
            limit = settings.TAGDB_AUTOCOMPLETE_LIMIT

        fields = [
            ('tag', tag or ''),
            ('valuePrefix', valuePrefix or ''),
            ('limit', str(limit)),
            ('local', self.params.get('local', '1')),
        ]
        for expr in exprs:
            fields.append(('expr', expr))

        result = self.request(
            '/tags/autoComplete/values',
            fields,
            headers=requestContext.get('forwardHeaders') if requestContext else None,
            timeout=settings.FIND_TIMEOUT)
        try:
            reader = codecs.getreader('utf-8')
            results = json.load(reader(result))
        except Exception as err:
            self.fail()
            log.exception(
                "RemoteFinder[%s] Error decoding autocomplete values response from %s: %s" %
                (self.host, result.url_full, err))
            raise Exception("Error decoding autocomplete values response from %s: %s" % (result.url_full, err))
        finally:
            result.release_conn()

        return results
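
A sketch of how a fields list like the one above becomes a querystring, mirroring the urlencode(fields) call in the finder's request() method (Example #16); the tag names and expressions here are made up.

# Sketch: fields is a sequence of (name, value) tuples, so the same name can
# repeat; urlencode keeps each pair as its own query parameter.
try:
    from urllib.parse import urlencode   # Python 3
except ImportError:
    from urllib import urlencode         # Python 2

fields = [
    ('tag', 'datacenter'),
    ('valuePrefix', 'us-'),
    ('limit', '100'),
    ('local', '1'),
]
for expr in ['name=~cpu.*', 'rack=r1']:
    fields.append(('expr', expr))

print(urlencode(fields))  # 'expr' appears once per expression in the querystring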
Example #11
    def wait_for_results():
      connection_event.wait(1)
      connection = self.request_connections.get(url)
      if not connection:
        log.exception("RemoteReader.wait_for_results :: no connection found")
      if wait_lock.acquire(False): # the FetchInProgress that gets waited on waits for the actual completion
        try:
          response = connection.getresponse()
          if response.status != 200:
            raise Exception("Error response %d %s from %s" % (response.status, response.reason, url))

          pickled_response = response.read()
          results = unpickle.loads(pickled_response)
          self.cache_lock.acquire()
          self.request_cache[url] = results
          self.cache_lock.release()
          completion_event.set()
          return results
        except:
          completion_event.set()
          self.store.fail()
          log.exception("Error requesting %s" % url)
          raise

      else: # otherwise we just wait on the completion_event
        completion_event.wait(settings.REMOTE_FETCH_TIMEOUT)
        cached_results = self.request_cache.get(url)
        if cached_results is None:
          raise Exception("Passive remote fetch failed to find cached results")
        else:
          return cached_results
Example #12
def delegateRenderIOW(graphOptions,graphType,tenant):
  start = time()
  log.rendering(graphOptions['data'])
  post_data = {'target': [], 'from': '-24hours'}
  if 'data' in graphOptions:
    for series in graphOptions['data']:
      log.rendering(series.name)
      post_data['target'].append(series.name)
      post_data['from'] = series.start
      post_data['until'] = series.end
  post_data['tenant']=tenant
  post_data['graphType']=graphType
  servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
  shuffle(servers)
  for server in servers:
    start2 = time()
    try: 
      response = requests.post("%s/render/" % server, data=post_data)
      assert response.status_code == 200, "Bad response code %d from %s" % (response.status_code,server)
      contentType = response.headers['Content-Type']
      imageData = response.content
      assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
      assert imageData, "Received empty response from %s" % server
      # Wrap things up
      log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
      log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
      return imageData
    except:
      log.exception("Exception while attempting remote rendering request on %s" % server)
      log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
      continue
Example #13
def set_metadata_view(request):
    results = {}

    if request.method == "GET":
        metric = request.GET["metric"]
        key = request.GET["key"]
        value = request.GET["value"]
        try:
            results[metric] = CarbonLink.set_metadata(metric, key, value)
        except:
            log.exception()
            results[metric] = dict(error="Unexpected error occurred in CarbonLink.set_metadata(%s, %s)" % (metric, key))

    elif request.method == "POST":
        if request.META.get("CONTENT_TYPE") == "application/json":
            operations = json.loads(request.body)
        else:
            operations = json.loads(request.POST["operations"])

        for op in operations:
            metric = None
            try:
                metric, key, value = op["metric"], op["key"], op["value"]
                results[metric] = CarbonLink.set_metadata(metric, key, value)
            except:
                log.exception()
                if metric:
                    results[metric] = dict(
                        error="Unexpected error occurred in bulk CarbonLink.set_metadata(%s)" % metric
                    )

    else:
        results = dict(error="Invalid request method")

    return json_response_for(request, results)
Example #14
    def send(self):
        log.info("FindRequest.send(host=%s, query=%s) called" % (self.store.host, self.query))

        self.cachedResult = cache.get(self.cacheKey)
        if self.cachedResult is not None:
            log.info("FindRequest(host=%s, query=%s) using cached result" % (self.store.host, self.query))
            return

        self.connection = HTTPConnectionWithTimeout(self.store.host)
        self.connection.timeout = settings.REMOTE_FIND_TIMEOUT

        query_params = [("local", "1"), ("format", "pickle"), ("query", self.query.pattern)]
        if self.query.startTime:
            query_params.append(("from", self.query.startTime))

        if self.query.endTime:
            query_params.append(("until", self.query.endTime))

        query_string = urlencode(query_params)

        try:
            self.connection.request("GET", "/metrics/find/?" + query_string)
        except:
            log.exception(
                "FindRequest.send(host=%s, query=%s) exception during request" % (self.store.host, self.query)
            )
            self.store.fail()
            self.failed = True
Example #15
def is_local_interface(host):
  is_ipv6 = False
  if ':' in host:
    try:
      if host.find('[', 0, 2) != -1:
        last_bracket_position = host.rfind(']')
        last_colon_position = host.rfind(':')
        if last_colon_position > last_bracket_position:
          host = host.rsplit(':', 1)[0]
        host = host.strip('[]')
      socket.inet_pton(socket.AF_INET6, host)
      is_ipv6 = True
    except socket.error:
      host = host.split(':',1)[0]

  try:
    if is_ipv6:
      sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
      sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.connect( (host, 4242) )
    local_ip = sock.getsockname()[0]
    sock.close()
  except:
    log.exception("Failed to open socket with %s" % host)
    raise

  if local_ip == host:
    return True

  return False
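
A sketch that isolates just the host-string parsing above (bracketed IPv6 with an optional port, plain IPv4/hostname with an optional port) without the socket-connect step; the sample addresses are arbitrary.

# Sketch: normalize "host[:port]" strings the same way as above.
# Returns (host, is_ipv6).
import socket

def split_host(host):
    is_ipv6 = False
    if ':' in host:
        try:
            if host.find('[', 0, 2) != -1:             # bracketed IPv6, maybe with a port
                if host.rfind(':') > host.rfind(']'):  # port follows the closing bracket
                    host = host.rsplit(':', 1)[0]
                host = host.strip('[]')
            socket.inet_pton(socket.AF_INET6, host)
            is_ipv6 = True
        except socket.error:
            host = host.split(':', 1)[0]               # IPv4 or hostname with a port
    return host, is_ipv6

print(split_host('[::1]:4242'))        # ('::1', True)
print(split_host('127.0.0.1:4242'))    # ('127.0.0.1', False)
print(split_host('example.com'))       # ('example.com', False)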
Example #16
    def request(self, path, fields=None, headers=None, timeout=None):
        url = "%s%s" % (self.url, path)
        url_full = "%s?%s" % (url, urlencode(fields))

        try:
            result = http.request(
                'POST' if settings.REMOTE_STORE_USE_POST else 'GET',
                url,
                fields=fields,
                headers=headers,
                timeout=timeout,
                preload_content=False)
        except BaseException as err:
            self.fail()
            log.exception("RemoteFinder[%s] Error requesting %s: %s" % (self.host, url_full, err))
            raise Exception("Error requesting %s: %s" % (url_full, err))

        if result.status != 200:
            result.release_conn()
            self.fail()
            log.exception(
                "RemoteFinder[%s] Error response %d from %s" % (self.host, result.status, url_full))
            raise Exception("Error response %d from %s" % (result.status, url_full))

        result.url_full = url_full

        # reset last failure time so that retried fetches can re-enable a remote
        self.last_failure = 0

        log.debug("RemoteFinder[%s] Fetched %s" % (self.host, url_full))
        return result
Example #17
  def fetch(self, startTime, endTime):
    data = whisper.fetch(self.fs_path, startTime, endTime)
    if not data:
      return None
    consolidationFunc = ""
    whisper_info = whisper.info(self.fs_path)
    if "aggregationMethod" in whisper_info:
      aggregationMethod = whisper_info["aggregationMethod"]
      if aggregationMethod == 'min' or aggregationMethod == 'max':
        consolidationFunc = aggregationMethod
    time_info, values = data
    (start,end,step) = time_info

    # Merge in data from carbon's cache
    try:
      cached_datapoints = CarbonLink.query(self.real_metric_path)
    except:
      log.exception("Failed CarbonLink query '%s'" % self.real_metric_path)
      cached_datapoints = []

    for (timestamp, value) in cached_datapoints:
      interval = timestamp - (timestamp % step)

      try:
        i = int(interval - start) / step
        values[i] = value
      except:
        pass

    return (time_info, values, consolidationFunc)
Example #18
      def receiveResponse():
        try:
          buf = ''
          remaining = 4
          message_size = None

          while remaining:
            packet = connection.recv(remaining)
            assert packet, "CarbonLink lost connection to %s:%d" % host

            buf += packet

            if message_size is None:
              if len(buf) == 4:
                remaining = message_size = struct.unpack("!L", buf)[0]
                buf = ''
                continue

            remaining -= len(packet)

          # We're done with the connection for this request, put it in the pool
          self.putConnectionInPool(host, connection)

          # Now parse the response
          points = pickle.loads(buf)
          log.cache("CarbonLink to %s, retrieved %d points for %s" % (host,len(points),metric))

          for point in points:
            yield point

        except:
          log.exception("CarbonLink to %s, exception while getting response" % str(host))
          self.removeConnectionFromPool(host, connection)
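
The receive loop above reads a 4-byte big-endian length header ("!L") followed by that many payload bytes. Below is a sketch of the matching framing and decode, with a plain byte string standing in for the pickled points.

# Sketch: length-prefixed framing as used by receiveResponse() above.
# "!L" is a 4-byte unsigned int in network (big-endian) byte order.
import struct

payload = b'example-payload'             # stands in for the pickled points
frame = struct.pack("!L", len(payload)) + payload

# Decoding mirrors the loop: first 4 bytes give the size, the rest is the body.
size = struct.unpack("!L", frame[:4])[0]
body = frame[4:4 + size]
assert size == len(payload) and body == payload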
Example #19
def fetchDataLocal(requestContext, pathExpr):
  seriesList = []
  startTime = requestContext['startTime']
  endTime = requestContext['endTime']

  if requestContext['localOnly']:
    store = LOCAL_STORE
  else:
    store = STORE

  for dbFile in store.find(pathExpr):
    log.metric_access(dbFile.metric_path)
    dbResults = dbFile.fetch( timestamp(startTime), timestamp(endTime) )
    try:
      cachedResults = CarbonLink.query(dbFile.real_metric)
      results = mergeResults(dbResults, cachedResults)
    except:
      log.exception()
      results = dbResults

    if not results:
      continue

    (timeInfo,values) = results
    (start,end,step) = timeInfo
    series = TimeSeries(dbFile.metric_path, start, end, step, values)
    series.pathExpression = pathExpr #hack to pass expressions through to render functions
    seriesList.append(series)

  return seriesList
Example #20
  def fetch(self, startTime, endTime):
    try:
      data = whisper.fetch(self.fs_path, startTime, endTime)
    except IOError:
      log.exception("Failed fetch of whisper file '%s'" % self.fs_path)
      return None
    if not data:
      return None

    time_info, values = data
    (start,end,step) = time_info

    meta_info = whisper.info(self.fs_path)
    aggregation_method = meta_info['aggregationMethod']
    lowest_step = min([i['secondsPerPoint'] for i in meta_info['archives']])
    # Merge in data from carbon's cache
    cached_datapoints = []
    try:
      cached_datapoints = CarbonLink().query(self.real_metric_path)
    except:
      log.exception("Failed CarbonLink query '%s'" % self.real_metric_path)
      cached_datapoints = []

    if isinstance(cached_datapoints, dict):
      cached_datapoints = cached_datapoints.items()

    values = merge_with_cache(cached_datapoints,
                              start,
                              step,
                              values,
                              aggregation_method)

    return time_info, values
Example #21
def mygraph(request):
  profile = getProfile(request,allowDefault=False)
  if not profile: return HttpResponse( "You are not logged in!" )
  graphName = request.GET['graphName']
  if not graphName:
    return HttpResponse("You must type in a graph name.")
  action = request.GET['action']
  url = request.GET['url']
  if action == 'save':
    try:
      existingGraph = profile.mygraph_set.get(name=graphName)
      existingGraph.url = url
      existingGraph.save()
    except ObjectDoesNotExist:
      try:
        newGraph = MyGraph(profile=profile,name=graphName,url=url)
        newGraph.save()
      except:
        log.exception("Failed to create new MyGraph in /composer/mygraph/, graphName=%s" % graphName)
        return HttpResponse("Failed to save graph %s" % graphName)
    return HttpResponse("SAVED")
  elif action == 'delete':
    try:
      existingGraph = profile.mygraph_set.get(name=graphName)
      existingGraph.delete()
    except ObjectDoesNotExist:
      return HttpResponse("No such graph '%s'" % graphName)
    return HttpResponse("DELETED")
  else:
    return HttpResponse("Invalid operation '%s'" % action)
Example #22
    def get_index(self, requestContext):
        url = '/metrics/index.json'

        headers = requestContext.get('forwardHeaders')

        result = self.request(
            url,
            fields=[
              ('local', self.params.get('local', '1')),
            ],
            headers=headers,
            timeout=settings.FIND_TIMEOUT)

        try:
            reader = codecs.getreader('utf-8')
            results = json.load(reader(result))
        except Exception as err:
            self.fail()
            log.exception(
                "RemoteFinder[%s] Error decoding index response from %s: %s" %
                (self.host, result.url_full, err))
            raise Exception("Error decoding index response from %s: %s" % (result.url_full, err))
        finally:
            result.release_conn()

        return results
Example #23
def renderView(request):
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  post_data  = {
    'from' : int((requestOptions['startTime']-datetime(1970,1,1,tzinfo=requestOptions['tzinfo'])).total_seconds()),
    'until' : int((requestOptions['endTime']-datetime(1970,1,1,tzinfo=requestOptions['tzinfo'])).total_seconds()),
    'tenant' : requestOptions['tenant'],
    'format' : requestOptions.get('format'),
    'target': [t for t in requestOptions['targets'] if t.strip()],
  }
  post_data.update(graphOptions)
  log.rendering(post_data)
  servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
  shuffle(servers)
  for server in servers:
    start2 = time()
    try:
      response = requests.post("%s/render/" % server, data=post_data)
      assert response.status_code == 200, "Bad response code %d from %s" % (response.status_code,server)
      contentType = response.headers['Content-Type']
      imageData = response.content
      assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
      assert imageData, "Received empty response from %s" % server
      # Wrap things up
      log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
      log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
      break
    except:
      log.exception("Exception while attempting remote rendering request on %s" % server)
      log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
      continue
  
  response = buildResponse(imageData, 'image/png')
  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  return response
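
A sketch checking the 'from'/'until' arithmetic above: with a UTC tzinfo, subtracting datetime(1970, 1, 1, tzinfo=utc) gives POSIX seconds, the same value as datetime.timestamp(); with a non-UTC tzinfo the epoch reference itself shifts by that zone's offset.

# Sketch: epoch-subtraction vs. timestamp(), UTC case only.
from datetime import datetime, timezone

start_time = datetime(2024, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)

seconds = int((start_time - epoch).total_seconds())
assert seconds == int(start_time.timestamp())
print(seconds)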
Example #24
  def send(self):
    log.info("FindRequest.send(host=%s, query=%s) called" % (self.store.host, self.query))

    self.cachedResult = cache.get(self.cacheKey)
    if self.cachedResult is not None:
      log.info("FindRequest(host=%s, query=%s) using cached result" % (self.store.host, self.query))
      return

    self.connection = HTTPConnectionWithTimeout(self.store.host)
    self.connection.timeout = settings.REMOTE_FIND_TIMEOUT

    query_params = [
      ('local', '1'),
      ('format', 'pickle'),
      ('query', self.query.pattern),
    ]
    if self.query.startTime:
      query_params.append( ('from', self.query.startTime) )

    if self.query.endTime:
      query_params.append( ('until', self.query.endTime) )

    query_string = urlencode(query_params)

    try:
      self.connection.request('GET', '/metrics/find/?' + query_string)
    except:
      log.exception("FindRequest.send(host=%s, query=%s) exception during request" % (self.store.host, self.query))
      self.store.fail()
      self.failed = True
Example #25
 def find_nodes(self, query):
     log.info("q:" + repr(query))
     try:
         for node in self._find_nodes_from_pattern(self.kudu_table, query.pattern):
             yield node
     except Exception, e:
         log.exception(e)
         raise
Example #26
def index_json(request):
  queryParams = request.GET.copy()
  queryParams.update(request.POST)

  jsonp = queryParams.get('jsonp', False)
  cluster = queryParams.get('cluster', False)

  def find_matches():
    matches = []

    for root, dirs, files in os.walk(settings.WHISPER_DIR):
      root = root.replace(settings.WHISPER_DIR, '')
      for basename in files:
        if fnmatch.fnmatch(basename, '*.wsp'):
          matches.append(os.path.join(root, basename))

    for root, dirs, files in os.walk(settings.CERES_DIR):
      root = root.replace(settings.CERES_DIR, '')
      for filename in files:
        if filename == '.ceres-node':
          matches.append(root)

    # unlike 0.9.x, we're going to use os.walk with followlinks
    # since we require Python 2.7 and newer that supports it
    if RRDReader.supported:
      for root, dirs, files in os.walk(settings.RRD_DIR, followlinks=True):
        root = root.replace(settings.RRD_DIR, '')
        for basename in files:
          if fnmatch.fnmatch(basename, '*.rrd'):
            absolute_path = os.path.join(settings.RRD_DIR, root, basename)
            (basename,extension) = os.path.splitext(basename)
            metric_path = os.path.join(root, basename)
            rrd = RRDReader(absolute_path, metric_path)
            for datasource_name in rrd.get_datasources(absolute_path):
              matches.append(os.path.join(metric_path, datasource_name))

    matches = [
      m
      .replace('.wsp', '')
      .replace('.rrd', '')
      .replace('/', '.')
      .lstrip('.')
      for m in sorted(matches)
    ]
    return matches

  matches = []
  if cluster and len(settings.CLUSTER_SERVERS) >= 1:
    try:
      matches = reduce( lambda x, y: list(set(x + y)), \
        [json.loads(urllib.urlopen('http://' + cluster_server + '/metrics/index.json').read()) \
        for cluster_server in settings.CLUSTER_SERVERS])
    except urllib.URLError:
      log.exception()
      return json_response_for(request, matches, jsonp=jsonp, status=500)
  else:
    matches = find_matches()
  return json_response_for(request, matches, jsonp=jsonp)
Example #27
def fetchData(requestContext, pathExpr):

  seriesList = []
  startTime = int( time.mktime( requestContext['startTime'].timetuple() ) )
  endTime   = int( time.mktime( requestContext['endTime'].timetuple() ) )

  def _fetchData(pathExpr,startTime, endTime, requestContext, seriesList):
    matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])
    fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]

    for node, results in fetches:
      if isinstance(results, FetchInProgress):
        results = results.waitForResults()

      if not results:
        log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
        continue

      try:
        (timeInfo, values, conso) = results
      except ValueError as e:
        raise Exception("could not parse timeInfo/values from metric '%s': %s" % (node.path, e))
      (start, end, step) = timeInfo

      if conso != "":
        series = TimeSeries(node.path, start, end, step, values, conso)
      else:
        series = TimeSeries(node.path, start, end, step, values)
      series.pathExpression = pathExpr #hack to pass expressions through to render functions
      seriesList.append(series)

    # Prune empty series with duplicate metric paths to avoid showing empty graph elements for old whisper data
    names = set([ s.name for s in seriesList ])
    for name in names:
      series_with_duplicate_names = [ s for s in seriesList if s.name == name ]
      empty_duplicates = [ s for s in series_with_duplicate_names if not nonempty(s) ]

      if series_with_duplicate_names == empty_duplicates and len(empty_duplicates) > 0: # if they're all empty
        empty_duplicates.pop() # make sure we leave one in seriesList

      for series in empty_duplicates:
        seriesList.remove(series)

    return seriesList

  retries = 1 # start counting at one to make log output and settings more readable
  while True:
    try:
      seriesList = _fetchData(pathExpr,startTime, endTime, requestContext, seriesList)
      return seriesList
    except Exception, e:
      if retries >= settings.MAX_FETCH_RETRIES:
        log.exception("Failed after %i retry! See: %s" % (settings.MAX_FETCH_RETRIES, e))
        raise Exception("Failed after %i retry! See: %s" % (settings.MAX_FETCH_RETRIES, e))
      else:
        log.exception("Got an exception when fetching data! See: %s Will do it again! Run: %i of %i" %
                     (e, retries, settings.MAX_FETCH_RETRIES))
        retries += 1
Example #28
    def _find_paths(self, current_dir, patterns):
        """Recursively generates absolute paths whose components underneath current_dir
    match the corresponding pattern in patterns"""
        pattern = patterns[0]
        patterns = patterns[1:]

        has_wildcard = (
            pattern.find("{") > -1 or pattern.find("[") > -1 or pattern.find("*") > -1 or pattern.find("?") > -1
        )
        using_globstar = pattern == "**"

        if has_wildcard:  # this avoids os.listdir() for performance
            try:
                entries = os.listdir(current_dir)
            except OSError as e:
                log.exception(e)
                entries = []
        else:
            entries = [pattern]

        if using_globstar:
            matching_subdirs = map(operator.itemgetter(0), os.walk(current_dir))
        else:
            subdirs = [entry for entry in entries if isdir(join(current_dir, entry))]
            matching_subdirs = match_entries(subdirs, pattern)

        # if this is a terminal globstar, add a pattern for all files in subdirs
        if using_globstar and not patterns:
            patterns = ["*"]

        if len(patterns) == 1 and RRDReader.supported:  # the last pattern may apply to RRD data sources
            if not has_wildcard:
                entries = [pattern + ".rrd"]
            files = [entry for entry in entries if isfile(join(current_dir, entry))]
            rrd_files = match_entries(files, pattern + ".rrd")

            if rrd_files:  # let's assume it does
                datasource_pattern = patterns[0]

                for rrd_file in rrd_files:
                    absolute_path = join(current_dir, rrd_file)
                    yield absolute_path + self.DATASOURCE_DELIMITER + datasource_pattern

        if patterns:  # we've still got more directories to traverse
            for subdir in matching_subdirs:

                absolute_path = join(current_dir, subdir)
                for match in self._find_paths(absolute_path, patterns):
                    yield match

        else:  # we've got the last pattern
            if not has_wildcard:
                entries = [pattern + ".wsp", pattern + ".wsp.gz", pattern + ".rrd"]
            files = [entry for entry in entries if isfile(join(current_dir, entry))]
            matching_files = match_entries(files, pattern + ".*")

            for base_name in matching_files + matching_subdirs:
                yield join(current_dir, base_name)
Example #29
 def __get_cached_datapoints(self, stage):
     cached_datapoints = []
     try:
         # TODO: maybe this works with non stage0 now, need to check.
         if stage.stage0 and self._carbonlink:
             cached_datapoints = self._carbonlink.query(self._metric_name)
     except Exception:
         log.exception("Failed CarbonLink query '%s'" % self._metric_name)
     return cached_datapoints
Example #30
def wait_for_result(result):
    """Helper to read the various result types."""
    if isinstance(result, FetchInProgress):
        try:
            return result.waitForResults()
        except BaseException:
            log.exception("Failed to complete subfetch")
            return None
    else:
        return result
Example #31
  def fetch(self, startTime, endTime):
    data = self.ceres_node.read(startTime, endTime)
    time_info = (data.startTime, data.endTime, data.timeStep)
    values = list(data.values)

    # Merge in data from carbon's cache
    try:
      cached_datapoints = CarbonLink.query(self.real_metric_path)
    except:
      log.exception("Failed CarbonLink query '%s'" % self.real_metric_path)
      cached_datapoints = []

    values = merge_with_cache(cached_datapoints,
                              data.startTime,
                              data.timeStep,
                              values)

    return time_info, values
Example #32
def renderLocalView(request):
    try:
        start = time()
        reqParams = StringIO(request.body)
        graphType = reqParams.readline().strip()
        optionsPickle = reqParams.read()
        reqParams.close()
        graphClass = GraphTypes[graphType]
        options = unpickle.loads(optionsPickle)
        image = doImageRender(graphClass, options)
        log.rendering("Delegated rendering request took %.6f seconds" %
                      (time() - start))
        response = buildResponse(image)
        add_never_cache_headers(response)
        return response
    except:
        log.exception("Exception in graphite.render.views.rawrender")
        return HttpResponseServerError()
Example #33
    def fetch(self, startTime, endTime):
        # Start the fetch on each node
        results = [n.fetch(startTime, endTime) for n in self.nodes]

        # Wait for any asynchronous operations to complete
        for i, result in enumerate(results):
            if isinstance(result, FetchInProgress):
                try:
                    results[i] = result.waitForResults()
                except:
                    log.exception("Failed to complete subfetch")
                    results[i] = None

        results = [r for r in results if r is not None]
        if not results:
            raise Exception("All sub-fetches failed")

        return reduce(self.merge, results)
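
reduce(self.merge, results) above folds the per-node results pairwise into one. Below is a sketch with a simplified merge that keeps the first non-None value per slot; graphite's actual merge may behave differently, this only illustrates the fold.

# Sketch: folding several (time_info, values) results into one series.
# merge() is a simplified stand-in for self.merge.
from functools import reduce

def merge(a, b):
    time_info, a_values = a
    _, b_values = b
    merged = [av if av is not None else bv for av, bv in zip(a_values, b_values)]
    return (time_info, merged)

results = [
    ((0, 30, 10), [1, None, None]),
    ((0, 30, 10), [None, 2, None]),
    ((0, 30, 10), [None, None, 3]),
]
print(reduce(merge, results))   # ((0, 30, 10), [1, 2, 3])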
Example #34
def index_json(request):
  queryParams = request.GET.copy()
  queryParams.update(request.POST)

  try:
    jsonp = queryParams.get('jsonp', False)

    requestContext = {
      'localOnly': int( queryParams.get('local', 0) ),
      'forwardHeaders': extractForwardHeaders(request),
    }

    matches = STORE.get_index(requestContext)
  except Exception:
    log.exception()
    return json_response_for(request, [], jsonp=jsonp, status=500)

  return json_response_for(request, matches, jsonp=jsonp)
Example #35
def mygraph(request):
    profile = getProfile(request, allowDefault=False)

    if not profile:
        return HttpResponse("You are not logged in!")

    action = request.GET['action']
    graphName = request.GET['graphName']

    if not graphName:
        return HttpResponse("You must type in a graph name.")

    if action == 'save':
        url = request.GET['url']

        try:
            existingGraph = profile.mygraph_set.get(name=graphName)
            existingGraph.url = url
            existingGraph.save()

        except ObjectDoesNotExist:
            try:
                newGraph = MyGraph(profile=profile, name=graphName, url=url)
                newGraph.save()
            except Exception:
                log.exception(
                    "Failed to create new MyGraph in /composer/mygraph/, graphName=%s"
                    % graphName)
                return HttpResponse("Failed to save graph %s" % graphName)

        return HttpResponse("SAVED")

    elif action == 'delete':
        try:
            existingGraph = profile.mygraph_set.get(name=graphName)
            existingGraph.delete()

        except ObjectDoesNotExist:
            return HttpResponse("No such graph '%s'" % graphName)

        return HttpResponse("DELETED")

    else:
        return HttpResponse("Invalid operation '%s'" % action)
Example #36
        def merge_results():
            results = {}

            # Wait for any asynchronous operations to complete
            for i, result in enumerate(fetches):
                if isinstance(result, FetchInProgress):
                    try:
                        results[i] = result.waitForResults()
                    except:
                        log.exception("Failed to complete subfetch")
                        results[i] = None
                else:
                    results[i] = result

            results = [r for r in results.values() if r is not None]
            if not results:
                raise Exception("All sub-fetches failed")

            return reduce(self.merge, results)
Example #37
def get_metadata_view(request):
    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    key = queryParams.get('key')
    metrics = queryParams.getlist('metric')
    jsonp = queryParams.get('jsonp', False)
    results = {}
    for metric in metrics:
        try:
            results[metric] = CarbonLink.get_metadata(metric, key)
        except:
            log.exception()
            results[metric] = dict(
                error=
                "Unexpected error occurred in CarbonLink.get_metadata(%s, %s)"
                % (metric, key))

    return json_response_for(request, results, jsonp=jsonp)
Example #38
 def read_response(self):  # called under self.lock
     try:
         self.has_done_response_read = True
         response = self.connection.getresponse()  # safe if self.connection.timeout works as advertised
         if response.status != 200:
             raise Exception("Error response %d %s from http://%s%s" %
                             (response.status, response.reason,
                              self.store.host, self.urlpath))
         pickled_response = response.read()
         self.result = unpickle.loads(pickled_response)
         return self.result
     except:
         self.store.fail()
         log.exception("Error requesting http://%s%s" %
                       (self.store.host, self.urlpath))
         raise
     finally:
         self.done_cb()
Example #39
def _dogsave(request, graphName):
    profile = getProfile(request, allowDefault=False)
    if not profile: return stderr("You must be logged in to save graphs")
    url = request.GET.get('url')
    if not url: return stderr("No url specified!")
    try:
        existingGraph = profile.mygraph_set.get(name=graphName)
        existingGraph.url = url
        existingGraph.save()
    except ObjectDoesNotExist:
        try:
            newGraph = MyGraph(profile=profile, name=graphName, url=url)
            newGraph.save()
        except:
            log.exception(
                "Failed to create new MyGraph in _dogsave(), graphName=%s" %
                graphName)
            return stderr("Failed to save graph %s" % graphName)
    return stdout("Saved graph %s" % graphName)
Example #40
def delegateRendering(graphType, graphOptions):
  start = time()
  postData = graphType + '\n' + pickle.dumps(graphOptions)
  servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
  shuffle(servers)
  connector_class = connector_class_selector(settings.INTRACLUSTER_HTTPS)
  for server in servers:
    start2 = time()
    try:
      # Get a connection
      try:
        pool = connectionPools[server]
      except KeyError: #happens the first time
        pool = connectionPools[server] = set()
      try:
        connection = pool.pop()
      except KeyError: #No available connections, have to make a new one
        connection = connector_class(server)
        connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
      # Send the request
      try:
        connection.request('POST','/render/local/', postData)
      except CannotSendRequest:
        connection = connector_class(server) #retry once
        connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
        connection.request('POST', '/render/local/', postData)
      # Read the response
      response = connection.getresponse()
      assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
      contentType = response.getheader('Content-Type')
      imageData = response.read()
      assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
      assert imageData, "Received empty response from %s" % server
      # Wrap things up
      log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
      log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
      pool.add(connection)
      return imageData
    except:
      log.exception("Exception while attempting remote rendering request on %s" % server)
      log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
      continue
Example #41
def fetchRemoteData(requestContext, pathExpr, usePrefetchCache=settings.REMOTE_PREFETCH_DATA):
  (startTime, endTime, now) = _timebounds(requestContext)
  remote_nodes = [ RemoteNode(store, pathExpr, True) for store in STORE.remote_stores ]

  # Go through all of the remote_nodes, and launch a remote_fetch for each one.
  # Each fetch will take place in its own thread, since it's naturally parallel work.
  # Notable: return the 'seriesList' result from each node.fetch into result_queue
  # instead of directly from the method. Queue.Queue() is threadsafe.
  remote_fetches = []
  result_queue = Queue.Queue()
  for node in remote_nodes:
      need_fetch = True
      if usePrefetchCache:
        series = prefetchLookup(requestContext, node)
        # Will be either:
        #   []: prefetch done, returned no data. Do not fetch
        #   seriesList: prefetch done, returned data, do not fetch
        #   None: prefetch not done, FETCH
        if series is not None:
          result_queue.put( (node, series) )
          need_fetch = False
      if need_fetch:
        fetch_thread = threading.Thread(target=node.fetch, name=node.store.host,
                                        args=(startTime, endTime, now, result_queue))
        fetch_thread.start()
        remote_fetches.append(fetch_thread)

  # Once the remote_fetches have started, wait for them all to finish. Assuming an
  # upper bound of REMOTE_STORE_FETCH_TIMEOUT per thread, this should take about that
  # amount of time (6s by default) at the longest. If every thread blocks permanently,
  # then this could take a horrible REMOTE_STORE_FETCH_TIMEOUT * num(remote_fetches),
  # but then that would imply that remote_storage's HTTPConnectionWithTimeout class isn't
  # working correctly :-)
  for fetch_thread in remote_fetches:
    try:
      fetch_thread.join(settings.REMOTE_STORE_FETCH_TIMEOUT)
      if fetch_thread.is_alive():
        log.exception("Failed to join remote_fetch thread %s within %ss" % (fetch_thread.name, settings.REMOTE_STORE_FETCH_TIMEOUT))
    except:
      log.exception("Exception during remote_fetch thread %s" % (fetch_thread.name))

  return result_queue
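
A self-contained sketch of the thread-per-fetch pattern above: one worker thread per remote node, results funneled through a thread-safe queue, and joins bounded by a timeout. fake_fetch, the host names and the 6-second timeout are made up.

# Sketch: one thread per remote fetch, results collected via a Queue.
import threading
try:
    import queue           # Python 3
except ImportError:
    import Queue as queue  # Python 2, as in the snippet above

def fake_fetch(host, result_queue):
    # A real node.fetch() would issue an HTTP request here.
    result_queue.put((host, ['series-from-%s' % host]))

result_queue = queue.Queue()
threads = []
for host in ['node-a', 'node-b', 'node-c']:
    t = threading.Thread(target=fake_fetch, name=host, args=(host, result_queue))
    t.start()
    threads.append(t)

for t in threads:
    t.join(6.0)            # analogous to REMOTE_STORE_FETCH_TIMEOUT
    if t.is_alive():
        print("fetch thread %s did not finish in time" % t.name)

while not result_queue.empty():
    print(result_queue.get_nowait())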
Example #42
def fetchData(requestContext, pathExpr):
  seriesList = {}
  (startTime, endTime, now) = timebounds(requestContext)

  retries = 1 # start counting at one to make log output and settings more readable
  while True:
    try:
      seriesList = _fetchData(pathExpr, startTime, endTime, now, requestContext, seriesList)
      break
    except Exception:
      if retries >= settings.MAX_FETCH_RETRIES:
        log.exception("Failed after %s retry! Root cause:\n%s" %
            (settings.MAX_FETCH_RETRIES, format_exc()))
        raise
      else:
        log.exception("Got an exception when fetching data! Try: %i of %i. Root cause:\n%s" %
                     (retries, settings.MAX_FETCH_RETRIES, format_exc()))
        retries += 1

  return seriesList
Example #43
    def fetch(self, startTime, endTime, now=None, requestContext=None):
        try:
            data = self.fetch_data(startTime, endTime, now=now)
        except IOError:
            log.exception("Failed fetch of whisper file '%s'" % self.fs_path)
            return None
        if not data:
            return None

        time_info, values = data
        (start, end, step) = time_info

        meta_info = self.info()
        aggregation_method = meta_info['aggregationMethod']

        # Merge in data from carbon's cache
        values = merge_with_carbonlink(
            self.real_metric_path, start, step, values, aggregation_method, self.get_raw_step())

        return time_info, values
Example #44
def index_json(request):
    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    jsonp = queryParams.get('jsonp', False)
    cluster = queryParams.get('cluster', False)

    def find_matches():
        matches = []

        for root, dirs, files in os.walk(settings.WHISPER_DIR):
            root = root.replace(settings.WHISPER_DIR, '')
            for basename in files:
                if fnmatch.fnmatch(basename, '*.wsp'):
                    matches.append(os.path.join(root, basename))

        for root, dirs, files in os.walk(settings.CERES_DIR):
            root = root.replace(settings.CERES_DIR, '')
            for filename in files:
                if filename == '.ceres-node':
                    matches.append(root)

        matches = [
            m.replace('.wsp', '').replace('.rrd', '').replace('/',
                                                              '.').lstrip('.')
            for m in sorted(matches)
        ]
        return matches

    matches = []
    if cluster and len(settings.CLUSTER_SERVERS) >= 1:
        try:
            matches = reduce( lambda x, y: list(set(x + y)), \
              [json.loads(urllib.urlopen('http://' + cluster_server + '/metrics/index.json').read()) \
              for cluster_server in settings.CLUSTER_SERVERS])
        except urllib.URLError:
            log.exception()
            return json_response_for(request, matches, jsonp=jsonp, status=500)
    else:
        matches = find_matches()
    return json_response_for(request, matches, jsonp=jsonp)
Example #45
    def auto_complete_values(self,
                             exprs,
                             tag,
                             valuePrefix=None,
                             limit=None,
                             requestContext=None):
        """
        Return auto-complete suggestions for tags and values based on the matches for the specified expressions, optionally filtered by tag and/or value prefix
        """
        if limit is None:
            limit = settings.TAGDB_AUTOCOMPLETE_LIMIT

        fields = [
            ('tag', tag or ''),
            ('valuePrefix', valuePrefix or ''),
            ('limit', str(limit)),
            ('local', self.params.get('local', '1')),
        ]
        for expr in exprs:
            fields.append(('expr', expr))

        result = self.request('/tags/autoComplete/values',
                              fields,
                              headers=requestContext.get('forwardHeaders')
                              if requestContext else None,
                              timeout=settings.FIND_TIMEOUT)
        try:
            reader = codecs.getreader('utf-8')
            results = json.load(reader(result))
        except Exception as err:
            self.fail()
            log.exception(
                "RemoteFinder[%s] Error decoding autocomplete values response from %s: %s"
                % (self.host, result.url_full, err))
            raise Exception(
                "Error decoding autocomplete values response from %s: %s" %
                (result.url_full, err))
        finally:
            result.release_conn()

        return results
Example #46
def merge_with_carbonlink(metric,
                          start,
                          step,
                          values,
                          aggregation_method=None):
    """Get points from carbonlink and merge them with existing values."""
    cached_datapoints = []
    try:
        cached_datapoints = CarbonLink().query(metric)
    except BaseException:
        log.exception("Failed CarbonLink query '%s'" % metric)
        cached_datapoints = []

    if isinstance(cached_datapoints, dict):
        cached_datapoints = cached_datapoints.items()

    return merge_with_cache(cached_datapoints,
                            start,
                            step,
                            values,
                            func=aggregation_method)
Example #47
    def _find_paths(self, current_dir, patterns):
        """Recursively generates absolute paths whose components underneath current_dir
    match the corresponding pattern in patterns"""
        pattern = patterns[0]
        patterns = patterns[1:]
        try:
            entries = os.listdir(current_dir)
        except OSError as e:
            log.exception(e)
            entries = []

        subdirs = [e for e in entries if isdir(join(current_dir, e))]
        matching_subdirs = match_entries(subdirs, pattern)

        if len(patterns) == 1 and RRDReader.supported:  # the last pattern may apply to RRD data sources
            files = [e for e in entries if isfile(join(current_dir, e))]
            rrd_files = match_entries(files, pattern + ".rrd")

            if rrd_files:  #let's assume it does
                datasource_pattern = patterns[0]

                for rrd_file in rrd_files:
                    absolute_path = join(current_dir, rrd_file)
                    yield absolute_path + self.DATASOURCE_DELIMETER + datasource_pattern

        if patterns:  #we've still got more directories to traverse
            for subdir in matching_subdirs:

                absolute_path = join(current_dir, subdir)
                for match in self._find_paths(absolute_path, patterns):
                    yield match

        else:  #we've got the last pattern
            files = [e for e in entries if isfile(join(current_dir, e))]
            matching_files = match_entries(files, pattern + '.*')

            for basename in matching_files + matching_subdirs:
                yield join(current_dir, basename)
Example #48
    def get_results(self):
        if self.failed:
            return

        if self.cachedResult is not None:
            results = self.cachedResult
        else:
            if self.connection is None:
                self.send()

            try:
                try:  # Python 2.7+, use buffering of HTTP responses
                    response = self.connection.getresponse(buffering=True)
                except TypeError:  # Python 2.6 and older
                    response = self.connection.getresponse()
                assert response.status == 200, "received error response %s - %s" % (
                    response.status, response.reason)
                result_data = response.read()
                results = unpickle.loads(result_data)

            except:
                log.exception(
                    "FindRequest.get_results(host=%s, query=%s) exception processing response"
                    % (self.store.host, self.query))
                self.store.fail()
                return

            cache.set(self.cacheKey, results, settings.FIND_CACHE_DURATION)

        for node_info in results:
            if node_info.get('is_leaf'):
                reader = RemoteReader(self.store,
                                      node_info,
                                      bulk_query=self.query.pattern)
                node = LeafNode(node_info['path'], reader)
            else:
                node = BranchNode(node_info['path'])

            node.local = False
            yield node
Example #49
 def accessor(self):
     """Return an accessor."""
     with self._lock:
         if not self._accessor:
             from django.conf import settings as django_settings
             accessor = graphite_utils.accessor_from_settings(
                 django_settings)
             # If connect() fail it will raise an exception that will be caught
             # by the caller. If the plugin is called again, self._accessor will
             # still be None and a new accessor will be created.
             try:
                 accessor.connect()
             except Exception as e:
                 log.exception("failed to connect()")
                 accessor.shutdown()
                 raise e
             accessor.set_cache(
                 bg_accessor_cache.DjangoCache(self.django_cache()),
                 metadata_ttl=django_settings.FIND_CACHE_DURATION,
                 data_ttl=django_settings.DEFAULT_CACHE_DURATION)
             self._accessor = accessor
     return self._accessor
Example #50
    def send(self, headers=None):
        log.info("FindRequest.send(host=%s, query=%s) called" %
                 (self.store.host, self.query))

        if headers is None:
            headers = {}

        self.cachedResult = cache.get(self.cacheKey)
        if self.cachedResult is not None:
            log.info("FindRequest(host=%s, query=%s) using cached result" %
                     (self.store.host, self.query))
            return

        query_params = [
            ('local', '1'),
            ('format', 'pickle'),
            ('query', self.query.pattern),
        ]
        if self.query.startTime:
            query_params.append(('from', self.query.startTime))

        if self.query.endTime:
            query_params.append(('until', self.query.endTime))

        query_string = urlencode(query_params)

        try:
            connector_class = connector_class_selector(
                settings.INTRACLUSTER_HTTPS)
            self.connection = connector_class(self.store.host)
            self.connection.timeout = settings.REMOTE_FIND_TIMEOUT
            self.connection.request('GET', '/metrics/find/?' + query_string,
                                    None, headers)
        except:
            log.exception(
                "FindRequest.send(host=%s, query=%s) exception during request"
                % (self.store.host, self.query))
            self.store.fail()
            self.failed = True
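
For illustration, the query string built above is a plain urlencode of (key, value) pairs; a standalone check with made-up values might look like this:

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode        # Python 2

query_params = [
    ('local', '1'),
    ('format', 'pickle'),
    ('query', 'carbon.agents.*.cpuUsage'),
    ('from', 1609455600),
    ('until', 1609459200),
]
print('/metrics/find/?' + urlencode(query_params))
# e.g. /metrics/find/?local=1&format=pickle&query=carbon.agents.%2A.cpuUsage&from=1609455600&until=1609459200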
Example No. 51
0
def prefetchRemoteData(requestContext, pathExpressions):
  # if required, fetch data from all remote nodes
  # storing the result in a big hash of the form:
  # data[node][hash(originalPathExpression, start, end)] = [ matchingSeries, matchingSeries2, ... ]
  prefetchedRemoteData = {}
  if requestContext['localOnly']:
    return prefetchedRemoteData

  (startTime, endTime, now) = _timebounds(requestContext)
  result_queue = fetchRemoteData(requestContext, pathExpressions, False)
  while not result_queue.empty():
    try:
      (node, results) = result_queue.get_nowait()
    except Queue.Empty:
      log.exception("result_queue not empty, but unable to retrieve results")
      continue

    # prefill result with empty list
    # Needed to be able to detect if a query has already been made
    prefetchedRemoteData[node] = {}
    for pe in pathExpressions:
      prefetchedRemoteData[node][_prefetchMetricKey(pe, startTime, endTime)] = []

    for series in results:
      # series.pathExpression is original target, ie. containing wildcards
      # XXX would be nice to disable further prefetch calls to that backend
      try:
        k = _prefetchMetricKey(series['pathExpression'], startTime, endTime)
      except KeyError:
        log.exception("Remote node %s doesn't support prefetching data... upgrade!" % node)
        raise

      if prefetchedRemoteData[node].get(k) is None:
        # This should not be needed because of above filling with [],
        # but could happen if backend sends unexpected stuff
        prefetchedRemoteData[node][k] = [series]
      else:
        prefetchedRemoteData[node][k].append(series)

  return prefetchedRemoteData
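
The _prefetchMetricKey helper is not shown in this snippet; a plausible stand-in (purely illustrative, not the actual implementation) only needs to map a path expression and time bounds to a stable, hashable key:

import hashlib

def prefetch_metric_key(path_expression, start_time, end_time):
    """Illustrative stand-in: derive a stable cache key from the expression and time bounds."""
    raw = '%s-%d-%d' % (path_expression, start_time, end_time)
    return hashlib.md5(raw.encode('utf-8')).hexdigest()

# prefetchedRemoteData[node][prefetch_metric_key('carbon.agents.*.cpuUsage', 1609455600, 1609459200)]
# would then hold the list of series that backend returned for that expression.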
Example No. 52
0
def set_metadata_view(request):
    results = {}

    if request.method == 'GET':
        metric = request.GET['metric']
        key = request.GET['key']
        value = request.GET['value']
        try:
            results[metric] = CarbonLink.set_metadata(metric, key, value)
        except:
            log.exception()
            results[metric] = dict(
                error="Unexpected error occurred in CarbonLink.set_metadata(%s, %s)"
                      % (metric, key))

    elif request.method == 'POST':
        if request.META.get('CONTENT_TYPE') == 'application/json':
            operations = json.loads(request.body)
        else:
            operations = json.loads(request.POST['operations'])

        for op in operations:
            metric = None
            try:
                metric, key, value = op['metric'], op['key'], op['value']
                results[metric] = CarbonLink.set_metadata(metric, key, value)
            except:
                log.exception()
                if metric:
                    results[metric] = dict(
                        error="Unexpected error occurred in bulk CarbonLink.set_metadata(%s)"
                              % metric)

    else:
        results = dict(error='Invalid request method')

    return json_response_for(request, results)
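
A hedged client-side sketch of the bulk (POST) form of this view, assuming the view is wired to a URL such as /metadata/set/ (the exact path depends on the project's urlconf) and using the third-party requests library:

import json
import requests

operations = [
    {'metric': 'carbon.agents.host1.cpuUsage', 'key': 'aggregationMethod', 'value': 'max'},
    {'metric': 'carbon.agents.host2.cpuUsage', 'key': 'aggregationMethod', 'value': 'max'},
]

# Sending a JSON body hits the CONTENT_TYPE == 'application/json' branch above.
resp = requests.post(
    'http://graphite-web.example/metadata/set/',  # assumed URL, adjust to your deployment
    data=json.dumps(operations),
    headers={'Content-Type': 'application/json'},
)
print(resp.json())  # maps each metric to its result or an error message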
Example No. 53
0
    def fetch(self, startTime, endTime):
        try:
            data = self.fetch_data(startTime, endTime)
        except IOError:
            log.exception("Failed fetch of whisper file '%s'" % self.fs_path)
            return None
        if not data:
            return None

        time_info, values = data
        (start, end, step) = time_info

        meta_info = self.info()
        aggregation_method = meta_info['aggregationMethod']
        lowest_step = min([i['secondsPerPoint']
                           for i in meta_info['archives']])

        # Merge in data from carbon's cache
        values = merge_with_carbonlink(
            self.real_metric_path, start, step, values, aggregation_method)

        return time_info, values
Example No. 54
0
  def fetch(self, startTime, endTime):
    data = self.ceres_node.read(startTime, endTime)
    time_info = (data.startTime, data.endTime, data.timeStep)
    values = list(data.values)

    # Merge in data from carbon's cache
    try:
      cached_datapoints = CarbonLink.query(self.real_metric_path)
    except:
      log.exception("Failed CarbonLink query '%s'" % self.real_metric_path)
      cached_datapoints = []

    for (timestamp, value) in cached_datapoints:
      interval = timestamp - (timestamp % data.timeStep)

      try:
        i = int(interval - data.startTime) // data.timeStep  # integer slot index
        values[i] = value
      except:
        pass

    return (time_info, values)
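
The index arithmetic used to splice cached points into the fixed-step series can be checked in isolation; a minimal, self-contained sketch with made-up numbers:

start, end, step = 1000, 1060, 10           # fetched window: 6 slots
values = [None] * ((end - start) // step)

cached_datapoints = [(1023, 4.2), (1047, 7.5)]  # (timestamp, value) pairs from carbon's cache

for timestamp, value in cached_datapoints:
    interval = timestamp - (timestamp % step)   # align the timestamp to its slot boundary
    i = (interval - start) // step              # slot index; // keeps it an int on Python 3 too
    if 0 <= i < len(values):
        values[i] = value

print(values)  # [None, None, 4.2, None, 7.5, None]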
Example No. 55
0
    def wait_for_results():
      if request_lock.acquire(False): # we only send the request the first time we're called
        try:
          log.info("RemoteReader.request_data :: requesting %s" % url)
          self.connection = HTTPConnectionWithTimeout(self.store.host)
          self.connection.timeout = settings.REMOTE_FETCH_TIMEOUT
          self.connection.request('GET', urlpath)
        except:
          log.exception("Error requesting %s" % url)
          wait_lock.acquire(False)
          completion_event.set()
          self.store.fail()
          raise
      if wait_lock.acquire(False): # the FetchInProgress that gets waited on waits for the actual completion
        try:
          response = self.connection.getresponse()
          if response.status != 200:
            raise Exception("Error response %d %s from %s" % (response.status, response.reason, url))

          pickled_response = response.read()
          results = unpickle.loads(pickled_response)
          with self.cache_lock:  # released automatically even if the assignment raises
            self.request_cache[url] = results
          completion_event.set()
          return results
        except:
          completion_event.set()
          self.store.fail()
          log.exception("Error requesting %s" % url)
          raise

      else: # otherwise we just wait on the completion_event
        completion_event.wait(settings.REMOTE_FETCH_TIMEOUT)
        cached_results = self.request_cache.get(url)
        if cached_results is None:
          raise Exception("Passive remote fetch failed to find cached results")
        else:
          return cached_results
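
Stripped of the HTTP details, the coordination above is: the first caller grabs a non-blocking lock and performs the fetch, every other caller blocks on an Event and then reads a shared cache. A self-contained sketch of that pattern (module-level locks here for brevity; the real code keeps them per request):

import threading

request_cache = {}
cache_lock = threading.Lock()
wait_lock = threading.Lock()
completion_event = threading.Event()

def fetch_once(url, do_fetch, timeout=30):
    """First caller runs do_fetch(url); later callers wait for the cached result."""
    if wait_lock.acquire(False):      # non-blocking: only the first caller wins
        try:
            results = do_fetch(url)
            with cache_lock:
                request_cache[url] = results
            return results
        finally:
            completion_event.set()    # wake passive waiters even if the fetch failed
    completion_event.wait(timeout)    # passive path: wait for the active fetcher
    results = request_cache.get(url)
    if results is None:
        raise RuntimeError("passive fetch found no cached results for %s" % url)
    return results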
Example No. 56
0
            def receiveResponse():
                try:
                    buf = ''
                    remaining = 4
                    message_size = None

                    while remaining:
                        packet = connection.recv(remaining)
                        assert packet, "CarbonLink lost connection to %s" % str(host)

                        buf += packet

                        if message_size is None:
                            if len(buf) == 4:
                                remaining = message_size = struct.unpack("!L", buf)[0]
                                buf = ''
                                continue

                        remaining -= len(packet)

                    # We're done with the connection for this request, put it in the pool
                    self.putConnectionInPool(host, connection)

                    # Now parse the response
                    points = pickle.loads(buf)
                    log.cache("CarbonLink to %s, retrieved %d points for %s" %
                              (host, len(points), metric))

                    for point in points:
                        yield point

                except:
                    log.exception(
                        "CarbonLink to %s, exception while getting response" %
                        str(host))
                    self.removeConnectionFromPool(host, connection)
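
The receive loop above expects a 4-byte big-endian length header ("!L") followed by a pickled payload. The matching sender side, sketched for illustration only:

import pickle
import struct

def frame_message(obj):
    """Serialize obj and prepend the 4-byte big-endian length header the recv loop expects."""
    payload = pickle.dumps(obj, protocol=2)
    return struct.pack('!L', len(payload)) + payload

# e.g. connection.sendall(frame_message({'type': 'cache-query', 'metric': metric}))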
Example No. 57
0
    def fetch_multi(self, startTime, endTime, now=None, requestContext=None):
        if not self.bulk_query:
            return []

        query_params = [('format', self.finder.params.get('format', 'pickle')),
                        ('local', self.finder.params.get('local', '1')),
                        ('noCache', '1'), ('from', int(startTime)),
                        ('until', int(endTime))]

        for target in self.bulk_query:
            query_params.append(('target', target))

        if now is not None:
            query_params.append(('now', int(now)))

        headers = requestContext.get(
            'forwardHeaders') if requestContext else None

        retries = 1  # start counting at one to make log output and settings more readable
        while True:
            try:
                result = self.finder.request(
                    '/render/',
                    fields=query_params,
                    headers=headers,
                    timeout=settings.FETCH_TIMEOUT,
                )
                break
            except Exception:
                if retries >= settings.MAX_FETCH_RETRIES:
                    log.exception("Failed after %s attempts! Root cause:\n%s" %
                                  (settings.MAX_FETCH_RETRIES, format_exc()))
                    raise
                else:
                    log.exception(
                        "Got an exception when fetching data! Try: %i of %i. Root cause:\n%s"
                        % (retries, settings.MAX_FETCH_RETRIES, format_exc()))
                retries += 1

        data = self.deserialize(result)

        try:
            return [{
                'pathExpression': series.get('pathExpression', series['name']),
                'name': series['name'],
                'time_info': (series['start'], series['end'], series['step']),
                'values': series['values'],
            } for series in data]
        except Exception as err:
            self.finder.fail()
            log.exception(
                "RemoteReader[%s] Invalid render response from %s: %s" %
                (self.finder.host, result.url_full, repr(err)))
            raise Exception("Invalid render response from %s: %s" %
                            (result.url_full, repr(err)))
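
The retry loop above follows a common shape: attempt, re-raise once the limit is reached, otherwise log and try again. A generic, self-contained sketch of that shape (the names are illustrative, not graphite settings):

import logging
from traceback import format_exc

log = logging.getLogger(__name__)

def call_with_retries(func, max_retries=3):
    """Call func() up to max_retries times, re-raising the last exception."""
    attempt = 1  # count from one so the log output matches the configured limit
    while True:
        try:
            return func()
        except Exception:
            if attempt >= max_retries:
                log.error("Failed after %s attempts! Root cause:\n%s", max_retries, format_exc())
                raise
            log.warning("Attempt %i of %i failed, retrying.\n%s", attempt, max_retries, format_exc())
            attempt += 1

# e.g. data = call_with_retries(lambda: finder.request('/render/', fields=query_params))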
Example No. 58
0
        def wait_for_results():
            connection_event.wait(1)
            connection = self.connections.get(url)
            if not connection:
                log.exception("Wait For Results - No connection found.")
            # the FetchInProgress that gets waited on waits for the actual completion
            if wait_lock.acquire(False):
                try:
                    response = connection.getresponse()
                    if response.status != 200:
                        raise Exception(
                            "Error response %d %s from %s" %
                            (response.status, response.reason, url))

                    pickled_response = response.read()
                    results = pickle.loads(pickled_response)
                    with self.cache_lock:  # released automatically even if the assignment raises
                        self.request_cache[url] = results
                    completion_event.set()
                    return results
                except:
                    completion_event.set()
                    self.store.fail()
                    log.exception("Wait For Results - Error requesting %s" %
                                  url)
                    raise

            else:  # otherwise we just wait on the completion_event
                completion_event.wait(settings.REMOTE_FETCH_TIMEOUT)
                cached_results = self.request_cache.get(url)
                if cached_results is None:
                    raise Exception(
                        "Passive remote fetch failed to find cached results for %s"
                        % url)
                else:
                    return cached_results
Example No. 59
0
    def fetch(self, startTime, endTime):
        data = whisper.fetch(self.fs_path, startTime, endTime)
        time_info, values = data
        (start, end, step) = time_info

        # Merge in data from carbon's cache
        try:
            cached_datapoints = CarbonLink.query(self.real_metric_path)
        except:
            log.exception("Failed CarbonLink query '%s'" %
                          self.real_metric_path)
            cached_datapoints = []

        for (timestamp, value) in cached_datapoints:
            interval = timestamp - (timestamp % step)

            try:
                i = int(interval - start) // step  # integer slot index
                values[i] = value
            except:
                pass

        return (time_info, values)
Example No. 60
0
    def get_index(self, requestContext):
        url = '/metrics/index.json'

        headers = requestContext.get('forwardHeaders')

        result = self.request(
            url,
            fields=[
              ('local', '1'),
            ],
            headers=headers,
            timeout=settings.REMOTE_FIND_TIMEOUT)

        try:
            results = json.loads(result.data)
        except Exception as err:
            self.fail()
            log.exception(
                "RemoteFinder[%s] Error decoding index response from %s: %s" %
                (self.host, result.url_full, err))
            raise Exception("Error decoding index response from %s: %s" % (result.url_full, err))

        return results
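
For reference, /metrics/index.json returns a flat JSON array of dotted metric names, so the decoded results above is simply a list of strings; a tiny check against a representative payload:

import json

payload = '["carbon.agents.host1.cpuUsage", "carbon.agents.host1.memUsage"]'
results = json.loads(payload)
print(results[0])  # carbon.agents.host1.cpuUsage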