Example #1
  def _fetchData(pathExpr, startTime, endTime, requestContext, seriesList):
    matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])
    fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]

    for node, results in fetches:
      if isinstance(results, FetchInProgress):
        results = results.waitForResults()

      if not results:
        log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
        continue

      try:
          (timeInfo, values) = results
      except ValueError as e:
          raise Exception("could not parse timeInfo/values from metric '%s': %s" % (node.path, e))
      (start, end, step) = timeInfo

      series = TimeSeries(node.path, start, end, step, values)
      series.pathExpression = pathExpr #hack to pass expressions through to render functions
      seriesList.append(series)

    # Prune empty series with duplicate metric paths to avoid showing empty graph elements for old whisper data
    names = set([ s.name for s in seriesList ])
    for name in names:
      series_with_duplicate_names = [ s for s in seriesList if s.name == name ]
      empty_duplicates = [ s for s in series_with_duplicate_names if not nonempty(s) ]

      if series_with_duplicate_names == empty_duplicates and len(empty_duplicates) > 0: # if they're all empty
        empty_duplicates.pop() # make sure we leave one in seriesList

      for series in empty_duplicates:
        seriesList.remove(series)

    return seriesList
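
The pruning step above depends on a nonempty() helper that is not shown. A minimal sketch of what it is assumed to do (report whether a series holds at least one non-None datapoint):

def nonempty(series):
  for value in series:
    if value is not None:
      return True
  return False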
Example #2
    def request_series():
      if request_lock.acquire(False): # the FetchInProgress that gets waited on waits for the actual completion
        try:
          log.info("RemoteReader.request_data :: requesting %s" % url)
          connection = HTTPConnectionWithTimeout(self.store.host)
          connection.timeout = settings.REMOTE_FETCH_TIMEOUT
          connection.request('GET', urlpath)
          response = connection.getresponse()
          if response.status != 200:
            raise Exception("Error response %d %s from %s" % (response.status, response.reason, url))
          pickled_response = response.read()
          results = unpickle.loads(pickled_response)
          self.cache_lock.acquire()
          self.request_cache[url] = results
          self.cache_lock.release()
          completion_event.set()
          return results
        except:
          completion_event.set()
          self.store.fail()
          log.exception("Error requesting %s" % url)
          raise

      else: # otherwise we just wait on the completion_event
        completion_event.wait(settings.REMOTE_FETCH_TIMEOUT)
        cached_results = self.request_cache.get(url)
        if cached_results is None:
          raise Exception("Passive remote fetch failed to find cached results")
        else:
          return cached_results
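
Example #2 is a single-flight pattern: only the thread that wins request_lock performs the HTTP fetch, caches the result, and sets completion_event; every other thread waits on the event and reads the cache. A self-contained, hedged sketch of the same idea (all names and the fake fetch are illustrative, not graphite-web code):

import threading
import time

request_lock = threading.Lock()
completion_event = threading.Event()
request_cache = {}

def fetch_once(url):
  if request_lock.acquire(False):       # non-blocking: only one thread wins
    try:
      time.sleep(0.1)                   # stand-in for the real HTTP request
      request_cache[url] = 'payload for %s' % url
      return request_cache[url]
    finally:
      completion_event.set()            # wake the passive waiters
  else:
    completion_event.wait(5)            # passive path: wait, then read the cache
    return request_cache.get(url)

threads = [threading.Thread(target=fetch_once, args=('http://host/render',)) for _ in range(3)]
for t in threads:
  t.start()
for t in threads:
  t.join()
print(request_cache)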
Example #3
def fetchData(requestContext, pathExpr):

  seriesList = []
  startTime = int( time.mktime( requestContext['startTime'].timetuple() ) )
  endTime   = int( time.mktime( requestContext['endTime'].timetuple() ) )

  matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])
  fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]

  for node, results in fetches:
    if isinstance(results, FetchInProgress):
      results = results.waitForResults()

    if not results:
      log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
      continue

    try:
        (timeInfo, values) = results
    except ValueError as e:
        raise Exception("could not parse timeInfo/values from metric '%s': %s" % (node.path, e))
    (start, end, step) = timeInfo

    series = TimeSeries(node.path, start, end, step, values)
    series.pathExpression = pathExpr #hack to pass expressions through to render functions
    seriesList.append(series)
Example #4
  def doScan(self, spec, table, cb):
    with self.semaphore:
      start = time.time()
      conn = self.getConn()
      namespace = conn.namespace_open('monitor')
      scanner = conn.scanner_open(namespace, table, spec)

      while True:
        buf = conn.scanner_get_cells_serialized(scanner)
        scr = libHyperPython.SerializedCellsReader(buf, len(buf))
        any_rows = False
        while scr.has_next():
          any_rows = True
          cb( scr.row(),
              scr.column_family(),
              scr.column_qualifier(),
              scr.value()[0:scr.value_len()],
              scr.timestamp())
        if not any_rows:
          break

      conn.close_scanner(scanner)
      self.releaseConn(conn)
      log.info(spec)
      log.info('scan-fetch time: %s' % (time.time() - start))
Example #5
def removePrefix(path):
  if settings.HYPERTABLE_PREFIX:
    log.info(path)
    log.info(settings.HYPERTABLE_PREFIX)
    return re.sub(r'^%s\.' % settings.HYPERTABLE_PREFIX, '', path)
  else:
    return path
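
A quick, hedged illustration of the prefix stripping above with a hypothetical prefix value (settings and log are omitted so it stands alone):

import re

HYPERTABLE_PREFIX = 'prod'   # assumed value for illustration
path = 'prod.servers.web01.load'
print(re.sub(r'^%s\.' % HYPERTABLE_PREFIX, '', path))   # -> servers.web01.load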
Example #6
    def send(self):
        log.info("FindRequest.send(host=%s, query=%s) called" % (self.store.host, self.query))

        self.cachedResult = cache.get(self.cacheKey)
        if self.cachedResult is not None:
            log.info("FindRequest(host=%s, query=%s) using cached result" % (self.store.host, self.query))
            return

        self.connection = HTTPConnectionWithTimeout(self.store.host)
        self.connection.timeout = settings.REMOTE_FIND_TIMEOUT

        query_params = [("local", "1"), ("format", "pickle"), ("query", self.query.pattern)]
        if self.query.startTime:
            query_params.append(("from", self.query.startTime))

        if self.query.endTime:
            query_params.append(("until", self.query.endTime))

        query_string = urlencode(query_params)

        try:
            self.connection.request("GET", "/metrics/find/?" + query_string)
        except:
            log.exception(
                "FindRequest.send(host=%s, query=%s) exception during request" % (self.store.host, self.query)
            )
            self.store.fail()
            self.failed = True
Example #7
    def get_records(self, path_items):
        log.info('get_record: START')

        prev_paths = [path_items]
        records = []

        # Evaluate path
        while True:
            cur_paths = []
            loop_records = []

            for path in prev_paths:
                tmp_paths, tmp_records = self.eval_path(path)
                cur_paths += tmp_paths
                loop_records += tmp_records

            if loop_records or not cur_paths:
                records = loop_records

                # Break if the record is available or if the path is invalid
                break
            else:
                prev_paths = cur_paths

        log.info('get_record: END')

        return records
Example #8
  def reload(self):
    log.info("[IndexSearcher] reading index data from %s" % self.index_path)
    t = time.time()
    total_entries = 0
    tree = (None, {}) # (data, children)
    for line in open(self.index_path):
      line = line.strip()
      if not line:
        continue

      branches = line.split('.')
      leaf = branches.pop()
      parent = None
      cursor = tree
      for branch in branches:
        if branch not in cursor[1]:
          cursor[1][branch] = (None, {}) # (data, children)
        parent = cursor
        cursor = cursor[1][branch]

      cursor[1][leaf] = (line, {})
      total_entries += 1

    self._tree = tree
    self.last_mtime = os.path.getmtime(self.index_path)
    log.info("[IndexSearcher] index reload took %.6f seconds (%d entries)" % (time.time() - t, total_entries))
Example #9
def searchLocal(request):
  query = request.POST['query']
  log.info('query: %s', query)
  if not query:
    return HttpResponse("")

  patterns = query.split()
  regexes = [re.compile(p,re.I) for p in patterns]
  def matches(s):
    for regex in regexes:
      if regex.search(s):
        return True
    return False

  results = []

  index_file = open(settings.INDEX_FILE)
  for line in index_file:
    if matches(line):
      results.append( line.strip() )
    if len(results) >= 100:
      break

  index_file.close()
  result_string = ','.join(results)
  return HttpResponse(result_string, mimetype='text/plain')
Example #10
  def send(self):
    log.info("FindRequest.send(host=%s, query=%s) called" % (self.store.host, self.query))

    self.cachedResult = cache.get(self.cacheKey)
    if self.cachedResult is not None:
      log.info("FindRequest(host=%s, query=%s) using cached result" % (self.store.host, self.query))
      return

    self.connection = HTTPConnectionWithTimeout(self.store.host)
    self.connection.timeout = settings.REMOTE_FIND_TIMEOUT

    query_params = [
      ('local', '1'),
      ('format', 'pickle'),
      ('query', self.query.pattern),
    ]
    if self.query.startTime:
      query_params.append( ('from', self.query.startTime) )

    if self.query.endTime:
      query_params.append( ('until', self.query.endTime) )

    query_string = urlencode(query_params)

    try:
      self.connection.request('GET', '/metrics/find/?' + query_string)
    except:
      log.exception("FindRequest.send(host=%s, query=%s) exception during request" % (self.store.host, self.query))
      self.store.fail()
      self.failed = True
Example #11
 def test_info_log(self):
     """ Testing writing to a log file. """
     message = 'Test Info Message'
     log.info(message)
     lines = [l for l in open(os.path.join(settings.LOG_DIR,
              'info.log')).readlines()]
     self.assertEqual(message, lines[-1].split('::')[1].strip())
Example #12
def _dosave(request,viewName):
  profile = getProfile(request)
  #First find our View
  log.info("Saving view '%s' under profile '%s'" % (viewName,profile.user.username))
  try:
    view = profile.view_set.get(name=viewName)
  except ObjectDoesNotExist:
    view = View(profile=profile,name=viewName)
    view.save()
  #Now re-associate the view with the correct Windows
  view.window_set.all().delete()
  for windowName,encodedString in request.GET.items():
    try:
      if windowName in ('_','commandInput'): continue
      paramString = urllib.unquote_plus(encodedString)
      queryParams = cgi.parse_qs(paramString)
      modelParams = {}
      for key,value in queryParams.items(): #Clean up the window params
        key = str(key)
        value = str(value[0])
        if key in ('top','left'):
          value = int(float( value.replace('px','') ))
        if key in ('width','height','interval'):
          value = int(float(value))
        modelParams[key] = value
      if 'interval' not in modelParams:
        modelParams['interval'] = None
      win = Window(view=view,name=windowName,**modelParams)
      win.save()
    except:
      log.exception("Failed to process parameters for window '%s'" % windowName)
  return stdout('Saved view %s' % viewName)
Example #13
 def stop(self):
   log.info(
     '{name} :: {msg} {sec:.6}s'.format(
       name=self.name,
       msg=self.msg,
       sec=time.time() - self.start_time,
     )
   )
Example #14
 def find_nodes(self, query):
     log.info("q:" + repr(query))
     try:
         for node in self._find_nodes_from_pattern(self.kudu_table, query.pattern):
             yield node
     except Exception as e:
         log.exception(e)
         raise
Example #15
 def find_nodes(self, query):
     log.info("q:" + repr(query))
     try:
       for node in self._find_nodes_from_pattern(self.kudu_table, query.pattern):
           yield node
     except Exception as e:
       log.exception(e)
       raise
Example #16
 def get_kairosdb_url(self, kairosdb_uri, url):
     full_url = "%s/%s" % (kairosdb_uri, url)
     log.info("kairosdb.KairosdbUtils.get_kairosdb_url(): url: %s" % (url))
     tstart = time.time()
     result = requests.get(full_url, timeout=5)
     delay = time.time() - tstart
     log.info("kairosdb.KairosdbUtils.get_kairosdb_url(): full url: %s, delay: %5.8f, result: %s" % (full_url, delay, result))
     return result.json()
Example #17
  def tree(self):
    current_mtime = os.path.getmtime(self.index_path)
    if current_mtime > self.last_mtime:
      log.info("[IndexSearcher] reloading stale index, current_mtime=%s last_mtime=%s" %
               (current_mtime, self.last_mtime))
      self.reload()

    return self._tree
Example #18
 def __init__(self):
   self.index_path = settings.INDEX_FILE + 'ht'
   self.last_atime = 0
   self.every_metric = ''
   self.tree = ({}, {})
   log.info("[HyperIndex] performing initial index load")
   self._loadFromFile()
   self._loadFromHyperTable()
Example #19
File: util.py Project: aihua/graphite-web
 def stop(self):
   log.info(
     '{name} :: {msg} {sec:.6}s'.format(
       name=self.name,
       msg=self.msg,
       sec=time.time() - self.start_time,
     )
   )
Example #20
	def load_storage_schema(self):
		config = ConfigParser.ConfigParser()
		try:
			configFile = getattr(settings, 'GRAPHITE_SCHEMA', '/etc/cacher/storage_schema.ini')
			config.read(configFile)
		except Exception as e:
			log.info('Failed to read storage_schema file %s: %s' % (configFile, e))
			return
Example #21
  def tree(self):
    current_mtime = os.path.getmtime(self.index_path)
    if current_mtime > self.last_mtime:
      log.info("[IndexSearcher] reloading stale index, current_mtime=%s last_mtime=%s" %
               (current_mtime, self.last_mtime))
      self.reload()

    return self._tree
Example #22
    def find_nodes(self, query, reqkey):
        log.info("running blablabla RRd")
        clean_pattern = query.pattern.replace('\\', '')
        pattern_parts = clean_pattern.split('.')

        for root_dir in self.directories:
            for absolute_path in self._find_paths(root_dir, pattern_parts):
                if basename(absolute_path).startswith('.'):
                    continue

                if self.DATASOURCE_DELIMETER in basename(absolute_path):
                    (absolute_path, datasource_pattern) = absolute_path.rsplit(
                        self.DATASOURCE_DELIMETER, 1)
                else:
                    datasource_pattern = None

                relative_path = absolute_path[len(root_dir):].lstrip('/')
                metric_path = fs_to_metric(relative_path)
                real_metric_path = get_real_metric_path(
                    absolute_path, metric_path)

                metric_path_parts = metric_path.split('.')
                for field_index in find_escaped_pattern_fields(query.pattern):
                    metric_path_parts[field_index] = pattern_parts[
                        field_index].replace('\\', '')
                metric_path = '.'.join(metric_path_parts)

                # Now we construct and yield an appropriate Node object
                if isdir(absolute_path):
                    yield BranchNode(metric_path)

                elif isfile(absolute_path):
                    if absolute_path.endswith(
                            '.wsp') and WhisperReader.supported:
                        reader = WhisperReader(absolute_path, real_metric_path)
                        yield LeafNode(metric_path, reader)

                    elif absolute_path.endswith(
                            '.wsp.gz') and GzippedWhisperReader.supported:
                        reader = GzippedWhisperReader(absolute_path,
                                                      real_metric_path)
                        yield LeafNode(metric_path, reader)

                    elif absolute_path.endswith(
                            '.rrd') and RRDReader.supported:
                        if datasource_pattern is None:
                            yield BranchNode(metric_path)

                        else:
                            for datasource_name in RRDReader.get_datasources(
                                    absolute_path):
                                if match_entries([datasource_name],
                                                 datasource_pattern):
                                    reader = RRDReader(absolute_path,
                                                       datasource_name)
                                    yield LeafNode(
                                        metric_path + "." + datasource_name,
                                        reader)
Example #23
 def test_info_log(self):
     """ Testing writing to a log file. """
     message = 'Test Info Message'
     log.info(message)
     lines = [
         l for l in open(os.path.join(settings.LOG_DIR,
                                      'info.log')).readlines()
     ]
     self.assertEqual(message, lines[-1].split('::')[1].strip())
Example #24
    def query_log(self, node, start, elapsed, result_count, query, query_type, data_format, data_start, data_end):
        if self.query_log_enabled == False:
            return

        qs = time.strftime("%Y-%m-%d %H:%M:%S", start)
        e = str(elapsed)

        log.info('******* IRONdb query -- node: %s, start: %s, result_count: %d, type: %s, format: %s, elapsed: %s\n\n  [%s, %s] "%s"\n'
                 % (node, qs, result_count, query_type, data_format, e, data_start, data_end, query))
Example #25
 def fetch(self, startTime, endTime, now=None, requestContext=None):
     try:
         result = self.reader.fetch(startTime, endTime, now, requestContext)
     except TypeError:
         # Support for legacy 3rd party, readers.
         result = self.reader.fetch(startTime, endTime)
     
     log.info('$$$$$$$$$$$$$ {0}'.format(result))
     return result
Example #26
def searchHypertable(request):
  query = addPrefix(request.POST['query'])
  log.info('query: %s', query)
  if not query:
    return HttpResponse("")

  result_string = ','.join(HyperStore().search(query))

  return HttpResponse(result_string, mimetype='text/plain')
Example #27
    def get_intervals(self):
        log.info('===GET_INTERVALS===')
        # We have data from the beginning of the epoch :o)
        start = 1
        # We can see one hour into the future :o)
        end = int(time() + 3600)

        log.info("get_interval: start=%s; end=%s" % (start, end))

        return IntervalSet([Interval(start, end)])
Example #28
    def get_intervals(self):
        log.info('===GET_INTERVALS===')
        # We have data from the beginning of the epoch :o)
        start = 1
        # We can see one hour into the future :o)
        end = int(time()+3600)

        log.info("get_interval: start=%s; end=%s" % (start, end))

        return IntervalSet([Interval(start, end)])
Example #29
 def load_storage_schema(self):
     config = ConfigParser.ConfigParser()
     try:
         configFile = getattr(settings, 'GRAPHITE_SCHEMA',
                              '/etc/cacher/storage_schema.ini')
         config.read(configFile)
     except Exception as e:
         log.info('Failed to read storage_schema file %s: %s' %
                  (configFile, e))
         return
Example #30
def fetchData(requestContext, pathExpr):

    seriesList = []
    startTime = int(time.mktime(requestContext['startTime'].timetuple()))
    endTime = int(time.mktime(requestContext['endTime'].timetuple()))

    matching_nodes = STORE.find(pathExpr,
                                startTime,
                                endTime,
                                local=requestContext['localOnly'])
    fetches = [(node, node.fetch(startTime, endTime))
               for node in matching_nodes if node.is_leaf]

    for node, results in fetches:
        if isinstance(results, FetchInProgress):
            results = results.waitForResults()

        if not results:
            log.info(
                "render.datalib.fetchData :: no results for %s.fetch(%s, %s)" %
                (node, startTime, endTime))
            continue

        try:
            (timeInfo, values) = results
        except ValueError:
            e = sys.exc_info()[1]
            raise Exception(
                "could not parse timeInfo/values from metric '%s': %s" %
                (node.path, e))
        (start, end, step) = timeInfo

        series = TimeSeries(node.path, start, end, step, values)
        series.pathExpression = pathExpr  #hack to pass expressions through to render functions
        seriesList.append(series)

    # Prune empty series with duplicate metric paths to avoid showing empty graph elements for old whisper data
    names = set([series.name for series in seriesList])
    for name in names:
        series_with_duplicate_names = [
            series for series in seriesList if series.name == name
        ]
        empty_duplicates = [
            series for series in series_with_duplicate_names
            if not nonempty(series)
        ]

        if series_with_duplicate_names == empty_duplicates and len(
                empty_duplicates) > 0:  # if they're all empty
            empty_duplicates.pop()  # make sure we leave one in seriesList

        for series in empty_duplicates:
            seriesList.remove(series)

    return seriesList
Example #31
def find_view(request):
  "View for finding metrics matching a given pattern"
  profile = getProfile(request)
  format = request.REQUEST.get('format', 'treejson')
  local_only = int( request.REQUEST.get('local', 0) )
  contexts = int( request.REQUEST.get('contexts', 0) )
  wildcards = int( request.REQUEST.get('wildcards', 0) )

  try:
    query = str( request.REQUEST['query'] )
  except:
    return HttpResponseBadRequest(content="Missing required parameter 'query'", mimetype="text/plain")

  if '.' in query:
    base_path = query.rsplit('.', 1)[0] + '.'
  else:
    base_path = ''

  if local_only:
    store = LOCAL_STORE
  else:
    store = STORE

  matches = list( store.find(query) )

  log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
  matches.sort(key=lambda node: node.name)

  if format == 'treejson':
    content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards, contexts=contexts)
    response = HttpResponse(content, mimetype='text/json')

  elif format == 'pickle':
    content = pickle_nodes(matches, contexts=contexts)
    response = HttpResponse(content, mimetype='application/pickle')

  elif format == 'completer':
    #if len(matches) == 1 and (not matches[0].isLeaf()) and query == matches[0].metric_path + '*': # auto-complete children
    #  matches = list( store.find(query + '.*') )
    results = [ dict(path=node.metric_path, name=node.name) for node in matches ]

    if len(results) > 1 and wildcards:
      wildcardNode = {'name' : '*'}
      results.append(wildcardNode)

    content = json.dumps({ 'metrics' : results })
    response = HttpResponse(content, mimetype='text/json')

  else:
    return HttpResponseBadRequest(content="Invalid value for 'format' parameter", mimetype="text/plain")

  response['Pragma'] = 'no-cache'
  response['Cache-Control'] = 'no-cache'
  return response
Example #32
 def _connect(self, urlpath):
   url = "http://%s%s" % (self.store.host, urlpath)
   try:
     log.info("ReadResult :: requesting %s" % url)
     connector_class = connector_class_selector(settings.INTRACLUSTER_HTTPS)
     self.connection = connector_class(self.store.host)
     self.connection.timeout = settings.REMOTE_FETCH_TIMEOUT
     self.connection.request('GET', urlpath)
   except:
     self.store.fail()
     log.exception("Error requesting %s" % url)
     raise
Example #33
 def __init__(self, index_path):
   self.index_path = index_path
   if not os.path.exists(index_path):
     try:
       write_index()
     except:
       log.exception("Couldn't build index file %s" % index_path)
       raise RuntimeError("Couldn't build index file %s" % index_path)
   self.last_mtime = 0
   self._tree = (None, {}) # (data, children)
   log.info("[IndexSearcher] performing initial index load")
   self.reload()
Example #34
 def _connect(self, urlpath):
   url = "http://%s%s" % (self.store.host, urlpath)
   try:
     log.info("ReadResult :: requesting %s" % url)
     connector_class = connector_class_selector(settings.INTRACLUSTER_HTTPS)
     self.connection = connector_class(self.store.host)
     self.connection.timeout = settings.REMOTE_FETCH_TIMEOUT
     self.connection.request('GET', urlpath)
   except:
     self.store.fail()
     log.exception("Error requesting %s" % url)
     raise
Example #35
 def __init__(self, index_path):
     self.index_path = index_path
     if not os.path.exists(index_path):
         try:
             write_index()
         except:
             log.exception("Couldn't build index file %s" % index_path)
             raise RuntimeError("Couldn't build index file %s" % index_path)
     self.last_mtime = 0
     self._tree = (None, {})  # (data, children)
     log.info("[IndexSearcher] performing initial index load")
     self.reload()
Example #36
 def __init__(self, index_path):
   self.index_path = index_path
   if not os.path.exists(index_path):
     open(index_path, 'w').close() # touch the file to prevent re-entry down this code path
     build_index_path = os.path.join(settings.GRAPHITE_ROOT, "bin/build-index.sh")
     retcode = subprocess.call(build_index_path)
     if retcode != 0:
       log.exception("Couldn't build index file %s" % index_path)
       raise RuntimeError("Couldn't build index file %s" % index_path)
   self.last_mtime = 0
   self._tree = (None, {}) # (data, children)
   log.info("[IndexSearcher] performing initial index load")
   self.reload()
Example #37
 def __init__(self, index_path):
   self.index_path = index_path
   if not os.path.exists(index_path):
     open(index_path, 'w').close() # touch the file to prevent re-entry down this code path
     build_index_path = os.path.join(settings.GRAPHITE_ROOT, "bin/build-index.sh")
     retcode = subprocess.call(build_index_path)
     if retcode != 0:
       log.exception("Couldn't build index file %s" % index_path)
       raise RuntimeError("Couldn't build index file %s" % index_path)
   self.last_mtime = 0
   self._tree = (None, {}) # (data, children)
   log.info("[IndexSearcher] performing initial index load")
   self.reload()
Example #38
 def _loadFromFile(self):
   if os.path.exists(self.index_path):
     s = time.time()
     fh = open(self.index_path)
     has_lines = False
     for l in fh:
       if l.strip():
         self._add(l.strip())
         has_lines = True
     fh.close()
     if has_lines:
       self.last_atime = int(os.path.getmtime(self.index_path)) * 10**9L
     log.info("[HyperIndex] initial load took %.6f seconds" % (time.time() - s))
Example #39
def default_profile():
    # '!' is an unusable password. Since the default user never authenticates
    # this avoids creating a default (expensive!) password hash at every
    # default_profile() call.
    user, created = User.objects.get_or_create(
        username='******', defaults={'email': '*****@*****.**',
                                      'password': '******'})
    if created:
        log.info("Default user didn't exist, created it")
    profile, created = Profile.objects.get_or_create(user=user)
    if created:
        log.info("Default profile didn't exist, created it")
    return profile
Example #40
    def __repr__(self):
        log.info("FindQuery:repr " + str(self.start_time) + ", " + str(self.end_time))
        if self.start_time is None:
            start_string = '*'
        else:
            start_string = time.ctime(self.start_time)

        if self.end_time is None:
            end_string = '*'
        else:
            end_string = time.ctime(self.end_time)

        return '<FindQuery: %s from %s until %s>' % (self.pattern, start_string, end_string)
Example #41
def default_profile():
    # '!' is an unusable password. Since the default user never authenticates
    # this avoids creating a default (expensive!) password hash at every
    # default_profile() call.
    user, created = User.objects.get_or_create(
        username='******', defaults={'email': '*****@*****.**',
                                      'password': '******'})
    if created:
        log.info("Default user didn't exist, created it")
    profile, created = Profile.objects.get_or_create(user=user)
    if created:
        log.info("Default profile didn't exist, created it")
    return profile
Example #42
    def wait_jobs(self, jobs, timeout, context):
        if not jobs:
            return []

        start = time.time()
        results = []
        failed = []
        done = 0
        try:
            for job in self.pool_exec(jobs, timeout):
                elapsed = time.time() - start
                done += 1
                if job.exception:
                    failed.append(job)
                    log.info("Exception during %s after %fs: %s" % (
                        job, elapsed, str(job.exception))
                    )
                else:
                    log.debug("Got a result for %s after %fs" % (job, elapsed))
                    results.append(job.result)
        except PoolTimeoutError:
            message = "Timed out after %fs for %s" % (
                time.time() - start, context
            )
            log.info(message)
            if done == 0:
                raise Exception(message)

        if len(failed) == done:
            message = "All requests failed for %s (%d)" % (
                context, len(failed)
            )
            for job in failed:
                message += "\n\n%s: %s: %s" % (
                    job, job.exception,
                    '\n'.join(traceback.format_exception(*job.exception_info))
                )
            raise Exception(message)

        if len(results) < len(jobs) and settings.STORE_FAIL_ON_ERROR:
            message = "%s request(s) failed for %s (%d)" % (
                len(jobs) - len(results), context, len(jobs)
            )
            for job in failed:
                message += "\n\n%s: %s: %s" % (
                    job, job.exception,
                    '\n'.join(traceback.format_exception(*job.exception_info))
                )
            raise Exception(message)

        return results
Example #43
def search_view(request):
  query = str(request.REQUEST['query'].strip())
  log.info('searching for: %s' % query)
  search_request = {
    'query' : query,
    'max_results' : int( request.REQUEST.get('max_results', 25) ),
    'keep_query_pattern' : int(request.REQUEST.get('keep_query_pattern', 0)),
  }
  #if not search_request['query'].endswith('*'):
  #  search_request['query'] += '*'

  results = sorted(hypertable_searcher.search(**search_request))
  result_data = json.dumps( dict(metrics=results) )
  return HttpResponse(result_data, mimetype='application/json')
Example #44
    def fetch(self, patterns, startTime, endTime, now, requestContext):
        # deduplicate patterns
        patterns = list(set(patterns))

        if not patterns:
            return []

        log.debug(
            'graphite.storage.Store.fetch :: Starting fetch on all backends')

        jobs = [
            Job(finder.fetch,
                patterns,
                startTime,
                endTime,
                now=now,
                requestContext=requestContext)
            for finder in self.get_finders(requestContext.get('localOnly'))
        ]

        results = []

        done = 0
        errors = 0

        # Start fetches
        start = time.time()
        try:
            for job in self.pool_exec(jobs, settings.REMOTE_FETCH_TIMEOUT):
                done += 1

                if job.exception:
                    errors += 1
                    log.info("Fetch for %s failed after %fs: %s" %
                             (str(patterns), time.time() - start,
                              str(job.exception)))
                    continue

                log.debug("Got a fetch result for %s after %fs" %
                          (str(patterns), time.time() - start))
                results.extend(job.result)
        except PoolTimeoutError:
            log.info("Timed out in fetch after %fs" % (time.time() - start))

        if errors == done:
            raise Exception('All fetches failed for %s' % (str(patterns)))

        log.debug("Got all fetch results for %s in %fs" %
                  (str(patterns), time.time() - start))
        return results
Example #45
 def find_nodes(self, query):
     timeStart = time.time()
     
     cacheKey = "find_node_qpList:%s" % query.pattern
     tupes = cache.get(cacheKey)
     if not tupes:
         tupes = self.mt.find_nodes(query.pattern)
         cache.set(cacheKey, tupes, 30*60)
      
     nodes = []
     try:
         for mname, nodeType in tupes:
             if nodeType == 'L':
                 reader  = KairosdbReader(KAIROSDB_URL, mname)
                 nodes.append(self.getLeafNode(mname, reader, avoidIntervals=True))
             elif nodeType == 'B':
                 nodes.append(BranchNode(mname))
             else:
                 assert False, "KairosDBFinder.find_nodes(): ERROR: got wrong node type back from nodeType: %s" % (nodeType)
     except Exception as e:
         tb = traceback.format_exc()
         log.info("finders.KairosDBFinder.find_nodes(%s) EXCEPTION: e: %s, %s, tupes: %s." % (query, e, tb, tupes))
     if 0:
         log.info("finders.KairosDBFinder.find_nodes(%s) saving data! %d nodes." % (query.pattern, len(nodes)))
         if 0:
             log.info("finders.KairosDBFinder.find_nodes(%s) nodes to save: %s" % (query.pattern, nodes))
     delay = time.time() - timeStart
     log.info("KairosDBFinder.find_nodes(): kdbFindNodesDelay: %05.08f #tupes: %s query: %s" % (delay, len(tupes), query))
     return nodes
Example #46
    def _find_nodes_from_pattern(self, kudu_table, pattern):
        query_parts = []
        for part in pattern.split('.'):
            part = part.replace('*', '.*')
            part = re.sub(
                r'{([^{]*)}',
                lambda x: "(%s)" % x.groups()[0].replace(',', '|'),
                part,
            )
            query_parts.append(part)

        #Request for metrics
        t = self.client.open_table("metric_ids")
        s = t.scanner()

        # Handle a prefix pattern
        if re.match(".+\\*", pattern):
            prefix_match = pattern[:-1]
            if '.com.' in prefix_match:
                host_prefix, metric_prefix = prefix_match.split(".com.")
                host_prefix += ".com"
                s.add_predicate(
                    s.range_predicate(1, metric_prefix,
                                      metric_prefix + "\xff"))
            else:
                host_prefix = prefix_match

            s.add_predicate(
                s.range_predicate(0, host_prefix, host_prefix + "\xff"))
        elif not "*" in pattern:
            # equality match
            host, metric = pattern.split(".com.")
            host += ".com"
            s.add_predicate(s.range_predicate(0, host, host))
            s.add_predicate(s.range_predicate(1, metric, metric))
        s.open()

        metrics = []
        while s.has_more_rows():
            t = s.next_batch().as_tuples()
            log.info("batch: %d" % len(t))
            metrics.extend(t)
        metric_names = ["%s/%s" % (host, metric) for (host, metric) in metrics]
        #Form tree out of them
        metrics_tree = self._fill_kudu_tree(metric_names)

        for node in self._find_kudu_nodes(kudu_table, query_parts,
                                          metrics_tree):
            yield node
Example #47
 def clean_cache(self):
   self.cache_lock.acquire()
   try:
     if len(self.request_locks) >= settings.REMOTE_READER_CACHE_SIZE_LIMIT:
       log.info("RemoteReader.request_data :: clearing old from request_cache and request_locks")
       now = time.time()
       for url, timestamp in self.request_times.items():
         age = now - timestamp
         if age >= (2 * settings.REMOTE_FETCH_TIMEOUT):
           del self.request_locks[url]
           del self.request_times[url]
           if url in self.request_cache:
             del self.request_cache[url]
   finally:
     self.cache_lock.release()
Example #48
  def find_nodes(self, query):
    log.info("running ceres finder %s" % query)
    for fs_path in braces_glob( self.tree.getFilesystemPath(query.pattern) ):
      metric_path = self.tree.getNodePath(fs_path)

      if CeresNode.isNodeDir(fs_path):
        ceres_node = self.tree.getNode(metric_path)

        if ceres_node.hasDataForInterval(query.startTime, query.endTime):
          real_metric_path = get_real_metric_path(fs_path, metric_path)
          reader = CeresReader(ceres_node, real_metric_path)
          yield LeafNode(metric_path, reader)

      elif isdir(fs_path):
        yield BranchNode(metric_path)
Example #49
def history(request):
    query = request.GET.urlencode()
    view_dict = [('5 min', '5min'), ('1 hour', '1hour'), ('6 hours', '6hour'),
                 ('1 day', '1day'), ('1 week', '1week'), ('1 month', '1month'),
                 ('3 month', '3month'), ('1 year', '1year')]
    views = []
    query = re.sub(r'width=[0-9]+', 'width=800', query)
    query = re.sub(r'height=[0-9]+', 'height=350', query)
    for key in view_dict:
        views.append((key[0], re.sub(r'from=.*?&', "from=-%s&" % key[1],
                                     query)))
    log.info("DEBUG: views = %s" % views)
    context = {}
    context['views'] = views
    context['purpose'] = "history"
    return render_to_response("history.html", context)
Example #50
def get_branch_nodes(opentsdb_uri, current_branch, shared_reader, path):
    log.info(">> get_branch_nodes({0}, {1}, {2}, {3})".format(opentsdb_uri, current_branch, shared_reader, path))
    results = get_opentsdb_url(opentsdb_uri, "tree/branch?branch=%s" % current_branch)
    if results:
        if path:
            path += '.'
        if results['branches']:
            for branch in results['branches']:
                yield OpenTSDBBranchNode(branch['displayName'], path + branch['displayName']), branch
        if results['leaves']:
            for leaf in results['leaves']:
                reader = OpenTSDBReader(
                    opentsdb_uri,
                    leaf,
                    shared_reader,
                )
                yield OpenTSDBLeafNode(leaf['displayName'], path + leaf['displayName'], reader), leaf
Example #51
    def get_index(self, requestContext=None):
        log.debug(
            'graphite.storage.Store.get_index :: Starting get_index on all backends'
        )

        if not requestContext:
            requestContext = {}

        jobs = [
            Job(finder.get_index, requestContext=requestContext)
            for finder in self.get_finders(
                local=requestContext.get('localOnly'))
        ]

        results = []

        done = 0
        errors = 0

        # Start index lookups
        start = time.time()
        try:
            for job in self.pool_exec(jobs, settings.REMOTE_FETCH_TIMEOUT):
                done += 1

                if job.exception:
                    errors += 1
                    log.info("get_index failed after %fs: %s" %
                             (time.time() - start, str(job.exception)))
                    continue

                log.debug("Got an index result after %fs" %
                          (time.time() - start))
                results.extend(job.result)
        except PoolTimeoutError:
            log.info("Timed out in get_index after %fs" %
                     (time.time() - start))

        if errors == done:
            if errors == 1:
                raise Exception("get_index failed: %s" % (str(job.exception)))
            raise Exception('All index lookups failed')

        log.debug("Got all index results in %fs" % (time.time() - start))
        return sorted(list(set(results)))
Example #52
def prefetchRemoteData(remote_stores, requestContext, pathExpressions):
    if requestContext['localOnly']:
        return

    if requestContext is None:
        requestContext = {}

    (startTime, endTime, now) = timebounds(requestContext)
    log.info(
        'thread %s prefetchRemoteData:: Starting fetch_list on all backends' %
        current_thread().name)

    # Go through all of the remote nodes, and launch a fetch for each one.
    # Each fetch will take place in its own thread, since it's naturally parallel work.
    for store in remote_stores:
        reader = RemoteReader(store, {'intervals': []},
                              bulk_query=pathExpressions)
        reader.fetch_list(startTime, endTime, now, requestContext)
Example #53
def build_index(base_path, extension, fd):
  t = time.time()
  total_entries = 0
  contents = os.walk(base_path, followlinks=True)
  extension_len = len(extension)
  for (dirpath, dirnames, filenames) in contents:
    path = relpath(dirpath, base_path).replace('/', '.')
    for metric in filenames:
      if metric.endswith(extension):
        metric = metric[:-extension_len]
      else:
        continue
      line = "{0}.{1}\n".format(path, metric)
      total_entries += 1
      fd.write(line)
  fd.flush()
  log.info("[IndexSearcher] index rebuild of \"%s\" took %.6f seconds (%d entries)" % (base_path, time.time() - t, total_entries))
  return None
Example #54
    def wait_jobs(self, jobs, timeout, context):
        if not jobs:
            return []

        start = time.time()
        results = []
        failed = []
        done = 0
        try:
            for job in self.pool_exec(jobs, timeout):
                elapsed = time.time() - start
                done += 1
                if job.exception:
                    failed.append(job)
                    log.info("Exception during %s after %fs: %s" %
                             (job, elapsed, str(job.exception)))
                else:
                    log.debug("Got a result for %s after %fs" % (job, elapsed))
                    results.append(job.result)
        except PoolTimeoutError:
            message = "Timed out after %fs for %s" % (time.time() - start,
                                                      context)
            log.info(message)
            if done == 0:
                raise Exception(message)

        if len(failed) == done:
            message = "All requests failed for %s (%d)" % (context,
                                                           len(failed))
            for job in failed:
                message += "\n\n%s: %s: %s" % (job, job.exception, '\n'.join(
                    traceback.format_exception(*job.exception_info)))
            raise Exception(message)

        if len(results) < len(jobs) and settings.STORE_FAIL_ON_ERROR:
            message = "%s request(s) failed for %s (%d)" % (
                len(jobs) - len(results), context, len(jobs))
            for job in failed:
                message += "\n\n%s: %s: %s" % (job, job.exception, '\n'.join(
                    traceback.format_exception(*job.exception_info)))
            raise Exception(message)

        return results
Example #55
    def find_nodes(self, query):
        log.info('find_nodes: %s' % (query.pattern))

        # Parse the query
        path_items = filter(None, query.pattern.split('.'))

        records = []

        # Take request addressed only for this finder
        if path_items[0] == '*' or path_items[0] in self.tree:
            # Get the part of tree described by the query
            records = self.get_records(path_items)

        # Build node
        for record in records:
            if record['leaf']:
                yield LeafNode(record['id'], RandomReader(record['id']))
            else:
                yield BranchNode(record['id'])
Example #56
def find_nodes_from_pattern(opentsdb_uri, opentsdb_tree, pattern):
    log.info(">> find_nodes_from_pattern({0}, {1}, {2})".format(opentsdb_uri, opentsdb_tree, pattern))
    query_parts = []
    for part in pattern.split('.'):
        part = part.replace('*', '.*')
        part = re.sub(
            r'{([^{]*)}',
            lambda x: "(%s)" % x.groups()[0].replace(',', '|'),
            part,
        )
        query_parts.append(part)

    shared_reader = SharedReader()
    nodes = list(find_opentsdb_nodes(opentsdb_uri, query_parts, "%04X" % opentsdb_tree, shared_reader=shared_reader))
    if nodes:
        log.info("DD {0}".format(nodes))
    shared_reader.node_count = len(nodes)
    #for node in nodes:
    #    yield node
    return nodes
Example #57
    def _find(self, query):
        jobs = [
            Job(finder.find_nodes, query)
            for finder in self.get_finders(query.local)
        ]

        # Group matching nodes by their path
        nodes_by_path = defaultdict(list)

        done = 0
        errors = 0

        # Start finds
        start = time.time()
        try:
            for job in self.pool_exec(jobs, settings.REMOTE_FIND_TIMEOUT):
                done += 1

                if job.exception:
                    errors += 1
                    log.info(
                        "Find for %s failed after %fs: %s" %
                        (str(query), time.time() - start, str(job.exception)))
                    continue

                log.debug("Got a find result for %s after %fs" %
                          (str(query), time.time() - start))
                for node in job.result or []:
                    nodes_by_path[node.path].append(node)
        except PoolTimeoutError:
            log.info("Timed out in find after %fs" % (time.time() - start))

        if errors == done:
            if errors == 1:
                raise Exception("Find for %s failed: %s" %
                                (str(query), str(job.exception)))
            raise Exception('All finds failed for %s' % (str(query)))

        log.debug("Got all find results for %s in %fs" %
                  (str(query), time.time() - start))
        return self._list_nodes(query, nodes_by_path)
Example #58
        def wrapped_f(*args, **kwargs):
            msg = 'completed in'

            t = time.time()
            if custom_msg:

                def set_msg(msg):
                    wrapped_f.msg = msg

                kwargs['msg_setter'] = set_msg

            res = f(*args, **kwargs)
            msg = getattr(wrapped_f, 'msg', msg)

            log.info('{module}.{name} :: {msg} {sec:.6}s'.format(
                module=f.__module__,
                name=f.__name__,
                msg=msg,
                sec=time.time() - t,
            ))
            return res
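
wrapped_f above is the inner closure of a timing decorator. A hedged sketch of what the enclosing decorator might look like; the custom_msg flag and msg_setter keyword come from the snippet, while the decorator name and the use of print instead of log.info are assumptions made so the sketch runs standalone:

import functools
import time

def logtime(custom_msg=False):
    def decorator(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            msg = 'completed in'
            t = time.time()
            if custom_msg:
                def set_msg(new_msg):
                    wrapped_f.msg = new_msg
                kwargs['msg_setter'] = set_msg
            res = f(*args, **kwargs)
            msg = getattr(wrapped_f, 'msg', msg)
            print('{module}.{name} :: {msg} {sec:.6}s'.format(
                module=f.__module__, name=f.__name__, msg=msg, sec=time.time() - t))
            return res
        return wrapped_f
    return decorator

@logtime()
def slow_call():
    time.sleep(0.2)

slow_call()   # prints something like: __main__.slow_call :: completed in 0.200123s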
Example #59
        def result_queue_generator():
            for node in matching_nodes:
                if node.is_leaf:
                    yield (node.path,
                           node.fetch(startTime, endTime, now, requestContext))

            log.info(
                'render.datalib.fetchData:: result_queue_generator got {count} fetches'
                .format(count=len(fetches)), )
            for key, fetch in fetches.iteritems():
                log.info(
                    'render.datalib.fetchData:: getting results of {host}'.
                    format(host=key), )

                if isinstance(fetch, FetchInProgress):
                    fetch = fetch.waitForResults()

                if fetch is None:
                    log.info('render.datalib.fetchData:: fetch is None')
                    continue

                for result in fetch:
                    yield (
                        result['path'],
                        (
                            (result['start'], result['end'], result['step']),
                            result['values'],
                        ),
                    )