Example #1
  def fetch(self, startTime, endTime):
    if not self.__isLeaf:
      return []

    query_params = [
      ('target', self.metric_path),
      ('pickle', 'true'),
      ('from', str(int(startTime))),
      ('until', str(int(endTime)))
    ]
    query_string = urlencode(query_params)

    connection = HTTPConnectionWithTimeout(self.store.host)
    connection.timeout = config.get('remote').get('remote_store_fetch_timeout')
    connection.request('GET', '/?' + query_string)
    response = connection.getresponse()
    assert response.status == 200, "Failed to retrieve remote data: %d %s" % (response.status, response.reason)
    rawData = response.read()

    # The remote store responds with a pickled list expected to contain exactly one series.
    seriesList = pickle.loads(rawData)
    assert len(seriesList) == 1, "Invalid result: seriesList=%s" % str(seriesList)
    series = seriesList[0]

    timeInfo = (series['start'], series['end'], series['step'])
    return (timeInfo, series['values'])
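
The (timeInfo, values) return pair follows the convention used throughout these examples: timeInfo is (start, end, step) and values carries one datapoint per step. A minimal usage sketch, assuming node is an instance of the class above:

import time

now = int(time.time())
timeInfo, values = node.fetch(now - 3600, now)  # fetch the last hour of data
start, end, step = timeInfo

# One value per step: pair each datapoint with its timestamp.
for timestamp, value in zip(range(start, end, step), values):
    print(timestamp, value)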
Example #2
  def __init__(self):
    log_settings = config.get('logging')

    # Set up log file paths
    log_dir = log_settings.get('log_dir')
    self.infoLogFile = os.path.join(log_dir, "info.log")
    self.exceptionLogFile = os.path.join(log_dir, "exception.log")
    self.cacheLogFile = os.path.join(log_dir, "cache.log")
    self.renderingLogFile = os.path.join(log_dir, "rendering.log")
    self.metricAccessLogFile = os.path.join(log_dir, "metricaccess.log")

    # Set up loggers
    self.infoLogger = logging.getLogger("info")
    self.infoLogger.setLevel(logging.INFO)
    self.exceptionLogger = logging.getLogger("exception")
    self.cacheLogger = logging.getLogger("cache")
    self.renderingLogger = logging.getLogger("rendering")
    self.metricAccessLogger = logging.getLogger("metric_access")

    # Set up formatter & handlers
    self.formatter = logging.Formatter("%(asctime)s :: %(message)s", "%a %b %d %H:%M:%S %Y")

    self.infoHandler = Rotater(self.infoLogFile, when="midnight", backupCount=1)
    self.infoHandler.setFormatter(self.formatter)
    self.infoLogger.addHandler(self.infoHandler)

    self.exceptionHandler = Rotater(self.exceptionLogFile, when="midnight", backupCount=1)
    self.exceptionHandler.setFormatter(self.formatter)
    self.exceptionLogger.addHandler(self.exceptionHandler)

    # The remaining handlers are optional and only attached when enabled in config.
    if log_settings.get('log_cache_performance'):
      self.cacheHandler = Rotater(self.cacheLogFile, when="midnight", backupCount=1)
      self.cacheHandler.setFormatter(self.formatter)
      self.cacheLogger.addHandler(self.cacheHandler)

    if log_settings.get('log_rendering_performance'):
      self.renderingHandler = Rotater(self.renderingLogFile, when="midnight", backupCount=1)
      self.renderingHandler.setFormatter(self.formatter)
      self.renderingLogger.addHandler(self.renderingHandler)

    if log_settings.get('log_metric_access'):
      self.metricAccessHandler = Rotater(self.metricAccessLogFile, when="midnight", backupCount=10)
      self.metricAccessHandler.setFormatter(self.formatter)
      self.metricAccessLogger.addHandler(self.metricAccessHandler)
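
A minimal usage sketch for this logging wrapper; GraphiteLogger is an assumed name for the class that owns the __init__ above, and the module-level instance mirrors the "from graphitelite.log import log" import in Example #4:

# Hypothetical module-level singleton; the class name is an assumption.
log = GraphiteLogger()

log.infoLogger.info("graphitelite started")
try:
    1 / 0
except ZeroDivisionError:
    # logger.exception() records the message plus the active traceback.
    log.exceptionLogger.exception("unhandled error while rendering")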
Example #3
  def send(self):
    # A cache hit means a previous find already produced these results;
    # cache.get() returns None on a miss instead of raising KeyError.
    self.cachedResults = cache.get(self.cacheKey)

    if self.cachedResults:
      return

    self.connection = HTTPConnectionWithTimeout(self.store.host)
    self.connection.timeout = config.get('remote').get('remote_store_find_timeout')

    query_params = [
      ('local', '1'),
      ('format', 'pickle'),
      ('query', self.query),
    ]
    query_string = urlencode(query_params)

    try:
      self.connection.request('GET', '/metrics/find?' + query_string)
    except Exception:
      # Mark the remote store as failed; re-raise unless errors are suppressed.
      self.store.fail()
      if not self.suppressErrors:
        raise
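
Splitting the request into a non-blocking send and a separate result-reading step lets a caller query every remote store in parallel. A sketch of that pattern, assuming the class is named FindRequest and exposes a get_results method that reads and unpickles the response (FindRequest, get_results, and the remote_stores list are all assumptions here):

# Hypothetical driver code; names are assumptions.
requests = [FindRequest(store, "servers.*.cpu") for store in remote_stores]

for request in requests:
    request.send()        # fire all queries before reading any response

results = [request.get_results() for request in requests]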
Example #4
import socket
import struct
import time

from graphitelite.storage import STORE, LOCAL_STORE
from graphitelite.config import config
from graphitelite.hashing import ConsistentHashRing
from graphitelite.log import log

try:
  import cPickle as pickle
except ImportError:
  import pickle

CARBONLINK_HOSTS = config.get('main').get('carbon_link_hosts')
CARBONLINK_TIMEOUT = config.get('main').get('carbon_link_time_out')

class TimeSeries(list):
  def __init__(self, name, start, end, step, values, consolidate='average'):
    self.name = name
    self.start = start
    self.end = end
    self.step = step
    list.__init__(self, values)
    self.consolidationFunc = consolidate
    self.valuesPerPoint = 1
    self.options = {}


  def __iter__(self):
    # Truncated in the original; below is a plausible completion (an
    # assumption), with __consolidatingGenerator as an assumed helper that
    # folds each run of valuesPerPoint raw values into one consolidated point.
    if self.valuesPerPoint > 1:
      return self.__consolidatingGenerator(list.__iter__(self))
    return list.__iter__(self)
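
A minimal usage sketch for TimeSeries; while valuesPerPoint stays at its default of 1, iteration simply yields the raw values:

series = TimeSeries("servers.web1.cpu", start=0, end=50, step=10,
                    values=[1, 2, 3, 4, 5])

print(series.name, series.step)  # servers.web1.cpu 10
print(list(series))              # [1, 2, 3, 4, 5] since valuesPerPoint == 1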
Example #5
      return [RRDDataSource(self, datasource_name) for datasource_name in info['ds']]
    else:
      ds_keys = [key for key in info if key.startswith('ds[')]
      datasources = set(key[3:].split(']')[0] for key in ds_keys)
      return [RRDDataSource(self, ds) for ds in datasources]


class RRDDataSource(Leaf):
  def __init__(self, rrd_file, name):
    self.rrd_file = rrd_file
    self.name = name
    self.fs_path = rrd_file.fs_path
    self.metric_path = rrd_file.metric_path + '.' + name
    self.real_metric = self.metric_path

  def fetch(self, startTime, endTime):
    startString = time.strftime("%H:%M_%Y%m%d", time.localtime(startTime))
    endString = time.strftime("%H:%M_%Y%m%d", time.localtime(endTime))

    (timeInfo, columns, rows) = rrdtool.fetch(self.fs_path, 'AVERAGE', '-s' + startString, '-e' + endString)
    colIndex = list(columns).index(self.name)
    rows.pop()  # Drop the newest row: RRD sometimes returns an unreliable final value
    values = (row[colIndex] for row in rows)

    return (timeInfo, values)


# Exposed Storage API
LOCAL_STORE = Store(config.get('storage').get('data_dirs'))
STORE = Store(config.get('storage').get('data_dirs'), remote_hosts=config.get('remote').get('cluster_servers'))
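
A sketch of how the exposed storage API might be driven end to end, assuming Store provides a find(pattern) method that yields leaf nodes whose fetch matches the signature in Example #1 (both of those are assumptions):

import time

# Hypothetical end-to-end usage; Store.find() is an assumption.
now = time.time()
for node in STORE.find("servers.*.cpu.user"):
    (start, end, step), values = node.fetch(now - 3600, now)
    print(node.metric_path, step, list(values)[:5])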