# Example 1
def evaluate_and_store_stat(name, stat, summary):
    """Evaluates whether the given statistic is to be recorded and if
    so, records it."""
    global filters
    if not summary:
        return
    try:
        accept = filters[stat.filter]
    except KeyError:
        l.warning("Filter %s not registered", stat.filter)
        return
    try:
        # `stat` is dict-like with attribute access (web.storage style):
        # `stat.filter` / `stat.time` read values, `"time" in stat` tests keys.
        if not accept(**stat):
            return
        if "time" in stat:
            graphite_stats.put(name, summary[stat.time]["time"] * 100)
        elif "count" in stat:
            # Counts are recognised but not yet stored anywhere.
            # XXX-Anand: where is the code to update counts?
            pass
        else:
            l.warning("No storage item specified for stat %s", name)
    except Exception as k:
        l.warning(
            "Error while storing stats (%s). Complete traceback follows" % k)
        l.warning(traceback.format_exc())
# Example 2
def evaluate_and_store_stat(name, stat):
    """Evaluates whether the given statistic is to be recorded and if
    so, records it.

    Unlike the three-argument variant, this computes the summary itself
    and escalates an unregistered filter (log critical + re-raise)
    instead of silently skipping it.
    """
    global filters
    summary = stats.stats_summary()
    if not summary:
        return
    try:
        f = filters[stat.filter]
    except KeyError:
        # Unknown filter is treated as a configuration error here.
        l.critical("Filter %s not registered", stat.filter)
        raise
    try:
        if f(**stat):
            l.debug("Storing stat %s", name)
            # `in` replaces the Python-2-only dict.has_key().
            if "time" in stat:
                graphite_stats.put(name, summary[stat.time]["time"] * 100)
            elif "count" in stat:
                # print-as-function works on both Python 2 and 3.
                print("Storing count for key %s" % stat.count)
            else:
                l.warning("No storage item specified for stat %s", name)
    except Exception as k:
        l.warning(
            "Error while storing stats (%s). Complete traceback follows" % k)
        l.warning(traceback.format_exc())
# Example 3
def stats_hook():
    """web.py unload hook to add X-OL-Stats header.

    This info can be written to lighttpd access log for collecting

    Also, send stats to graphite using statsd
    """
    summary = stats.stats_summary()
    update_all_stats(summary)
    try:
        if "stats-header" in web.ctx.features:
            web.header("X-OL-Stats", format_stats(summary))
    except Exception as e:
        # Never let a stats-collection failure break the app.
        print(str(e), file=web.debug)

    # This name is misleading. It gets incremented for more than just pages.
    # E.g. *.json requests (even ajax), image requests. Although I can't
    # see any *.js requests? So not sure exactly when we're called here.
    graphite_stats.increment('ol.pageviews')

    # Tally memcache outcomes recorded during this request.
    hits = 0
    misses = 0
    for entry in web.ctx.get("stats", []):
        if entry.name != 'memcache.get':
            continue
        if entry.data['hit']:
            hits += 1
        else:
            misses += 1

    # Sampled at 2.5% to keep statsd traffic low.
    if hits:
        graphite_stats.increment('ol.memcache.hits', hits, rate=0.025)
    if misses:
        graphite_stats.increment('ol.memcache.misses', misses, rate=0.025)

    # Graphite key segments may not contain dots; report times in ms.
    for stat_name, info in summary.items():
        key = 'ol.' + stat_name.replace(".", "_")
        graphite_stats.put(key, info.get("time", 0.0) * 1000)
# Example 4
 def wrapped(*largs, **kargs):
     """Celery task wrapper: records queue-wait/run-time stats and
     captures everything logged during the task; on failure, re-raises
     the error wrapped with full context (args, log, traceback).

     NOTE(review): in this excerpt `ret` is computed but never returned
     and the log handler is only removed on the failure path — the
     original function presumably continues past the `raise`; confirm
     against the full source before relying on the success path.
     """
     # Local import: `io.StringIO` is the Python 3 replacement for the
     # removed `StringIO` module.
     from io import StringIO
     setup_stats()
     global task_context
     celery_extra_info = kargs.pop("celery_extra_info", {})
     enqueue_time = celery_extra_info.get('enqueue_time', None)
     parent_task = celery_extra_info.get('parent_task', None)
     # Attach a temporary handler so the task's log output can be
     # included in the failure report.
     s = StringIO()
     h = logging.StreamHandler(s)
     h.setFormatter(
         logging.Formatter(
             "%(asctime)s [%(name)s] [%(levelname)s] %(message)s"))
     logging.root.addHandler(h)
     try:
         started_at = calendar.timegm(
             datetime.datetime.utcnow().timetuple())
         try:
             # enqueue_time may be None; timing stats are best-effort.
             wait_time = started_at - enqueue_time
             stats.put("ol.celery.task_wait_time", wait_time * 1000)
             stats.put("ol.celery.%s_wait_time" % fn.__name__,
                       wait_time * 1000)
         except Exception:
             pass
         ret = fn(*largs, **kargs)
     except Exception as e:
         log = s.getvalue()
         tb = traceback.format_exc()
         d = dict(largs=json.dumps(largs),
                  kargs=json.dumps(kargs),
                  command=fn.__name__,
                  enqueued_at=enqueue_time,
                  started_at=started_at,
                  log=log,
                  traceback=tb,
                  result=None,
                  context=task_context,
                  parent_task=parent_task)
         logging.root.removeHandler(h)
         try:
             end_time = calendar.timegm(
                 datetime.datetime.utcnow().timetuple())
             run_time = end_time - started_at
             stats.put("ol.celery.task_run_time", run_time * 1000)
             stats.put("ol.celery.%s_run_time" % fn.__name__,
                       run_time * 1000)
         except Exception:
             pass
         raise ExceptionWrapper(e, d)
# Example 5
def evaluate_and_store_stat(name, stat, summary):
    """Evaluates whether the given statistic is to be recorded and if
    so, records it."""
    global filters
    if not summary:
        return
    try:
        f = filters[stat.filter]
    except KeyError:
        l.warning("Filter %s not registered", stat.filter)
        return
    try:
        if f(**stat):
            # `in` replaces the Python-2-only dict.has_key().
            if "time" in stat:
                graphite_stats.put(name, summary[stat.time]["time"] * 100)
            elif "count" in stat:
                # Counts are recognised but not yet stored anywhere.
                # XXX-Anand: where is the code to update counts?
                pass
            else:
                l.warning("No storage item specified for stat %s", name)
    except Exception as k:
        l.warning("Error while storing stats (%s). Complete traceback follows"%k)
        l.warning(traceback.format_exc())
# Example 6
 def wrapped(*largs, **kargs):
     """Celery task wrapper: records queue-wait/run-time stats and
     captures everything logged during the task; on failure, re-raises
     the error wrapped with full context (args, log, traceback).

     NOTE(review): in this excerpt `ret` is computed but never returned
     and the log handler is only removed on the failure path — the
     original function presumably continues past the `raise`; confirm
     against the full source before relying on the success path.
     """
     # Local import: `io.StringIO` is the Python 3 replacement for the
     # removed `StringIO` module.
     from io import StringIO
     setup_stats()
     global task_context
     celery_extra_info = kargs.pop("celery_extra_info", {})
     enqueue_time = celery_extra_info.get('enqueue_time', None)
     parent_task = celery_extra_info.get('parent_task', None)
     # Attach a temporary handler so the task's log output can be
     # included in the failure report.
     s = StringIO()
     h = logging.StreamHandler(s)
     h.setFormatter(
         logging.Formatter(
             "%(asctime)s [%(name)s] [%(levelname)s] %(message)s"))
     logging.root.addHandler(h)
     try:
         started_at = calendar.timegm(
             datetime.datetime.utcnow().timetuple())
         try:
             # enqueue_time may be None; timing stats are best-effort.
             wait_time = started_at - enqueue_time
             stats.put("ol.celery.task_wait_time", wait_time * 1000)
             stats.put("ol.celery.%s_wait_time" % fn.__name__,
                       wait_time * 1000)
         except Exception:
             pass
         ret = fn(*largs, **kargs)
     except Exception as e:
         log = s.getvalue()
         tb = traceback.format_exc()
         d = dict(largs=json.dumps(largs),
                  kargs=json.dumps(kargs),
                  command=fn.__name__,
                  enqueued_at=enqueue_time,
                  started_at=started_at,
                  log=log,
                  traceback=tb,
                  result=None,
                  context=task_context,
                  parent_task=parent_task)
         logging.root.removeHandler(h)
         try:
             end_time = calendar.timegm(
                 datetime.datetime.utcnow().timetuple())
             run_time = end_time - started_at
             stats.put("ol.celery.task_run_time", run_time * 1000)
             stats.put("ol.celery.%s_run_time" % fn.__name__,
                       run_time * 1000)
         except Exception:
             pass
         raise ExceptionWrapper(e, d)