Example #1
File: data.py Project: pouyakh/minard
def flush_to_redis(dict_, name, time_):
    # `redis` is the module-level client; HASH_INTERVALS, HASH_EXPIRE and the
    # hmincrbyfloat/hmincr/hdivh/setavgmax helpers come from elsewhere in the project
    p = redis.pipeline()

    sum_keys = ['ts:%i:%i:%s:sum' % (interval, time_//interval, name)
                for interval in HASH_INTERVALS]
    len_keys = ['ts:%i:%i:%s:len' % (interval, time_//interval, name)
                for interval in HASH_INTERVALS]

    if len(dict_) > 0:
        hmincrbyfloat(sum_keys, dict_, client=p)
        hmincr(len_keys, dict_.keys(), client=p)

    for interval in HASH_INTERVALS:
        basekey = 'ts:%i:%i:%s' % (interval, time_//interval, name)
        if len(dict_) > 0:
            p.expire(basekey + ':sum', interval)
            p.expire(basekey + ':len', interval)
        prev = time_//interval - 1
        prev_key = 'ts:%i:%i:%s' % (interval, prev, name)
        if redis.incr(prev_key + ':lock') == 1:
            hdivh(prev_key, prev_key + ':sum', prev_key + ':len',
                  range(10240), format='%.2g', client=p)
            keys = setavgmax(prev_key, client=p)
            for k in keys:
                p.expire(k, HASH_EXPIRE*interval)
            p.expire(prev_key, HASH_EXPIRE*interval)
            p.expire(prev_key + ':lock', interval)
    p.execute()
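
hmincrbyfloat, hmincr, hdivh and setavgmax are helpers defined elsewhere in the minard project and are not shown on this page. Judging only from how they are called above, hmincrbyfloat appears to add each value in dict_ to the matching field of every hash in sum_keys, while hmincr bumps a per-field event counter by one. A minimal sketch under those assumptions (not the project's actual implementation):

def hmincrbyfloat(keys, mapping, client):
    # assumed behaviour: HINCRBYFLOAT each field of every key by the mapped value
    for key in keys:
        for field, value in mapping.items():
            client.hincrbyfloat(key, field, value)

def hmincr(keys, fields, client):
    # assumed behaviour: HINCRBY each field of every key by 1
    for key in keys:
        for field in fields:
            client.hincrby(key, field, 1)

hdivh then presumably writes the element-wise ratio of the :sum and :len hashes back into the base key, and setavgmax stores summary values and returns the keys it touched so they can be given an expiry.
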
Example #2
File: dispatch.py Project: icoulter/minard
def flush_cache(cache, cache_set, cache_nhit, cache_pmt, time):
    # for docs on redis pipeline see http://redis.io/topics/pipelining
    p = redis.pipeline()

    for name, hash in cache.items():
        keys = ['ts:%i:%i:%s' % (interval, time//interval, name)
                for interval in INTERVALS]

        if len(hash) > 0:
            hmincrby(keys, hash, client=p)

            for key, interval in zip(keys, INTERVALS):
                p.expire(key, interval*EXPIRE)

    for interval in INTERVALS:
        for name, hash in cache_set.items():
            key = 'ts:%i:%i:%s' % (interval, time//interval, name)
            if len(hash) > 0:
                p.hmset(key, hash)
                p.expire(key, interval*EXPIRE)

    keys = ['ts:%i:%i:occupancy:hits' % (interval, time//interval)
            for interval in HASH_INTERVALS]

    if len(cache_pmt) > 0:
        hmincrby(keys, cache_pmt, client=p)

    for interval in HASH_INTERVALS:
        key = 'ts:%i:%i:occupancy' % (interval, time//interval)
        p.incrby(key + ':count', cache['trig']['TOTAL'])
        # expire after just one interval, because occupancy
        # will be set as hits/count
        p.expire(key + ':hits', interval)
        p.expire(key + ':count', interval)

        prev_key = 'ts:%i:%i:occupancy' % (interval, time//interval - 1)
        if redis.incr(prev_key + ':lock') == 1:
            hdivk(prev_key, prev_key + ':hits', prev_key + ':count',
                  range(10240), format='%.2g', client=p)
            keys = setavgmax(prev_key, client=p)
            for k in keys:
                p.expire(k, HASH_EXPIRE*interval)
            p.expire(prev_key, HASH_EXPIRE*interval)
            p.expire(prev_key + ':lock', interval)

    if len(cache_nhit) > 0:
        # nhit distribution
        if len(cache_nhit) > 100:
            # if there are more than 100 events this second
            # randomly sample the nhit from 100 events
            cache_nhit = random.sample(cache_nhit, 100)
        # see http://flask.pocoo.org/snippets/71/ for this design pattern
        p.lpush('ts:1:%i:nhit' % time, *cache_nhit)
        p.expire('ts:1:%i:nhit' % time, 3600)

    p.execute()
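
Both examples guard the once-per-interval finalization (hdivh or hdivk followed by setavgmax) with redis.incr(prev_key + ':lock') == 1. INCR is atomic, so only the first process to touch the lock key gets back 1 and performs the finalization; any later caller in the same interval sees a larger value and skips it. Note that the INCR goes through the immediate client rather than the pipeline, because its result is needed right away. A stand-alone sketch of the idiom, with hypothetical names:

import redis

r = redis.Redis()

def run_once(lock_key, ttl, work):
    # atomic INCR: only the first caller sees 1 and performs the work
    if r.incr(lock_key) == 1:
        r.expire(lock_key, ttl)  # clean the lock key up after the interval
        work()

In the examples above the lock key embeds the time bucket, so every interval gets a fresh lock and the expire only keeps stale lock keys from accumulating.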