Ejemplo n.º 1
0
    def post(self):
        """Store a message under a fresh global id and index it by username."""
        args = post_parser.parse_args()
        # Reject purely numeric usernames so they cannot collide with
        # integer message-id keys.
        if args.username.isdigit():
            return 400

        # Connection drawn from the shared pool.
        conn = StrictRedis(connection_pool=pool)

        # Reserve the current global id for this message, then advance it.
        message_id = conn.get('global_message_id')
        conn.set(message_id, args.message)
        conn.incr('global_message_id')

        # Let the message expire after the requested timeout.
        conn.expire(message_id, int(args.timeout))

        # Track this user's message ids in a per-user list.
        conn.lpush(args.username, message_id)

        return {"id": int(message_id)}, 201  #status.HTTP_201_CREATED
Ejemplo n.º 2
0
class WatchmanBlacklist(object):
    """Redis-backed blacklist of recently-seen file paths.

    Connection settings come from an ElementTree config; each missing
    setting falls back to a sensible default.
    """
    def __init__(self, config_xml=None):
        # config_xml: parsed config tree; defaults to the global CONFIG_FILE.
        if config_xml is None:
            config_xml = ET.parse(CONFIG_FILE)

        # find() returns None for a missing element, so .text raises
        # AttributeError. The original caught StandardError, which is
        # Python 2 only (a NameError on Python 3).
        try:
            password = config_xml.find('/global/password').text
        except AttributeError:
            password = ""

        try:
            redishost = config_xml.find('/global/redis').text
        except AttributeError:
            redishost = "localhost"

        try:
            self.expire = int(config_xml.find('/global/expire').text)
        except (AttributeError, ValueError):
            logging.warning(
                "No <expire> setting in the <global> section of config. Defaulting to 360s."
            )
            self.expire = 360

        # BUG FIX: on success the original stored the int in self._dbnum but
        # passed the raw string to StrictRedis(db=...); use the int in both
        # places, and set self._dbnum on the fallback path too.
        try:
            self._dbnum = int(config_xml.find('/global/blacklistdb').text)
        except (AttributeError, ValueError):
            logging.warning(
                "No blacklistdb setting in the <global> section of config. Defaulting to Redis database 2."
            )
            self._dbnum = 2

        self._conn = StrictRedis(host=redishost, password=password, db=self._dbnum)

    def get(self, filepath, update=True, value="(locked)"):
        """
        Check if the given path is in the blacklist, and optionally update the lock whether or not it exists
        :param filepath: file path to check
        :param update: if True, then add the filepath to the blacklist and reset the expiry counter - even if it already exists.
        :param value: value to store against the file path (typically the mtime)
        :return: value of the key or None
        """
        # NOTE(review): `update` is currently unused (the refresh branch was
        # commented out upstream) — retained for interface compatibility.
        rtn = self._conn.get(filepath)

        if not self._conn.exists(filepath):
            logging.debug(
                "{0} does not exist in the blacklist. Attempting to add it.".
                format(filepath))
            # setnx + expire: only the first writer sets the value.
            self._conn.setnx(filepath, value)
            self._conn.expire(filepath, self.expire)

        return rtn
Ejemplo n.º 3
0
    def post(self, request):
        """Cache an active class code in Redis for `duration` hours."""
        code = request.POST.get('course_code')
        hours = int(request.POST.get('duration'))

        client = StrictRedis(host='localhost', port=6379, db=0)
        key = '{}{}'.format(self.redis_key, code)
        client.set(key, code)
        client.expire(key, hours * 3600)  # hours -> seconds

        return Response({'success': 'active_class stored'})









#def get_client_ip(request):
#    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
#    if x_forwarded_for:
#        ip = x_forwarded_for.split(',')[-1].strip()
#    else:
#        ip = request.META.get('REMOTE_ADDR')
#    return ip

# Or use the django-ipware package (the package name is django-ipware, not django-ipaware).
Ejemplo n.º 4
0
def string():
    """Demo of StrictRedis generic key commands and string operations."""
    client = StrictRedis(host='localhost', port=6379, password='', db=0)
    # result=client.set('name','zengshuai')
    # print(result)
    # client.set('age',25)
    # client.set('gender','male')
    # client.set('score',100)

    # Generic key commands.
    print(client.exists('name'))
    print(client.type('name'))
    print(client.keys('a*'))  # keys starting with 'a'
    print(client.randomkey())  # one key chosen at random
    # client.rename('score','English')  # rename a key
    # print(client.get('English'))
    print(client.dbsize())  # number of keys in the current db
    client.expire('English', 2)  # set a key's time-to-live
    print(client.ttl('English'))  # read the remaining time-to-live

    client.move('age', 1)  # move a key to another database
    # client.flushdb()  # clear the current database
    # client.flushall() # clear every database

    # String operations.
    client.getset('name', 'kate')
    print(client.get('name'))
Ejemplo n.º 5
0
class ExpiringDatasetCache(object):
    """
    Cache with expiring values to keep track of recently created replicas.
    """
    def __init__(self,
                 redis_host,
                 redis_port,
                 timeout=1,
                 prefix='expiring_did_cache'):
        self._redis = StrictRedis(host=redis_host, port=redis_port)
        # Short random suffix keeps separate instances from sharing keys.
        self._prefix = prefix + '_' + str(uuid4()).split('-')[0]
        self._timeout = timeout

    def _key(self, dataset):
        # Namespaced Redis key for a dataset.
        return ':'.join((self._prefix, dataset))

    def add_dataset(self, dataset):
        """ Adds a datasets to cache with lifetime """
        entry = self._key(dataset)
        self._redis.set(entry, 1)
        self._redis.expire(entry, self._timeout)

    def check_dataset(self, dataset):
        """ Checks if dataset is still in cache """
        return self._redis.get(self._key(dataset)) is not None
Ejemplo n.º 6
0
def feed_db(container_id, stats):
    """ Store data to Redis.
        args:
         - container_id : (str) container's hash 12 first characters
         - stats : a dictionary of stats
    """
    if DEBUG:
        print('feed db with container {} stats'.format(container_id))

    # Rebuild the timestamp from stats['read']: drop the sub-microsecond
    # digits and the colon inside the timezone offset so strptime can parse
    # it, then re-render in the REDIS_KEY_TIMESTAMP layout.
    raw = stats['read'][:-9] + stats['read'][-6:].replace(':', '')
    parsed = datetime.strptime(raw, '%Y-%m-%dT%H:%M:%S.%f%z')
    timestamp = parsed.strftime(REDIS_KEY_TIMESTAMP)

    conn = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
    for resource, value in stats.items():
        if resource == 'read':
            continue  # 'read' is the timestamp, not a resource
        key = REDIS_KEY.format(timestamp=timestamp,
                               container_id=container_id,
                               resource=resource)
        conn.set(key, dumps(value))
        conn.expire(key, REDIS_EXPIRE_TIME)

        if DEBUG:
            print("Stored {} => {}".format(key, value))
Ejemplo n.º 7
0
class FilterMiddleware(object):
    """Scrapy downloader middleware that de-duplicates listing URLs via a
    Redis set. Not all methods need to be defined; scrapy treats a missing
    method as a no-op.
    """

    def __init__(self, redis_uri):
        self.redis_uri = redis_uri
        self.redis_pool = ConnectionPool.from_url(self.redis_uri)
        self.redis_client = StrictRedis(connection_pool=self.redis_pool)

    @classmethod
    def from_crawler(cls, crawler):
        # Build the middleware from crawler settings.
        return cls(redis_uri=crawler.settings.get('REDIS_URL'), )

    def process_request(self, request, spider):
        # First sighting today: record the URL and let the request through.
        # Anything already in today's set is dropped via IgnoreRequest.
        key = '%s_ershoufang_sell:link:%s' % (
            request.url[8:10], date.today().strftime('%Y-%m-%d'))
        if re.search(r'/ershoufang/\d{12}.html', request.url):
            if self.redis_client.sismember(key, request.url):
                raise IgnoreRequest()
            self.redis_client.sadd(key, request.url)
            self.redis_client.expire(key, 82800)  # just under 24 hours
        return None

    def process_response(self, request, response, spider):
        # NOTE(review): this key has no date suffix, unlike the one written
        # in process_request — confirm the srem below ever matches anything.
        key = '%s_ershoufang_sell:link' % response.url[8:10]
        if re.search(r'/ershoufang/\d{12}.html', response.url):
            if response.status != 200:
                # Forget failed links so they can be retried.
                self.redis_client.srem(key, response.url)
                raise IgnoreRequest()
        return response

    def process_exception(self, request, exception, spider):
        # Returning None lets other middleware handle the exception.
        return None
Ejemplo n.º 8
0
def main():
    """Snapshot toaster stats into a Redis hash keyed by the current time."""
    r = StrictRedis()

    # BUG FIX: the original format string ended in '%H.%m', repeating the
    # month where the minute was intended; '%M' is minutes.
    key = datetime.now().strftime('%Y.%m.%d.%H.%M')

    r.hmset('toaster.%s' % key, download_data())
    # Expire slightly over 24 hours, just in case
    r.expire('toaster.%s' % key, 60 * 60 * 25)
Ejemplo n.º 9
0
    def post(self, request):
        """Cache an active class code in Redis, expiring after `duration` hours."""
        code = request.POST.get('course_code')
        hours = int(request.POST.get('duration'))

        conn = StrictRedis(host='localhost', port=6379, db=0)
        redis_name = '{}{}'.format(self.redis_key, code)
        conn.set(redis_name, code)
        conn.expire(redis_name, hours * 3600)

        return Response({'success': 'active_class stored'})
Ejemplo n.º 10
0
class QueueManager(object):
    """Redis-backed request-queue helpers: length/emptiness checks, request
    fingerprinting for de-duplication, and a simple expiring lock.

    Note: written against Python 2 (urlparse module, dict.iteritems,
    json encoding kwarg).
    """

    def __init__(self, host, port, auth, db=0, lock_key="lock_key", dup_key="dup_key"):
        self.redis_client = StrictRedis(host=host, port=port, password=auth, db=db)
        self.lock_key = lock_key
        self.dup_key = dup_key

    def queue_empty(self, queue_name):
        """True when the queue's backing list has no entries."""
        # BUG FIX: the original called queue_empty recursively (infinite
        # recursion); it must consult the queue length instead.
        return self.queue_len(queue_name) <= 0

    def queue_len(self, queue_name):
        """Length of the Redis list backing the queue."""
        return self.redis_client.llen(queue_name)

    @staticmethod
    def _encode_request(request):
        # Serialize a request dict for storage.
        return json.dumps(request, encoding="utf-8")

    @staticmethod
    def _decode_request(encoded_request):
        return json.loads(encoded_request, encoding="utf-8")

    @staticmethod
    def get_full_url(request):
        """Rebuild request['url'] with its query string merged with
        request['params'] and sorted, so equivalent URLs compare equal."""
        request_url = request.get("url")
        parse_result = urlparse.urlparse(request_url)
        dict_query = dict(urlparse.parse_qsl(parse_result.query))
        params = request.get("params")
        if params:
            dict_query.update(params)
        if not dict_query:
            return request_url
        # Sort parameters into a canonical order before re-encoding.
        dict_query = dict(sorted(dict_query.iteritems(), key=lambda d: d[0]))
        str_query = urlencode(dict_query)
        return urlparse.urlunparse((parse_result.scheme, parse_result.netloc, parse_result.path, parse_result.params,
                                    str_query, parse_result.fragment))

    def finger_print(self, request):
        """SHA1 fingerprint of the request method plus its canonical URL."""
        fp = hashlib.sha1()
        fp.update(request.get("method"))
        fp.update(self.get_full_url(request))
        return fp.hexdigest()

    def request_seen(self, request):
        """True when this request's fingerprint was already in the dup set
        (sadd returns 0 for an existing member)."""
        fp = self.finger_print(request)
        added = self.redis_client.sadd(self.dup_key, fp)
        return added == 0

    def unique_lock(self, lock_key, lock_name="lock", expire=60):
        """Best-effort unique lock via setnx + expire.

        NOTE(review): returns True both when the lock was taken and when it
        was already held — confirm callers rely on this. setnx+expire is not
        atomic, so a crash between the two calls leaves a lock with no TTL.
        """
        try:
            value = self.redis_client.setnx(lock_key, lock_name)
            if value:
                self.redis_client.expire(lock_key, expire)
                return True, "set unique lock successfully"
            else:
                return True, "unique lock still on"
        except Exception as e:
            return False, "set unique lock failed: %s" % str(e)
Ejemplo n.º 11
0
def get_group_articles(conn: StrictRedis,
                       group: str,
                       page: int,
                       order="score:"):
    """Page through one group's articles ordered by the given ranking zset."""
    cache_key = f"{order}{group}"
    if not conn.exists(cache_key):
        # Intersect the group set with the ranking zset (keeping the ranking
        # score via max) and cache the result for a minute.
        conn.zinterstore(cache_key, [f"group:{group}", order], aggregate="max")
        conn.expire(cache_key, 60)
    return get_articles(conn, page, cache_key)
Ejemplo n.º 12
0
class WatchmanBlacklist(object):
    """Redis-backed blacklist of recently-seen file paths, configured from
    an ElementTree config with per-setting defaults."""

    def __init__(self, config_xml=None):
        # config_xml: parsed config tree; defaults to the global CONFIG_FILE.
        if config_xml is None:
            config_xml = ET.parse(CONFIG_FILE)

        # find() returns None for a missing element, so .text raises
        # AttributeError. The original caught StandardError, which is
        # Python 2 only (a NameError on Python 3).
        try:
            password = config_xml.find('/global/password').text
        except AttributeError:
            password = ""

        try:
            redishost = config_xml.find('/global/redis').text
        except AttributeError:
            redishost = "localhost"

        try:
            self.expire = int(config_xml.find('/global/expire').text)
        except (AttributeError, ValueError):
            logging.warning("No <expire> setting in the <global> section of config. Defaulting to 360s.")
            self.expire = 360

        # BUG FIX: on success the original stored the int in self._dbnum but
        # passed the raw string to StrictRedis(db=...); use the int in both
        # places, and set self._dbnum on the fallback path too.
        try:
            self._dbnum = int(config_xml.find('/global/blacklistdb').text)
        except (AttributeError, ValueError):
            logging.warning("No blacklistdb setting in the <global> section of config. Defaulting to Redis database 2.")
            self._dbnum = 2

        self._conn = StrictRedis(host=redishost, password=password, db=self._dbnum)

    def get(self, filepath, update=True, value="(locked)"):
        """
        Check if the given path is in the blacklist, and optionally update the lock whether or not it exists
        :param filepath: file path to check
        :param update: if True, then add the filepath to the blacklist and reset the expiry counter - even if it already exists.
        :param value: value to store against the file path (typically the mtime)
        :return: value of the key or None
        """
        # NOTE(review): `update` is currently unused (the refresh branch was
        # commented out upstream) — retained for interface compatibility.
        rtn = self._conn.get(filepath)

        if not self._conn.exists(filepath):
            logging.debug("{0} does not exist in the blacklist. Attempting to add it.".format(filepath))
            # setnx + expire: only the first writer sets the value.
            self._conn.setnx(filepath, value)
            self._conn.expire(filepath, self.expire)

        return rtn
Ejemplo n.º 13
0
def acquire_lock_with_timeout(conn: redis.StrictRedis, lockname, acquire_timeout=10, lock_timeout=10):
    """Acquire a distributed lock, waiting up to acquire_timeout seconds.

    Returns the owner token (a UUID string) on success, False on timeout.
    The lock auto-expires after lock_timeout seconds.
    """
    identifier = str(uuid.uuid4())
    lockname = f'lock:{lockname}'
    lock_timeout = int(math.ceil(lock_timeout))
    end = time.time() + acquire_timeout
    while time.time() < end:
        # SET NX EX acquires the lock and its expiry atomically.
        if conn.set(lockname, identifier, ex=lock_timeout, nx=True):
            return identifier
        # BUG FIX: TTL returns -1 (no expiry) or -2 (missing key), both
        # truthy, so the original `not conn.ttl(...)` almost never fired.
        # Re-arm a lock that somehow lost its expiry.
        elif conn.ttl(lockname) < 0:
            conn.expire(lockname, lock_timeout)
        time.sleep(.001)  # avoid a busy spin while waiting
    return False
Ejemplo n.º 14
0
def update_status(msg, **redis_kwargs):
    """ Updated Redis with a message, prefix it with the current timestamp.
    
        Keyword args are passed directly to redis.StrictRedis().
    """
    client = StrictRedis(**redis_kwargs)
    key = 'pid-%d-statuses' % getpid()
    stamped = '%.6f %s' % (time(), msg)

    client.lpush(key, stamped)
    client.expire(key, 60 * 60)   # drop the whole list after an hour
    client.ltrim(key, 0, _keep)   # cap the list length at _keep + 1
Ejemplo n.º 15
0
def update_status(msg, **redis_kwargs):
    """ Updated Redis with a message, prefix it with the current timestamp.
    
        Keyword args are passed directly to redis.StrictRedis().
    """
    conn = StrictRedis(**redis_kwargs)
    status_key = 'pid-%d-statuses' % getpid()
    entry = '%.6f %s' % (time(), msg)

    conn.lpush(status_key, entry)
    conn.expire(status_key, 60 * 60)   # whole list expires after an hour
    conn.ltrim(status_key, 0, _keep)   # keep only the newest _keep + 1 entries
def expire_uniformly(expire_in, match="*"):
    """Give every key matching `match` a random TTL in [1, expire_in] seconds.

    Randomizing the TTL spreads expiry over time rather than causing one
    mass purge. Always returns True.
    """
    client = StrictRedis(host=getenv("RedisURL"),
                         port=int(getenv("RedisPort")),
                         db=0,
                         password=getenv("RedisKey"),
                         ssl=True,
                         ssl_ca_certs=certifi.where())

    for candidate in client.scan_iter(match):
        client.expire(candidate, randint(1, expire_in))

    client.close()

    return True
Ejemplo n.º 17
0
class RedisDB(object):
    """
    Base Redis Handler Cache class which interacts with Redis server
    """
    def __init__(self, hostname, port, db, ttl, decode_responses=True):
        self.hostname = hostname
        self.port = port
        self.db = db
        self.decode_responses = decode_responses
        self.ttl = ttl  # expiry applied to every alert hash, in seconds
        self.client = StrictRedis(host=self.hostname,
                                  port=self.port,
                                  db=self.db,
                                  decode_responses=self.decode_responses)

    def set_alert_data(self,
                       alert_id,
                       counter=None,
                       initiated_time=None,
                       alert_level=None):
        """Store an alert's hash in Redis and arm the configured TTL.

        Falsy counter/alert_level default to 1; a falsy initiated_time
        defaults to the current wall-clock time.
        """
        initiated_time = initiated_time or datetime.strftime(
            datetime.now(), '%Y-%m-%d %H:%M:%S')
        counter = counter or 1
        alert_level = alert_level or 1

        self.client.hmset(
            alert_id,
            dict(counter=counter,
                 initiated_time=initiated_time,
                 alert_level=alert_level))
        self.client.expire(name=alert_id, time=self.ttl)

    def get_alert_data(self, alert_id):
        """Return every field of the alert hash as a dict."""
        return self.client.hgetall(alert_id)

    def delete_alert(self, alert_id):
        """Remove the alert hash from the cache."""
        self.client.delete(alert_id)
Ejemplo n.º 18
0
def view_user_json(username):
    """The twitter user JSON view"""
    username = username.lower()
    redis_key = "%s.user.%s" % (REDIS_PREFIX, username)
    client = StrictRedis()
    cached = client.get(redis_key)
    if not cached:
        # Nothing cached yet: store a queued placeholder, kick off the
        # background fetch, and give it a moment to start.
        placeholder = dict(status='queued', header='Queued',
                           message="Your request will be processed shortly",
                           code=200)
        client.set(redis_key, dumps(placeholder))
        client.expire(redis_key, CACHE_HOURS*60*60)
        load_tweets.delay(username)
        sleep(.5)
    return jsonify(loads(client.get(redis_key)))
Ejemplo n.º 19
0
class RedisManager(NoSqlManager):
    """Beaker NoSqlManager backed by Redis hashes (one hash per key)."""

    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
        # Pop expiretime before the base class consumes **params.
        self.expiretime = params.pop('expiretime', None)
        NoSqlManager.__init__(self, namespace, url=url, data_dir=data_dir, lock_dir=lock_dir, **params)

    def open_connection(self, host, port, **params):
        self.db_conn = StrictRedis(host=host, port=int(port), **params)

    def __getitem__(self, key):
        return pickle.loads(self.db_conn.hget(self._format_key(key), 'data'))

    def __contains__(self, key):
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value, expiretime=None):
        """Store a pickled value with accessed/created timestamps and an
        optional expiry."""
        formatted = self._format_key(key)

        #
        # beaker.container.Value.set_value calls NamespaceManager.set_value
        # however it (until version 1.6.4) never sets expiretime param.
        #
        # Checking "type(value) is tuple" is a compromise
        # because Manager class can be instantiated outside container.py (See: session.py)
        #
        if (expiretime is None) and (type(value) is tuple):
            expiretime = value[1]

        self.db_conn.hset(formatted, 'data', pickle.dumps(value))
        self.db_conn.hset(formatted, 'accessed', datetime.now())
        self.db_conn.hsetnx(formatted, 'created', datetime.now())

        ttl = expiretime or self.expiretime
        if ttl:
            self.db_conn.expire(formatted, ttl)

    def __delitem__(self, key):
        self.db_conn.delete(self._format_key(key))

    def _format_key(self, key):
        # Spaces in keys are replaced by a middle dot (UTF-8 bytes \302\267).
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))

    def do_remove(self):
        self.db_conn.flushdb()

    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
Ejemplo n.º 20
0
def run_demo_job():
    """Execute one demo job: allocate an id, record its status hash with a
    TTL, and remember it as the most recent job."""
    client = StrictRedis(host=os.environ["REDIS_HOST"],
                         port=os.environ["REDIS_PORT"],
                         db=os.environ["REDIS_DB"])

    job_id = str(client.incr(NEXT_DEMO_JOB_ID))
    status_key = f'demo-job-sts:{job_id}'

    client.hmset(status_key, {
        "demo-job-id": job_id,
        "demo-job-status": "Executed"
    })
    client.expire(status_key, CACHE_TTL_SECONDS)

    client.set(LAST_DEMO_JOB_ID, job_id)

    return f'demo job executed with id: {job_id}'
Ejemplo n.º 21
0
class RateLimiter(cherrypy.Tool):
    """CherryPy tool implementing a fixed-window, per-IP, per-path rate
    limit backed by Redis."""

    def __init__(self, limit=100, window=60):
        cherrypy.Tool.__init__(self, 'before_handler',
                               self.process_request, priority=10)
        cherrypy.log("Creating rate limiter with limit={} and window={}".format(limit, window))
        self.limit = limit    # max requests per window
        self.window = window  # window length in seconds
        self.redis = StrictRedis(host='redis', port=6379)

    def process_request(self):
        """Count this request; raise 429 once the limit is exhausted."""
        print(cherrypy.request)
        print(cherrypy.request.remote)
        requester = cherrypy.request.remote.ip
        print("remote:", requester)

        # un-comment if you want to ignore calls from localhost
        # if requester == '127.0.0.1':
        #     return

        key = "{0}: {1}".format(requester, cherrypy.request.path_info)
        print('Key: {0}'.format(key))

        # A missing key (get -> None) or a non-numeric value both mean a
        # fresh counter.
        try:
            remaining = self.limit - int(self.redis.get(key))
        except (ValueError, TypeError):
            remaining = self.limit
            self.redis.set(key, 0)

        expires_in = self.redis.ttl(key)

        # TTL of -1 means the key has no expiry yet: start the window now.
        if expires_in == -1:
            self.redis.expire(key, self.window)
            expires_in = self.window

        # BUG FIX: the original header names ended in ': ', producing
        # malformed headers such as 'X-RateLimit-Limit: : 100'.
        cherrypy.request.headers.update({
            'X-RateLimit-Remaining': str(remaining - 1),
            'X-RateLimit-Limit': str(self.limit),
            'X-RateLimit-Reset': str(time.time() + expires_in)
        })

        if remaining > 0:
            self.redis.incr(key, 1)
        else:
            raise cherrypy.HTTPError(429, 'Blocked: Too many requests!')
Ejemplo n.º 22
0
class redis_session:
    """Session store backed by Redis keys prefixed with `prefix`."""

    prefix = 'was:session_key:'  # prepended to every Redis session key

    server_ip = 'localhost'      # Redis host

    port = 6379

    timeout = 3600               # session lifetime in seconds

    def __init__(self):
        self.db = StrictRedis(self.server_ip, self.port)

    def get_session(self, session_key):
        """Return the stored user id for the session (None/empty when
        absent), refreshing its expiry when it exists."""
        inst = self.db.get(self.prefix+session_key)
        if inst:
            # Sliding expiration: each lookup extends the session.
            self.db.expire(self.prefix+session_key, self.timeout)
        return inst

    def save_session(self, user_name):
        """Create a new session for user_name and return its key."""
        session_key = str(uuid4())
        # BUG FIX: StrictRedis.setex takes (name, time, value); the original
        # passed (name, value, time), which is the legacy Redis-class order
        # and fails against StrictRedis.
        self.db.setex(self.prefix+session_key, self.timeout, user_name)
        return session_key
Ejemplo n.º 23
0
def index(lang, degree, loc):
    """Render the weather page for (lang, degree, loc), cached for 10 min."""
    client = StrictRedis(host='localhost', port=6379, db=0)
    # Cache key: md5 of the request triple, prefixed so it's a valid name.
    cache_key = "h" + md5("{0}/{1}/{2}".format(lang, degree, loc)).hexdigest()
    if client.exists(cache_key):
        return client.get(cache_key)

    weather, weather_dict = weather_data(lang, degree, loc)
    temp = temp_now = float(weather_dict["weather_now"]["temp"])
    if degree == "F":
        # Convert Fahrenheit to Celsius for the surface lookup.
        temp_now = (temp_now - 32)*5.0/9

    result = dumps({
        "weather": weather,
        "links": links(lang),
        "surfaces": surfaces(lang, temp_now),
        "feedback": {"current_temp": str(int(temp)),
                     "sensation_desc": vote_sensation_list(lang),
                     "list_surfaces": all_surface(lang)}})

    client.set(cache_key, result)
    client.expire(cache_key, 600)  # ten-minute cache
    return result
Ejemplo n.º 24
0
class Helper(object):
    """
    Helper functions for the app. UPDATE LATER.
    """
    def __init__(self):
        self.r = StrictRedis(settings.REDIS_HOST, settings.REDIS_PORT,
                             settings.REDIS_DB)

    def get_notifications(self, user_id):
        """Return the user's live notifications, pruning ids whose payload
        has expired."""
        # LATER: ADD GROUPS AND LABS
        results = []
        for notif_id in self.r.lrange(
                'users:{}:notifications'.format(user_id), 0, -1):
            payload = self.r.get(notif_id)
            if payload:
                results.append(pickle.loads(payload))
            else:
                # Payload expired: drop the dangling id from the list.
                self.r.lrem('users:{0}:notifications'.format(user_id), 0, notif_id)
        return results

    def new_notification(self,
                         target,
                         notification,
                         app=None,
                         url=None,
                         severity=None,
                         expiration=30):
        """Create and store a Notification for `target`; returns the item.

        `expiration` is in days.
        """
        # LATER: ADD GROUPS AND LABS
        notification_id = str(uuid.uuid4())
        item = Notification()
        item.created = timezone.datetime.now()
        item.payload = notification
        item.app = app
        item.url = url
        # Only a known severity level is kept; anything else becomes None.
        item.severity = severity if severity in ['success', 'warning', 'info', 'danger'] else None
        self.r.set(notification_id, pickle.dumps(item))
        self.r.expire(notification_id, expiration * 24 * 3600)  # days -> seconds
        self.r.lpush('users:{0}:notifications'.format(target), notification_id)
        return item
class RedisUse(object):
    """Thin wrapper around StrictRedis for token and openid hash storage."""

    def __init__(self):
        self.sr = StrictRedis(host='localhost',
                              port=6379,
                              decode_responses=True)

    def insertTokenOpenid(self, token, openid):
        """Map token -> openid with a 2-hour expiry; returns the set result."""
        stored = self.sr.set(token, openid)
        self.sr.expire(token, 7200)
        return stored

    def getTokenOpenid(self, token):
        """Return the openid stored for token (or None)."""
        return self.sr.get(token)

    def insertOpenidData(self, openid, data):
        """Store the user's data hash under openid with a one-week expiry."""
        stored = self.sr.hmset(openid, data)
        self.sr.expire(openid, 604800)
        return stored

    def selectOpenidNature(self, openid):
        """List the field names of the openid hash."""
        return self.sr.hkeys(openid)

    def getOpenidNature(self, openid, nature):
        """Fetch one field of the openid hash."""
        return self.sr.hget(openid, nature)

    def getOpenidNatureAll(self, openid):
        """Fetch the whole openid hash as a dict."""
        return self.sr.hgetall(openid)

    def deleteOpenidNature(self, openid, keys):
        """Delete field(s) from the openid hash."""
        return self.sr.hdel(openid, keys)
Ejemplo n.º 26
0
class RedisClient(object):
    """App-namespaced Redis client that pickles and zlib-compresses values."""

    def __init__(self, app, params):
        # params: connection settings dict; 'host', 'port' and 'db' required.
        self._redis_client = StrictRedis(
            host=params['host'],
            port=params['port'],
            db=params['db'],
            password=params.get('password', None),
            max_connections=params.get('max_connections', 100),
            socket_timeout=params.get('socket_timeout', 1)
        )
        self._appname = app

    def _gen_cachekey(self, key):
        # Prefix every key with the app name to avoid cross-app collisions.
        return '%s:%s' % (self._appname, key)

    def _compress(self, value):
        return zlib.compress(pickle.dumps(value), zlib.Z_BEST_COMPRESSION)

    def _decompress(self, value):
        # NOTE: pickle.loads on data read back from Redis — only safe while
        # the Redis instance itself is trusted.
        return pickle.loads(zlib.decompress(value))

    def set(self, key, value, ex=None, px=None, nx=False, xx=False):
        """Store any picklable `value`; ex/px/nx/xx pass through to redis-py."""
        nkey = self._gen_cachekey(key)
        nvalue = self._compress(value)
        return self._redis_client.set(nkey, nvalue, ex=ex, px=px, nx=nx, xx=xx)

    def get(self, key, default=None):
        """Return the decompressed value, or `default` when key is absent."""
        nkey = self._gen_cachekey(key)
        value = self._redis_client.get(nkey)
        # Idiom fix: identity comparison with None instead of `== None`.
        if value is None:
            return default
        return self._decompress(value)

    def expire(self, key, time):
        nkey = self._gen_cachekey(key)
        return self._redis_client.expire(nkey, time)

    def delete(self, key):
        nkey = self._gen_cachekey(key)
        return self._redis_client.delete(nkey)

    def hset(self, key, field, value):
        nkey = self._gen_cachekey(key)
        return self._redis_client.hset(nkey, field, value)

    def hmget(self, key, field):
        nkey = self._gen_cachekey(key)
        return self._redis_client.hmget(nkey, field)

    def incrby(self, key, value):
        nkey = self._gen_cachekey(key)
        return self._redis_client.incrby(nkey, value)
Ejemplo n.º 27
0
def login_fun():
    """Log a user in: verify credentials, mint a signed token, cache it."""
    data = request.json

    response = make_response()
    response.content_type = 'application/json'
    status = login_sql(data['username'], data['password'])
    if status == 1:
        # Fetch the SECRET_KEY used to sign tokens.
        redis = StrictRedis(host='localhost',
                            port=6379,
                            db=0,
                            password='******')
        secret_key = redis.get('SECRET_KEY')
        expiration = 3600
        s = Serializer(secret_key, expires_in=expiration)  # token lifetime in seconds
        token = str(s.dumps({'username': data['username']}), 'utf-8')
        # Cache the token for one hour so it can be validated server-side.
        redis.set(data['username'], token)
        redis.expire(data['username'], 3600)
        post_data = {
            'info': '登录成功',
            'token': token,
            'username': data['username']
        }
        response = make_response(json.dumps(post_data))
        response.content_type = 'application/json'
        response.status_code = 200
        return response
    elif status == 0:
        # Wrong password.
        post_data = {'info': '密码错误'}
        response = make_response(json.dumps(post_data))
        response.content_type = 'application/json'
        response.status_code = 401
        return response
    else:
        # Unknown user.
        post_data = {"info": "此用户不存在"}
        response = make_response(json.dumps(post_data))
        response.content_type = 'application/json'
        response.status_code = 403
    return response
Ejemplo n.º 28
0
class RedisCache(AbstractCache, Loggable):
  """A cache backed by Redis

  Use as a dictionary,

  >>> cache = RedisCache(host="localhost", port=6379)
  >>> cache['hello'] = 'world'
  >>> cache['hello']            # 'world'
  >>> 'hello' in cache          # True
  >>> 'goodbye' in cache        # False

  or as a function memoizer,

  >>> @cache.memoize
  >>> def hello(name):
  ...   return "Hello, " + name

  Parameters
  ----------
  same as `redis.StrictRedis`
  """
  def __init__(self, *args, **kwargs):
    # Default cache-wide timeout is one day; remaining args go to StrictRedis.
    AbstractCache.__init__(self, kwargs.get('timeout', datetime.timedelta(days=1)))
    Loggable.__init__(self)

    if 'timeout' in kwargs:
      del kwargs['timeout']
    self.redis = StrictRedis(*args, **kwargs)

  def get(self, key):
    """Return the unpickled value for key; raise KeyError when absent."""
    # value will be None if key is missing, but this is ambiguous
    value = self.redis.get(key)
    if not self.redis.exists(key):
      raise KeyError()
    else:
      return pickle.loads(value)

  def set(self, key, value, timeout=None):
    """Pickle `value` under `key`; expire after `timeout` seconds
    (default: the cache-wide timeout)."""
    self.redis.set(key, pickle.dumps(value))
    # BUG FIX: timedelta(seconds=None) raises TypeError, so the original
    # crashed whenever set() was called without an explicit timeout.
    ttl = datetime.timedelta(seconds=timeout) if timeout else self.timeout
    self.redis.expire(key, ttl)
Ejemplo n.º 29
0
def index(lang, degree, loc):
    """Build (or fetch from cache) the weather payload for (lang, degree, loc).

    Results are cached in Redis for 10 minutes under an MD5 hash of the
    request parameters.  NOTE(review): cache hits return the raw bytes
    stored in Redis while misses return the `str` produced by dumps();
    callers appear to tolerate both — confirm before changing.
    """
    r = StrictRedis(host='localhost', port=6379, db=0)
    # hashlib.md5 requires bytes on Python 3 — encode the key material
    # explicitly (passing a str raises TypeError).
    key_material = "{0}/{1}/{2}".format(lang, degree, loc).encode('utf-8')
    hash_sum = "h" + md5(key_material).hexdigest()
    # Single GET replaces the racy EXISTS-then-GET pair: None means miss.
    cached = r.get(hash_sum)
    if cached is not None:
        return cached
    weather, weather_dict = weather_data(lang, degree, loc)
    temp = temp_now = float(weather_dict["weather_now"]["temp"])
    if degree == "F":
        # Normalize Fahrenheit to Celsius for the surface lookup.
        temp_now = (temp_now - 32) * 5.0 / 9
    result = dumps({
        "weather": weather,
        "links": links(lang),
        "surfaces": surfaces(lang, temp_now),
        "feedback": {
            "current_temp": str(int(temp)),
            "sensation_desc": vote_sensation_list(lang),
            "list_surfaces": all_surface(lang)
        }
    })
    # Store atomically with a 10-minute TTL (one SET instead of SET+EXPIRE).
    r.set(hash_sum, result, ex=600)
    return result
Ejemplo n.º 30
0
def post_article(conn: StrictRedis, user: str, title: str, link: str) -> str:
    """Publish an article and register the poster's own initial vote.

    Returns the new article id as a string.
    """
    # 1. Allocate a fresh article id from the global counter.
    article_id = str(conn.incr("article:"))
    # 2. Create this article's voted-users set and expire it after one
    #    week (votes are only accepted for a week).
    voted = f"voted:{article_id}"
    conn.sadd(voted, user)
    conn.expire(voted, ONE_WEEK_IN_SECONDS)

    now = time.time()
    article = f"article:{article_id}"
    # 3. Record the article summary hash, including the initial vote
    #    count.  HSET with mapping= replaces the deprecated HMSET.
    conn.hset(article, mapping={
        "title": title,
        "link": link,
        "poster": user,
        "time": now,
        "votes": 1
    })
    # 4. Seed the score and publication-time leaderboards.  redis-py 3.x
    #    zadd takes a {member: score} mapping; the original passed loose
    #    args in member-before-score order for "score:" and omitted the
    #    member entirely for "time:".
    conn.zadd("score:", {article: now + VOTE_SCORE})
    conn.zadd("time:", {article: now})
    return article_id
Ejemplo n.º 31
0
class FilterDownloaderMiddleware(object):
    """Scrapy downloader middleware that drops duplicate detail requests.

    Second-hand-house ("ershoufang") detail-page URLs are tracked in
    Redis sets; a request whose URL was already seen today is ignored.
    """

    def __init__(self, redis_uri):
        self.redis_uri = redis_uri
        self.redis_pool = ConnectionPool.from_url(self.redis_uri)
        self.redis_client = StrictRedis(connection_pool=self.redis_pool)

    @classmethod
    def from_crawler(cls, crawler):
        # Pull the Redis connection string from the crawler settings.
        return cls(redis_uri=crawler.settings.get('REDIS_URI'))

    def process_request(self, request, spider):
        url = request.url
        # Only detail pages (12-digit listing id) are deduplicated.
        if re.search(r'/ershoufang/\d{12}.html', url):
            # One dedup set per site prefix per calendar day.
            today = date.today().strftime('%Y-%m-%d')
            redis_key = '%s_ershoufang_sell:link:%s' % (url[8:10], today)
            if self.redis_client.sismember(redis_key, url):
                raise IgnoreRequest()
            self.redis_client.sadd(redis_key, url)
            self.redis_client.expire(redis_key, 7200)
        return None

    def process_response(self, request, response, spider):
        url = response.url
        if re.search(r'/ershoufang/\d{12}.html', url):
            redis_set = '%s_ershoufang_sell:link' % url[8:10]
            if response.status == 200:
                # Remember successfully fetched links permanently.
                self.redis_client.sadd(redis_set, url)
            else:
                # Forget failed links so they can be retried later.
                self.redis_client.srem(redis_set, url)
                print('error status link is removed => ', url)
                raise IgnoreRequest()
        return response

    def process_exception(self, request, exception, spider):
        # Let Scrapy's default exception handling take over.
        return None
Ejemplo n.º 32
0
class Redis():
    """Thin convenience wrapper around a single StrictRedis connection."""

    def __init__(self, host, port):
        # Always talks to database 0 with no authentication.
        self.redis = StrictRedis(host=host, port=port, db=0, password=None)

    def addKey(self, key, value):
        """Store `value` under `key`."""
        self.redis.set(key, value)

    def setExpireTime(self, key, expireTime) -> bool:
        """Attach a TTL to `key`; returns whether the TTL was applied."""
        return self.redis.expire(key, expireTime)

    def deleteKey(self, key):
        """Remove `key` from the store."""
        self.redis.delete(key)

    def containsKey(self, key):
        """Report whether `key` currently exists."""
        return self.redis.exists(key)

    def getValue(self, key):
        """Fetch the raw stored value for `key` (None when absent)."""
        return self.redis.get(key)
Ejemplo n.º 33
0
class RedisCache(BaseCache):
    """Cache backend storing items in Redis database 1.

    Each item lives under a key derived from its context and params; a
    per-context index set records which keys belong to the context so
    the whole context can be cleared at once.
    """

    def __init__(self, redis_host, redis_port, default_timeout):
        super(RedisCache, self).__init__(default_timeout=default_timeout)
        self.redis_connection = StrictRedis(host=redis_host,
                                            port=redis_port,
                                            db=1)

    def _put(self, context, params, item, timeout):
        conn = self.redis_connection
        key = RedisCache._make_key(context, params)
        conn.set(key, item, ex=timeout)
        # Register the key in its context's index set so _clear can find it.
        context_key = RedisCache._make_context_key(context)
        conn.sadd(context_key, key)
        conn.expire(context_key, self.timeout())

    def _get(self, context, params):
        conn = self.redis_connection
        key = RedisCache._make_key(context, params)
        context_key = RedisCache._make_context_key(context)
        if not conn.sismember(context_key, key):
            return None
        item = conn.get(key)
        if item is None:
            # The value expired on its own; drop the stale index entry.
            conn.srem(context_key, key)
        else:
            # Touch both the item and its index to keep them alive.
            conn.expire(key, self.timeout())
            conn.expire(context_key, self.timeout())
        return item

    def _clear(self, context):
        context_key = RedisCache._make_context_key(context)
        pipe = self.redis_connection.pipeline()
        # Pop members one at a time, queueing the deletes on a pipeline.
        member = self.redis_connection.spop(context_key)
        while member is not None:
            pipe.delete(member)
            member = self.redis_connection.spop(context_key)
        pipe.execute(raise_on_error=True)

    @staticmethod
    def _make_key(context, params):
        # Canonical JSON keeps equal param dicts mapping to equal keys.
        encoded = json.dumps(params, ensure_ascii=True, sort_keys=True)
        return '{}:{}'.format(context, encoded)

    @staticmethod
    def _make_context_key(context):
        return RedisCache._make_key('ctx', context)
Ejemplo n.º 34
0

# Fetch a random key from the current database
print(redis.randomkey())


# Rename a key
redis.rename('name','nikename')


# Number of keys in the current database
print(redis.dbsize())


# Set a key's time-to-live, in seconds
redis.expire('name', 2)


# Remaining TTL of a key in seconds; -1 means it never expires
print(redis.ttl('name'))


# Move a key to another database
redis.move('name',2) # 2 is the target database index


# Delete every key in the currently selected database (destructive!)
redis.flushdb()


# Delete the keys of all databases (flushall — destructive!)
Ejemplo n.º 35
0
def start_competition(request,groupname):
    # prepare the game (Python 2 module: print statements, no parens)
    # now set the questions here!
    '''
    Start the competition for `groupname`: pick the word set, announce
    '#start' to the group's websocket channel, hide the group from the
    lobby, snapshot its members, and create empty result rows.

    Original design notes, kept verbatim:
    ok so the new idea is to delete the group entry
    so that the ginfo won't come to know about this
    group, so that means every thing related to the
    group will be deleted, after storing it to the database
    so the set element in pref:groups need to be deleted
    then pref:groupname:hash needs to be deleted
    but pref:groupname:wordpks need to be added, with
    a expire timeout of say 5 seconds, though it is too much
    chuck that, how about setting the expire timeout here
    and then copy every property of hash ...
    actually only pref:groups entry has to be deleted because
    using this set, ginfo accesses the hash.
    lets set the time out here for the hash
    and also broadcast this to inform that a group was deleted
    maybe settings message.type == 'group_delete'
    '''
    print "starting the competition for "+groupname
    print 'the following keys are there'
    rd = StrictRedis()
    # All keys for this app share the MY_PREFIX namespace; per-group keys
    # live under "<prefix>:<groupname>".
    pref = settings.MY_PREFIX
    prefg = pref+":"+groupname
    d = rd.hgetall(prefg+":hash")
    print rd.keys(pref+'*')
    print "changing from ",d
    totwords = int(d['totwords'])
    # Sample `totwords` distinct word primary keys from 1..30 and store
    # them dash-joined on the group hash.
    # NOTE(review): assumes the word table has pks 1-30 — confirm.
    wordpks = random.sample([x for x in range(1,31)], totwords)
    wordpks = '-'.join([str(x) for x in wordpks])
    rd.hset(prefg+":hash", 'wordpks', wordpks)
    print "to",rd.hgetall(prefg+":hash")
    # Broadcast '#start' to every client subscribed to this group's channel.
    redis_publisher = RedisPublisher(facility = groupname, broadcast = True)
    message = RedisMessage('#start')
    redis_publisher.publish_message(message)
    print "published the #start"
    # rd.expire(prefg+":hash", extime)         # get rid of the group details after extime seconds
    # rd.expire(pref+":"+groupname, extime)   # don't require the usernames in that group also
    '''
    don't do the expire here, do it in ganswer_post:
    whenever user requests the wordpks, it removes its username from pref:groupname. 
    so check if llen('pref:groupname) is one then remove both, hash and this set
    this way we will be sure that only delete when every user has transferred wordpks from redis to session
    '''
    rd.srem(pref+":groups", groupname)  # prevent this group from showing up in the ginfo
    # to be on the safer side, should i create a new key storing wordpks? prefg:wordpks?

    group_members = rd.smembers(prefg)
    rd.sadd(prefg+":members", *group_members)
    rd.sadd(prefg+":gmembers", *group_members)
    rd.expire(prefg+":gmembers", 500)
    # create a new key containing all the members of this group
    # in pref:groupname:members this key will be helpful in checking
    # how many users have completed the competition, on similar lines
    # pref:groupname key will be responsible in providing the condition
    # as to when the pref:groupname:hash need to be deleted!
    # also copying to gmembers so that i know whose results are to be delivered in result_group
    # but only for 5 minutes you can view your previous group results, restricted, right? i know
    # the idea is to make a generic view which returns all the information, pretty much like
    # the admin interface app but providing interactive charts rather than pure data to see
    print "copied the group_members",group_members
    # Tell the lobby channel that this group is now busy (hidden).
    redis_publisher = RedisPublisher(facility = pref, broadcast = True)
    delete_group_msg = json.dumps({'type':'group_busy', 'name':groupname})
    redis_publisher.publish_message(RedisMessage(delete_group_msg))
    sttime=datetime.now()
    usrs=rd.smembers(prefg)
    print "for",groupname,"members are",usrs
    # Pre-create a zero-mark result row per member; marks/endtime are
    # filled in later when the member finishes.
    for i in usrs:
        obj=GroupFinalResult(re_user=User.objects.get(username=i),groupname=groupname,marks=0,starttime=sttime,endtime=sttime)
        obj.save()

    print 'leaving start competition...'
    print rd.keys(pref+"*")
Ejemplo n.º 36
0
$ 0
"""

from redis import StrictRedis

# Create the Redis connection (decode_responses gives us str, not bytes)
redis_client = StrictRedis(decode_responses=True)

# Pessimistic Redis lock: prevents overselling during a flash sale

# Spin until the lock is acquired
while True:
    # SET NX EX acquires the lock and attaches the 5-second safety
    # expiry in ONE atomic command.  The original SETNX followed by a
    # separate EXPIRE could deadlock: a crash between the two calls
    # would leave a lock with no TTL.
    order_lock = redis_client.set('lock:order', 1, nx=True, ex=5)
    if order_lock:
        reserve_count = redis_client.get('count:reserve')
        # Guard against a missing counter key: GET returns None and
        # int(None) would raise TypeError.
        if reserve_count is not None and int(reserve_count) > 0:
            redis_client.decr('count:reserve')
            print("生成订单")
        else:
            print("已售罄")
        # Done processing: release the lock explicitly.
        redis_client.delete('lock:order')
        break




Ejemplo n.º 37
0
class ItemViewsTest(BaseTestCase):
    """Integration tests for the item views: create/update/delete an
    item, upload its images, and delete images."""

    def __init__(self, *args, **kwargs):
        super(ItemViewsTest, self).__init__(*args, **kwargs)
        # Redis connection built from the pyramid settings; shared by
        # every test via the registry (see setUp).
        self.redis = StrictRedis(host=pyramid_settings['redis.host'],
                                 port=int(pyramid_settings['redis.port']),
                                 db=int(pyramid_settings['redis.db']))

    def setUp(self):
        super(ItemViewsTest, self).setUp()
        self.config.registry.redis = self.redis

    def test_items_post(self):
        """
        Test creation of new item by POSTing.
        """
        payload = {
            "name": "Macbook Air",
            "type": "TRADE",
            "quantity": "1",
            "price": "",
            "description": "Lightweight lappy.",
            "reason": "",
            "is_draft": "y",
            "uuid": str(uuid.uuid4())
        }

        request = Request({}, method='POST', body=json.dumps(payload))
        request.registry = self.config.registry

        response = items(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(DBSession.query(Item).count(), 1)

    def test_items_post_failed(self):
        """
        Test that when POSTing malformed payload, it'll raise HTTPBadRequest.
        """
        payload = {
            "name": "",
            "type": "",
            "quantity": "",
            "price": "",
            "description": "",
            "reason": "",
            "is_draft": "",
            "uuid": ""
        }

        request = Request({}, method='POST', body=json.dumps(payload))
        request.registry = self.config.registry

        self.assertRaises(HTTPBadRequest, items, request)
        self.assertEqual(DBSession.query(Item).count(), 0)

    def test_items_put(self):
        """
        Test updating an item.
        """
        self._create_item_status()

        payload = {
            "name": "Macbook Air",
            "type": "TRADE",
            "quantity": "1",
            "price": "",
            "description": "Lightweight lappy.",
            "reason": "",
            "is_draft": "y",
            "uuid": str(uuid.uuid4())
        }

        request = Request({}, method='POST', body=json.dumps(payload))
        request.registry = self.config.registry

        # make the request
        items(request)

        # try retrieving the newly added item
        item = DBSession.query(Item).first()
        self.failUnless(item)

        payload = {
            "name": "Macbook Pro",
            "type": "SALE",
            "quantity": "5",
            "price": "200.00",
            "description": "Lightweight lappy.",
            "reason": "",
            "is_draft": "n",
            "id": item.id
        }

        request.matchdict = {'id': item.id}
        request.method = 'PUT'
        request.body = json.dumps(payload)

        # make the request again
        response = items(request)
        self.assertEqual(response.status_code, 200)

        # reload item and verify every updated field round-tripped
        item = DBSession.query(Item).filter_by(id=item.id).first()
        self.assertEqual(item.name, payload['name'])
        self.assertEqual(item.type, payload['type'])
        self.assertEqual(item.quantity, int(payload['quantity']))
        self.assertEqual(str(item.price), payload['price'])
        self.assertEqual(item.status_id, self.draft_status.id)

    def test_items_put_failed(self):
        """
        Test that updating non-existent item fails.
        """
        payload = {
            "name": "Macbook Pro",
            "type": "SALE",
            "quantity": "5",
            "price": "200.00",
            "description": "Lightweight lappy.",
            "reason": "",
            "is_draft": "n",
            "id": 1
        }

        request = Request({}, method='PUT', body=json.dumps(payload))
        request.registry = self.config.registry
        request.matchdict = {'id': 1}
        request.method = 'PUT'

        self.assertRaises(HTTPBadRequest, items, request)
        self.assertEqual(DBSession.query(Item).count(), 0)

    def test_items_delete(self):
        """
        Test deleting an item.
        """
        # first create an item
        self._create_item_status()
        payload = {
            "name": "Macbook Air",
            "type": "TRADE",
            "quantity": "1",
            "price": "",
            "description": "Lightweight lappy.",
            "reason": "",
            "is_draft": "y",
            "uuid": str(uuid.uuid4())
        }

        request = Request({}, method='POST', body=json.dumps(payload))
        request.registry = self.config.registry

        response = items(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(DBSession.query(Item).count(), 1)

        # try retrieving the newly added item
        item = DBSession.query(Item).first()

        # now send a delete request.  BUGFIX: capture the DELETE
        # response — the original discarded it, so the status assertion
        # below was re-checking the earlier POST response.
        request.method = 'DELETE'
        request.matchdict = {'id': item.id}
        request.body = None
        response = items(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(DBSession.query(Item).count(), 0)

    def test_upload_item_images_post_uuid(self):
        """
        Test posting images for an item via uuid.
        """
        self._create_item_status()
        item = Item(name='iPhone',
                    type='TRADE',
                    quantity=1,
                    description='A smart phone',
                    status=self.draft_status,
                    reason='just because')
        DBSession.add(item)
        DBSession.commit()

        item_uuid = str(uuid.uuid4())
        mock_image = MockFileImage('image1.png')

        # write to disk the dummy image so the view can resize it
        original = '%s.png' % item_uuid
        static_path = pkgr.resource_filename('tradeorsale', 'static')
        image_path = os.path.join(static_path,
                                  os.path.join('items/images', str(item.id)),
                                  original)
        with open(image_path, 'wb') as handle:
            handle.write(mock_image.file.read())
        self.failUnless(os.path.exists(image_path))

        # build request; rewind the file since it was read above
        mock_image.file.seek(0)
        payload = {"uuid": item_uuid, "image": mock_image}
        request = testing.DummyRequest(post=payload)
        request.registry = self.config.registry

        # set a dummy uuid to redis
        self.redis.hset('item_uuid_to_id', item_uuid, item.id)
        # NOTE(review): this expires the key named `item_uuid`, which was
        # never set — the mapping lives in the 'item_uuid_to_id' hash, so
        # this EXPIRE is a no-op.  Confirm intended TTL target.
        self.redis.expire(item_uuid, 3600)

        response = upload_item_images(request)
        self.assertEqual(response.status_code, 200)

        # test that there are 3 images: original, small and medium
        self.assertEqual(
            DBSession.query(ItemImage).filter_by(item_id=item.id).count(), 3)

    def test_upload_item_images_post_uuid_failed(self):
        """
        Test posting images for an item via uuid with invalid image fails.
        """
        self._create_item_status()
        item = Item(name='iPhone',
                    type='TRADE',
                    quantity=1,
                    description='A smart phone',
                    status=self.draft_status,
                    reason='just because')
        DBSession.add(item)
        DBSession.commit()

        # Not a real image: the resize step in the view should choke on it.
        class DumbMockImage(object):
            file = StringIO('image')
            filename = 'image1.jpg'

        item_uuid = str(uuid.uuid4())
        mock_image = DumbMockImage()

        payload = {"uuid": item_uuid, "image": mock_image}
        request = testing.DummyRequest(post=payload)
        request.registry = self.config.registry

        # set a dummy uuid to redis
        self.redis.hset('item_uuid_to_id', item_uuid, item.id)
        # NOTE(review): same no-op EXPIRE as in
        # test_upload_item_images_post_uuid — confirm intended target.
        self.redis.expire(item_uuid, 3600)

        self.assertRaises(HTTPBadRequest, upload_item_images, request)

    def test_upload_item_images_post_id(self):
        """
        Test posting images for an item via id.
        """
        self._create_item_status()
        item = Item(name='iPhone',
                    type='TRADE',
                    quantity=1,
                    description='A smart phone',
                    status=self.draft_status,
                    reason='just because')
        DBSession.add(item)
        DBSession.commit()

        uuid_filename = str(uuid.uuid4())
        mock_image = MockFileImage('image1.png')

        # write to disk the dummy image so the view can resize it
        original = '%s.png' % uuid_filename
        static_path = pkgr.resource_filename('tradeorsale', 'static')
        image_path = os.path.join(static_path,
                                  os.path.join('items/images', str(item.id)),
                                  original)
        with open(image_path, 'wb') as handle:
            handle.write(mock_image.file.read())
        self.failUnless(os.path.exists(image_path))

        # build request; rewind the file since it was read above
        mock_image.file.seek(0)
        payload = {"item_id": item.id, "image": mock_image}
        request = testing.DummyRequest(post=payload)
        request.registry = self.config.registry

        response = upload_item_images(request)
        self.assertEqual(response.status_code, 200)

        # test that there are 3 images: original, small and medium
        self.assertEqual(
            DBSession.query(ItemImage).filter_by(item_id=item.id).count(), 3)

    def test_item_image_delete(self):
        """
        Test that image is deleted when DELETE request is sent.
        """
        self._create_item_status()
        item = Item(name='iPhone',
                    type='TRADE',
                    quantity=1,
                    description='A smart phone',
                    status=self.draft_status,
                    reason='just because')
        DBSession.add(item)
        DBSession.commit()

        # write to disk the dummy image
        mock_image = MockFileImage('original.jpg')
        static_path = pkgr.resource_filename('tradeorsale', 'static')
        item_images_path = os.path.join(
            static_path, os.path.join('items/images', str(item.id)))
        image_path = os.path.join(item_images_path, mock_image.filename)
        with open(image_path, 'wb') as handle:
            handle.write(mock_image.file.read())
        self.failUnless(os.path.exists(image_path))

        # save the image in db
        item_image = ItemImage(
            item.id, mock_image.filename,
            os.path.join('/%s' % item_images_path, mock_image.filename))
        DBSession.add(item_image)
        DBSession.commit()

        # send DELETE request
        request = Request({}, method='DELETE')
        request.matchdict = {'id': item.id}
        request.registry = self.config.registry

        # check that both the DB record and the file on disk are gone
        response = item_images(None, request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(DBSession.query(ItemImage).count(), 0)
        self.failUnless(not os.path.exists(image_path))

    def test_item_image_delete_fail(self):
        """
        Test deletion of non-existent image via DELETE request.
        """
        # send DELETE request
        request = Request({}, method='DELETE')
        request.matchdict = {'id': 1}
        request.registry = self.config.registry

        self.assertRaises(HTTPBadRequest, item_images, None, request)
Ejemplo n.º 38
0
def compute_nodes(nodes, timeout=60):
    """
    Computes values for all computational nodes as defined in the
    FunctionType model. Recursive calls are made based on information
    present in respective Dependency entries.

    'nodes' should be a dict mapping name of computational node, to:
    A floating point number as input, OR
    None, if the value is to be (possibly) computed.
    'timeout' defines the time interval for which the values of the
    nodes mentioned in 'nodes' will be valid. Default is a minute.

    NOTE: Python 2 code — the backtick expressions below are the
    deprecated repr() syntax used to build Redis key suffixes.
    """

    #First handle the boundary case
    if len(nodes) == 0:
        return None

    #Get a connection to Redis
    conn = StrictRedis()

    #This will contain all the parent nodes that we will try to compute
    #recursively based on the args currently provided.
    parents = set([])

    #Default initialization for Celery
    value = None

    #Iterate over all nodes
    for node in nodes:
        ##First obtain the value of the node
        if nodes[node] is not None:
            #Value has been provided as input
            try:
                #Ensure the given value can be parsed/converted to
                #a float.
                value = float(nodes[node])
            except:
                #If representing the value as a float fails,
                #skip this one.
                #NOTE(review): bare except also swallows KeyboardInterrupt
                #etc. — consider `except (TypeError, ValueError)`.
                continue
        else:
            #Value has to be computed.

            #First acquire lock for the particular node.
            #This ensures that the inputs needed for computing
            #the current node don't get modified midway(unless
            #one expires, in which case the computation may or may
            #not go through).
            lock = redis_lock.RedisLock(conn, node + '_lock')
            if lock.acquire():
                try:
                    #This will indicate if all args are present for
                    #computation of the result
                    all_args_flag = True
                    #This will store all the required arguments in order
                    args = []
                    #Get the pertinent FunctionType instance
                    func_info = FunctionType.objects.get(node=node)
                    #Iterate over all arguments; arg i is stored under
                    #the Redis key "<node>_<i>".
                    for i in range(func_info.noofargs):
                        #Get Redis value
                        v = conn.get(node + '_' + `i`)
                        if v is None or v == 'None':
                            #If value not present stop iterations
                            all_args_flag = False
                            break
                        else:
                            args.append(float(v))
                    #If any arg was absent, abort processing of this node
                    if not all_args_flag:
                        continue
                    #Compute the value, since all args are present
                    value = Functions.mapping[func_info.functionname](
                        args)
                    #Delete info about current args
                    for i in range(func_info.noofargs):
                        conn.delete(node + '_' + `i`)
                except:
                    #NOTE(review): failures (missing FunctionType, bad
                    #float, unknown function name) are silently dropped.
                    pass
                finally:
                    #Release lock
                    lock.release()

        ##Now that the value has been obtained, update the args info
        ##for all parent nodes
        parent_objs = Dependency.objects.filter(child=node)
        for parent_obj in parent_objs:
            #Get lock for parent
            lock = redis_lock.RedisLock(conn, parent_obj.parent + '_lock')
            if lock.acquire():
                try:
                    #Set value under "<parent>_<argno>"
                    conn.set(parent_obj.parent + '_' + `parent_obj.argno`,
                             value)
                    #Set expiry time
                    conn.expire(parent_obj.parent + '_' + `parent_obj.argno`,
                                timeout)
                except:
                    pass
                finally:
                    #Release lock
                    lock.release()
            #Add this parent to the set of parents to process
            parents.add(parent_obj.parent)

        #Save value in database as needed
        save_node_value(node, value)

    #Make the recursive call on parent nodes (async via Celery .delay)
    compute_nodes.delay(dict((parent, None) for parent in parents),
                        timeout)
Ejemplo n.º 39
0
class RedisConn(object):
    """docstring for RedisConn"""

    def __init__(self, startup_nodes=None, host="localhost",
                 port=6379, db=0, password=None, encoding='utf-8',
                 socket_keepalive=False, connection_pool=None,
                 max_connections=None, project="", decode_responses=True, **kwargs):
        """Create either a cluster or a single-node Redis client.

        If ``startup_nodes`` is given, a ``StrictRedisCluster`` is built (a
        str/bytes value is first parsed by ``_normalize_startup_nodes``);
        otherwise a plain ``StrictRedis`` connection is used.

        ``project`` is a namespace prefix: when non-empty, every key passed
        through the ``format_*`` decorators is rewritten to ``"<project>:<key>"``.

        NOTE(review): ``encoding`` is accepted but never forwarded to either
        client, and ``decode_responses`` is only forwarded on the cluster
        path -- presumably both should reach ``StrictRedis`` too; confirm.
        """
        if project:
            project = f'{project}:'
        self.cluster_flag = False  # True when backed by a cluster client
        self.project = project
        if startup_nodes:
            from rediscluster import StrictRedisCluster
            if isinstance(startup_nodes, (str, bytes)):
                startup_nodes = _normalize_startup_nodes(startup_nodes)
            self._redis = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=decode_responses,
                                             skip_full_coverage_check=True, **kwargs)
            self.cluster_flag = True
        else:
            self._redis = StrictRedis(host=host, port=port, db=db, password=password,
                                      socket_keepalive=socket_keepalive, connection_pool=connection_pool,
                                      max_connections=max_connections, **kwargs)

    def add_head(self, key):
        return f'{self.project}{key}'

    def format_key():
        """Decorator factory: namespace the first positional ``key`` argument
        via ``self.add_head`` before delegating to the wrapped command.

        Fix: the wrapper now carries ``functools.wraps`` so the decorated
        command keeps its ``__name__``/``__doc__`` (the original wrappers
        erased them, breaking ``help()`` and introspection).
        """
        def make_wrapper(func):
            from functools import wraps  # local import: file header may lack functools

            @wraps(func)
            def wrapper(self, key, *args, **kwargs):
                return func(self, self.add_head(key), *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_key_keys():
        """Decorator factory: namespace ``key`` and every element of ``keys``.

        Fix: adds ``functools.wraps`` so the decorated command keeps its
        metadata (the original wrapper erased ``__name__``/``__doc__``).
        """
        def make_wrapper(func):
            from functools import wraps  # local import: file header may lack functools

            @wraps(func)
            def wrapper(self, key, keys, *args, **kwargs):
                new_key = self.add_head(key)
                new_keys = [self.add_head(k) for k in keys]
                return func(self, new_key, new_keys, *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_args():
        """Decorator factory: namespace every positional argument.

        Fix: adds ``functools.wraps`` so the decorated command keeps its
        metadata (the original wrapper erased ``__name__``/``__doc__``).
        """
        def make_wrapper(func):
            from functools import wraps  # local import: file header may lack functools

            @wraps(func)
            def wrapper(self, *args, **kwargs):
                new_args = [self.add_head(a) for a in args]
                return func(self, *new_args, **kwargs)
            return wrapper
        return make_wrapper

    def format_two_key():
        """Decorator factory: namespace the ``src``/``dst`` key pair.

        Fix: adds ``functools.wraps`` so the decorated command keeps its
        metadata (the original wrapper erased ``__name__``/``__doc__``).
        """
        def make_wrapper(func):
            from functools import wraps  # local import: file header may lack functools

            @wraps(func)
            def wrapper(self, src, dst, *args, **kwargs):
                return func(self, self.add_head(src), self.add_head(dst), *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_keys():
        """Decorator factory: namespace every element of the ``keys`` list.

        Fixes: adds ``functools.wraps`` (the original wrapper erased
        ``__name__``/``__doc__``) and forwards ``**kwargs``, which the
        original silently dropped -- a keyword call would have raised.
        """
        def make_wrapper(func):
            from functools import wraps  # local import: file header may lack functools

            @wraps(func)
            def wrapper(self, keys, *args, **kwargs):
                return func(self, [self.add_head(k) for k in keys], *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_dicts():
        """Decorator factory: namespace every key of the ``mapping`` dict.

        Fixes: adds ``functools.wraps`` (the original wrapper erased
        ``__name__``/``__doc__``) and forwards ``**kwargs``, which the
        original silently dropped.
        """
        def make_wrapper(func):
            from functools import wraps  # local import: file header may lack functools

            @wraps(func)
            def wrapper(self, mapping, *args, **kwargs):
                new_mapping = {self.add_head(k): v for k, v in mapping.items()}
                return func(self, new_mapping, *args, **kwargs)
            return wrapper
        return make_wrapper

    @format_args()
    def unlink(self, *keys):
        """
        time complexity O(1)
        Asynchronously delete ``keys`` (non-blocking alternative to DEL;
        memory is reclaimed in the background).
        """
        return self._redis.unlink(*keys)

    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a pipeline object for batching commands.

        NOTE(review): keys used on the returned pipeline bypass the
        project prefixing applied by the format_* decorators -- callers
        must prefix manually (e.g. via ``add_head``).
        """
        return self._redis.pipeline(transaction, shard_hint)

    """===============================string-start=========================="""
    # }
    @format_key()
    def set(self, key, value, ex=None, px=None, nx=False, xx=False):
        """
        time complexity O(1)
        Set the value at key ``key`` to ``value``
        Arguments:
            key (str):     key name
            value (str):   value to store
            ex(int):    expiry in seconds
            px(int):    expiry in milliseconds
            nx(bool):   if True, only set when the key does NOT exist (create)
            xx(bool):   if True, only set when the key already exists (update)
        Returns:
            result(bool): True on success; may be None on failure
        """
        return self._redis.set(key, value, ex, px, nx, xx)

    @format_key()
    def get(self, key):
        """
        time complexity O(1)
        Return the value at ``key``, or None if the key doesn't exist
        Arguments:
            key (str):     key
        Returns:
            value (str):返回value
        """
        return self._redis.get(key)

    @format_key()
    def getset(self, key, value):
        """
        time complexity O(1)
        设置新值并获取原来的值
        """
        return self._redis.getset(key, value)

    @format_key()
    def strlen(self, key):
        """
        time complexity O(1)
        获得key对应的value长度
        """
        return self._redis.strlen(key)

    @format_key()
    def getrange(self, key, start, end):
        """
        time complexity O(1)
        获得key对应的value的start到end长度字符返回
        """
        return self._redis.getrange(key, start, end)

    @format_key()
    def setrange(self, key, offset, value):
        """
        time complexity O(1)
        设置key对应的value从offset地方用新value替换
        """
        return self._redis.setrange(key, offset, value)

    @format_key()
    def setbit(self, key, offset, value):
        """
        time complexity O(1)
        value值只能是1或0
        设置key对应的value二进制在offset位用value替换
        """
        return self._redis.setbit(key, offset, value)

    @format_key()
    def getbit(self, key, offset):
        """
        time complexity O(1)
        获取key对应的value二进制在offset位的值
        """
        return self._redis.getbit(key, offset)

    @format_key()
    def expire(self, key, time):
        """
        time complexity O(1)
        设置key的过期时间s
        """
        return self._redis.expire(key, time)

    @format_key()
    def pexpire(self, key, time):
        """
        time complexity O(1)
        设置key的过期时间ms
        """
        return self._redis.pexpire(key, time)

    @format_key()
    def pexpireat(self, key, when):
        """
        time complexity O(1)
        设置key的过期时间(在什么时候过期)
        when是uninx的时间戳ms
        """
        return self._redis.pexpireat(key, when)

    @format_key()
    def pttl(self, key):
        """
        time complexity O(1)
        获得key过期时间(ms),没有设置过期时间返回-1
        """
        return self._redis.pttl(key)

    @format_key()
    def ttl(self, key):
        """
        time complexity O(1)
        获得name过期时间(s),没有设置过期时间返回-1
        """
        return self._redis.ttl(key)

    @format_dicts()
    def mset(self, mapping):
        """
        time complexity O(n)
        Arguments:
            mapping (dict):   {name: value,name1: value1}
        Returns:
            return ok
        """
        return self._redis.mset(mapping)

    @format_dicts()
    def msetnx(self, mapping):
        """
        time complexity O(n)
        Arguments:
            mapping (dict):   {name: value,name1: value1}
        Returns:
            return (bool): 与mset区别是指定的key中有任意一个已存在,则不进行任何操作,返回错误
        """
        return self._redis.msetnx(mapping)

    @format_keys()
    def mget(self, keys, *args):
        """
        time complexity O(n)
        Arguments:
            keys (list): [name, name1]
        Returns:
            return (list): 返回对应keys的value, name在数据库不存在返回None
        Mind!:
            一次性取多个key确实比get提高了性能,但是mget的时间复杂度O(n),
            实际使用过程中测试当key的数量到大于100之后性能会急剧下降,
            建议mget每次key数量不要超过100。在使用前根据实列的redis吞吐量可能会不一样。
        """
        return self._redis.mget(keys, *args)

    @format_key()
    def incr(self, key, amount=1):
        """
        time complexity O(1)
        将key对应的value值自增amount,并返回自增后的值。只对可以转换为整型的String数据起作用。
        用于统计sql型数据库大表里面的数据量
        """
        return self._redis.incr(key, amount)

    @format_key()
    def incrbyfloat(self, key, amount=1.0):
        """
        time complexity O(1)
        amount 可以为负数代表减法
        将key对应的value值自增amount,并返回自增后的值。只对可以转换为float的String数据起作用。
        用于统计sql型数据库大表里面的数据量
        """
        return self._redis.incrbyfloat(key, amount)

    @format_key()
    def decr(self, key, amount=1):
        """
        time complexity O(1)
        将key对应的value值自减amount,并返回自减后的值。只对可以转换为整型的String数据起作用。
        用于统计sql型数据库大表里面的数据量
        """
        return self._redis.decr(key, amount)

    def keys(self, pattern='*'):
        """
        time complexity O(n)
        获取匹配pattern的所有key.实际项目中慎用
        """
        return self._redis.keys(pattern)

    @format_key()
    def move(self, key, db):
        """
        time complexity O(1)
        移动key到其他db
        """
        return self._redis.move(key, db)

    def randomkey(self):
        """
        time complexity O(1)
        随机返回一个key
        """
        return self._redis.randomkey()

    @format_args()
    def rename(self, src, dst):
        """
        time complexity O(1)
        重命名key src to dst
        """
        return self._redis.rename(src, dst)

    @format_args()
    def exists(self, *keys):
        """
        time complexity O(1)
        查看keys是否存在返回存在的key数量
        """
        return self._redis.exists(*keys)

    @format_args()
    def delete(self, *keys):
        """
        time complexity O(1)
        删除keys
        """
        return self._redis.delete(*keys)

    @format_key()
    def type(self, key):
        """
        time complexity O(1)
        查看key对应value类型
        """
        return self._redis.type(key)
# {
    """===============================string-end============================"""

    """===============================list-start============================"""
# }
    @format_keys()
    def blpop(self, keys, timeout=0):
        """
        Pop the head element from the first non-empty list in ``keys``,
        blocking for up to ``timeout`` seconds when all of them are empty.
        Typically used to implement polling / queue-consumer patterns
        together with LPUSH/RPUSH producers.
        Arguments:
            keys(list): [key, key]
            timeout(int): seconds (0 = block indefinitely)
        """
        return self._redis.blpop(keys, timeout)

    @format_keys()
    def brpop(self, keys, timeout=0):
        """
        同上,取数据的方向不同
        """
        return self._redis.brpop(keys, timeout)

    @format_two_key()
    def brpoplpush(self, src, dst, timeout=0):
        """
        从src表尾取一个数据插入dst表头。同上src为空阻塞
        """
        return self._redis.brpoplpush(src, dst, timeout)

    @format_key()
    def lpush(self, key, *values):
        """
        time complexity O(n)
        Set the value at key ``key`` to ``value``
        Arguments:
            key (str):     key key
            value (list):    key value
        Returns:
            result(int): 插入成功之后list长度
        """
        return self._redis.lpush(key, *values)

    @format_key()
    def lpushx(self, key, *values):
        """
        time complexity O(n)
        only key not exists
        Arguments:
            key (str):     key
            value (list):    key value
        Returns:
            result(int): 插入成功之后list长度
        """
        return self._redis.lpushx(key, *values)

    @format_key()
    def lpop(self, key):
        """
        time complexity O(1)
        移除并返回列表 key 的头元素。
        """
        return self._redis.lpop(key)

    @format_key()
    def rpush(self, key, *values):
        """
        time complexity O(n)
        Set the value at key ``key`` to ``value``
        Arguments:
            key (str):     key key
            value (list):    key value
        Returns:
            result(int): 插入成功之后list长度
        """
        return self._redis.rpush(key, *values)

    @format_key()
    def rpushx(self, key, *values):
        """
        time complexity O(n)
        only key not exists
        Arguments:
            key (str):     key
            value (list):    key value
        Returns:
            result(int): 插入成功之后list长度
        """
        return self._redis.rpushx(key, *values)

    @format_key()
    def rpop(self, key):
        """
        time complexity O(1)
        移除并返回列表 key尾元素。
        """
        return self._redis.rpop(key)

    @format_key()
    def lrange(self, key, start, end):
        """
        time complexity O(n)
        获取list数据包含start,end.在不清楚list的情况下尽量不要使用lrange(key, 0, -1)操作
        应尽可能控制一次获取的元素数量
        """
        return self._redis.lrange(key, start, end)

    @format_args()
    def rpoplpush(self, src, dst):
        """
        从src表尾取一个数据插入dst表头
        """
        return self._redis.rpoplpush(src, dst)

    @format_key()
    def llen(self, key):
        """
        time complexity O(1)
        获取list长度,如果key不存在返回0,如果key不是list类型返回错误
        """
        return self._redis.llen(key)

    @format_key()
    def lindex(self, key, index):
        """
        time complexity O(n) n为经过的元素数量
        返回key对应list的index位置的value
        """
        return self._redis.lindex(key, index)

    @format_key()
    def linsert(self, key, where, refvalue, value):
        """
        time complexity O(n) n为经过的元素数量
        key或者refvalue不存在就不进行操作
        Arguments:
            where(str): BEFORE|AFTER  后|前
            refvalue(str): list里面的值
        """
        return self._redis.linsert(key, where, refvalue, value)

    @format_key()
    def lrem(self, key, count, value):
        """
        time complexity O(n)
        Remove occurrences of ``value`` from the list at ``key``.
        Arguments:
            count(int): count>0 search from the head
                        count<0 search from the tail
                        count=0 remove every element equal to value
        Returns:
            result(int): number of removed elements
        """
        # redis-py-cluster keeps the legacy (key, value, count) argument
        # order, while modern redis-py expects (key, count, value); the
        # branch below papers over that API divergence.
        if self.cluster_flag:
            return self._redis.lrem(key, value, count)
        return self._redis.lrem(key, count, value)

    @format_key()
    def lset(self, key, index, value):
        """
        time complexity O(n)
        设置list的index位置的值,没有key和超出返回错误
        """
        return self._redis.lset(key, index, value)

    @format_key()
    def ltrim(self, key, start, end):
        """
        time complexity O(n) n为被删除的元素数量
        裁剪让列表只保留指定区间内的元素,不在指定区间之内的元素都将被删除。
        """
        return self._redis.ltrim(key, start, end)

    @format_key()
    def sort(self, key, start=None, num=None, by=None, get=None,
             desc=False, alpha=False, store=None, groups=False):
        """
        time complexity O(n)
        O(N+M*log(M)), N 为要排序的列表或集合内的元素数量, M 为要返回的元素数量。
        删除count数量的value
        Arguments:
            by(str): 让排序按照外部条件排序,
                    可以先将权重插入redis然后再作为条件进行排序如(user_level_*)
            get(str): redis有一组user_name_*然后*是按照list里面的值,
                    按照排序取一个个key的value
            store(str): 保留sort之后的结果,可以设置expire过期时间作为结果缓存
            alpha: 按照字符排序
            desc: 逆序
        Returns:
            result(list): 排序之后的list
        """
        return self._redis.sort(key, start, num, by, get, desc, alpha, store, groups)

    def scan(self, cursor=0, match=None, count=None):
        """
        time complexity O(1) per call
        Incrementally iterate the keyspace; because iteration is
        incremental, the same key may be returned more than once.
        Arguments:
            cursor(int): iteration cursor
            match(str): glob pattern filter
            count(int): hint for how many keys to return per batch
        Returns:
            result(tuple): (next cursor, list of keys); a returned cursor
            of 0 means the whole keyspace has been traversed
        NOTE(review): returned keys include the project prefix, and
        ``match`` is NOT prefixed automatically.
        """
        return self._redis.scan(cursor, match, count)
# {
    """===============================list-end===================================="""

    """===============================hash-start==================================="""
# }
    @format_key()
    def hdel(self, key, *names):
        """
        time complexity O(n) n为names长度
        Return the value at ``key``, or None if the key doesn't exist
        Arguments:
            key (str):     key
            names(list): hash里面的域
        Returns:
            result (int): 成功删除的个数
        """
        return self._redis.hdel(key, *names)

    @format_key()
    def hexists(self, key, name):
        """
        time complexity O(1)
        判断key中是否有name域
        """
        return self._redis.hexists(key, name)

    @format_key()
    def hget(self, key, name):
        """
        time complexity O(1)
        """
        return self._redis.hget(key, name)

    @format_key()
    def hgetall(self, key):
        """
        time complexity O(n)
        """
        return self._redis.hgetall(key)

    @format_key()
    def hincrby(self, key, name, amount=1):
        """
        time complexity O(1)
        amount可以为负数,且value值为整数才能使用否则返回错误
        """
        return self._redis.hincrby(key, name, amount)

    @format_key()
    def hincrbyfloat(self, key, name, amount=1.0):
        """
        time complexity O(1)
        """
        return self._redis.hincrbyfloat(key, name, amount)

    @format_key()
    def hkeys(self, key):
        """
        time complexity O(n)
        """
        return self._redis.hkeys(key)

    @format_key()
    def hlen(self, key):
        """
        time complexity O(1)
        """
        return self._redis.hlen(key)

    @format_key()
    def hset(self, key, name, value):
        """
        time complexity O(1)
        """
        return self._redis.hset(key, name, value)

    @format_key()
    def hsetnx(self, key, name, value):
        """
        time complexity O(1)
        """
        return self._redis.hsetnx(key, name, value)

    @format_key()
    def hmset(self, key, mapping):
        """
        time complexity O(n)
        """
        return self._redis.hmset(key, mapping)

    @format_key()
    def hmget(self, key, names, *args):
        """
        time complexity O(n)
        """
        return self._redis.hmget(key, names, *args)

    @format_key()
    def hvals(self, key):
        """
        time complexity O(n)
        返回hash表所有的value
        """
        return self._redis.hvals(key)

    @format_key()
    def hstrlen(self, key, name):
        """
        time complexity O(1)
        """
        return self._redis.hstrlen(key, name)
# {
    """=================================hash-end==================================="""

    """=================================set-start================================="""
# }
    @format_key()
    def sadd(self, key, *values):
        """
        time complexity O(n) n为values长度
        """
        return self._redis.sadd(key, *values)

    @format_key()
    def scard(self, key):
        """
        time complexity O(n) set长度
        返回set大小
        """
        return self._redis.scard(key)

    @format_args()
    def sdiff(self, key, *args):
        """
        time complexity O(n) N 是所有给定集合的成员数量之和
        返回差集成员的列表。
        """
        return self._redis.sdiff(key, *args)

    @format_args()
    def sdiffstore(self, dest, keys, *args):
        """
        time complexity O(n) N 是所有给定集合的成员数量之和
        返回差集成员的数量。并将结果保存到dest这个set里面
        """
        return self._redis.sdiffstore(dest, keys, *args)

    @format_args()
    def sinter(self, key, *args):
        """
        time complexity O(N * M), N 为给定集合当中基数最小的集合, M 为给定集合的个数。
        返回交集数据的list
        """
        return self._redis.sinter(key, *args)

    @format_args()
    def sinterstore(self, dest, keys, *args):
        """
        time complexity O(n) N 是所有给定集合的成员数量之和
        返回交集成员的数量。并将结果保存到dest这个set里面
        """
        return self._redis.sinterstore(dest, keys, *args)

    @format_key()
    def sismember(self, key, name):
        """
        time complexity O(1)
        判断name是否在key中
        """
        return self._redis.sismember(key, name)

    @format_key()
    def smembers(self, key):
        """
        time complexity O(n)
        返回set里面所有成员
        """
        return self._redis.smembers(key)

    @format_two_key()
    def smove(self, src, dst, value):
        """
        time complexity O(1)
        将value从src移动到dst原子性操作
        """
        return self._redis.smove(src, dst, value)

    @format_key()
    def spop(self, key, count=None):
        """
        time complexity O(n) n
        默认随机删除一条, 删除count条
        """
        return self._redis.spop(key, count)

    @format_key()
    def srandmember(self, key, number=None):
        """
        time complexity O(n) n
        默认随机返回一条, 返回number条
        """
        return self._redis.srandmember(key, number)

    @format_key()
    def srem(self, key, *values):
        """
        time complexity O(n) n为values长度
        移除key里面values
        """
        return self._redis.srem(key, *values)

    @format_args()
    def sunion(self, keys, *args):
        """
        time complexity O(N), N 是所有给定集合的成员数量之和
        返回并集
        """
        return self._redis.sunion(keys, *args)

    @format_args()
    def sunionstore(self, dest, keys, *args):
        """
        time complexity O(N), N 是所有给定集合的成员数量之和。
        求并集并保存
        """
        return self._redis.sunionstore(dest, keys, *args)

    @format_key()
    def sscan(self, key, cursor=0, match=None, count=None):
        """
        time complexity O(1)
        同scan只是这个是set使用
        """
        return self._redis.sscan(key, cursor, match, count)
# {
    """==================================set-end=================================="""

    """===============================SortedSet-start============================="""
# }
    @format_key()
    def zadd(self, key, mapping, nx=False, xx=False, ch=False, incr=False):
        """
        time complexity O(M*log(N)); N is the sorted set's cardinality,
        M the number of members successfully added.
        Arguments:
            mapping(dict): {value: score}
            XX(bool): only update existing members; never add new ones.
            NX(bool): never update existing members; only add new ones.
            CH(bool): return the count of changed members instead of only
                      newly added ones (CH = "changed"); members whose
                      score did not change are not counted.
            INCR(bool): behave like ZINCRBY, incrementing the member score.
        Returns:
            result(int): number of members inserted
        """
        # Legacy redis-py-cluster exposes zadd(name, **{member: score}),
        # hence the keyword expansion on the cluster path.
        if self.cluster_flag:
            return self._redis.zadd(key, **mapping)
        return self._redis.zadd(key, mapping, nx, xx, ch, incr)

    @format_key()
    def zcard(self, key):
        """
        time complexity O(1)
        返回zset()基数
        """
        return self._redis.zcard(key)

    @format_key()
    def zcount(self, key, minz, maxz):
        """
        time complexity O(log(N)), N 为有序集的基数。
        返回score在min和max之间的value的个数
        """
        return self._redis.zcount(key, minz, maxz)

    @format_key()
    def zincrby(self, key, amount, value):
        """
        time complexity O(log(N)); N is the sorted set's cardinality.
        Increment the score of ``value`` by ``amount`` (may be negative).
        """
        # redis-py-cluster keeps the legacy (key, value, amount) order,
        # while modern redis-py expects (key, amount, value).
        if self.cluster_flag:
            return self._redis.zincrby(key, value, amount)
        return self._redis.zincrby(key, amount, value)

    @format_key_keys()
    def zinterstore(self, dest, keys, aggregate=None):
        """
        time complexity O(N*K)+O(M*log(M)), N 为给定 key 中基数最小的有序集, K 为给定有序集的数量, M 为结果集的基数。
        求交集并按照aggregate做处理之后保存到dest。默认是求和
        Arguments:
            aggregate(str):sum 和, min 最小值, max 最大值
        返回新zset里面的value个数
        """
        return self._redis.zinterstore(dest, keys, aggregate)

    @format_key()
    def zrange(self, key, start, end, desc=False, withscores=False,
               score_cast_func=float):
        """
        time complexity O(log(N)+M), N 为有序集的基数,而 M 为结果集的基数。
        Arguments:
            start,有序集合索引起始位置(非分数)
            end,有序集合索引结束位置(非分数)
            desc,排序规则,默认按照分数从小到大排序
            withscores,是否获取元素的分数,默认只获取元素的值
            score_cast_func,对分数进行数据转换的函数
        """
        return self._redis.zrange(key, start, end, desc, withscores, score_cast_func)

    @format_key()
    def zrevrange(self, key, start, end, withscores=False, score_cast_func=float):
        """
        time complexity O(log(N)+M), N 为有序集的基数,而 M 为结果集的基数。
        Arguments:
            start,有序集合索引起始位置(非分数)
            end,有序集合索引结束位置(非分数)
            withscores,是否获取元素的分数,默认只获取元素的值
            score_cast_func,对分数进行数据转换的函数
        """
        return self._redis.zrevrange(key, start, end, withscores, score_cast_func)

    @format_key()
    def zrangebyscore(self, key, minz, maxz, start=None, num=None, withscores=False, score_cast_func=float):
        """
        time complexity O(log(N)+M), N 为有序集的基数,而 M 为结果集的基数。
        有序集成员按 score 值递增(从小到大)次序排列。
        """
        return self._redis.zrangebyscore(key, minz, maxz, start, num, withscores, score_cast_func)

    @format_key()
    def zrevrangebyscore(self, key, minz, maxz, start=None, num=None, withscores=False, score_cast_func=float):
        """
        time complexity O(log(N)+M), N 为有序集的基数,而 M 为结果集的基数。
        有序集成员按 score 值递减(从大到小)次序排列。
        """
        return self._redis.zrevrangebyscore(key, minz, maxz, start, num, withscores, score_cast_func)

    @format_key()
    def zrangebylex(self, key, minz, maxz, start=None, num=None):
        """
        time complexity O(log(N)+M), N 为有序集的基数,而 M 为结果集的基数。
        有序集成员按 value 字典序递增(从小到大)次序排列。
        """
        return self._redis.zrangebylex(key, minz, maxz, start, num)

    @format_key()
    def zrevrangebylex(self, key, minz, maxz, start=None, num=None):
        """
        time complexity O(log(N)+M), N 为有序集的基数,而 M 为结果集的基数。
        有序集成员按 value 字典序递减(从大到小)次序排列。
        """
        return self._redis.zrevrangebylex(key, minz, maxz, start, num)

    @format_key()
    def zrank(self, key, value):
        """
        time complexity O(log(N))
        查找zset里面这个value的rank排名从0开始
        """
        return self._redis.zrank(key, value)

    @format_key()
    def zrevrank(self, key, value):
        """
        time complexity O(log(N))
        查找zset里面这个value的rank排名从0开始
        """
        return self._redis.zrevrank(key, value)

    @format_key()
    def zrem(self, key, *values):
        """
        time complexity O(M*log(N)), N 为有序集的基数, M 为被成功移除的成员的数量
        删除zset里面单个或者多个成员
        """
        return self._redis.zrem(key, *values)

    @format_key()
    def zremrangebylex(self, key, minz, maxz):
        """
        time complexity O(log(N)+M), N 为有序集的基数,而 M 为被移除成员的数量。
        按照字典增序范围删除
        """
        return self._redis.zremrangebylex(key, minz, maxz)

    @format_key()
    def zremrangebyrank(self, key, minz, maxz):
        """
        time complexity O(log(N)+M), N 为有序集的基数,而 M 为被移除成员的数量。
        按照rank范围删除
        """
        return self._redis.zremrangebyrank(key, minz, maxz)

    @format_key()
    def zremrangebyscore(self, key, minz, maxz):
        """
        time complexity O(log(N)+M), N 为有序集的基数,而 M 为被移除成员的数量。
        按照score范围删除
        """
        return self._redis.zremrangebyscore(key, minz, maxz)

    @format_key()
    def zscore(self, key, value):
        """
        time complexity O(log(N))
        查找zset里面这个value的score排名从0开始
        """
        return self._redis.zscore(key, value)

    @format_key_keys()
    def zunionstore(self, dest, keys, aggregate=None):
        """
        time complexity O(N)+O(M log(M)), N 为给定有序集基数的总和, M 为结果集的基数。
        求并集保存
        """
        return self._redis.zunionstore(dest, keys, aggregate)

    @format_key()
    def zscan(self, key, cursor=0, match=None, count=None, score_cast_func=float):
        """
        time complexity O(1)
        同SCAN
        """
        return self._redis.zscan(key, cursor, match, count, score_cast_func)

    def zlexcount(self, key, minz, maxz):
        """
        time complexity O(log(N)),其中 N 为有序集合包含的元素数量。
        min -负无限  [闭空间不包括自己 (开空间包括自己
        max +正无限 [a, (c
        """
        return self._redis.zlexcount(key, minz, maxz)
# {
    """===============================SortedSet-end================================="""
    """===============================HyperLogLog-start==============================="""
# }
    @format_key()
    def pfadd(self, key, *values):
        """
        time complexity O(n)
        """
        return self._redis.pfadd(key, *values)

    @format_args()
    def pfcount(self, *sources):
        """
        time complexity O(1)
        计算key的基数
        """
        return self._redis.pfcount(*sources)

    @format_args()
    def pfmerge(self, dest, *sources):
        """
        time complexity O(n) 其中 N 为被合并的 HyperLogLog 数量,不过这个命令的常数复杂度比较高
        合并HyperLogLog
        """
        return self._redis.pfmerge(dest, *sources)
# {
    """===============================HyperLogLog-end================================="""

    """==================================GEO-start===================================="""
# }
    @format_key()
    def geoadd(self, key, *values):
        """
        time complexity O(log(N)) 每添加一个元素的复杂度为 O(log(N)) , 其中 N 为键里面包含的位置元素数量。
        """
        return self._redis.geoadd(key, *values)

    @format_key()
    def geopos(self, key, *values):
        """
        time complexity O(log(N))
        从键里面返回所有给定位置元素的位置(经度和纬度)。
        """
        return self._redis.geopos(key, *values)

    @format_key()
    def geohash(self, key, *values):
        """
        time complexity O(log(N))
        命令返回的 geohash 的位置与用户给定的位置元素的位置一一对应
        """
        return self._redis.geohash(key, *values)

    @format_key()
    def geodist(self, key, place1, place2, unit=None):
        """
        time complexity O(log(N))
        返回两个给定位置之间的距离。
        Argument:
            unit : m: 米,km: 千米,mi: 英里,ft: 英尺
        """
        return self._redis.geodist(key, place1, place2, unit)

    @format_key()
    def georadius(self, key, longitude, latitude, radius, unit=None,
                  withdist=False, withcoord=False, withhash=False, count=None,
                  sort=None, store=None, store_dist=None):
        """
        time complexity O(N+log(M)), 其中 N 为指定半径范围内的位置元素数量, 而 M 则是被返回位置元素的数量。
        以给定的经纬度为中心, 返回键包含的位置元素当中, 与中心的距离不超过给定最大距离的所有位置元素。
        Argument:
            longitude: 经度
            latitude: 纬度
            radius: 距离
            unit: 距离单位
            withdist: 在返回位置元素的同时, 将位置元素与中心之间的距离也一并返回。 距离的单位和用户给定的范围单位保持一致。
            withcoord: 将位置元素的经度和维度也一并返回
            withhash: 以 52 位有符号整数的形式, 返回位置元素经过原始 geohash 编码的有序集合分值。
                      这个选项主要用于底层应用或者调试, 实际中的作用并不大。
            sort: 根据中心的位置排序 ASC,DESC
            count: 取前多少个
            store: 保存
            store_dist: 存储地名和距离
        Return:
            list(list)
            [['Foshan', 109.4922], ['Guangzhou', 105.8065]]
        """
        return self._redis.georadius(key, longitude, latitude, radius, unit, withdist, withcoord,
                                     withhash, count, sort, store, store_dist)

    @format_key()
    def georadiusbymember(self, key, member, radius, unit=None,
                          withdist=False, withcoord=False, withhash=False, count=None,
                          sort=None, store=None, store_dist=None):
        """
        time complexity O(N+log(M)), 其中 N 为指定半径范围内的位置元素数量, 而 M 则是被返回位置元素的数量。
        以给定的经纬度为中心, 返回键包含的位置元素当中, 与中心的距离不超过给定最大距离的所有位置元素。
        Argument:
            member: 位置元素
            radius: 距离
            unit: 距离单位
            withdist: 在返回位置元素的同时, 将位置元素与中心之间的距离也一并返回。 距离的单位和用户给定的范围单位保持一致。
            withcoord: 将位置元素的经度和维度也一并返回
            withhash: 以 52 位有符号整数的形式, 返回位置元素经过原始 geohash 编码的有序集合分值。 这个选项主要用于底层应用或者调试, 实际中的作用并不大。
            sort: 根据中心的位置排序 ASC,DESC
            count: 取前多少个
            store: 保存
            store_dist: 存储地名和距离
        Return:
            list(list)
            [['Foshan', 109.4922], ['Guangzhou', 105.8065]]
        """
        return self._redis.georadiusbymember(key, member, radius, unit, withdist, withcoord,
                                             withhash, count, sort, store, store_dist)

# {
    """==================================GEO-end======================================"""
class KeyValueStore(object):
    """Redis-backed persistence layer for devices, assets, playlists,
    stats, alerts and pre-processed asset lists.

    Models are stored as JSON blobs (serialized with ``dumps`` using
    ``datetime_serializer``) and wrapped in ``Struct`` on the way out.
    """

    def __init__(self):
        # Connection parameters come from the application settings.
        self.data = StrictRedis(host = settings.redis.server.host, port = settings.redis.server.port)
        log.debug("Connected to REDIS(%s, %s)" % (settings.redis.server.host, settings.redis.server.port))

    # --- low-level key/value helpers ---

    def _get_value(self, key):
        """Return the raw value stored at *key*, or None if missing."""
        return self.data.get(key)

    def _set_value(self, key, value, seconds=None):
        """Store *value* at *key*; optionally expire after *seconds*."""
        self.data.set(key, value)
        if seconds is not None:
            self.data.expire(key, seconds)

    def _delete_key(self, key):
        """Delete *key*; missing keys are ignored by Redis."""
        self.data.delete(key)

    def _search_keys(self, pattern):
        # NOTE(review): KEYS scans the whole keyspace (O(N)); fine for a
        # small dataset, consider SCAN if the keyspace grows.
        return self.data.keys(pattern)

    def _get_model(self, model_pf, model_id):
        """Load one JSON model stored under prefix+id; None if missing."""
        value = self._get_value(model_pf+model_id)
        if value is None:
            return None
        return Struct(loads(self._get_value(model_pf+model_id)))

    def _set_model(self, model_pf, model_id, model_value, seconds=None):
        """Serialize and store a model under prefix+id."""
        self._set_value(model_pf+model_id, dumps(model_value, default=datetime_serializer), seconds)

    def _list_model(self, model_pf):
        """Load every model whose key starts with *model_pf*."""
        return [Struct(loads(self._get_value(key))) for key in self._search_keys(model_pf+'*')]

    def _get_list_models(self, list_pf, list_id):
        """Load every JSON model held in the Redis list prefix+id."""
        return [Struct(loads(value)) for value in self.data.lrange(list_pf+list_id, 0, -1)]

    def _get_list_scalars(self, list_pf, list_id):
        """Return the raw (non-deserialized) items of the list prefix+id."""
        return [value for value in self.data.lrange(list_pf+list_id, 0, -1)]

    def _pop_list_scalars(self, list_pf, list_id):
        """Destructively drain the list prefix+id, returning all items."""
        scalars = []
        scalar = True

        while scalar:
            scalar = self.data.lpop(list_pf+list_id)
            if scalar:
                scalars += [scalar]

        return scalars

    def _push_list(self, list_pf, list_id, model_value, trim_count):
        """Push a value onto the list head, keeping at most *trim_count* items."""
        if not isinstance(model_value, unicode):
            model_value = dumps(model_value)

        self.data.lpush(list_pf+list_id, model_value)
        # Bug fix: honour the caller-supplied trim_count instead of the
        # hard-coded MAX_STAT_ITEMS (alert lists were being trimmed to the
        # stats limit).
        self.data.ltrim(list_pf+list_id, 0, trim_count-1)

    # Devices

    def get_device(self, mac_address):
        """Return the device model for a MAC address (colon-insensitive)."""
        return self._get_model(DEVICE_PF, mac_address.replace(":","").upper())

    def set_device(self, mac_address, device):
        self._set_model(DEVICE_PF, mac_address.replace(":","").upper(), device)

    def delete_device(self, mac_address):
        # The device's pre-processed assetlist is removed alongside it.
        self._delete_key(DEVICE_PF+mac_address.replace(":","").upper())
        self._delete_key(ASSETLIST_PF+mac_address.replace(":","").upper())

    def list_devices(self):
        return self._list_model(DEVICE_PF)

    # Assets

    def get_asset(self, guid):
        return self._get_model(ASSET_PF, guid)

    def set_asset(self, guid, asset):
        self._set_model(ASSET_PF, guid, asset)

    def delete_asset(self, guid):
        self._delete_key(ASSET_PF+guid)

    def list_assets(self):
        return self._list_model(ASSET_PF)

    # Playlists

    def get_playlist(self, name):
        return self._get_model(PLAYLIST_PF, name)

    def set_playlist(self, name, playlist):
        self._set_model(PLAYLIST_PF, name, playlist)

    def delete_playlist(self, name):
        self._delete_key(PLAYLIST_PF+name)

    def list_playlists(self):
        return self._list_model(PLAYLIST_PF)

    # Stats

    def push_stats(self, mac_address, stats):
        self._push_list(STATS_PF, mac_address, stats, MAX_STAT_ITEMS)

    def list_stats(self, mac_address):
        return self._get_list_models(STATS_PF, mac_address)

    # Alerts

    def push_alert(self, mac_address, uri):
        self._push_list(ALERTS_PF, mac_address, uri, MAX_ALERT_ITEMS)

    def list_alerts(self, mac_address):
        return self._get_list_scalars(ALERTS_PF, mac_address)

    def pop_alerts(self, mac_address):
        """Drain and return all pending alerts for a device."""
        return self._pop_list_scalars(ALERTS_PF, mac_address)


    # Assetlists - preprocessed Playlists

    def get_assetlist(self, mac_address):
        return self._get_model(ASSETLIST_PF, mac_address.replace(":","").upper())

    def set_assetlist(self, mac_address, assetlist):
        self._set_model(ASSETLIST_PF, mac_address.replace(":","").upper(), assetlist)

    def delete_assetlist(self, mac_address):
        self._delete_key(ASSETLIST_PF+mac_address.replace(":","").upper())

    def list_assetlists(self):
        # Each returned assetlist is stamped with the current time;
        # assumes Struct supports item assignment — TODO confirm.
        assetlists = self._list_model(ASSETLIST_PF)
        for assetlist in assetlists:
            assetlist['timestamp'] = datetime.datetime.now().timestamp()
        return assetlists

    # MEO list with expiration

    def get_meolist(self, playlist_name, index):
        return self._get_model(MEOKANAL_PF, playlist_name.replace(":","") + "_" + str(index))

    def set_meolist(self, playlist_name, index, seconds, meolist):
        self._set_model(MEOKANAL_PF, playlist_name.replace(":","") + "_" + str(index), meolist, seconds)
Ejemplo n.º 41
0
class RedisAPICacheStore(APICacheStore):
    """API cache store backed by a Redis server.

    Connection parameters come either from ``settings.CACHE_REDIS`` (when
    the config sets ``use_settings``) or from the config's ``parameters``
    mapping. Values are pickled before storage, and every Redis
    connection error is logged and swallowed so a cache outage never
    breaks the caller.
    """

    def __init__(self, *args, **kwargs):
        self.config = kwargs.get('config', {})
        self.ttl = self.config.get('ttl', 300)

        super(RedisAPICacheStore, self).__init__(*args, **kwargs)

        use_settings = self.config.get("use_settings", False)
        redis_settings = settings.CACHE_REDIS if use_settings \
            else self.config.get('parameters')

        host = convert_variable_to_env_setting(redis_settings.get('host', "localhost"))
        port = redis_settings.get('port', 6379)
        db = redis_settings.get('db', 0)
        pw = redis_settings.get('password', None)
        timeout = redis_settings.get('timeout', .3)

        self.redis = StrictRedis(host=host, port=port, db=db,
                                 password=pw, socket_timeout=timeout)

        if self.config.get('use_settings'):
            logger.info("Configuring Face/Off API cache with REDIS using settings.py")
        else:
            logger.info("Configuring Face/Off API cache with REDIS using JSON settings")

        logger.info("Face/off API cache settings: redis://%s:%s/%s with ttl %s" %
                    (host, port, db, self.ttl))

    def retrieve(self, key):
        """Return the unpickled value for *key*, or None on a miss/error."""
        try:
            raw = self.redis.get(key)
            return pickle.loads(raw) if raw is not None else None
        except ConnectionError:
            logger.warning("Got a timeout error trying to get from Redis API Cache", exc_info=True)
            return None

    def store(self, key, value, ttl=None):
        """Pickle and store *value*; apply *ttl* (default self.ttl) when > 0."""
        effective_ttl = self.ttl if ttl is None else ttl
        try:
            self.redis.set(key, pickle.dumps(value))
            if effective_ttl > 0:
                self.redis.expire(key, effective_ttl)
        except ConnectionError:
            logger.warning("Got a timeout error trying to store into Redis API Cache", exc_info=True)

    def invalidate(self, key):
        """Drop *key* from the cache, ignoring connection errors."""
        try:
            self.redis.delete(key)
        except ConnectionError:
            logger.warning("Got a timeout error trying to store invalidate Redis API Cache", exc_info=True)

    def flush(self):
        """Wipe the whole Redis instance (FLUSHALL), ignoring connection errors."""
        try:
            self.redis.flushall()
        except ConnectionError:
            logger.warning("Got a timeout error trying to flush Redis API Cache", exc_info=True)
Ejemplo n.º 42
0
class RedisHelper:
    """Convenience wrapper around StrictRedis used for chart caching.

    Chart cache keys share the ``prefix`` namespace; chart values are
    JSON-serialized with ``dumps``/``loads``.
    """

    prefix = "bc:chart:cache"

    def __init__(self, host=None, port=None, prefix=None):
        """Connect to Redis.

        host/port default to 127.0.0.1:6379 when omitted (previously a
        missing port crashed ``int(None)``). A custom *prefix* overrides
        the class-level namespace (previously the argument was silently
        ignored).
        """
        self._host = host if host is not None else "127.0.0.1"
        self._port = int(port) if port is not None else 6379
        if prefix:
            self.prefix = prefix
        self._redis = StrictRedis(host=self._host, port=self._port)

    def gen_key(self, chart_id):
        """Return the namespaced cache key for *chart_id*."""
        return "%s:%s" % (self.prefix, chart_id)

    def put(self, chart_id, data, expire=2000):
        """Cache *data* (JSON-serialized) for *expire* seconds."""
        key = self.gen_key(chart_id)
        self._redis.set(key, dumps(data))
        self._redis.expire(key, expire)
        return True

    def delete(self, chart_id):
        """Remove a single cached chart."""
        key = self.gen_key(chart_id)
        self._redis.delete(key)

    def deleteN(self, chart_id):
        """Remove every cached key that starts with this chart's key."""
        key = self.gen_key(chart_id)
        keys = self._redis.keys("%s*" % key)
        for k in keys:
            self._redis.delete(k)

    def get(self, chart_id):
        """Return the cached chart data, or {} on a miss."""
        key = self.gen_key(chart_id)
        data = self._redis.get(key)
        return {} if not data else loads(data)

    def hset(self, key, field, value):
        self._redis.hset(key, field, value)

    def hmget(self, key, fields):
        return self._redis.hmget(key, fields)

    def flush(self):
        """Delete every key in the cache namespace in one pipeline."""
        keys = self._redis.keys("%s*" % self.prefix)
        pipe = self._redis.pipeline()
        for key in keys:
            pipe.delete(key)
        pipe.execute()

    # the type of value is list
    def list_push(self, key, data):
        return self._redis.rpush(key, dumps(data))

    # pop the head element of the list
    def list_pop(self, key):
        return self._redis.lpop(key)

    # pop all elements of the list, deleting the list when drained
    def list_all_pop(self, key):
        while True:
            if self.list_size(key) == 0:
                self._redis.delete(key)
                break
            res = self._redis.lpop(key)
            if res:
                yield loads(res)

    # the length of list
    def list_size(self, key):
        return self._redis.llen(key)

    @property
    def redis(self):
        """Expose the underlying StrictRedis client."""
        return self._redis
Ejemplo n.º 43
0
def load_tweets(username):
    """Fetch a user's full timeline, compute word/date statistics and
    cache the JSON result in redis.

    Progress and final results live under ``REDIS_PREFIX.user.<name>``
    as a JSON document whose 'status' field is one of queued / running /
    done / error; error documents carry an HTTP-style code. Returns None
    immediately when a non-queued entry already exists (DoS guard).
    """
    redis = StrictRedis()
    redis_key = "%s.user.%s" % (REDIS_PREFIX, username)
    status = redis.get(redis_key)

    # Prevent DoS
    if status is not None and loads(status)['status'] != 'queued':
        return None
    try:
        created = datetime.utcnow()
        # Bug fix: the key must be 'status' (was 'stats'), otherwise the
        # DoS guard above raises KeyError on the next request.
        status = dumps(dict(status='running', header="Retrieving tweets",
                            message='', code=200))
        redis.set(redis_key, status)
        redis.expire(redis_key, 2*60)
        timeline = get_full_timeline(username)
        start = timeline[-1]
        start = dict(id=start['id'],
                     timestamp=convert_timestamp(start['created_at']))
        end = timeline[0]
        end = dict(id=end['id'],
                   timestamp=convert_timestamp(end['created_at']))
        total = len(timeline)
        formatted_total = format_number(total, locale=LOCALE)
        # Same 'stats' -> 'status' fix as above.
        status = dumps(dict(status='running', header="Processing tweets",
                            message='Received %s tweets' % formatted_total,
                            code=200))
        redis.set(redis_key, status)
        redis.expire(redis_key, 10*60)
        total = dict(int=total, formatted=formatted_total)

        words = get_count(get_words_from_tweets(timeline), limit=300)
        dates = get_count(get_tweet_datetimes(timeline, date_only=True),
                          order_by=0, reverse=False)
        # Average tweets per day (renamed so the builtin sum() is not
        # shadowed).
        day_total = sum(date[1] for date in dates)
        avg = float(day_total) / float(len(dates))
        avg = dict(int=avg, formatted=format_number(avg, locale=LOCALE))

        # Busiest day: highest single-day tweet count.
        _max = sorted(dates, key=lambda x: x[1], reverse=True)[0][1]

        _max = dict(int=_max, formatted=format_number(_max, locale=LOCALE))

        expires = datetime.utcnow() + timedelta(hours=CACHE_HOURS)

        stats = dict(avg_per_day=avg, max_per_day=_max, total=total)

        status = 'done'

        status = dict(status=status, code=200,
                      data=dict(start=start, end=end, dates=dates, words=words,
                                stats=stats, created=created,
                                expires=expires, users=[username],
                                search_terms=[]))
        status = dumps(status)
        redis.set(redis_key, status)
        redis.expire(redis_key, CACHE_HOURS*60*60)

    except TwythonAuthError:
        status = 'error'
        header = "Tweets not available"
        message = "That user's timeline is protected/private"
        status = dict(status=status, header=header, message=message,
                      code=403)
        status = dumps(status)
        redis.set(redis_key, status)
        redis.expire(redis_key, 60*5)

    except ValueError:
        status = 'error'
        header = "User not found"
        message = "The specified Twitter username does not exist"
        status = dict(status=status, header=header, message=message,
                      code=404)
        status = dumps(status)
        redis.set(redis_key, status)
        redis.expire(redis_key, 60*5)

    except TwitterRateLimitException:
        status = 'error'
        header = "Resources exhausted"
        reset = format_timedelta(get_next_reset() - datetime.utcnow(),
                                 locale=LOCALE)
        message = "TweetFreq is under a heavy load. Try again in %s." % reset
        status = dict(status=status, header=header, message=message, code=503)
        status = dumps(status)
        redis.set(redis_key, status)
        redis.expire(redis_key, 2)

    except NoTweetsException:
        status = 'error'
        header = "No tweets found"
        message = ""
        status = dict(status=status, header=header, message=message,
                      code=404)
        status = dumps(status)
        redis.set(redis_key, status)
        redis.expire(redis_key, 60*5)
Ejemplo n.º 44
0
class LibRedis:

    # 默认所有key的前缀
    key_prefix = 'RAWE_'

    # redis 连接对象
    obj_redis = None

    # 默认的过期时间为3天
    DEFAULT_EXPIRE = 259200

    def __init__(self, host='127.0.0.1', port=6379, db=0, prefix=None, charset='utf-8'):
        """
        Initialise the Redis connection.

        *prefix* overrides the default key prefix; *charset* is now
        actually forwarded to StrictRedis (previously the parameter was
        accepted but 'utf-8' was hard-coded).
        """
        if not host or not port:
            # NOTE(review): returning early from __init__ leaves a
            # half-built object (obj_redis unset) — callers will hit
            # AttributeError later; consider raising instead.
            return None

        if prefix:
            self.key_prefix = prefix.strip()
        # construct
        self.obj_redis = StrictRedis(
            host=host, port=port, db=db, charset=charset)

    def key_make(self, keyname=None):
        """
        Return *keyname* with the configured key prefix prepended.

        Falls back to the class default prefix when none was supplied at
        construction time. Returns None for an empty keyname.
        """
        if not keyname:
            return None

        suffix = str(keyname).strip()
        return "{0}{1}".format(self.key_prefix, suffix)

    def set_expire(self, keyname=None):
        """
        Apply the default TTL (DEFAULT_EXPIRE seconds) to *keyname*.

        Invoked by the @wraps_set_expire decorator after write
        operations. Returns None when keyname is falsy, otherwise the
        result of EXPIRE.
        """
        if not keyname:
            return None

        return self.obj_redis.expire(self.key_make(keyname), self.DEFAULT_EXPIRE)

    # --------------------------------------------------------
    # String
    # --------------------------------------------------------

    @wraps_set_expire
    def set(self, keyname=None, value=None):
        """
        Set the value of *keyname*, overwriting any existing value
        regardless of its type.

        Returns the result of SET (True on success), or None when an
        argument is missing.
        """
        if not keyname or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        if isinstance(value, str):
            value = value.strip()

        return self.obj_redis.set(keyname, value)

    def get(self, keyname=None):
        """
        Get the value stored at *keyname*, decoded to str.

        Returns None when the key is missing; Redis raises an error when
        the key holds a non-string type.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.get(keyname)

        return None if not result else bytes.decode(result)

    def delete(self, keyname=None):
        """
        Delete an existing key; non-existent keys are ignored.

        Returns the number of keys removed, or None when keyname is
        falsy.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.delete(keyname)

    @wraps_set_expire
    def append(self, keyname=None, value=None):
        """
        Append *value* to the string stored at *keyname*.

        If the key already holds a string, the value is appended to its
        end; if the key does not exist it is created, exactly as with
        SET. Non-string values are stringified first.

        Returns the length of the string after the append.
        """
        if not keyname or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        if isinstance(value, str):
            value = value.strip()
        else:
            value = str(value)

        return self.obj_redis.append(keyname, value)

    @wraps_set_expire
    def incr(self, keyname=None, expire=None):
        """
        Increment the integer stored at *keyname* by one.

        A missing key is initialised to 0 before the INCR. Values of the
        wrong type, or strings that cannot be parsed as an integer, raise
        an error. Limited to 64-bit signed integers.

        NOTE(review): the *expire* parameter is accepted but never used;
        expiry is applied by the @wraps_set_expire decorator instead.

        Returns the value after the increment.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.incr(keyname, 1)

    @wraps_set_expire
    def incrBy(self, keyname=None, amount=1):
        """
        Increment the integer stored at *keyname* by *amount*.

        A missing key is initialised to 0 before the INCRBY. Non-int
        amounts fall back to 1; negative int amounts are clamped to 0.
        Limited to 64-bit signed integers.

        Returns the value after the increment, or None when keyname is
        falsy or amount is 0/None.
        """
        if not keyname or not amount:
            return None

        keyname = self.key_make(keyname.strip())

        if isinstance(amount, int):
            amount = max(0, amount)
        else:
            amount = 1

        return self.obj_redis.incrby(keyname, amount)

    @wraps_set_expire
    def decr(self, keyname=None):
        """
        Decrement the integer stored at *keyname* by one.

        A missing key is initialised to 0 before the DECR. Wrong types or
        non-numeric strings raise an error. Limited to 64-bit signed
        integers.

        Returns the value after the decrement.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.decr(keyname, 1)

    @wraps_set_expire
    def decrBy(self, keyname=None, amount=1):
        """
        Decrement the integer stored at *keyname* by *amount*.

        A missing key is initialised to 0 before the DECRBY. Wrong types
        or non-numeric strings raise an error. Limited to 64-bit signed
        integers. Returns None when keyname is falsy or amount is 0/None.
        """
        if not keyname or not amount:
            return None

        keyname = self.key_make(keyname.strip())
        amount = int(amount)
        return self.obj_redis.decr(keyname, amount)

    # --------------------------------------------------------
    # Hash 哈希
    # 一个string类型的field和value的映射表,hash特别适合用于存储对象
    # 每个 hash 可以存储 232 - 1 键值对(40多亿)
    # --------------------------------------------------------

    @wraps_set_expire
    def hSet(self, keyname=None, key=None, value=None):
        """
        Set field *key* to *value* in the hash stored at *keyname*.

        Creates the hash when the key does not exist; overwrites an
        existing field.

        Returns 1 when a new field was created, 0 when an existing field
        was overwritten, None on missing arguments.
        """
        if not keyname or not key or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        key = key.strip()
        return self.obj_redis.hset(keyname, key, value)

    @wraps_set_expire
    def hGet(self, keyname=None, key=None):
        """
        Get the value of field *key* in the hash stored at *keyname*.

        Returns the decoded str value, or None when the hash or field is
        missing.
        """
        if not keyname or not key:
            return None

        keyname = self.key_make(keyname.strip())
        key = key.strip()

        result = self.obj_redis.hget(keyname, key)
        if not result:
            return None

        # bytes to str
        return bytes.decode(result)

    @wraps_set_expire
    def hLen(self, keyname=None):
        """
        Return the number of fields in the hash stored at *keyname*
        (0 when the key does not exist), or None when keyname is falsy.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.hlen(keyname)

    @wraps_set_expire
    def hKeys(self, keyname=None):
        """
        Return all field names of the hash stored at *keyname*, decoded
        to str.

        Returns None when the hash is missing or empty.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.hkeys(keyname)
        if not result:
            return None

        # bytes to str
        ret_list = list()
        for v in result:
            ret_list.append(bytes.decode(v))

        return ret_list

    @wraps_set_expire
    def hVals(self, keyname=None):
        """
        Return all field values of the hash stored at *keyname*, decoded
        to str.

        Returns None when the hash is missing or empty.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.hvals(keyname)
        if not result:
            return None

        # bytes to str
        ret_list = list()
        for v in result:
            ret_list.append(bytes.decode(v))

        return ret_list

    @wraps_set_expire
    def hGetAll(self, keyname=None):
        """
        Return every field and value of the hash stored at *keyname* as a
        dict of str -> str.

        Returns None when the hash is missing or empty.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.hgetall(keyname)
        if not result:
            return None

        # bytes to str
        ret_dict = dict()
        for k, v in result.items():
            ret_dict[bytes.decode(k)] = bytes.decode(v)

        return ret_dict

    def hExists(self, keyname=None, key=None):
        """
        Check whether field *key* exists in the hash stored at *keyname*.

        Returns True when present, False when the hash or the field is
        missing, None on missing arguments.
        """
        if not keyname or key is None:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.hexists(keyname, key)

    def hDel(self, keyname=None, *keys):
        """
        Delete one or more fields from the hash stored at *keyname*;
        missing fields are ignored.

        Returns the number of fields actually removed (0 when the hash or
        every field is missing), or None when keyname is falsy.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.hdel(keyname, *keys)

    # --------------------------------------------------------
    # List 列表, 左(Left)为头部,右(Right)为尾部
    # 一个列表最多可以包含 232 - 1 个元素 (4294967295, 每个列表超过40亿个元素)
    # --------------------------------------------------------

    @wraps_set_expire
    def lPush(self, keyname=None, *values):
        """
        Push one or more values onto the head of the list, creating the
        list when the key does not exist. Redis errors when the key holds
        a non-list type.

        Returns the list length after the push, or None when keyname is
        falsy.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.lpush(keyname, *values)

    @wraps_set_expire
    def lPop(self, keyname=None):
        """
        Remove and return the first (head) element of the list, or None
        when the list does not exist.

        NOTE(review): unlike rPop, the raw bytes value is returned
        without decoding to str — confirm whether callers expect bytes
        here.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.lpop(keyname)

    @wraps_set_expire
    def rPush(self, keyname=None, *values):
        """
        Push one or more values onto the tail of the list, creating the
        list when the key does not exist. Redis errors when the key holds
        a non-list type.

        Returns the list length after the push, or None when keyname is
        falsy.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.rpush(keyname, *values)

    @wraps_set_expire
    def rPop(self, keyname=None):
        """
        Remove and return the last (tail) element of the list, decoded to
        str, or None when the list does not exist.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.rpop(keyname)
        if not result:
            return None
        # bytes to str
        return bytes.decode(result)

    @wraps_set_expire
    def lLen(self, keyname=None):
        """
        Return the length of the list (0 when the key does not exist).
        Redis errors when the key holds a non-list type. Returns None
        when keyname is falsy.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.llen(keyname)

    @wraps_set_expire
    def lTrim(self, keyname=None, start=0, end=-1):
        """
        Trim the list so only the elements in [start, end] remain.

        Index 0 is the first element; -1 is the last, -2 the second to
        last, and so on.

        Returns True on success.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.ltrim(keyname, start, end)

    @wraps_set_expire
    def lGetRange(self, keyname=None, start=0, end=-1):
        """
        Return the list elements in [start, end], decoded to str.

        Index 0 is the first element; negative indexes count from the
        tail. Returns None when the range is empty or the key is missing.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.lrange(keyname, start, end)
        if not result:
            return None
        # bytes to str
        ret_list = list()
        for v in result:
            ret_list.append(bytes.decode(v))

        return ret_list

    @wraps_set_expire
    def lRemove(self, keyname=None, value=None, count=1):
        """
        Remove occurrences of *value* from the list.

        count > 0: scan head to tail, removing up to count matches.
        count < 0: scan tail to head, removing up to |count| matches.
        count = 0: remove every match.

        Returns the number of elements removed (0 when the list or the
        value is missing).
        """
        if not keyname or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.lrem(keyname, count, value)

    # --------------------------------------------------------
    # Set 无序集合
    # Set 是 String 类型的无序集合。集合成员是唯一的。
    # 集合是通过哈希表实现的,所以添加,删除,查找的复杂度都是 O(1)
    # 集合中最大的成员数为 232 - 1 (4294967295, 每个集合可存储40多亿个成员)
    # --------------------------------------------------------

    @wraps_set_expire
    def sAdd(self, keyname=None, *values):
        """
        Add one or more members to the set, ignoring members already
        present. Creates the set when the key does not exist; Redis
        errors when the key holds a non-set type.
        """
        if not keyname:
            return None
        keyname = self.key_make(keyname.strip())
        return self.obj_redis.sadd(keyname, *values)

    @wraps_set_expire
    def sCard(self, keyname=None):
        """
        Return the number of members in the set (0 when the key does not
        exist), or None when keyname is falsy.
        """
        if not keyname:
            return None
        keyname = self.key_make(keyname.strip())
        return self.obj_redis.scard(keyname)

    def sDiff(self, keyname=None, *keys):
        """
        Set difference: members of *keyname* that are in none of *keys*.

        Missing sets are treated as empty; note the order matters (the
        first set minus the following ones).

        Returns a set of decoded str members, or None when the result is
        empty.
        """
        if not keyname:
            return None

        # Bug fix: the base key must also receive the configured prefix,
        # matching sInter/sUnion; previously the raw keyname was used, so
        # the diff was computed against a non-existent (un-prefixed) key.
        keyname = self.key_make(keyname.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        result = self.obj_redis.sdiff(keyname, *other_keys)
        if not result:
            return None

        # bytes to str
        ret_set = set()
        for v in result:
            ret_set.add(bytes.decode(v))

        return ret_set

    @wraps_set_expire
    def sDiffStore(self, store_key=None, key=None, *keys):
        """
        Compute the difference of the given sets and store it in
        *store_key*, overwriting it when it already exists.

        Returns the number of elements in the stored result set.
        """
        if not store_key or not key:
            return None

        store_key = self.key_make(store_key.strip())
        key = self.key_make(key.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        return self.obj_redis.sdiffstore(store_key, key, *other_keys)

    def sInter(self, keyname=None, *keys):
        """
        Set intersection of all the given sets.

        Missing sets are treated as empty, so a single missing key makes
        the whole result empty.

        Returns a set of decoded str members, or None when the result is
        empty.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        result = self.obj_redis.sinter(keyname, *other_keys)
        if not result:
            return None

        # bytes to str
        ret_set = set()
        for v in result:
            ret_set.add(bytes.decode(v))

        return ret_set

    @wraps_set_expire
    def sInterStore(self, store_key=None, key=None, *keys):
        """
        Compute the intersection of the given sets and store it in
        *store_key*, overwriting it when it already exists.

        Returns the number of elements in the stored result set.
        """
        if not store_key or not key:
            return None

        store_key = self.key_make(store_key.strip())
        key = self.key_make(key.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        return self.obj_redis.sinterstore(store_key, key, *other_keys)

    def sUnion(self, keyname=None, *keys):
        """
        Set union of all the given sets.

        Missing sets are treated as empty.

        Returns a set of decoded str members, or None when the result is
        empty.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        result = self.obj_redis.sunion(keyname, *other_keys)
        if not result:
            return None

        # bytes to str
        ret_set = set()
        for v in result:
            ret_set.add(bytes.decode(v))

        return ret_set

    @wraps_set_expire
    def sUnionStore(self, store_key=None, key=None, *keys):
        """
        Compute the union of the given sets and store it in *store_key*,
        overwriting it when it already exists.

        Returns the number of elements in the stored result set.
        """
        if not store_key or not key:
            return None

        store_key = self.key_make(store_key.strip())
        key = self.key_make(key.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        return self.obj_redis.sunionstore(store_key, key, *other_keys)

    @wraps_set_expire
    def sIsMember(self, keyname=None, value=None):
        """
        Check whether *value* is a member of the set.

        Returns True when it is, False when it is not or the key does not
        exist, None on missing arguments.
        """
        if not keyname or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.sismember(keyname, value)

    @wraps_set_expire
    def sMembers(self, keyname=None):
        """
        Return every member of the set, decoded to str.

        A missing key is treated as an empty set; returns None in that
        case.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.smembers(keyname)

        if not result:
            return None
        # bytes to str
        ret_set = set()
        for v in result:
            ret_set.add(bytes.decode(v))

        return ret_set

    @wraps_set_expire
    def sRem(self, keyname=None, *values):
        """
        Remove one or more members from the set; missing members are
        ignored. Redis errors when the key holds a non-set type.

        Returns the number of members actually removed, not counting the
        ignored ones.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.srem(keyname, *values)

    @wraps_set_expire
    def sPop(self, keyname=None):
        """
        Remove and return one random member of the set, decoded to str.

        Returns None when the set does not exist or is empty.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.spop(keyname)

        # bytes to str
        return None if not result else bytes.decode(result)

    @wraps_set_expire
    def sRandMember(self, keyname=None, count=1):
        """
        Return random members of the set without modifying it.

        A positive *count* smaller than the set size yields that many
        distinct members; a count >= the set size yields the whole set.

        NOTE(review): the code clamps negative counts to 0 (which makes
        Redis return an empty list, so this method returns None), even
        though SRANDMEMBER itself supports negative counts with repeated
        members — confirm whether that was intended. Non-int counts fall
        back to 1.

        Returns a list of decoded str members, or None when empty.
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())

        if isinstance(count, int):
            count = max(0, count)
        else:
            count = 1

        result = self.obj_redis.srandmember(keyname, count)

        if not result:
            return None

        # bytes to str
        ret_list = list()
        for v in result:
            ret_list.append(bytes.decode(v))

        return ret_list

    # --------------------------------------------------------
    # Zset( sorted set ) 有序集合
    # 有序集合和集合一样也是string类型元素的集合,且不允许重复的成员
    # 有序集合的成员是唯一的,但分数(score)却可以重复
    # 集合是通过哈希表实现的,所以添加,删除,查找的复杂度都是O(1)
    # 集合中最大的成员数为 2^32 - 1 (4294967295, 每个集合可存储40多亿个成员)
    # --------------------------------------------------------

    @wraps_set_expire
    def zAdd(self, keyname=None, **kwargs):
        """Add one or more member/score pairs to the sorted set at *keyname*.

        Existing members get their score updated and are re-inserted so
        their position stays correct.  A missing key is created first;
        Redis errors when the key holds another type.  (Before Redis 2.4,
        ZADD accepted only a single element.)

        Returns the count of newly added members (updates of existing
        members are not counted), or None for a falsy *keyname*.

        **kwargs: member1=score1, member2=score2, ...
        """
        if not keyname:
            return None
        # NOTE(review): kwargs-style zadd matches redis-py 2.x; redis-py
        # 3.x expects a mapping argument -- confirm the pinned version.
        return self.obj_redis.zadd(self.key_make(keyname.strip()), **kwargs)

    def zRangeByScore(self, keyname=None, min=None, max=None, withscores=False):
        """Return members of the sorted set at *keyname* with scores in
        [min, max], ordered by ascending score.

        Returns a list of member strings, or a {member: score} dict when
        *withscores* is true; None for a falsy key or an empty result.
        """
        if not keyname:
            return None

        full_key = self.key_make(keyname.strip())
        raw = self.obj_redis.zrangebyscore(
            full_key, min, max, withscores=withscores)

        if not raw:
            return None

        # Redis returns bytes; decode before handing back.
        if withscores:
            return {bytes.decode(member): score for member, score in raw}
        return [bytes.decode(member) for member in raw]

    def zRevRangeByScore(self, keyname=None, max=None, min=None, withscores=False):
        """Return members of the sorted set at *keyname* with scores in
        [min, max], ordered by descending score (ties in reverse
        lexicographical order).

        Returns a list of member strings, or a {member: score} dict when
        *withscores* is true; None for a falsy key or an empty result.
        """
        if not keyname:
            return None

        full_key = self.key_make(keyname.strip())
        raw = self.obj_redis.zrevrangebyscore(
            full_key, max, min, withscores=withscores)

        if not raw:
            return None

        # Redis returns bytes; decode before handing back.
        if withscores:
            return {bytes.decode(member): score for member, score in raw}
        return [bytes.decode(member) for member in raw]

    def zRank(self, keyname=None, member=None):
        """Return the rank of *member* in ascending-score order.

        Rank 0 is the lowest score.  Returns None when *keyname* is
        falsy, *member* is None, or the member is not in the set.
        """
        if not keyname or member is None:
            return None
        return self.obj_redis.zrank(self.key_make(keyname.strip()), member)

    def zRevRank(self, keyname=None, member=None):
        """Return the rank of *member* in descending-score order.

        Rank 0 is the highest score.  Returns None when *keyname* is
        falsy, *member* is None, or the member is not in the set.
        """
        if not keyname or member is None:
            return None
        return self.obj_redis.zrevrank(self.key_make(keyname.strip()), member)

    def zRange(self, keyname=None, start=None, end=None, withscores=False):
        """Return members at positions [start, end] of the sorted set,
        ordered by ascending score (ties in lexicographical order).

        Returns a list of member strings, or a {member: score} dict when
        *withscores* is true; None for a falsy key or an empty result.
        """
        if not keyname:
            return None

        full_key = self.key_make(keyname.strip())
        raw = self.obj_redis.zrange(
            full_key, start, end, withscores=withscores)

        if not raw:
            return None

        # Redis returns bytes; decode before handing back.
        if withscores:
            return {bytes.decode(member): score for member, score in raw}
        return [bytes.decode(member) for member in raw]

    def zRevrange(self, keyname=None, start=None, end=None, withscores=False):
        """Return members at positions [start, end] of the sorted set,
        ordered by descending score (ties in reverse lexicographical
        order).

        Returns a list of member strings, or a {member: score} dict when
        *withscores* is true; None for a falsy key or an empty result.
        """
        if not keyname:
            return None

        full_key = self.key_make(keyname.strip())
        raw = self.obj_redis.zrevrange(
            full_key, start, end, withscores=withscores)

        if not raw:
            return None

        # Redis returns bytes; decode before handing back.
        if withscores:
            return {bytes.decode(member): score for member, score in raw}
        return [bytes.decode(member) for member in raw]

    def zRem(self, keyname, *member):
        """Remove one or more members from the sorted set at *keyname*.

        Absent members are ignored; Redis errors when the key holds a
        non-zset value.  (Before Redis 2.4, ZREM accepted only a single
        element.)  Returns the number of members actually removed, or
        None when *keyname* is falsy.
        """
        if not keyname:
            return None
        return self.obj_redis.zrem(self.key_make(keyname.strip()), *member)

    def zRemRangeByRank(self, keyname=None, min=None, max=None):
        """Remove members whose ascending rank falls within [min, max].

        Returns the number of members removed, or None for a falsy key.
        """
        if not keyname:
            return None
        return self.obj_redis.zremrangebyrank(self.key_make(keyname.strip()), min, max)

    def zRemrangebyscore(self, keyname=None, min=None, max=None):
        """Remove members whose score falls within [min, max].

        Returns the number of members removed, or None for a falsy key.
        """
        if not keyname:
            return None
        return self.obj_redis.zremrangebyscore(self.key_make(keyname.strip()), min, max)

    def zCard(self, keyname=None):
        """Return the cardinality of the sorted set at *keyname*.

        Redis reports 0 for a missing key; None is returned for a falsy
        *keyname*.
        """
        if not keyname:
            return None
        return self.obj_redis.zcard(self.key_make(keyname.strip()))

    def zCount(self, keyname=None, min=None, max=None):
        """Count members of the sorted set whose score lies in [min, max].

        Returns the count, or None for a falsy *keyname*.
        """
        if not keyname:
            return None
        return self.obj_redis.zcount(self.key_make(keyname.strip()), min, max)
Ejemplo n.º 45
0
class Leaderboard(object):
    """
    Main class for leaderboards.

    A leaderboard is a family of Redis sorted sets -- one per active
    time slot in ``ranges`` -- keyed by game and metric.  Scores are
    written with :meth:`set_metric` / :meth:`inc_metric` and read back,
    ranked, via :meth:`leaders` and the friends variants.
    """
    _1_DAY_SECONDS = 60 * 60 * 24
    _1_WEEK_SECONDS = _1_DAY_SECONDS * 7
    _1_MONTH_SECONDS = _1_DAY_SECONDS * 31

    # Constants for specifying range(s) to Leaderboard constructor
    # TODO: make expiration configurable and setup a pruner task
    RANGE_DAILY = TimeRange('d', '%Y%m%d', 3 * _1_DAY_SECONDS, _KEY_DELIMITER)
    RANGE_WEEKLY = TimeRange('w', '%Y%W', 2 * _1_WEEK_SECONDS + 2 * _1_DAY_SECONDS, _KEY_DELIMITER)
    RANGE_MONTHLY = TimeRange('m', '%Y%m', 2 * _1_MONTH_SECONDS + 2 * _1_DAY_SECONDS, _KEY_DELIMITER)
    RANGE_ALLTIME = TimeRange('a', 'a', -1, _KEY_DELIMITER)  # never expired (see set_metric/inc_metric)
    RANGES_ALL = [RANGE_DAILY, RANGE_WEEKLY, RANGE_MONTHLY, RANGE_ALLTIME]

    def __init__(self, game, metric, ranges=RANGES_ALL, reverse=True,
                 timed_ties=False, tie_oldest_wins=True,
                 redis=None):
        """
        :param game: game identifier, embedded in every board key
        :param metric: metric identifier, embedded in every board key
        :param ranges: list of TimeRange slots maintained on each write
        :param reverse: True for sorting by high to low scores
        :param timed_ties: True to use a given timestamp to resolve tie scores, assumes score values are ints
        :param tie_oldest_wins: True if the earlier time wins
        :param redis: optional StrictRedis client; a default client is
            created when omitted
        """
        self.game = game
        self.metric = metric
        self.ranges = ranges
        self.reverse = reverse
        self.timed_ties = timed_ties
        self.tie_oldest_wins = tie_oldest_wins

        if not redis:
            self.r = StrictRedis()
        else:
            self.r = redis

    def _board_key(self, range, slots_ago=0):
        """
        Board keys are of the format:
        /leaders/{game}/{metric}/{range_code}/{range_slot}
        e.g. /combat/highscore/d/20130207
        """
        # For past slots ask the TimeRange for that slot's start date;
        # otherwise format "now" into the current slot.
        if slots_ago != 0:
            d = range.date_range(slots_ago)[0]
        else:
            d = datetime.utcnow()
        return _KEY_DELIMITER.join(["leaders", self.game, self.metric,
                                    range.format(d)])

    def _hashlist(self, l):
        """
        hash from a list for creating unique temp zset keys
        """
        # NOTE(review): sha1.update() requires bytes on Python 3; this
        # loop (and str(time.time()) below) only works where str items
        # are acceptable, i.e. Python 2 -- confirm target interpreter.
        h = hashlib.sha1()
        for i in l:
            h.update(i)
        h.update(str(time.time()))
        return h.hexdigest()

    def _range(self, key, start, end):
        # Fetch [start, end] from the board with scores as floats,
        # highest score first when self.reverse is set.
        if self.reverse:
            return self.r.zrevrange(key, start, end, withscores=True, score_cast_func=float)
        else:
            return self.r.zrange(key, start, end, withscores=True, score_cast_func=float)

    def _add_ranks(self, leaders, offset=0):
        """
        Calculate ranks and update the given leader list to include them.
        Ranks start at 1.
        """
        with_ranks = [Leader(m, s, rank, t) for (m, s, t), rank in zip(leaders, itertools.count(offset + 1))]
        return with_ranks

    def _dt_to_ts(self, ts):
        """
        Ensure we are using a UNIX timestamp
        """
        if isinstance(ts, datetime):
            return (ts - datetime(1970, 1, 1)).total_seconds()
        else:
            return ts

    def _encode_value_with_time(self, value, ts):
        """
        Redis will rank members with identical scores lexicographically. Often this is not
        what we want for a leaderboard. Using the timed_ties option, we will store the
        timestamp in the decimal part of the float score and thereby use it for tie-breaking.
        tie_oldest_wins controls whether older or newer timestamps get ranked higher.
        """
        if not ts:
            ts = time.time()
        else:
            ts = self._dt_to_ts(ts)
        if self.reverse == self.tie_oldest_wins:
            # invert the timestamp for proper ordering
            ts = 3000000000 - ts
        # Scale the timestamp into the fractional digits of the score.
        to_dec = 0.0000000001
        return float(value) + (ts * to_dec)

    def _decode_value_with_time(self, combo):
        # Inverse of _encode_value_with_time: split the combined float
        # back into the integer value and the embedded timestamp.
        from_dec = 10000000000
        value = int(combo)
        ts = (combo - value) * from_dec
        if self.reverse == self.tie_oldest_wins:
            ts = datetime.utcfromtimestamp(3000000000 - ts)
        return value, ts

    def _leaders_with_ranks(self, key, offset, end):
        # Returns (total member count of the board, Leader tuples for
        # [offset, end] with ranks starting at offset + 1).
        total = self.r.zcard(key)
        l = self._range(key, offset, end)
        if self.timed_ties:
            l = [((m,) + self._decode_value_with_time(s)) for (m, s) in l]
        else:
            l = [(m, s, 0) for (m, s) in l]  # 0 stands in for "no timestamp"
        log.info(l)
        with_ranks = self._add_ranks(l, offset)
        return total, with_ranks

    def set_metric(self, user, value, ts=None):
        """
        Set a new peak value for this user, e.g. high score
        """
        if self.timed_ties:
            value = self._encode_value_with_time(value, ts)

        for r in self.ranges:
            key = self._board_key(r)
            # NOTE(review): positional (key, score, member) zadd matches
            # redis-py 2.x; 3.x expects a mapping -- confirm pinned version.
            self.r.zadd(key, value, user)
            if r != self.RANGE_ALLTIME:
                self.r.expire(key, r.expiration)

    def inc_metric(self, user, value, ts=None):
        """
        Increment the current value for this user, e.g. total earned
        """
        if ts:
            log.warn('inc_metric: timestamps not supported yet')

        for r in self.ranges:
            key = self._board_key(r)
            # NOTE(review): (key, member, amount) argument order matches
            # redis-py 2.x zincrby; 3.x swapped member/amount -- confirm.
            self.r.zincrby(key, user, value)
            if r != self.RANGE_ALLTIME:
                self.r.expire(key, r.expiration)

    def leaders(self, range, limit=-1, offset=0, id=None, slots_ago=0):
        """
        Retrieve a list of global leaders.

        :param range: The TimeRange to query
        :param limit: Maximum number of entries to return
        :param offset: Rank to start at, ignored if id is provided
        :param id: Member to center the range of entries around, i.e. "leaders near me"
        :param slots_ago: number of time slots prior, e.g. 1 for yesterday, last week, etc.
        """
        key = self._board_key(range, slots_ago)

        if id:
            if self.reverse:
                rank = self.r.zrevrank(key, id)
            else:
                rank = self.r.zrank(key, id)
            log.debug('uid: %r, rank: %r', id, rank)
            if rank is None:
                # Unknown member: fall back to the top of the board.
                log.warn('specified id %r not found in board %r', id, key)
                rank = 0
            # Center the window on the member's rank.
            # NOTE(review): "limit / 2" below is float division on
            # Python 3 -- looks written for Python 2; confirm interpreter.
            offset = max(0, rank - int(round(limit / 2.0)) + 1)
            end = rank + limit / 2 if limit > 0 else -1
        else:
            end = offset + limit - 1 if limit > 0 else -1

        total, with_ranks = self._leaders_with_ranks(key, offset, end)
        start, end = range.date_range(slots_ago)
        return Leaders(total, start, end, with_ranks)

    def leaders_friends_list(self, friends, range, limit=-1, offset=0, slots_ago=0):
        """
        retrieve a list of leaders from the given friends list
        """
        # create a temp zset of friends to intersect w/global list
        # todo: allow for caching the friend list via config
        tmpid = self._hashlist(friends)
        friends_key = 'friends_' + tmpid
        pipe = self.r.pipeline()
        for f in friends:
            pipe.zadd(friends_key, 0, f)
        pipe.execute()

        l = self.leaders_friends_key(friends_key, range, limit, offset, slots_ago)
        self.r.delete(friends_key)
        return l

    def leaders_friends_key(self, friends_key, range, limit=-1, offset=0, slots_ago=0):
        """
        Retrieve a list of leaders from the given friends list
        """
        key = self._board_key(range, slots_ago)
        inter_key = 'inter_' + friends_key + "_" + key

        # Intersect the friend zset with the global board into a temp
        # key; friend members carry score 0, so the summed scores are
        # the board scores.
        self.r.zinterstore(inter_key, [key, friends_key])
        end = offset + limit if limit > 0 else -1

        total, with_ranks = self._leaders_with_ranks(inter_key, offset, end)

        self.r.delete(inter_key)
        start, end = range.date_range(slots_ago)
        return Leaders(total, start, end, with_ranks)

    def clear(self, range, slots_ago=0):
        """
        Delete the board for the given range/slot.
        """
        key = self._board_key(range, slots_ago)
        self.r.delete(key)

    def clear_all(self):
        # TODO: track and clear all prior slots
        for range in self.ranges:
            self.clear(range)
Ejemplo n.º 46
0
class Redabas(object):
    """Redis-backed store for image lists, a cached status feed and
    pubsub "shouts" bridging Redis channels to browser SSE streams."""

    # Lazily-created shared helpers; the class-level None doubles as the
    # "not yet initialised" marker checked in __init__.
    __rdb = None        # StrictRedis connection
    __redis_opt = None  # option dict passed to __init__
    __files = None      # Files() helper for content folders
    __net = None        # Net() helper for url scraping
    __proc = None       # Cycle timer driving next_image()

    def __init__(self, redis_opt):
        """Connect to Redis and build the file/net helpers.

        :param redis_opt: dict holding host/port/db/decode_responses plus
            the prefix/timeout/slice settings used throughout this class
        """
        super().__init__()
        if not self.__rdb:
            self.__rdb = StrictRedis(
                host=redis_opt["host"],
                port=redis_opt["port"],
                db=redis_opt["db"],
                decode_responses=redis_opt["decode_responses"],
            )
            self.__redis_opt = redis_opt
            LOGGER.info("created new redis connection")
        if not self.__files:
            self.__files = Files()
        if not self.__net:
            self.__net = Net()
        if not self.__proc:
            # Start the periodic task that shouts a random image.
            # NOTE(review): assumes Cycle.start() returns the running
            # timer object -- confirm against Cycle's implementation.
            self.__proc = Cycle(redis_opt["image_timeout"], self.next_image).start()
            pass

    def redis_ping(self):
        """Return True when the Redis server answers a PING."""
        if self.__rdb:
            try:
                LOGGER.info("redis ping")
                return self.__rdb.ping()
            except RedisConnectionError as ex:
                LOGGER.error("could not ping redis: %s" % (ex))

        return False

    def get_ropt(self, field):
        """Look up one option value; returns None for unknown fields."""
        if field in self.__redis_opt.keys():
            return self.__redis_opt[field]

    def flush_all(self):
        """Delete every per-folder image list and the status cache."""
        rdbfields = list()
        for folder in self.__files.get_contentsub(full=False):
            rdbfields.append("%s:%s" % (self.__redis_opt["image_prefix"], folder))
        rdbfields.append("%s:feed" % (self.__redis_opt["status_prefix"]))

        for entry in rdbfields:
            self.__rdb.delete(entry)
            LOGGER.info("flushed data for %s" % (entry))

    #

    def get_images(self, folder="public"):
        """gets images from redis"""
        result = list()
        rdbfield = "%s:%s" % (self.__redis_opt["image_prefix"], folder)

        def __readin():
            """reloads db"""
            # Cache miss: rebuild the Redis list from the filesystem.
            self.__rdb.delete(rdbfield)
            for image in self.__files.find_images(folder=folder):
                self.__rdb.rpush(rdbfield, image)
                result.append(image)
            LOGGER.info("rebuilt redis image cache for %s" % (rdbfield))
            return result

        # Unknown folders fall through and return None.
        if folder in self.__files.get_contentsub():
            result = sorted(self.__rdb.lrange(rdbfield, 0, -1))
            return result if result else __readin()

    def __dblocate_image(self, name):
        # Scan every content folder for the image; None when not found.
        for folder in self.__files.get_contentsub(full=False):
            if name in self.get_images(folder):
                return folder

    def locate_image(self, name):
        """locates images"""
        folder = self.__dblocate_image(name)
        if folder:
            LOGGER.info("found requested image %s in folder %s" % (name, folder))
            image = self.__files.jinja_static_file(name, folder=folder)
            if image:
                return image
        # Fall back to the placeholder image.
        LOGGER.info("requested image %s not found" % (name))
        return self.__files.jinja_nullimg()

    def get_imagestats(self):
        """counts images"""
        result = dict()
        for folder in self.__files.get_contentsub(full=False):
            result[folder] = len(self.get_images(folder=folder))
        return result

    def get_all_images(self):
        """suppenkasper needs a list of all images"""
        result = list()
        for folder in self.__files.get_contentsub(full=False):
            result += self.get_images(folder=folder)
        return result

    def get_dict_images(self, folder):
        """Map every image in *folder* to its located static path."""
        result = dict()
        if folder in self.__files.get_contentsub(full=False):
            for image in sorted(self.get_images(folder=folder)):
                result[image] = self.locate_image(image)
            return result

    def get_sort_images(self, folder="unsorted", page=0):
        """batch of images to sort"""
        result = dict()
        if folder in self.__files.get_contentsub(full=False):
            # Page through the sorted list in slices of sort_slices.
            for image in sorted(self.get_images(folder=folder))[
                page * self.__redis_opt["sort_slices"] : page * self.__redis_opt["sort_slices"]
                + self.__redis_opt["sort_slices"]
            ]:
                result[image] = self.locate_image(image)
            return result

    def random_image(self, folder="public"):
        """just one of those images"""
        if folder in self.__files.get_contentsub(full=False):
            images = self.get_images(folder)
            if images:
                result = self.locate_image(choice(images))
                return result if result else self.__files.jinja_nullimg()

    def move_image(self, name, target):
        """moves images"""
        folder = self.__dblocate_image(name)
        if folder and target in self.__files.get_contentsub(full=False):
            sourcefile = self.__files.jinja_static_file(name, folder=folder, full=True)
            targetfile = self.__files.path_join(target, name)
            if sourcefile and targetfile:
                # Keep the per-folder Redis lists in sync with the move.
                rdbsourcefield = "%s:%s" % (self.__redis_opt["image_prefix"], folder)
                rdbtargetfield = "%s:%s" % (self.__redis_opt["image_prefix"], target)
                self.__rdb.rpush(rdbtargetfield, name)
                self.__rdb.lrem(rdbsourcefield, 0, name)
                self.__files.file_rename(sourcefile, targetfile)
                print("<- %s\n-> %s\n[ %s %s ]\n\n" % (sourcefile, targetfile, rdbsourcefield, rdbtargetfield))

    #

    def get_status(self):
        """gets status from redis"""
        result = dict()
        rdbfield = "%s:feed" % (self.__redis_opt["status_prefix"])

        def __readin():
            """reloads db"""
            # Cache miss: scrape the status url and cache it with a TTL.
            # A failed scrape falls through and returns None.
            self.__rdb.delete(rdbfield)
            result = self.__net.url_scrape(self.__redis_opt["status_url"])
            if result:
                self.__rdb.set(rdbfield, result)
                self.__rdb.expire(rdbfield, self.__redis_opt["status_expire"])
                LOGGER.info(
                    "rebuilt redis status cache for %s, again after %i seconds"
                    % (rdbfield, self.__redis_opt["status_expire"])
                )
                return loads(result)

        result = self.__rdb.get(rdbfield)
        return loads(result) if result else __readin()

    #

    def browser_shout(self, channel):
        """Yield SSE-formatted bytes for each message on *channel*."""
        pubsub = self.__rdb.pubsub()
        pubsub.subscribe(channel)
        for event in pubsub.listen():
            LOGGER.info("shouting to browser channel:%s message:%s" % (channel, event["data"]))
            if event["type"] == "message":
                strdata = "data: %s\r\n\r\n" % (event["data"].replace("\n", "<br />"))
                yield strdata.encode("UTF-8")

    def redis_shout(self, channel, message):
        """Publish *message* on the Redis channel and echo it back."""
        LOGGER.info("shouting to redis channel:%s message:%s" % (channel, message))
        self.__rdb.publish(channel, message)
        return message

    #

    def next_image(self):
        """shout next random image from public to redis"""
        image = self.random_image(folder="public")
        if image:
            LOGGER.info("shouting next image: %s" % (image))
            self.redis_shout(self.__redis_opt["image_pubsub"], image)
            print("shouting next image: %s" % (image))
Ejemplo n.º 47
0
class ItemViewsTest(BaseTestCase):
    """View tests for the item CRUD and image upload/delete endpoints.

    Uses a real StrictRedis connection built from pyramid_settings and
    the shared DBSession/fixtures provided by BaseTestCase.
    """

    def __init__(self, *args, **kwargs):
        super(ItemViewsTest, self).__init__(*args, **kwargs)
        self.redis = StrictRedis(
            host=pyramid_settings['redis.host'],
            port=int(pyramid_settings['redis.port']),
            db=int(pyramid_settings['redis.db']))

    def setUp(self):
        super(ItemViewsTest, self).setUp()
        # Expose the redis client where the views expect to find it.
        self.config.registry.redis = self.redis

    def test_items_post(self):
        """
        Test creation of new item by POSTing.
        """
        payload = {"name": "Macbook Air", "type": "TRADE", "quantity": "1",
                   "price": "", "description": "Lightweight lappy.",
                   "reason": "", "is_draft": "y", "uuid": str(uuid.uuid4())}

        request = Request({}, method='POST', body=json.dumps(payload))
        request.registry = self.config.registry

        response = items(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(DBSession.query(Item).count(), 1)

    def test_items_post_failed(self):
        """
        Test that when POSTing malformed payload, it'll raise HTTPBadRequest.
        """
        payload = {"name": "", "type": "", "quantity": "",
                   "price": "", "description": "", "reason": "",
                   "is_draft": "", "uuid": ""}

        request = Request({}, method='POST', body=json.dumps(payload))
        request.registry = self.config.registry

        self.assertRaises(HTTPBadRequest, items, request)
        self.assertEqual(DBSession.query(Item).count(), 0)

    def test_items_put(self):
        """
        Test updating an item.
        """
        self._create_item_status()

        payload = {"name": "Macbook Air", "type": "TRADE", "quantity": "1",
                   "price": "", "description": "Lightweight lappy.",
                   "reason": "", "is_draft": "y", "uuid": str(uuid.uuid4())}

        request = Request({}, method='POST', body=json.dumps(payload))
        request.registry = self.config.registry

        # make the request
        items(request)

        # try retrieving the newly added item
        item = DBSession.query(Item).first()
        self.failUnless(item)

        payload = {"name": "Macbook Pro", "type": "SALE", "quantity": "5",
                   "price": "200.00", "description": "Lightweight lappy.",
                   "reason": "", "is_draft": "n", "id": item.id}

        request.matchdict = {'id': item.id}
        request.method = 'PUT'
        request.body = json.dumps(payload)

        # make the request again
        response = items(request)
        self.assertEqual(response.status_code, 200)

        # reload item and verify every updated field round-tripped
        item = DBSession.query(Item).filter_by(id=item.id).first()
        self.assertEqual(item.name, payload['name'])
        self.assertEqual(item.type, payload['type'])
        self.assertEqual(item.quantity, int(payload['quantity']))
        self.assertEqual(str(item.price), payload['price'])
        self.assertEqual(item.status_id, self.draft_status.id)

    def test_items_put_failed(self):
        """
        Test that updating non-existent item fails.
        """
        payload = {"name": "Macbook Pro", "type": "SALE", "quantity": "5",
                   "price": "200.00", "description": "Lightweight lappy.",
                   "reason": "", "is_draft": "n", "id": 1}

        request = Request({}, method='PUT', body=json.dumps(payload))
        request.registry = self.config.registry
        request.matchdict = {'id': 1}
        request.method = 'PUT'

        self.assertRaises(HTTPBadRequest, items, request)
        self.assertEqual(DBSession.query(Item).count(), 0)

    def test_items_delete(self):
        """
        Test deleting an item.
        """
        # first create an item
        self._create_item_status()
        payload = {"name": "Macbook Air", "type": "TRADE", "quantity": "1",
                   "price": "", "description": "Lightweight lappy.",
                   "reason": "", "is_draft": "y", "uuid": str(uuid.uuid4())}

        request = Request({}, method='POST', body=json.dumps(payload))
        request.registry = self.config.registry

        response = items(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(DBSession.query(Item).count(), 1)

        # try retrieving the newly added item
        item = DBSession.query(Item).first()

        # now send a delete request
        request.method = 'DELETE'
        request.matchdict = {'id': item.id}
        request.body = None
        # Capture the DELETE response; previously the assertion below
        # re-checked the stale response from the initial POST.
        response = items(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(DBSession.query(Item).count(), 0)

    def test_upload_item_images_post_uuid(self):
        """
        Test posting images for an item via uuid.
        """
        self._create_item_status()
        item = Item(name='iPhone', type='TRADE', quantity=1,
            description='A smart phone', status=self.draft_status,
            reason='just because')
        DBSession.add(item)
        DBSession.commit()

        item_uuid = str(uuid.uuid4())
        mock_image = MockFileImage('image1.png')

        # write to disk the dummy image so the view can resize it
        original = '%s.png' % item_uuid
        static_path = pkgr.resource_filename('tradeorsale', 'static')
        image_path = os.path.join(static_path,
            os.path.join('items/images', str(item.id)), original)
        with open(image_path, 'wb') as handle:
            handle.write(mock_image.file.read())
        self.failUnless(os.path.exists(image_path))

        # build request
        mock_image.file.seek(0)
        payload = {"uuid": item_uuid, "image": mock_image}
        request = testing.DummyRequest(post=payload)
        request.registry = self.config.registry

        # set a dummy uuid to redis
        self.redis.hset('item_uuid_to_id', item_uuid, item.id)
        # NOTE(review): this expires the (nonexistent) key named item_uuid,
        # not the 'item_uuid_to_id' hash entry -- confirm intended cleanup.
        self.redis.expire(item_uuid, 3600)

        response = upload_item_images(request)
        self.assertEqual(response.status_code, 200)

        # test that there are 3 images: original, small and medium
        self.assertEqual(DBSession.query(ItemImage).filter_by(item_id=item.id).count(), 3)

    def test_upload_item_images_post_uuid_failed(self):
        """
        Test posting images for an item via uuid with invalid image fails.
        """
        self._create_item_status()
        item = Item(name='iPhone', type='TRADE', quantity=1,
            description='A smart phone', status=self.draft_status,
            reason='just because')
        DBSession.add(item)
        DBSession.commit()

        class DumbMockImage(object):
            # Not a real image payload, so the view's resize should fail.
            file = StringIO('image')
            filename = 'image1.jpg'

        item_uuid = str(uuid.uuid4())
        mock_image = DumbMockImage()

        payload = {"uuid": item_uuid, "image": mock_image}
        request = testing.DummyRequest(post=payload)
        request.registry = self.config.registry

        # set a dummy uuid to redis
        self.redis.hset('item_uuid_to_id', item_uuid, item.id)
        self.redis.expire(item_uuid, 3600)

        self.assertRaises(HTTPBadRequest, upload_item_images, request)

    def test_upload_item_images_post_id(self):
        """
        Test posting images for an item via id.
        """
        self._create_item_status()
        item = Item(name='iPhone', type='TRADE', quantity=1,
            description='A smart phone', status=self.draft_status,
            reason='just because')
        DBSession.add(item)
        DBSession.commit()

        uuid_filename = str(uuid.uuid4())
        mock_image = MockFileImage('image1.png')

        # write to disk the dummy image so the view can resize it
        original = '%s.png' % uuid_filename
        static_path = pkgr.resource_filename('tradeorsale', 'static')
        image_path = os.path.join(static_path,
            os.path.join('items/images', str(item.id)), original)
        with open(image_path, 'wb') as handle:
            handle.write(mock_image.file.read())
        self.failUnless(os.path.exists(image_path))

        # build request
        mock_image.file.seek(0)
        payload = {"item_id": item.id, "image": mock_image}
        request = testing.DummyRequest(post=payload)
        request.registry = self.config.registry

        response = upload_item_images(request)
        self.assertEqual(response.status_code, 200)

        # test that there are 3 images: original, small and medium
        self.assertEqual(DBSession.query(ItemImage).filter_by(item_id=item.id).count(), 3)

    def test_item_image_delete(self):
        """
        Test that image is deleted when DELETE request is sent.
        """
        self._create_item_status()
        item = Item(name='iPhone', type='TRADE', quantity=1,
            description='A smart phone', status=self.draft_status,
            reason='just because')
        DBSession.add(item)
        DBSession.commit()

        # write to disk the dummy image
        mock_image = MockFileImage('original.jpg')
        static_path = pkgr.resource_filename('tradeorsale', 'static')
        item_images_path = os.path.join(static_path,
            os.path.join('items/images', str(item.id)))
        image_path = os.path.join(item_images_path, mock_image.filename)
        with open(image_path, 'wb') as handle:
            handle.write(mock_image.file.read())
        self.failUnless(os.path.exists(image_path))

        # save the image in db
        item_image = ItemImage(item.id, mock_image.filename,
            os.path.join('/%s' % item_images_path, mock_image.filename))
        DBSession.add(item_image)
        DBSession.commit()

        # send DELETE request
        request = Request({}, method='DELETE')
        request.matchdict = {'id': item.id}
        request.registry = self.config.registry

        # check that both the db record and the file were deleted
        response = item_images(None, request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(DBSession.query(ItemImage).count(), 0)
        self.failUnless(not os.path.exists(image_path))

    def test_item_image_delete_fail(self):
        """
        Test deletion of non-existent image via DELETE request.
        """
        # send DELETE request
        request = Request({}, method='DELETE')
        request.matchdict = {'id': 1}
        request.registry = self.config.registry

        self.assertRaises(HTTPBadRequest, item_images, None, request)
Ejemplo n.º 48
0
class JobsDB(object):
    """Redis-backed store for jobs, their logs, actions, usergroups and
    worker registrations. All keys are namespaced via the prefixes below,
    joined with ':<id>' by _key()."""

    # redis key namespace prefixes, one per record type
    prefix = { 'job' : 'multivac_job',
               'log' : 'multivac_log',
               'group' : 'multivac_group',
               'action' : 'multivac_action',
               'worker' : 'multivac_worker' }

    def __init__(self, redis_host, redis_port):
        """Open a redis connection and prepare the log-subscription map."""
        self.redis = StrictRedis(host=redis_host,
                                 port=redis_port,
                                 decode_responses=True)
        # job_id -> pubsub object for jobs whose log channel we follow
        self.subs = {}

        # TODO: add connection test with r.config_get('port')

    #######
    # Job Methods
    #######

    def create_job(self, action_name, args=None, initiator=None):
        """
        Create a new job with unique ID and subscribe to log channel
        params:
         - action_name(str): Name of the action this job uses
         - args(str): Optional space-delimited series of arguments to be
           appended to the job command
         - initiator(str): Optional name of the user who initiated this job
        """
        job = self.get_action(action_name)
        if not job:
            return (False, 'No such action')

        # reject initiators outside the action's allowed groups
        allowed_groups = job['allow_groups'].split(',')
        if not self.check_user(initiator, allowed_groups):
            log.debug('action denied: %s for user %s' % \
                     (action_name, initiator))
            return (False, 'Invalid user command')

        job['id'] = str(uuid4().hex)
        job['args'] = args
        job['created'] = unix_time(datetime.utcnow())

        # jobs requiring confirmation start pending; others are ready to run
        if job['confirm_required'] == "True":
            job['status'] = 'pending'
        else:
            job['status'] = 'ready'

        self._subscribe_to_log(job['id'])

        if initiator:
            self.append_job_log(job['id'], 'Job initiated by %s' % initiator)

        self.redis.hmset(self._key('job', job['id']), job)

        return (True, job['id'])

    def cancel_job(self, job_id):
        """
        Cancel and cleanup a pending job by ID.
        Returns an (ok, message) tuple like the other mutating methods.
        """
        job = self.get_job(job_id)
        # hgetall returns {} for unknown ids; without this guard the
        # job['status'] lookup below raises KeyError
        if not job:
            return (False, 'no such job id')

        if job['status'] != 'pending':
            return (False, 'Cannot cancel job in %s state' % job['status'])

        self.cleanup_job(job_id, canceled=True)

        return (True, '')

    def update_job(self, job_id, field, value):
        """ Update an arbitrary field for a job """
        job_key = self._key('job', job_id)
        self.redis.hset(job_key, field, value)
        return (True,)

    def cleanup_job(self, job_id, canceled=False):
        """
        Cleanup log subscriptions for a given job id and mark completed
        params:
         - canceled(bool): If True, mark job as canceled instead of completed
        """
        logkey = self._key('log', job_id)

        # send EOF signal to streaming clients
        self.redis.publish(logkey, 'EOF')

        # drop our pubsub subscription for this job, if any
        sub = self.subs.pop(job_id, None)
        if sub is not None:
            sub.unsubscribe()
            log.debug('Unsubscribed from log channel: %s' % logkey)

        final_status = 'canceled' if canceled else 'completed'
        self.update_job(job_id, 'status', final_status)

    def get_job(self, job_id):
        """
        Return single job dict given a job id
        """
        job_key = self._key('job', job_id)
        return self.redis.hgetall(job_key)

    def get_jobs(self, status='all'):
        """
        Return all jobs dicts, optionally filtered by status
        via the 'status' param
        """
        job_keys = self.redis.keys(pattern=self._key('job', '*'))
        jobs = [self.redis.hgetall(key) for key in job_keys]

        if status == 'all':
            return jobs
        return [job for job in jobs if job['status'] == status]

    def get_log(self, job_id, timestamp=True):
        """
        Return stored log for a given job id if finished,
        otherwise return streaming log generator
        params:
         - timestamp(bool): prefix lines with timestamp. default True.
        """
        job = self.get_job(job_id)

        if not job:
            return (False, 'no such job id')

        # canceled jobs no longer have a live subscription (cleanup_job
        # removed it), so streaming would KeyError in get_logstream;
        # serve their stored log just like completed jobs
        if job['status'] in ('completed', 'canceled'):
            return self.get_stored_log(job_id, timestamp=timestamp)
        else:
            return self.get_logstream(job_id, timestamp=timestamp)

    def get_logstream(self, job_id, timestamp=True):
        """
        Returns a generator object to stream all job output
        until the job has completed
        params:
         - timestamp(bool): prefix lines with timestamp. default True.
        """
        # (removed unused local 'key'; the channel is already bound to
        # the stored pubsub object created by _subscribe_to_log)
        sub = self.subs[job_id]

        # stream until cleanup_job publishes the EOF sentinel
        for msg in sub.listen():
            if str(msg['data']) == 'EOF':
                break
            yield self._read_jsonlog(msg['data'], append_ts=timestamp)

    def get_stored_log(self, job_id, timestamp=True):
        """
        Return the stored output of a given job id
        params:
         - timestamp(bool): prefix lines with timestamp. default True.
        """
        entries = self.redis.lrange(self._key('log', job_id), 0, -1)
        # entries were lpush'ed newest-first; reverse to chronological order
        return [self._read_jsonlog(entry, append_ts=timestamp)
                for entry in reversed(entries)]

    def append_job_log(self, job_id, text):
        """
        Append a line of job output to a redis list and
        publish to the job's log channel.
        Multi-line text is split and appended line by line; blank and
        whitespace-only lines are dropped.
        """
        key = self._key('log', job_id)

        # split up the line if carriage returns, newlines
        lines = text.splitlines()
        if len(lines) > 1:
            for line in lines:
                self.append_job_log(job_id, line)
        # bug fix: ''.isspace() is False, so the old `not text.isspace()`
        # check let empty strings through despite the intent to skip them
        elif text and not text.isspace():
            logjson = self._jsonlog(text)
            self.redis.publish(key, logjson)
            self.redis.lpush(key, logjson)

    @staticmethod
    def _read_jsonlog(jsonlog, append_ts=True):
        ts,msg = json.loads(jsonlog)
        if not append_ts:
            return msg
        return '[%s] %s' % (ts, msg)

    @staticmethod
    def _jsonlog(msg):
        ts = datetime.utcnow().strftime('%a %b %d %H:%M:%S %Y')
        return json.dumps((ts, msg))

    def _subscribe_to_log(self, job_id):
        """ Subscribe this db object to a jobs log channel by ID """
        channel = self._key('log', job_id)

        pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
        pubsub.subscribe(channel)
        # remember the subscription so cleanup_job can tear it down
        self.subs[job_id] = pubsub

        log.debug('Subscribed to log channel: %s' % channel)

    #######
    # Action Methods
    #######

    def get_action(self, action_name):
        """
        Return a single action dict, given the action name
        """
        action_key = self._key('action', action_name)
        return self.redis.hgetall(action_key)

    def get_actions(self):
        """
        Return all configured actions
        """
        action_keys = self.redis.keys(pattern=self._key('action', '*'))
        return [self.redis.hgetall(key) for key in action_keys]

    def add_action(self, action):
        """Store an action dict, keyed by the action's name."""
        action_key = self._key('action', action['name'])
        self.redis.hmset(action_key, action)

    def purge_actions(self):
        """Delete every stored action from redis."""
        # plain loop instead of a list comprehension built only for its
        # side effects (and then discarded)
        for key in self.redis.keys(pattern=self._key('action', '*')):
            self.redis.delete(key)

    #######
    # Usergroup Methods
    #######

    def check_user(self, user, groups):
        """
        Check a list of groups to see if a user is a member to any
        params:
         - user(str): user name
         - groups(list): list of group names
        """
        if 'all' in groups:
            return True
        for group in groups:
            log.debug('checking group %s' % (group))
            if user in self.get_group(group):
                return True
        return False

    def get_group(self, group_name):
        """
        Return a list of usernames belonging to a group
        """
        group_key = self._key('group', group_name)
        return self.redis.lrange(group_key, 0, -1)

    def get_groups(self):
        """
        Return all configured groups as a {name: members} dict.
        """
        pattern = self._key('group', '*')
        # split on the first ':' only, so a group name that itself
        # contains ':' is not truncated (plain split(':')[1] would be)
        names = [k.split(':', 1)[1] for k in self.redis.keys(pattern=pattern)]
        return {name: self.get_group(name) for name in names}

    def add_group(self, group_name, members):
        """Add members to a usergroup list (lpush'ed newest-first)."""
        group_key = self._key('group', group_name)
        for member in members:
            self.redis.lpush(group_key, member)

    def purge_groups(self):
        """Delete every stored usergroup from redis."""
        # plain loop instead of a side-effect-only list comprehension,
        # matching the idiom used elsewhere for purges
        for key in self.redis.keys(pattern=self._key('group', '*')):
            self.redis.delete(key)

    #######
    # Job Worker Methods 
    #######

    def register_worker(self, name, hostname, ttl=15):
        """
        Register a worker heartbeat.
        params:
         - name(str): worker name
         - hostname(str): host the worker runs on
         - ttl(int): seconds until the registration expires; defaults to
           the previously hard-coded 15. Workers must re-register within
           this window to remain listed by get_workers().
        """
        key = self._key('worker', name)
        worker = {'name': name, 'host': hostname}

        self.redis.hmset(key, worker)
        self.redis.expire(key, ttl)

    def get_workers(self):
        """Return all currently-registered worker dicts."""
        worker_keys = self.redis.keys(pattern=self._key('worker', '*'))
        return [self.redis.hgetall(key) for key in worker_keys]

    #######
    # Keyname Methods
    #######

    def _key(self, keytype, id):
        return self.prefix[keytype] + ':' + id