Example #1
    def wrapper(
        analysis_id: str,
        t: int,
        redis: StrictRedis,
        kill_handler: KillHandler,
        **kwargs,
    ):
        # register as a worker
        n_worker = redis.incr(idfy(N_WORKER, analysis_id, t))
        logger.info(f"Begin generation {t}. I am worker {n_worker}")
        # don't be killed during work
        kill_handler.exit = False

        # do the actual work
        ret = work_on_population(
            analysis_id=analysis_id,
            t=t,
            redis=redis,
            kill_handler=kill_handler,
            n_worker=n_worker,
            **kwargs,
        )

        # notify end work
        redis.decr(idfy(N_WORKER, analysis_id, t))
        # allow the worker to be killed again
        kill_handler.exit = True

        # return whatever the method wants to return
        return ret
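The idfy helper used above comes from the surrounding pyabc redis-sampler module and is not shown in this example; it builds a single redis key out of a variable name, the analysis id and the generation index. A minimal stand-in (illustrative only, the real implementation may join the parts differently):

def idfy(var: str, *args) -> str:
    # join the variable name and the identifiers into one namespaced redis key,
    # e.g. idfy(N_WORKER, analysis_id, t) -> "N_WORKER_<analysis_id>_<t>"
    return "_".join(str(a) for a in (var, *args))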
Example #2
import time

from redis import StrictRedis

r = None  # module-level connection, created lazily inside the worker


def no_op(host, port, db):
    global r
    if r is None:
        r = StrictRedis(host=host, port=port, db=db)
    i = r.decr("countdown")
    if i == 0:
        r.set("finish", time.time())
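A minimal driver for this countdown pattern (key names taken from the example above; host, port and task count are illustrative): set "countdown" to the number of tasks before dispatching them, then read "finish" once the counter has reached zero.

from redis import StrictRedis

client = StrictRedis(host="localhost", port=6379, db=0)
n_tasks = 100
client.set("countdown", n_tasks)      # initialize the shared counter
# ... dispatch no_op(host, port, db) n_tasks times to the workers ...
finish = client.get("finish")         # written by the worker that decrements the counter to 0
if finish is not None:
    print("finished at", float(finish))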
Example #3
File: cli.py  Project: ldxlovelbx/pyabc
def work_on_population(redis: StrictRedis, start_time: int, max_runtime_s: int,
                       kill_handler: KillHandler):
    ssa = redis.get(SSA)
    if ssa is None:
        return
    kill_handler.exit = False
    n_worker = redis.incr(N_WORKER)
    worker_logger.info("Begin population. I am worker {}".format(n_worker))
    sample, simulate, accept = pickle.loads(ssa)

    n_particles = int(redis.get(N_PARTICLES).decode())

    internal_counter = 0
    while n_particles > 0:
        if kill_handler.killed:
            worker_logger.info("Worker {} received stop signal. "
                               "Terminating in the middle of a population"
                               " after {} samples.".format(
                                   n_worker, internal_counter))
            redis.decr(N_WORKER)
            sys.exit(0)

        current_runtime = time() - start_time
        if current_runtime > max_runtime_s:
            worker_logger.info("Worker {} stops during population because "
                               "max runtime {} is exceeded {}".format(
                                   n_worker, max_runtime_s, current_runtime))
            redis.decr(N_WORKER)
            return

        particle_id = redis.incr(N_EVAL)
        internal_counter += 1

        new_param = sample()
        new_sim = simulate(new_param)
        if accept(new_sim):
            n_particles = redis.decr(N_PARTICLES)
            redis.rpush(QUEUE, cloudpickle.dumps((particle_id, new_sim)))
        else:
            n_particles = int(redis.get(N_PARTICLES).decode())

    redis.decr(N_WORKER)
    kill_handler.exit = True
    worker_logger.info(
        "Finished population, did {} samples.".format(internal_counter))
Example #4
class KeyValueStore:
    """
    Interface to the Redis store used by the annotation count index.

    This provides an abstraction over the Redis store to facilitate testing etc.
    """
    def __init__(self, redis_host, redis_port):
        self.redis = StrictRedis(redis_host, redis_port, db=0)

    def inc_counter(self, key):
        return self.redis.incr(key)

    def dec_counter(self, key):
        return self.redis.decr(key)

    def sum_counters(self, keys):
        counts = [int(count) for count in self.redis.mget(keys) if count]
        return sum(counts)

    def put_dict(self, key, value, expiry=None):
        if expiry is None:
            self.redis.set(key, json.dumps(value))
        else:
            self.redis.setex(key, expiry, json.dumps(value))

    def get_dict(self, key):
        return json.loads(self.redis.get(key) or 'null')

    def get(self, key, typ=tostr):
        val = self.redis.get(key)
        if val is None:
            return None
        return typ(val)

    def put(self, key, value):
        self.redis.set(key, value)

    def delete(self, key):
        self.redis.delete(key)
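A short usage sketch of this interface (host, port and key names are illustrative):

store = KeyValueStore('localhost', 6379)
store.inc_counter('annotations:doc-1')          # returns the new count as an int
store.inc_counter('annotations:doc-2')
total = store.sum_counters(['annotations:doc-1', 'annotations:doc-2'])
store.put_dict('meta:doc-1', {'title': 'example'}, expiry=60)   # expires after 60 s
meta = store.get_dict('meta:doc-1')             # {'title': 'example'}, or None once expired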
Example #5
# Insert text into a string value at the given position; offset is the offset into the value
redis.setrange('name', 6, 'emmm... haha')


# Set several keys at once
data = {'name1': 'Durant', 'name2': 'James'}
redis.mset(data)


# If the key exists, add to its value; if the key does not exist, it is created
redis.incr('age', 1)
redis.incr('age', 1)

# The opposite of incr: subtract from the value; the key is created if it does not exist
redis.decr('age', 1)


# Append content to the end of the value
redis.append('name1', ' Hello')


# Return the part of the value between the given start/end indices; strings only
print(redis.substr('name1', 2, -2))  # getrange() is equivalent for strings


#  -------  List commands  --------

# Append elements to the end of the list at the given key; the key is created if it does not exist
redis.rpush('list', 1, 2, 3)
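A small check of the counter commands used above (same connection assumed): incr and decr create a missing key and return the new value as a plain int, while get() still returns bytes when the connection was created without decode_responses=True.

print(redis.incr('age'))        # creates 'age' if needed; returns an int
print(redis.decr('age', 2))     # returns the decremented int
print(redis.get('age'))         # e.g. b'1' -- get() returns bytes here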
Example #6
def work_on_population_static(
    analysis_id: str,
    t: int,
    redis: StrictRedis,
    catch: bool,
    start_time: float,
    max_runtime_s: float,
    kill_handler: KillHandler,
    n_worker: int,
):
    """Work on population in static mode.
    Here the actual sampling happens.
    """

    def get_int(var: str):
        """Convenience function to read an int variable."""
        return int(redis.get(idfy(var, ana_id, t)).decode())

    # set timers
    population_start_time = time()
    cumulative_simulation_time = 0

    # short-form
    ana_id = analysis_id

    # extract bytes
    ssa_b = redis.get(idfy(SSA, ana_id, t))

    if ssa_b is None:
        # no more work needed in the meantime
        return

    # convert from bytes
    simulate_one, sample_factory = pickle.loads(ssa_b)

    # count simulations
    internal_counter = 0

    while True:
        with redis.lock('worker'):
            # check whether there is work to be done
            n_job_b = redis.get(idfy(N_JOB, ana_id, t))
            if n_job_b is None or int(n_job_b.decode()) <= 0:
                population_total_time = time() - population_start_time
                logger.info(
                    "I'm a sad jobless worker. "
                    f"Finished generation {t}, did {internal_counter} "
                    "samples. "
                    f"Simulation time: {cumulative_simulation_time:.2f}s, "
                    f"total time {population_total_time:.2f}."
                )
                return

            # decrease job counter
            redis.decr(idfy(N_JOB, ana_id, t))

        # sample until one simulation gets accepted
        sample = sample_factory()

        while True:
            # check whether the process was externally asked to stop
            if kill_handler.killed:
                logger.info(
                    f"Worker {n_worker} received stop signal. "
                    "Terminating in the middle of a population "
                    f"after {internal_counter} samples."
                )
                # notify quit (manually here as we call exit)
                redis.decr(idfy(N_WORKER, ana_id, t))
                redis.incr(idfy(N_JOB, ana_id, t))
                sys.exit(0)

            # check whether time's up
            current_runtime = time() - start_time
            if current_runtime > max_runtime_s:
                logger.info(
                    f"Worker {n_worker} stops during population because "
                    f"runtime {current_runtime} exceeds "
                    f"max runtime {max_runtime_s}"
                )
                # return to task queue
                redis.incr(idfy(N_JOB, ana_id, t))
                return

            # check whether the analysis was terminated or replaced by a new
            #  one
            ana_id_new_b = redis.get(ANALYSIS_ID)
            if ana_id_new_b is None or str(ana_id_new_b.decode()) != ana_id:
                logger.info(
                    f"Worker {n_worker} stops during population because "
                    "the analysis seems to have been stopped."
                )
                # return to task queue
                redis.incr(idfy(N_JOB, ana_id, t))
                return

            # increase global evaluation counter
            redis.incr(idfy(N_EVAL, ana_id, t))
            # increase internal evaluation counter
            internal_counter += 1

            # timer for current simulation until batch_size acceptances
            this_sim_start = time()
            try:
                # simulate
                new_sim = simulate_one()
            except Exception as e:
                logger.warning(
                    f"Redis worker number {n_worker} failed. "
                    f"Error message is: {e}"
                )
                # increment the failure counter
                redis.incr(idfy(N_FAIL, ana_id, t), 1)
                if not catch:
                    raise e
                continue

            # update total simulation-specific time
            cumulative_simulation_time += time() - this_sim_start

            # append to current sample
            sample.append(new_sim)
            # check for acceptance
            if new_sim.accepted:
                # serialize simulation
                dump = cloudpickle.dumps(sample)
                # put on pipe
                (
                    redis.pipeline()
                    .incr(idfy(N_ACC, ana_id, t))
                    .rpush(idfy(QUEUE, ana_id, t), dump)
                    .execute()
                )

                # upon success, leave the loop and check the job queue again
                break
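The incr/rpush chain above is sent through a redis pipeline, so both commands travel in one round trip and run as a single MULTI/EXEC transaction (redis-py pipelines are transactional by default). A minimal sketch of the same pattern in isolation (key names illustrative):

pipe = redis.pipeline()              # transactional (MULTI/EXEC) by default
pipe.incr('N_ACC')                   # queue: increment the acceptance counter
pipe.rpush('QUEUE', b'payload')      # queue: push the serialized sample
n_acc, queue_len = pipe.execute()    # send both commands in one round trip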
Example #7
# Create the redis connection
redis_client = StrictRedis(decode_responses=True)

# A pessimistic lock in redis, used to avoid overselling in a flash sale

# Acquire the lock first
while True:
    order_lock = redis_client.setnx('lock:order', 1)
    if order_lock:
        # Guard against deadlock: if the lock is never released, it expires automatically after 5 seconds
        redis_client.expire('lock:order', 5)

        reserve_count = redis_client.get('count:reserve')
        if int(reserve_count) > 0:
            redis_client.decr('count:reserve')
            print("order created")
        else:
            print("sold out")
        # Done: release the lock
        redis_client.delete('lock:order')
        break
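Note that setnx followed by expire takes two round trips: if the client crashes between the two calls, the lock never expires. A sketch of the same loop using the atomic NX/EX form of SET, plus a small sleep so waiting clients do not busy-spin (key names as above, timings illustrative):

import time

while True:
    if redis_client.set('lock:order', 1, nx=True, ex=5):   # acquire the lock atomically with a 5 s TTL
        try:
            if int(redis_client.get('count:reserve') or 0) > 0:
                redis_client.decr('count:reserve')
                print("order created")
            else:
                print("sold out")
        finally:
            redis_client.delete('lock:order')               # release the lock
        break
    time.sleep(0.05)                                        # back off before retrying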
Example #8
class RedisConn(object):
    """docstring for RedisConn"""

    def __init__(self, startup_nodes=None, host="localhost",
                 port=6379, db=0, password=None, encoding='utf-8',
                 socket_keepalive=False, connection_pool=None,
                 max_connections=None, project="", decode_responses=True, **kwargs):
        if project:
            project = f'{project}:'
        self.cluster_flag = False
        self.project = project
        if startup_nodes:
            from rediscluster import StrictRedisCluster
            if isinstance(startup_nodes, (str, bytes)):
                startup_nodes = _normalize_startup_nodes(startup_nodes)
            self._redis = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=decode_responses,
                                             skip_full_coverage_check=True, **kwargs)
            self.cluster_flag = True
        else:
            self._redis = StrictRedis(host=host, port=port, db=db, password=password,
                                      socket_keepalive=socket_keepalive, connection_pool=connection_pool,
                                      max_connections=max_connections, **kwargs)

    def add_head(self, key):
        return f'{self.project}{key}'

    def format_key():
        def make_wrapper(func):
            def wrapper(self, key, *args, **kwargs):
                new_key = self.add_head(key)
                return func(self, new_key, *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_key_keys():
        def make_wrapper(func):
            def wrapper(self, key, keys, *args, **kwargs):
                new_key = self.add_head(key)
                new_keys = list(map(self.add_head, keys))
                return func(self, new_key, new_keys, *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_args():
        def make_wrapper(func):
            def wrapper(self, *args, **kwargs):
                new_args = list(map(self.add_head, list(args)))
                return func(self, *new_args, **kwargs)
            return wrapper
        return make_wrapper

    def format_two_key():
        def make_wrapper(func):
            def wrapper(self, src, dst, *args, **kwargs):
                new_src = self.add_head(src)
                new_dst = self.add_head(dst)
                return func(self, new_src, new_dst, *args, **kwargs)
            return wrapper
        return make_wrapper

    def format_keys():
        def make_wrapper(func):
            def wrapper(self, keys, *args):
                new_keys = list(map(self.add_head, keys))
                return func(self, new_keys, *args)
            return wrapper
        return make_wrapper

    def format_dicts():
        def make_wrapper(func):
            def wrapper(self, mapping, *args):
                new_mapping = {}
                for key in mapping.keys():
                    new_key = self.add_head(key)
                    new_mapping[new_key] = mapping[key]
                return func(self, new_mapping, *args)
            return wrapper
        return make_wrapper

    @format_args()
    def unlink(self, *keys):
        """
        time complexity O(1)
        Asynchronously delete the given keys
        """
        return self._redis.unlink(*keys)

    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a pipeline object
        """
        return self._redis.pipeline(transaction, shard_hint)

    """===============================string-start=========================="""
    # }
    @format_key()
    def set(self, key, value, ex=None, px=None, nx=False, xx=False):
        """
        time complexity O(1)
        Set the value at key ``key`` to ``value``
        Arguments:
            key (str):      key
            value (str):    value
            ex (int):       expiry time in seconds
            px (int):       expiry time in milliseconds
            nx (bool):      if True, only set when the key does not exist (create)
            xx (bool):      if True, only set when the key already exists (update)
        Returns:
            result (bool): True on success, possibly None on failure
        """
        return self._redis.set(key, value, ex, px, nx, xx)

    @format_key()
    def get(self, key):
        """
        time complexity O(1)
        Return the value at ``key``, or None if the key doesn't exist
        Arguments:
            key (str):     key
        Returns:
            value (str): the stored value
        """
        return self._redis.get(key)

    @format_key()
    def getset(self, key, value):
        """
        time complexity O(1)
        Set a new value and return the old value
        """
        return self._redis.getset(key, value)

    @format_key()
    def strlen(self, key):
        """
        time complexity O(1)
        Return the length of the value stored at key
        """
        return self._redis.strlen(key)

    @format_key()
    def getrange(self, key, start, end):
        """
        time complexity O(1)
        Return the characters of the value from index start to end
        """
        return self._redis.getrange(key, start, end)

    @format_key()
    def setrange(self, key, offset, value):
        """
        time complexity O(1)
        Overwrite the value stored at key, starting at offset, with the given value
        """
        return self._redis.setrange(key, offset, value)

    @format_key()
    def setbit(self, key, offset, value):
        """
        time complexity O(1)
        value may only be 1 or 0
        Set the bit at offset in the value stored at key
        """
        return self._redis.setbit(key, offset, value)

    @format_key()
    def getbit(self, key, offset):
        """
        time complexity O(1)
        Return the bit at offset in the value stored at key
        """
        return self._redis.getbit(key, offset)

    @format_key()
    def expire(self, key, time):
        """
        time complexity O(1)
        Set the key's time to live in seconds
        """
        return self._redis.expire(key, time)

    @format_key()
    def pexpire(self, key, time):
        """
        time complexity O(1)
        Set the key's time to live in milliseconds
        """
        return self._redis.pexpire(key, time)

    @format_key()
    def pexpireat(self, key, when):
        """
        time complexity O(1)
        Set the exact point in time at which the key expires;
        when is a unix timestamp in milliseconds
        """
        return self._redis.pexpireat(key, when)

    @format_key()
    def pttl(self, key):
        """
        time complexity O(1)
        Return the key's remaining time to live in ms; -1 if no expiry is set
        """
        return self._redis.pttl(key)

    @format_key()
    def ttl(self, key):
        """
        time complexity O(1)
        Return the key's remaining time to live in seconds; -1 if no expiry is set
        """
        return self._redis.ttl(key)

    @format_dicts()
    def mset(self, mapping):
        """
        time complexity O(n)
        Arguments:
            mapping (dict):   {name: value,name1: value1}
        Returns:
            return ok
        """
        return self._redis.mset(mapping)

    @format_dicts()
    def msetnx(self, mapping):
        """
        time complexity O(n)
        Arguments:
            mapping (dict):   {name: value,name1: value1}
        Returns:
            return (bool): unlike mset, if any of the given keys already exists, nothing is set and an error is returned
        """
        return self._redis.msetnx(mapping)

    @format_keys()
    def mget(self, keys, *args):
        """
        time complexity O(n)
        Arguments:
            keys (list): [name, name1]
        Returns:
            return (list): the values of the given keys; None for keys that do not exist
        Note:
            Fetching many keys at once is faster than repeated get calls, but mget is O(n):
            in practice throughput drops sharply once a single call asks for much more than
            about 100 keys, so keep each mget below roughly 100 keys (the exact limit
            depends on the throughput of your redis instance).
        """
        return self._redis.mget(keys, *args)

    @format_key()
    def incr(self, key, amount=1):
        """
        time complexity O(1)
        Increment the value at key by amount and return the new value; only works on
        string values that can be parsed as an integer. Handy e.g. for keeping row
        counts of large SQL tables.
        """
        return self._redis.incr(key, amount)

    @format_key()
    def incrbyfloat(self, key, amount=1.0):
        """
        time complexity O(1)
        amount may be negative, which subtracts.
        Increment the value at key by amount and return the new value; only works on
        string values that can be parsed as a float.
        """
        return self._redis.incrbyfloat(key, amount)

    @format_key()
    def decr(self, key, amount=1):
        """
        time complexity O(1)
        Decrement the value at key by amount and return the new value; only works on
        string values that can be parsed as an integer.
        """
        return self._redis.decr(key, amount)

    def keys(self, pattern='*'):
        """
        time complexity O(n)
        Return all keys matching pattern. Use with care in production.
        """
        return self._redis.keys(pattern)

    @format_key()
    def move(self, key, db):
        """
        time complexity O(1)
        Move the key to another db
        """
        return self._redis.move(key, db)

    def randomkey(self):
        """
        time complexity O(1)
        Return a random key
        """
        return self._redis.randomkey()

    @format_args()
    def rename(self, src, dst):
        """
        time complexity O(1)
        Rename key src to dst
        """
        return self._redis.rename(src, dst)

    @format_args()
    def exists(self, *keys):
        """
        time complexity O(1)
        Return how many of the given keys exist
        """
        return self._redis.exists(*keys)

    @format_args()
    def delete(self, *keys):
        """
        time complexity O(1)
        Delete the given keys
        """
        return self._redis.delete(*keys)

    @format_key()
    def type(self, key):
        """
        time complexity O(1)
        Return the type of the value stored at key
        """
        return self._redis.type(key)
# {
    """===============================string-end============================"""

    """===============================list-start============================"""
# }
    @format_keys()
    def blpop(self, keys, timeout=0):
        """
        If every list in keys is empty, the call blocks the client until an element is
        pushed to one of them or the timeout expires; a blocking command typically used
        for polling and worker loops.
        Arguments:
            keys (list): [key, key]
            timeout (int): seconds
        """
        return self._redis.blpop(keys, timeout)

    @format_keys()
    def brpop(self, keys, timeout=0):
        """
        Same as blpop, but pops from the other end
        """
        return self._redis.brpop(keys, timeout)

    @format_two_key()
    def brpoplpush(self, src, dst, timeout=0):
        """
        Pop an element from the tail of src and push it onto the head of dst; as above, blocks while src is empty
        """
        return self._redis.brpoplpush(src, dst, timeout)

    @format_key()
    def lpush(self, key, *values):
        """
        time complexity O(n)
        Push values onto the head of the list stored at ``key``
        Arguments:
            key (str):      key
            values:         values to push
        Returns:
            result (int): length of the list after the push
        """
        return self._redis.lpush(key, *values)

    @format_key()
    def lpushx(self, key, *values):
        """
        time complexity O(n)
        Like lpush, but only pushes if the key already exists
        Arguments:
            key (str):      key
            values:         values to push
        Returns:
            result (int): length of the list after the push
        """
        return self._redis.lpushx(key, *values)

    @format_key()
    def lpop(self, key):
        """
        time complexity O(1)
        Remove and return the first element of the list stored at key.
        """
        return self._redis.lpop(key)

    @format_key()
    def rpush(self, key, *values):
        """
        time complexity O(n)
        Push values onto the tail of the list stored at ``key``
        Arguments:
            key (str):      key
            values:         values to push
        Returns:
            result (int): length of the list after the push
        """
        return self._redis.rpush(key, *values)

    @format_key()
    def rpushx(self, key, *values):
        """
        time complexity O(n)
        Like rpush, but only pushes if the key already exists
        Arguments:
            key (str):      key
            values:         values to push
        Returns:
            result (int): length of the list after the push
        """
        return self._redis.rpushx(key, *values)

    @format_key()
    def rpop(self, key):
        """
        time complexity O(1)
        Remove and return the last element of the list stored at key.
        """
        return self._redis.rpop(key)

    @format_key()
    def lrange(self, key, start, end):
        """
        time complexity O(n)
        Return the elements between start and end (both inclusive). If the size of the
        list is unknown, avoid lrange(key, 0, -1) and keep the number of elements
        fetched per call under control.
        """
        return self._redis.lrange(key, start, end)

    @format_args()
    def rpoplpush(self, src, dst):
        """
        Pop an element from the tail of src and push it onto the head of dst
        """
        return self._redis.rpoplpush(src, dst)

    @format_key()
    def llen(self, key):
        """
        time complexity O(1)
        Return the length of the list; 0 if the key does not exist, an error if it is not a list
        """
        return self._redis.llen(key)

    @format_key()
    def lindex(self, key, index):
        """
        time complexity O(n), where n is the number of elements traversed
        Return the element at position index of the list stored at key
        """
        return self._redis.lindex(key, index)

    @format_key()
    def linsert(self, key, where, refvalue, value):
        """
        time complexity O(n), where n is the number of elements traversed
        Does nothing if the key or refvalue does not exist
        Arguments:
            where (str): BEFORE|AFTER
            refvalue (str): an existing value in the list
        """
        return self._redis.linsert(key, where, refvalue, value)

    @format_key()
    def lrem(self, key, count, value):
        """
        time complexity O(n)
        Remove up to count occurrences of value
        Arguments:
            count (int): count > 0  search from the head
                         count < 0  search from the tail
                         count = 0  remove all elements equal to value
        Returns:
            result (int): number of elements removed
        """
        if self.cluster_flag:
            return self._redis.lrem(key, value, count)
        return self._redis.lrem(key, count, value)

    @format_key()
    def lset(self, key, index, value):
        """
        time complexity O(n)
        Set the element at index; an error if the key does not exist or index is out of range
        """
        return self._redis.lset(key, index, value)

    @format_key()
    def ltrim(self, key, start, end):
        """
        time complexity O(n), where n is the number of elements removed
        Trim the list so that only the elements inside the given range are kept; everything outside the range is removed.
        """
        return self._redis.ltrim(key, start, end)

    @format_key()
    def sort(self, key, start=None, num=None, by=None, get=None,
             desc=False, alpha=False, store=None, groups=False):
        """
        time complexity O(N+M*log(M)), where N is the number of elements in the list or
        set being sorted and M the number of elements returned.
        Arguments:
            by (str): sort by an external key pattern; e.g. store per-item weights in
                    redis first and sort by a pattern such as user_level_*
            get (str): return the values of keys matching a pattern such as user_name_*,
                    where * is filled in with each element of the list
            store (str): store the sorted result under this key (an expire can be set on
                    it so it acts as a result cache)
            alpha: sort lexicographically
            desc: sort in descending order
        Returns:
            result (list): the sorted list
        """
        return self._redis.sort(key, start, num, by, get, desc, alpha, store, groups)

    def scan(self, cursor=0, match=None, count=None):
        """
        time complexity O(1) per call
        Incrementally iterate over the keys of the database; because the iteration is
        incremental, the same key may be returned more than once.
        Arguments:
            cursor (int): the cursor
            match (str): key pattern to match
            count (int): hint for the number of keys to return per call
        Returns:
            result (tuple): the cursor for the next scan, followed by the list of keys;
            a returned cursor of 0 means the whole keyspace has been traversed
        """
        return self._redis.scan(cursor, match, count)
# {
    """===============================list-end===================================="""

    """===============================hash-start==================================="""
# }
    @format_key()
    def hdel(self, key, *names):
        """
        time complexity O(n), where n is the number of names
        Delete the given fields from the hash stored at ``key``
        Arguments:
            key (str):     key
            names (list): fields of the hash
        Returns:
            result (int): number of fields actually deleted
        """
        return self._redis.hdel(key, *names)

    @format_key()
    def hexists(self, key, name):
        """
        time complexity O(1)
        Return whether the field name exists in the hash stored at key
        """
        return self._redis.hexists(key, name)

    @format_key()
    def hget(self, key, name):
        """
        time complexity O(1)
        """
        return self._redis.hget(key, name)

    @format_key()
    def hgetall(self, key):
        """
        time complexity O(n)
        """
        return self._redis.hgetall(key)

    @format_key()
    def hincrby(self, key, name, amount=1):
        """
        time complexity O(1)
        amount may be negative; the field value must be an integer, otherwise an error is returned
        """
        return self._redis.hincrby(key, name, amount)

    @format_key()
    def hincrbyfloat(self, key, name, amount=1.0):
        """
        time complexity O(1)
        """
        return self._redis.hincrbyfloat(key, name, amount)

    @format_key()
    def hkeys(self, key):
        """
        time complexity O(n)
        """
        return self._redis.hkeys(key)

    @format_key()
    def hlen(self, key):
        """
        time complexity O(1)
        """
        return self._redis.hlen(key)

    @format_key()
    def hset(self, key, name, value):
        """
        time complexity O(1)
        """
        return self._redis.hset(key, name, value)

    @format_key()
    def hsetnx(self, key, name, value):
        """
        time complexity O(1)
        """
        return self._redis.hsetnx(key, name, value)

    @format_key()
    def hmset(self, key, mapping):
        """
        time complexity O(n)
        """
        return self._redis.hmset(key, mapping)

    @format_key()
    def hmget(self, key, names, *args):
        """
        time complexity O(n)
        """
        return self._redis.hmget(key, names, *args)

    @format_key()
    def hvals(self, key):
        """
        time complexity O(n)
        Return all values of the hash
        """
        return self._redis.hvals(key)

    @format_key()
    def hstrlen(self, key, name):
        """
        time complexity O(1)
        """
        return self._redis.hstrlen(key, name)
# {
    """=================================hash-end==================================="""

    """=================================set-start================================="""
# }
    @format_key()
    def sadd(self, key, *values):
        """
        time complexity O(n), where n is the number of values
        """
        return self._redis.sadd(key, *values)

    @format_key()
    def scard(self, key):
        """
        time complexity O(1)
        Return the cardinality (number of members) of the set
        """
        return self._redis.scard(key)

    @format_args()
    def sdiff(self, key, *args):
        """
        time complexity O(N), where N is the total number of members in all given sets
        Return the members of the difference.
        """
        return self._redis.sdiff(key, *args)

    @format_args()
    def sdiffstore(self, dest, keys, *args):
        """
        time complexity O(N), where N is the total number of members in all given sets
        Store the difference in the set dest and return the number of members in it.
        """
        return self._redis.sdiffstore(dest, keys, *args)

    @format_args()
    def sinter(self, key, *args):
        """
        time complexity O(N * M), where N is the cardinality of the smallest set and M the number of sets.
        Return the members of the intersection
        """
        return self._redis.sinter(key, *args)

    @format_args()
    def sinterstore(self, dest, keys, *args):
        """
        time complexity O(N * M), where N is the cardinality of the smallest set and M the number of sets.
        Store the intersection in the set dest and return the number of members in it.
        """
        return self._redis.sinterstore(dest, keys, *args)

    @format_key()
    def sismember(self, key, name):
        """
        time complexity O(1)
        Return whether name is a member of the set stored at key
        """
        return self._redis.sismember(key, name)

    @format_key()
    def smembers(self, key):
        """
        time complexity O(n)
        Return all members of the set
        """
        return self._redis.smembers(key)

    @format_two_key()
    def smove(self, src, dst, value):
        """
        time complexity O(1)
        Atomically move value from the set src to the set dst
        """
        return self._redis.smove(src, dst, value)

    @format_key()
    def spop(self, key, count=None):
        """
        time complexity O(n)
        Remove and return one random member by default, or count members if count is given
        """
        return self._redis.spop(key, count)

    @format_key()
    def srandmember(self, key, number=None):
        """
        time complexity O(n)
        Return one random member by default, or number members if given, without removing them
        """
        return self._redis.srandmember(key, number)

    @format_key()
    def srem(self, key, *values):
        """
        time complexity O(n), where n is the number of values
        Remove the given values from the set stored at key
        """
        return self._redis.srem(key, *values)

    @format_args()
    def sunion(self, keys, *args):
        """
        time complexity O(N), where N is the total number of members in all given sets
        Return the union
        """
        return self._redis.sunion(keys, *args)

    @format_args()
    def sunionstore(self, dest, keys, *args):
        """
        time complexity O(N), where N is the total number of members in all given sets.
        Compute the union and store it in dest
        """
        return self._redis.sunionstore(dest, keys, *args)

    @format_key()
    def sscan(self, key, cursor=0, match=None, count=None):
        """
        time complexity O(1)
        Same as scan, but for a set
        """
        return self._redis.sscan(key, cursor, match, count)
# {
    """==================================set-end=================================="""

    """===============================SortedSet-start============================="""
# }
    @format_key()
    def zadd(self, key, mapping, nx=False, xx=False, ch=False, incr=False):
        """
        time complexity O(M*log(N)), where N is the cardinality of the sorted set and M the number of new members added.
        Arguments:
            mapping (dict): {value: score}
            xx (bool): only update members that already exist; never add new members.
            nx (bool): never update existing members; only add new members.
            ch (bool): return the number of changed members (newly added members plus
                       existing members whose score was updated) instead of only the
                       number of newly added members, which is the default.
            incr (bool): behave like ZINCRBY and increment the member's score.
        Returns:
            result (int): number of members added
        """
        if self.cluster_flag:
            return self._redis.zadd(key, **mapping)
        return self._redis.zadd(key, mapping, nx, xx, ch, incr)

    @format_key()
    def zcard(self, key):
        """
        time complexity O(1)
        Return the cardinality of the sorted set
        """
        return self._redis.zcard(key)

    @format_key()
    def zcount(self, key, minz, maxz):
        """
        time complexity O(log(N)), where N is the cardinality of the sorted set.
        Return the number of members whose score is between min and max
        """
        return self._redis.zcount(key, minz, maxz)

    @format_key()
    def zincrby(self, key, amount, value):
        """
        time complexity O(log(N)), where N is the cardinality of the sorted set.
        amount may be negative
        """
        if self.cluster_flag:
            return self._redis.zincrby(key, value, amount)
        return self._redis.zincrby(key, amount, value)

    @format_key_keys()
    def zinterstore(self, dest, keys, aggregate=None):
        """
        time complexity O(N*K)+O(M*log(M)), where N is the cardinality of the smallest input sorted set, K the number of inputs and M the cardinality of the result.
        Compute the intersection, combine scores according to aggregate (sum by default) and store the result in dest.
        Arguments:
            aggregate (str): sum, min or max
        Returns the number of members in the resulting sorted set
        """
        return self._redis.zinterstore(dest, keys, aggregate)

    @format_key()
    def zrange(self, key, start, end, desc=False, withscores=False,
               score_cast_func=float):
        """
        time complexity O(log(N)+M), where N is the cardinality of the sorted set and M the size of the result.
        Arguments:
            start: start index in the sorted set (an index, not a score)
            end: end index in the sorted set (an index, not a score)
            desc: sort order; by default members are sorted by score from low to high
            withscores: also return each member's score; by default only the members are returned
            score_cast_func: function used to convert the scores
        """
        return self._redis.zrange(key, start, end, desc, withscores, score_cast_func)

    @format_key()
    def zrevrange(self, key, start, end, withscores=False, score_cast_func=float):
        """
        time complexity O(log(N)+M), where N is the cardinality of the sorted set and M the size of the result.
        Arguments:
            start: start index in the sorted set (an index, not a score)
            end: end index in the sorted set (an index, not a score)
            withscores: also return each member's score; by default only the members are returned
            score_cast_func: function used to convert the scores
        """
        return self._redis.zrevrange(key, start, end, withscores, score_cast_func)

    @format_key()
    def zrangebyscore(self, key, minz, maxz, start=None, num=None, withscores=False, score_cast_func=float):
        """
        time complexity O(log(N)+M), where N is the cardinality of the sorted set and M the size of the result.
        Members are returned ordered by score, from low to high.
        """
        return self._redis.zrangebyscore(key, minz, maxz, start, num, withscores, score_cast_func)

    @format_key()
    def zrevrangebyscore(self, key, minz, maxz, start=None, num=None, withscores=False, score_cast_func=float):
        """
        time complexity O(log(N)+M), where N is the cardinality of the sorted set and M the size of the result.
        Members are returned ordered by score, from high to low.
        """
        return self._redis.zrevrangebyscore(key, minz, maxz, start, num, withscores, score_cast_func)

    @format_key()
    def zrangebylex(self, key, minz, maxz, start=None, num=None):
        """
        time complexity O(log(N)+M), where N is the cardinality of the sorted set and M the size of the result.
        Members are returned in lexicographical order of their value, from low to high.
        """
        return self._redis.zrangebylex(key, minz, maxz, start, num)

    @format_key()
    def zrevrangebylex(self, key, minz, maxz, start=None, num=None):
        """
        time complexity O(log(N)+M), where N is the cardinality of the sorted set and M the size of the result.
        Members are returned in lexicographical order of their value, from high to low.
        """
        return self._redis.zrevrangebylex(key, minz, maxz, start, num)

    @format_key()
    def zrank(self, key, value):
        """
        time complexity O(log(N))
        Return the rank of value in the sorted set; ranks start at 0
        """
        return self._redis.zrank(key, value)

    @format_key()
    def zrevrank(self, key, value):
        """
        time complexity O(log(N))
        Return the rank of value with members ordered from high to low score; ranks start at 0
        """
        return self._redis.zrevrank(key, value)

    @format_key()
    def zrem(self, key, *values):
        """
        time complexity O(M*log(N)), where N is the cardinality of the sorted set and M the number of members removed
        Remove one or more members from the sorted set
        """
        return self._redis.zrem(key, *values)

    @format_key()
    def zremrangebylex(self, key, minz, maxz):
        """
        time complexity O(log(N)+M), where N is the cardinality of the sorted set and M the number of members removed.
        Remove the members in the given lexicographical range
        """
        return self._redis.zremrangebylex(key, minz, maxz)

    @format_key()
    def zremrangebyrank(self, key, minz, maxz):
        """
        time complexity O(log(N)+M), where N is the cardinality of the sorted set and M the number of members removed.
        Remove the members in the given rank range
        """
        return self._redis.zremrangebyrank(key, minz, maxz)

    @format_key()
    def zremrangebyscore(self, key, minz, maxz):
        """
        time complexity O(log(N)+M), where N is the cardinality of the sorted set and M the number of members removed.
        Remove the members in the given score range
        """
        return self._redis.zremrangebyscore(key, minz, maxz)

    @format_key()
    def zscore(self, key, value):
        """
        time complexity O(log(N))
        Return the score of value in the sorted set
        """
        return self._redis.zscore(key, value)

    @format_key_keys()
    def zunionstore(self, dest, keys, aggregate=None):
        """
        time complexity O(N)+O(M*log(M)), where N is the sum of the cardinalities of the input sorted sets and M the cardinality of the result.
        Compute the union and store it in dest
        """
        return self._redis.zunionstore(dest, keys, aggregate)

    @format_key()
    def zscan(self, key, cursor=0, match=None, count=None, score_cast_func=float):
        """
        time complexity O(1)
        Same as scan, but for a sorted set
        """
        return self._redis.zscan(key, cursor, match, count, score_cast_func)

    def zlexcount(self, key, minz, maxz):
        """
        time complexity O(log(N)), where N is the number of elements in the sorted set.
        min: '-' denotes negative infinity; a '[' prefix makes the bound inclusive, '(' exclusive
        max: '+' denotes positive infinity; e.g. '[a' or '(c'
        """
        return self._redis.zlexcount(key, minz, maxz)
# {
    """===============================SortedSet-end================================="""
    """===============================HyperLogLog-start==============================="""
# }
    @format_key()
    def pfadd(self, key, *values):
        """
        time complexity O(n)
        """
        return self._redis.pfadd(key, *values)

    @format_args()
    def pfcount(self, *sources):
        """
        time complexity O(1)
        Return the approximate cardinality of the given HyperLogLog keys
        """
        return self._redis.pfcount(*sources)

    @format_args()
    def pfmerge(self, dest, *sources):
        """
        time complexity O(N), where N is the number of HyperLogLogs being merged (with a fairly high constant factor)
        Merge the source HyperLogLogs into dest
        """
        return self._redis.pfmerge(dest, *sources)
# {
    """===============================HyperLogLog-end================================="""

    """==================================GEO-start===================================="""
# }
    @format_key()
    def geoadd(self, key, *values):
        """
        time complexity O(log(N)) per added element, where N is the number of elements already stored in the key.
        """
        return self._redis.geoadd(key, *values)

    @format_key()
    def geopos(self, key, *values):
        """
        time complexity O(log(N))
        Return the positions (longitude and latitude) of all the given members.
        """
        return self._redis.geopos(key, *values)

    @format_key()
    def geohash(self, key, *values):
        """
        time complexity O(log(N))
        Return a geohash string for each of the given members, in the same order.
        """
        return self._redis.geohash(key, *values)

    @format_key()
    def geodist(self, key, place1, place2, unit=None):
        """
        time complexity O(log(N))
        Return the distance between two given members.
        Argument:
            unit: m (meters), km (kilometers), mi (miles), ft (feet)
        """
        return self._redis.geodist(key, place1, place2, unit)

    @format_key()
    def georadius(self, key, longitude, latitude, radius, unit=None,
                  withdist=False, withcoord=False, withhash=False, count=None,
                  sort=None, store=None, store_dist=None):
        """
        time complexity O(N+log(M)), where N is the number of elements inside the given radius and M the number of elements returned.
        With the given longitude/latitude as centre, return all members of the key whose distance from the centre does not exceed the given radius.
        Argument:
            longitude: longitude of the centre
            latitude: latitude of the centre
            radius: radius
            unit: distance unit
            withdist: also return each member's distance from the centre, in the same unit as the radius.
            withcoord: also return each member's longitude and latitude
            withhash: also return the raw geohash-encoded sorted-set score as a 52-bit signed integer;
                      mainly useful for low-level applications or debugging.
            sort: sort by distance from the centre, ASC or DESC
            count: return only the first count elements
            store: store the result
            store_dist: store member names together with their distances
        Return:
            list(list)
            [['Foshan', 109.4922], ['Guangzhou', 105.8065]]
        """
        return self._redis.georadius(key, longitude, latitude, radius, unit, withdist, withcoord,
                                     withhash, count, sort, store, store_dist)

    @format_key()
    def georadiusbymember(self, key, member, radius, unit=None,
                          withdist=False, withcoord=False, withhash=False, count=None,
                          sort=None, store=None, store_dist=None):
        """
        time complexity O(N+log(M)), where N is the number of elements inside the given radius and M the number of elements returned.
        Like georadius, but the centre is an existing member of the key instead of an explicit longitude/latitude.
        Argument:
            member: the member to use as the centre
            radius: radius
            unit: distance unit
            withdist: also return each member's distance from the centre, in the same unit as the radius.
            withcoord: also return each member's longitude and latitude
            withhash: also return the raw geohash-encoded sorted-set score as a 52-bit signed integer; mainly useful for low-level applications or debugging.
            sort: sort by distance from the centre, ASC or DESC
            count: return only the first count elements
            store: store the result
            store_dist: store member names together with their distances
        Return:
            list(list)
            [['Foshan', 109.4922], ['Guangzhou', 105.8065]]
        """
        return self._redis.georadiusbymember(key, member, radius, unit, withdist, withcoord,
                                             withhash, count, sort, store, store_dist)

# {
    """==================================GEO-end======================================"""
Example #9
def work_on_population(redis: StrictRedis,
                       start_time: int,
                       max_runtime_s: int,
                       kill_handler: KillHandler):
    population_start_time = time()
    cumulative_simulation_time = 0

    pipeline = redis.pipeline()
    pipeline.get(SSA)
    pipeline.get(N_PARTICLES)
    pipeline.get(BATCH_SIZE)
    ssa, n_particles_bytes, batch_size_bytes = pipeline.execute()

    if ssa is None:
        return

    kill_handler.exit = False

    n_particles_bytes = redis.get(N_PARTICLES)
    if n_particles_bytes is None:
        return
    n_particles = int(n_particles_bytes.decode())
    batch_size = int(batch_size_bytes.decode())

    # load sampler options
    simulate_one, sample_factory = pickle.loads(ssa)

    n_worker = redis.incr(N_WORKER)
    worker_logger.info(f"Begin population, "
                       f"batch size {batch_size}. "
                       f"I am worker {n_worker}")
    internal_counter = 0

    # create empty sample
    sample = sample_factory()

    while n_particles > 0:
        if kill_handler.killed:
            worker_logger.info("Worker {} received stop signal. "
                               "Terminating in the middle of a population"
                               " after {} samples."
                               .format(n_worker, internal_counter))
            redis.decr(N_WORKER)
            sys.exit(0)

        current_runtime = time() - start_time
        if current_runtime > max_runtime_s:
            worker_logger.info("Worker {} stops during population because "
                               "max runtime {} is exceeded {}"
                               .format(n_worker, max_runtime_s,
                                       current_runtime))
            redis.decr(N_WORKER)
            return

        particle_max_id = redis.incr(N_EVAL, batch_size)

        this_sim_start = time()
        accepted_samples = []
        for n_batched in range(batch_size):
            new_sim = simulate_one()
            sample.append(new_sim)
            internal_counter += 1
            if new_sim.accepted:
                # the order of the IDs is reversed, but this does not
                # matter. Important is only that the IDs are specified
                # before the simulation starts
                accepted_samples.append(cloudpickle.dumps((particle_max_id -
                                                           n_batched, sample)))
                sample = sample_factory()
        cumulative_simulation_time += time() - this_sim_start

        if len(accepted_samples) > 0:
            pipeline = redis.pipeline()
            pipeline.decr(N_PARTICLES, len(accepted_samples))
            pipeline.rpush(QUEUE, *accepted_samples)
            n_particles, _ = pipeline.execute()
        else:
            n_particles = int(redis.get(N_PARTICLES).decode())

    redis.decr(N_WORKER)
    kill_handler.exit = True
    population_total_time = time() - population_start_time
    worker_logger.info(f"Finished population, did {internal_counter} samples. "
                       f"Simulation time: {cumulative_simulation_time:.2f}s, "
                       f" total time {population_total_time:.2f}.")
Example #10
# coding:utf-8
# author: Articuly
# datetime: 2020/6/19 19:05
# software: PyCharm

from redis import StrictRedis

redis = StrictRedis(host='127.0.0.1')

# String type
# Set a key - set(name, value, ex=None, px=None, nx=False, xx=False)
# ex - expiry time in seconds
# px - expiry time in milliseconds
# nx - if True, only set when name does not exist
# xx - if True, only set when name already exists
redis.set('username', 'articuly')
redis.set('website', 'articuly.com', ex=10)
# Get a key - get(name)
# The return value is of type bytes
username = redis.get('username')
website = redis.get('website').decode()
# Numeric increment / decrement
# incr, decr
redis.set('number', 10)
redis.incr('number', 100)
print(redis.get('number'))
redis.decr('number', 50)
print(redis.get('number'))
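The values printed by get() above are bytes such as b'110' and b'60', while incr and decr themselves already return plain ints. Two ways to get readable strings back (the second connection is illustrative):

print(redis.get('number').decode())                      # decode each reply by hand
redis_str = StrictRedis(host='127.0.0.1', decode_responses=True)
print(redis_str.get('number'))                           # or let the client decode everything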
Example #11
# 3. String operations
print()
print(redis.set('emotion', 'smile'))
redis.set('name', 'Leo')
redis.set('age', '19')
print(redis.get('emotion'))
print(redis.getset('emotion', 'humour'))
print(redis.mget(['emotion', 'name', 'age']))
print(redis.setnx('newname', 'James'))  # only sets if the key does not exist
print(redis.setex('country', 1, 'china'))
redis.setrange('name', 3, '2019')
print(redis.get('name'))
print(redis.mset({'name1': 'Modric', 'name2': 'Van Dik'}))
print(redis.msetnx({'name3': 'Salah', 'name4': 'Mane'}))  # only sets if none of the keys exist
print(redis.incr('age', 1))
print(redis.decr('age', 1))
print(redis.append('name', 'ooo'))
print(redis.substr('name', 1, 4))
print(redis.getrange('name', 0, 3))

# 4. List operations
print(redis.rpush('list', 1, 2, 3, 4, 5))
print(redis.lpush('list', 0))
print(redis.llen('list'))
print(redis.lrange('list', 1, 3))
print(redis.ltrim('list', 1, 3))
print(redis.lindex('list', 1))
print(redis.lset('list', 1, 666))
print(redis.lrem('list', 1, 1))
print(redis.lpop('list'))
print(redis.rpop('list'))
Example #12
from redis import StrictRedis

# Connection assumed by this snippet (created elsewhere in the original).
con = StrictRedis(decode_responses=True)

# Show the existing keys.
print(f'\nExisting keys: {con.keys()}')

# Insert a key and value.
print('\nCreating a new key:')
con.set('chave1', 'valor1')
print(f'Existing keys: {con.keys()}')

# Read the value of a key.
print('Key value:', con.get('chave1'))

# Create another key.
print('\nCreating another key:')
con.set('chave2', 1)
print(f'Existing keys: {con.keys()}')
print('Value of chave2:', con.get('chave2'))

# Increment the value of the key.
print('Incrementing chave2:', con.incr('chave2'))
print('Incrementing chave2:', con.incr('chave2'))
print('Decrementing chave2:', con.decr('chave2'))

# Remove a key.
print('\nRemoving chave2:')
con.delete('chave2')

# If the key does not exist, None is returned.
print('Trying to show the key that was removed')
print('Key value:', con.get('chave2'))
print(f'Existing keys: {con.keys()}')
Example #13
File: cache.py  Project: jmckinlay/WALKOFF
class RedisCacheAdapter(object):
    _requires = ['redis']

    def __init__(self, **opts):
        from redis import StrictRedis
        self.cache = StrictRedis(**opts)

    def set(self, key, value, expire=None, **opts):
        """Set a value for a key in the cache

        Args:
            key: The key to use for this data
            value: The value to set this key to
            expire (int|datetime.timedelta, optional): The expiration for this value. If `int` is passed, it indicates
                milliseconds
            **opts: Additional options to use. See `FanoutCache` for more details

        Returns:
            (bool): Was this key set?
        """
        return self.cache.set(key, value, px=expire, **opts)

    def get(self, key, **opts):
        """Gets the value stored in the key

        Args:
            key: The key to get the value from
            **opts: Additional options to use. See `FanoutCache` for more details.

        Returns:
            The value stored in the key
        """
        return self._decode_response(self.cache.get(key))

    def add(self, key, value, expire=None, **opts):
        """Add a key and a value to the cache if the key is not already in the cache

        Args:
            key: The key to store the value to
            value: The value to store in the key
            expire (int|datetime.timedelta, optional): The expiration for this value. If `int` is passed, it indicates
                milliseconds
            **opts: Additional options to use. See `FanoutCache` for more details

        Returns:
            (bool): Was the key set?
        """
        return self.cache.set(key, value, px=expire, nx=True, **opts)

    def incr(self, key, amount=1):
        """Increments a key by an amount.

        If the key is not found, then its value becomes the increment amount specified

        Args:
            key: The key to increment
            amount (int, optional): The amount to increment the key by. Defaults to 1
            retry (bool, optional): Should this operation be retried if the transaction times out? Defaults to
                `self.retry`

        Returns:
            (int): The incremented value
        """
        return int(self.cache.incr(key, amount))

    def decr(self, key, amount=1):
        """Decrements a key by an amount.

        If the key is not found, then its value becomes the decrement amount specified

        Args:
            key: The key to decrement
            amount (int, optional): The amount to decrement the key by. Defaults to 1
            retry (bool, optional): Should this operation be retried if the transaction times out? Defaults to
                `self.retry`

        Returns:
            (int): The decremented value
        """
        return int(self.cache.decr(key, amount))

    def rpush(self, key, *values):
        """Pushes a value to the right of a deque.

        This operation also creates a deque for a given key if one was not already created. Otherwise it uses the
        existing deque

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque
        """
        return self.cache.rpush(key, *values)

    def rpop(self, key):
        """Pops a value from the right of a deque.

        If this key is not a deque then this function will return None.

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque

        Returns:
            The rightmost value on the deque or None if the key is not a deque or the deque is empty
        """
        return self._decode_response(self.cache.rpop(key))

    def lpush(self, key, *values):
        """Pushes a value to the left of a deque.

        This operation also creates a deque for a given key if one was not already created. Otherwise it uses the
        existing deque

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque
        """
        return self.cache.lpush(key, *values)

    def lpop(self, key):
        """Pops a value from the left of a deque.

        If this key is not a deque then this function will return None.

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque

        Returns:
            The leftmost value on the deque or None if the key is not a deque or the deque is empty
        """
        return self._decode_response(self.cache.lpop(key))

    @staticmethod
    def _decode_response(response):
        if response is None:
            return response
        try:
            return response.decode('utf-8')
        except UnicodeDecodeError:
            return response

    def subscribe(self, channel):
        """Subscribe to a channel

        Args:
            channel (str): The name of the channel to subscribe to

        Returns:
            (RedisSubscription): The subscription for this channel
        """
        subscription = self.cache.pubsub()
        subscription.subscribe(channel)
        subscription.get_message()
        return RedisSubscription(channel, subscription)

    def unsubscribe(self, channel):
        """Unsubscribe to a channel

        Args:
            channel (str): The name of the channel to subscribe to

        Returns:
            (int): The number of subscribers unsubscribed from this channel
        """
        return self.cache.publish(channel, unsubscribe_message)

    def publish(self, channel, data):
        """Publish some data to a channel

        Args:
            channel (str): The name of the channel to publish the data to
            data: The data to publish

        Returns:
            The number of subscriptions which received the data
        """
        return self.cache.publish(channel, data)

    def shutdown(self):
        """Shuts down the connection to the cache

        For the Redis cache, this is not necessary. Redis's ConnectionPool should handle it
        """
        pass

    def clear(self):
        """Clears all values in the cache
        """
        self.cache.flushdb()

    def check(self):
        self.cache.info()

    def register_callbacks(self):
        """Registers callbacks for the PubSubs for the current thread.

        For the RedisCacheAdapter, this is not necessary
        """
        pass

    @classmethod
    def from_json(cls, json_in):
        """Constructs this cache from its JSON representation

        Args:
            json_in (dict): The JSON representation of this cache configuration

        Returns:
            (RedisCacheAdapter): A RedisCacheAdapter with a configuration reflecting the values in the JSON
        """
        password = os.getenv('WALKOFF_REDIS_PASSWORD')
        if password is not None:
            json_in['password'] = password
        if 'timeout' in json_in and json_in['timeout'] > 0:
            json_in['socket_timeout'] = json_in.pop('timeout')
        return cls(**json_in)
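A brief usage sketch of the adapter (connection options are illustrative; RedisSubscription and unsubscribe_message come from the surrounding WALKOFF module and are not shown here):

cache = RedisCacheAdapter(host='localhost', port=6379, db=0)
cache.set('greeting', 'hello', expire=5000)   # expire is forwarded to redis as px (milliseconds)
print(cache.get('greeting'))                  # 'hello'
cache.rpush('jobs', 'a', 'b')
print(cache.lpop('jobs'))                     # 'a'
print(cache.incr('hits'))                     # 1 on the first call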
Example #14
File: cache.py  Project: jkohrman/WALKOFF
class RedisCacheAdapter(object):
    instance = None

    def __new__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = super(RedisCacheAdapter, cls).__new__(cls)
            logger.info('Created redis cache connection')
        return cls.instance

    def __init__(self, **opts):
        self.cache = StrictRedis(**opts)
        logger.info(
            'Created redis cache connection with options: {}'.format(opts))

    def set(self, key, value, expire=None, **opts):
        """Set a value for a key in the cache

        Args:
            key: The key to use for this data
            value: The value to set this key to
            expire (int|datetime.timedelta, optional): The expiration for this value. If `int` is passed, it indicates
                milliseconds
            **opts: Additional options to use. See `FanoutCache` for more details

        Returns:
            (bool): Was this key set?
        """
        return self.cache.set(key, value, px=expire, **opts)

    def get(self, key, **opts):
        """Gets the value stored in the key

        Args:
            key: The key to get the value from
            **opts: Additional options to use.

        Returns:
            The value stored in the key
        """
        return self._decode_response(self.cache.get(key))

    def add(self, key, value, expire=None, **opts):
        """Add a key and a value to the cache if the key is not already in the cache

        Args:
            key: The key to store the value to
            value: The value to store in the key
            expire (int|datetime.timedelta, optional): The expiration for this value. If `int` is passed, it indicates
                milliseconds
            **opts: Additional options to use. See `FanoutCache` for more details

        Returns:
            (bool): Was the key set?
        """
        return self.cache.set(key, value, px=expire, nx=True, **opts)

    def delete(self, key):
        """Deletes a key
        """
        return self.cache.delete(key)

    def incr(self, key, amount=1):
        """Increments a key by an amount.

        If the key is not found, then its value becomes the increment amount specified

        Args:
            key: The key to increment
            amount (int, optional): The amount to increment the key by. Defaults to 1
            retry (bool, optional): Should this operation be retried if the transaction times out? Defaults to
                `self.retry`

        Returns:
            (int): The incremented value
        """
        return int(self.cache.incr(key, amount))

    def decr(self, key, amount=1):
        """Decrements a key by an amount.

        If the key is not found, then its value becomes the decrement amount specified

        Args:
            key: The key to decrement
            amount (int, optional): The amount to decrement the key by. Defaults to 1

        Returns:
            (int): The decremented value
        """
        return int(self.cache.decr(key, amount))

    def rpush(self, key, *values):
        """Pushes a value to the right of a deque.

        This operation also creates a deque for a given key if one was not already created. Otherwise it uses the
        existing deque

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque
        """
        return self.cache.rpush(key, *values)

    def rpop(self, key):
        """Pops a value from the right of a deque.

        If this key is not a deque then this function will return None.

        Args:
            key: The key of the deque to pop the value from

        Returns:
            The rightmost value on the deque or None if the key is not a deque or the deque is empty
        """
        return self._decode_response(self.cache.rpop(key))

    def lpush(self, key, *values):
        """Pushes a value to the left of a deque.

        This operation also creates a deque for a given key if one was not already created. Otherwise it uses the
        existing deque

        Args:
            key: The key of the deque to push the values to
            *values: The values to push to the deque
        """
        return self.cache.lpush(key, *values)

    def lpop(self, key):
        """Pops a value from the left of a deque.

        If this key is not a deque then this function will return None.

        Args:
            key: The key of the deque to pop the value from

        Returns:
            The leftmost value on the deque or None if the key is not a deque or the deque is empty
        """
        return self._decode_response(self.cache.lpop(key))

    @staticmethod
    def _decode_response(response):
        if response is None:
            return response
        try:
            return response.decode('utf-8')
        except UnicodeDecodeError:
            return response

    def subscribe(self, channel):
        """Subscribe to a channel

        Args:
            channel (str): The name of the channel to subscribe to

        Returns:
            (RedisSubscription): The subscription for this channel
        """
        subscription = self.cache.pubsub()
        subscription.subscribe(channel)
        subscription.get_message()
        return RedisSubscription(channel, subscription)

    def unsubscribe(self, channel):
        """Unsubscribe to a channel

        Args:
            channel (str): The name of the channel to subscribe to

        Returns:
            (int): The number of subscribers unsubscribed ffrom this channel
        """
        return self.cache.publish(channel, unsubscribe_message)

    def publish(self, channel, data):
        """Publish some data to a channel

        Args:
            channel (str): The name of the channel to publish the data to
            data: The data to publish

        Returns:
            The number of subscriptions which received the data
        """
        return self.cache.publish(channel, data)

    def shutdown(self):
        """Shuts down the connection to the cache

        For the Redis cache, this is not necessary. Redis's ConnectionPool should handle it
        """
        pass

    def clear(self):
        """Clears all values in the cache
        """
        self.cache.flushdb()

    def check(self):
        """Checks the cache connection by requesting server info"""
        self.cache.info()

    def ping(self):
        """Pings the Redis cache to test the connection

        Returns:
            (bool): True if the ping was successful, False otherwise.
        """
        return self.cache.ping()

    def scan(self, pattern=None):
        """Scans through all keys in the cache

        Args:
            pattern (str, optional): Glob-style pattern to match keys against

        Returns:
            Iterator(str): The keys in the cache matching the pattern if specified. Else all the keys in the cache
        """
        return (key.decode('utf-8') for key in self.cache.scan_iter(pattern))

    def exists(self, key):
        """Checks to see if a key exists in the cache

        Args:
            key: The key to check

        Returns:
            bool: Does the key exist?
        """
        return bool(self.cache.exists(key))

    def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None):
        """Gets a distributed lock backed by the cache

        Args:
            name (str): The name of the lock
            timeout (float): The maximum life for the lock in seconds. If none is specified, it will remain locked
                until release() is called on the lock
            sleep (float): The amount of time to sleep per loop iteration when the lock is in blocking mode and another
                client is currently holding the lock
            blocking_timeout (float): The maximum amount of time in seconds to spend trying to acquire the lock. If
                none is specified, the lock will continue trying forever
        Returns:
            A lock
        """
        return self.cache.lock(name,
                               timeout=timeout,
                               sleep=sleep,
                               blocking_timeout=blocking_timeout)

    @classmethod
    def from_json(cls, json_in):
        """Constructs this cache from its JSON representation

        Args:
            json_in (dict): The JSON representation of this cache configuration

        Returns:
            (RedisCacheAdapter): A RedisCacheAdapter with a configuration reflecting the values in the JSON
        """
        password = os.getenv('WALKOFF_REDIS_PASSWORD')
        if password is not None:
            json_in['password'] = password
        if 'timeout' in json_in and json_in['timeout'] > 0:
            json_in['socket_timeout'] = json_in.pop('timeout')
        return cls(**json_in)
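A minimal usage sketch for the adapter above, assuming a reachable local Redis server (key and channel names are illustrative):

cache = RedisCacheAdapter(host='localhost', port=6379, db=0)
cache.set('greeting', 'hello', expire=60000)   # expire is passed as px, i.e. milliseconds
print(cache.get('greeting'))                   # 'hello'
subscription = cache.subscribe('events')       # returns a RedisSubscription
cache.publish('events', 'something happened')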
예제 #15
0
파일: cli.py 프로젝트: LiBuchauer/pyABC
def work_on_population(redis: StrictRedis, start_time: int, max_runtime_s: int,
                       kill_handler: KillHandler):
    """
    Here the actual sampling happens.
    """

    # set timers
    population_start_time = time()
    cumulative_simulation_time = 0

    # read from pipeline
    pipeline = redis.pipeline()
    # extract bytes
    ssa_b, batch_size_b, all_accepted_b, n_req_b, n_acc_b \
        = (pipeline.get(SSA).get(BATCH_SIZE)
           .get(ALL_ACCEPTED).get(N_REQ).get(N_ACC).execute())

    if ssa_b is None:
        return

    kill_handler.exit = False

    if n_acc_b is None:
        return

    # convert from bytes
    simulate_one, sample_factory = pickle.loads(ssa_b)
    batch_size = int(batch_size_b.decode())
    all_accepted = bool(int(all_accepted_b.decode()))
    n_req = int(n_req_b.decode())

    # notify sign up as worker
    n_worker = redis.incr(N_WORKER)
    logger.info(f"Begin population, batch size {batch_size}. "
                f"I am worker {n_worker}")

    # counter for number of simulations
    internal_counter = 0

    # create empty sample
    sample = sample_factory()

    # loop until no more particles required
    while int(redis.get(N_ACC).decode()) < n_req \
            and (not all_accepted or int(redis.get(N_EVAL).decode()) < n_req):
        if kill_handler.killed:
            logger.info(f"Worker {n_worker} received stop signal. "
                        f"Terminating in the middle of a population "
                        f"after {internal_counter} samples.")
            # notify quit
            redis.decr(N_WORKER)
            sys.exit(0)

        # check whether time's up
        current_runtime = time() - start_time
        if current_runtime > max_runtime_s:
            logger.info(f"Worker {n_worker} stops during population because "
                        f"runtime {current_runtime} exceeds "
                        f"max runtime {max_runtime_s}")
            # notify quit
            redis.decr(N_WORKER)
            return

        # increase global number of evaluations counter
        particle_max_id = redis.incr(N_EVAL, batch_size)

        # timer for current simulation until batch_size acceptances
        this_sim_start = time()
        # collect accepted particles
        accepted_samples = []

        # make batch_size attempts
        for n_batched in range(batch_size):
            # increase evaluation counter
            internal_counter += 1
            try:
                # simulate
                new_sim = simulate_one()
                # append to current sample
                sample.append(new_sim)
                # check for acceptance
                if new_sim.accepted:
                    # the order of the IDs is reversed, but this does not
                    # matter. Important is only that the IDs are specified
                    # before the simulation starts

                    # append to accepted list
                    accepted_samples.append(
                        cloudpickle.dumps(
                            (particle_max_id - n_batched, sample)))
                    # initialize new sample
                    sample = sample_factory()
            except Exception as e:
                logger.warning(f"Redis worker number {n_worker} failed. "
                               f"Error message is: {e}")
                # initialize new sample to be sure
                sample = sample_factory()

        # update total simulation-specific time
        cumulative_simulation_time += time() - this_sim_start

        # push to pipeline if at least one sample got accepted
        if len(accepted_samples) > 0:
            # new pipeline
            pipeline = redis.pipeline()
            # update particles counter
            pipeline.incr(N_ACC, len(accepted_samples))
            # note: samples are appended 1-by-1
            pipeline.rpush(QUEUE, *accepted_samples)
            # execute all commands
            pipeline.execute()

    # end of sampling loop

    # notify quit
    redis.decr(N_WORKER)
    kill_handler.exit = True
    population_total_time = time() - population_start_time
    logger.info(f"Finished population, did {internal_counter} samples. "
                f"Simulation time: {cumulative_simulation_time:.2f}s, "
                f"total time {population_total_time:.2f}.")
예제 #16
0
class Redis(object):
    def __init__(self, host: str = "localhost", port: int = 6379, db: int = 0, password: str = None):
        self.redis = StrictRedis(host=host, port=port, db=db, password=password)

    @staticmethod
    def help():
        """
        Print help information
        """
        print("open in browser => https://cloud.tencent.com/developer/article/1151834")


    def increase(self, name, amount=1) -> int:
        """
        Increment operation
        :param name: key name
        :param amount: amount to add, default 1
        :return: value after the increment
        """
        return self.redis.incr(name, amount)

    def decrease(self, name, amount=1) -> int:
        """
        Decrement operation
        :param name: key name
        :param amount: amount to subtract, default 1
        :return: value after the decrement
        """
        return self.redis.decr(name, amount)

    def set_keys(self, **kwargs):
        """
        Store values as strings
        :param kwargs: string key-value pairs to insert
        """
        self.redis.mset(kwargs)

    def get_keys(self, *string_name, decode="utf-8") -> list:
        """
        Return the string values, decoded
        :param string_name: names of the strings to query
        :param decode: whether to decode the returned strings, defaults to utf-8
        """
        if decode:
            return decode_list(self.redis.mget(string_name), decode)
        return self.redis.mget(string_name)

    def add_list_tail(self, list_name, *value) -> int:
        """
        Append values to the tail of the list
        :param list_name: list name
        :param value: values
        :return: list size
        """
        return self.redis.rpush(list_name, *value)

    def add_list_head(self, list_name, *value) -> int:
        """
        Prepend values to the head of the list
        :param list_name: list name
        :param value: values
        :return: list size
        """
        return self.redis.lpush(list_name, *value)

    def get_list_length(self, list_name) -> int:
        """
        Return the list length
        :param list_name: list name
        :return: list length
        """
        return self.redis.llen(list_name)

    def pop_list_head(self, list_name, decode="utf-8") -> str:
        """
        Pop the head of the list
        :param list_name: list name
        :param decode: whether to decode, and with which encoding
        :return: the first value of the list
        """
        if decode:
            return self.redis.lpop(list_name).decode(decode)
        return self.redis.lpop(list_name)

    def pop_list_tail(self, list_name, decode="utf-8") -> str:
        """
        Pop the tail of the list
        :param list_name: list name
        :param decode: whether to decode, and with which encoding
        :return: the last value of the list
        """
        if decode:
            return self.redis.rpop(list_name).decode(decode)
        return self.redis.rpop(list_name)

    def get_list_value(self, list_name, start=0, end=-1, decode="utf-8") -> list:
        """
        Get elements from the list
        :param list_name: list name
        :param start: start index
        :param end: end index
        :param decode: whether to decode, and with which encoding
        :return: the values in [start, end]
        """
        if decode:
            return decode_list(self.redis.lrange(list_name, start, end), decode)
        return self.redis.lrange(list_name, start, end)

    def set_list_value(self, list_name, index, value):
        """
        Set the value at the given index of the list; raises an error if out of range
        :param list_name: list name
        :param index: index
        :param value: new value
        """
        self.redis.lset(list_name, index, value)

    def remove_list_item_by_value(self, list_name, value, count=1) -> int:
        """
        Remove count occurrences of value from the list named list_name
        :param list_name: list name
        :param value: value
        :param count: number of occurrences to remove
        :return: number of elements removed
        """
        return self.redis.lrem(list_name, count, value)

    def add_to_set(self, set_name, *values) -> int:
        """
        Add elements to the set
        :param set_name: set name
        :param values: data
        :return: number of elements inserted
        """
        return self.redis.sadd(set_name, *values)

    def remove_set_item_by_value(self, set_name, *values):
        """
        Remove elements from the set
        :param set_name: set name
        :param values: data to remove
        :return: number of elements removed
        """
        return self.redis.srem(set_name, *values)

    def get_set_length(self, set_name):
        """
        Return the set cardinality
        :param set_name: set name
        :return: set size
        """
        return self.redis.scard(set_name)

    def pop_set_random(self, set_name, count=1, decode="utf-8") -> list:
        """
        Randomly pop count values from the set
        :param set_name: set name
        :param count: number of values to pop
        :param decode: whether to decode, and with which encoding
        :return: values from the set
        """
        if decode:
            return decode_list(self.redis.spop(set_name, count), decode)
        return self.redis.spop(set_name, count)

    def inter_set(self, set_names: list, decode="utf-8") -> set:
        """
        Intersection of the sets in the list
        :param set_names: list of set names
        :param decode: whether to decode the result, and with which encoding
        :return: the intersection, as a set
        """
        if decode:
            return set(decode_list(self.redis.sinter(set_names), decode))
        return self.redis.sinter(set_names)

    def union_set(self, set_names: list, decode="utf-8") -> set:
        """
        Union of the sets in the list
        :param set_names: list of set names
        :param decode: whether to decode the result, and with which encoding
        :return: the union, as a set
        """
        if decode:
            return set(decode_list(self.redis.sunion(set_names), decode))
        return self.redis.sunion(set_names)

    def diff_set(self, set_names: list, decode="utf-8") -> set:
        """
        Difference of the sets in the list
        :param set_names: list of set names
        :param decode: whether to decode the result, and with which encoding
        :return: the difference, as a set
        """
        if decode:
            return set(decode_list(self.redis.sdiff(set_names), decode))
        return self.redis.sdiff(set_names)

    def get_set_all(self, set_name, decode="utf-8") -> set:
        """
        Return all elements of the set
        :param set_name: set name
        :param decode: whether to decode the result, and with which encoding
        :return: all members, as a set
        """
        if decode:
            return set(decode_list(self.redis.smembers(set_name), decode))
        return self.redis.smembers(set_name)

    def get_set_random(self, set_name, count=1, decode="utf-8") -> list:
        """
        Randomly get count values from the set without removing them
        :param set_name: set name
        :param count: number of values to return
        :param decode: whether to decode, and with which encoding
        :return: values from the set
        """
        if decode:
            return decode_list(self.redis.srandmember(set_name, count), decode)
        return self.redis.srandmember(set_name, count)

    def add_to_zset(self, zset_name, **map):
        """
        Add elements to the sorted set
        :param zset_name: sorted set name
        :param map: mapping where the key is the member to insert and the value is its integer score
        """
        self.redis.zadd(zset_name, map)

    def remove_zset_item_by_value(self, zset_name, *value):
        """
        Remove the given members from the sorted set
        :param zset_name: sorted set name
        :param value: values to remove
        """
        self.redis.zrem(zset_name, *value)

    def increase_zset_item_by_value(self, zset_name, value, amount=1):
        """
        Increase the score of member value in sorted set zset_name by amount
        :param zset_name: sorted set name
        :param value: member whose score to increase
        :param amount: score increment; negative to decrease
        """
        self.redis.zincrby(zset_name, amount, value)

    def item_rank_in_zset(self, zset_name, value, reverse=False) -> int:
        """
        Return the rank of value in the sorted set
        :param zset_name: sorted set name
        :param value: member
        :param reverse: False -> ascending  True -> descending
        :return: the rank of value in the sorted set
        """
        if reverse:
            return self.redis.zrevrank(zset_name, value)
        return self.redis.zrank(zset_name, value)

    def get_zset_value_by_rank(self, zset_name, start=0, end=-1, withscores=False, decode="utf-8",
                               reverse=False) -> list:
        """
        Get the members of the sorted set whose rank lies in the given range
        :param zset_name: sorted set name
        :param start: start index
        :param end: end index
        :param withscores: whether to include scores
        :param decode: whether to decode the result
        :param reverse: False -> ascending  True -> descending
        :return: members with ranks in the range, as a list
        """
        if withscores:
            if decode:
                return decode_list_tuple(self.redis.zrange(zset_name, start, end, reverse, withscores), decode)
            return self.redis.zrange(zset_name, start, end, reverse, withscores)
        else:
            if decode:
                return decode_list(self.redis.zrange(zset_name, start, end, reverse, withscores), decode)
            return self.redis.zrange(zset_name, start, end, reverse, withscores)

    def get_zset_value_by_score(self, zset_name, min, max, start=None, num=None, withscores=False,
                                decode="utf-8", ) -> list:
        """
        Get the members of the sorted set whose score lies in the given range
        :param zset_name: sorted set name
        :param min: minimum score
        :param max: maximum score
        :param start: index to start returning results from
        :param num: number of results; start and num must be given together
        :param withscores: whether to include scores
        :param decode: whether to decode the result
        :return: members with scores in the range, as a list
        """
        if withscores:
            if decode:
                return decode_list_tuple(self.redis.zrangebyscore(zset_name, min, max, start, num, withscores), decode)
            return self.redis.zrangebyscore(zset_name, min, max, start, num, withscores)
        else:
            if decode:
                return decode_list(self.redis.zrangebyscore(zset_name, min, max, start, num, withscores), decode)
            return self.redis.zrangebyscore(zset_name, min, max, start, num, withscores)

    def count_zset_value_by_score(self, zset_name, min, max) -> int:
        """
        Count the members whose score lies within the given range
        :param zset_name: sorted set name
        :param min: minimum score
        :param max: maximum score
        :return: the count
        """
        return self.redis.zcount(zset_name, min, max)

    def get_zset_length(self, zset_name) -> int:
        """
        Return the cardinality of the sorted set
        :param zset_name: sorted set name
        :return: the length
        """
        return self.redis.zcard(zset_name)

    def remove_zset_item_by_rank(self, zset_name, start, end):
        """
        Remove members of the sorted set whose rank lies in the given range
        :param zset_name: sorted set name
        :param start: start rank
        :param end: end rank
        """
        self.redis.zremrangebyrank(zset_name, start, end)

    def remove_zset_item_by_score(self, zset_name, min, max):
        """
        Remove members of the sorted set whose score lies in the given range
        :param zset_name: sorted set name
        :param min: minimum score
        :param max: maximum score
        """
        self.redis.zremrangebyscore(zset_name, min, max)

    def add_to_map(self, map_name, **kwargs) -> int:
        """
        Add key-value pairs to the hash
        :param map_name: hash name
        :param kwargs: key-value pairs to add
        :return: number of key-value pairs added
        """
        self.redis.hmset(map_name, kwargs)
        return len(kwargs)

    def remove_map_item_by_keys(self, map_name, *keys) -> int:
        """
        Remove the entries with the given keys from the hash
        :param map_name: hash name
        :param keys: list of keys to remove
        :return: number of entries removed
        """
        self.redis.hdel(map_name, *keys)
        return len(keys)

    def update_map_item(self, map_name, **kwargs) -> int:
        """
        Update key-value pairs in the hash
        :param map_name: hash name
        :param kwargs: new key-value pairs; a pair is only written if the field does not yet exist (hsetnx)
        :return: number of pairs processed
        """
        for key, value in kwargs.items():
            self.redis.hsetnx(map_name, key, value)
        return len(kwargs)

    def get_value_from_map_by_keys(self, map_name, *keys, decode="utf-8") -> list:
        """
        Get the values for the given keys from the hash
        :param map_name: hash name
        :param keys: keys to fetch
        :param decode: whether to decode the result, and with which encoding
        :return: the corresponding values
        """
        if decode:
            return decode_list(self.redis.hmget(map_name, keys), decode)
        return self.redis.hmget(map_name, keys)

    def get_map_length(self, map_name) -> int:
        """
        Get the number of fields in the hash
        :param map_name: hash name
        :return: number of fields
        """
        return self.redis.hlen(map_name)

    def get_map_all_keys(self, map_name, decode="utf-8") -> list:
        """
        Return all keys of the hash
        :param map_name: hash name
        :param decode: whether to decode the result, and with which encoding
        :return: list of hash keys
        """
        if decode:
            return decode_list(self.redis.hkeys(map_name), decode)
        return self.redis.hkeys(map_name)

    def get_map_all_values(self, map_name, decode="utf-8") -> list:
        """
        Return all values of the hash
        :param map_name: hash name
        :param decode: whether to decode the result, and with which encoding
        :return: list of hash values
        """
        if decode:
            return decode_list(self.redis.hvals(map_name), decode)
        return self.redis.hvals(map_name)
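A minimal usage sketch for the wrapper class above, assuming a reachable local Redis server (key names are illustrative):

r = Redis(host="localhost", port=6379, db=0)
print(r.increase("visits"))            # 1 on a fresh key
r.add_list_tail("jobs", "a", "b", "c")
print(r.get_list_length("jobs"))       # 3
print(r.pop_list_head("jobs"))         # 'a'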
예제 #17
0
def work_on_population_dynamic(analysis_id: str, t: int, redis: StrictRedis,
                               catch: bool, start_time: float,
                               max_runtime_s: float,
                               kill_handler: KillHandler):
    """Work on population in dynamic mode.
    Here the actual sampling happens.
    """
    # short-form
    ana_id = analysis_id

    def get_int(var: str):
        """Convenience function to read an int variable."""
        return int(redis.get(idfy(var, ana_id, t)).decode())

    # set timers
    population_start_time = time()
    cumulative_simulation_time = 0

    # read from pipeline
    pipeline = redis.pipeline()

    # extract bytes
    (ssa_b, batch_size_b, all_accepted_b, is_look_ahead_b,
     max_eval_look_ahead_b) = (pipeline.get(idfy(SSA, ana_id, t)).get(
         idfy(BATCH_SIZE, ana_id, t)).get(idfy(ALL_ACCEPTED, ana_id, t)).get(
             idfy(IS_LOOK_AHEAD, ana_id,
                  t)).get(idfy(MAX_N_EVAL_LOOK_AHEAD, ana_id, t)).execute())

    # if the ssa object does not exist, something went wrong, return
    if ssa_b is None:
        return

    # only allow stopping the worker at particular points
    kill_handler.exit = False

    # convert from bytes
    simulate_one, sample_factory = pickle.loads(ssa_b)
    batch_size = int(batch_size_b.decode())
    all_accepted = bool(int(all_accepted_b.decode()))
    is_look_ahead = bool(int(is_look_ahead_b.decode()))
    max_n_eval_look_ahead = float(max_eval_look_ahead_b.decode())

    # notify sign up as worker
    n_worker = redis.incr(idfy(N_WORKER, ana_id, t))
    logger.info(f"Begin generation {t}, batch size {batch_size}. "
                f"I am worker {n_worker}")

    # counter for number of simulations
    internal_counter = 0

    # create empty sample
    sample = sample_factory(is_look_ahead=is_look_ahead)

    # loop until no more particles required
    # all numbers are re-loaded in each iteration as they can dynamically
    #  update
    while get_int(N_ACC) < get_int(N_REQ) and (
            not all_accepted
            or get_int(N_EVAL) - get_int(N_FAIL) < get_int(N_REQ)):
        # check whether the process was externally asked to stop
        if kill_handler.killed:
            logger.info(f"Worker {n_worker} received stop signal. "
                        "Terminating in the middle of a population "
                        f"after {internal_counter} samples.")
            # notify quit
            redis.decr(idfy(N_WORKER, ana_id, t))
            sys.exit(0)

        # check whether time's up
        current_runtime = time() - start_time
        if current_runtime > max_runtime_s:
            logger.info(f"Worker {n_worker} stops during population because "
                        f"runtime {current_runtime} exceeds "
                        f"max runtime {max_runtime_s}")
            # notify quit
            redis.decr(idfy(N_WORKER, ana_id, t))
            # return to task queue
            return

        # check whether the analysis was terminated or replaced by a new one
        ana_id_new_b = redis.get(ANALYSIS_ID)
        if ana_id_new_b is None or str(ana_id_new_b.decode()) != ana_id:
            logger.info(f"Worker {n_worker} stops during population because "
                        "the analysis seems to have been stopped.")
            # notify quit
            redis.decr(idfy(N_WORKER, ana_id, t))
            # return to task queue
            return

        # check if the analysis left the look-ahead mode
        if is_look_ahead and not bool(
                int(redis.get(idfy(IS_LOOK_AHEAD, ana_id, t)).decode())):
            # reload SSA object
            ssa_b = redis.get(idfy(SSA, ana_id, t))
            simulate_one, sample_factory = pickle.loads(ssa_b)
            # cache
            is_look_ahead = False
            # create new empty sample for clean split
            sample = sample_factory(is_look_ahead=is_look_ahead)

        # check if in look-ahead mode and should sleep
        if is_look_ahead and get_int(N_EVAL) >= max_n_eval_look_ahead:
            # sleep ... seconds
            sleep(SLEEP_TIME)
            continue

        # all synchronized operations should be in a lock
        with redis.lock(EVAL_LOCK):
            # increase global evaluation counter (before simulation!)
            particle_max_id: int = redis.incr(idfy(N_EVAL, ana_id, t),
                                              batch_size)

            # update collection of active indices
            add_ix_to_active_set(redis=redis,
                                 ana_id=ana_id,
                                 t=t,
                                 ix=particle_max_id)

            if is_look_ahead:
                # increment look-ahead evaluation counter
                redis.incr(idfy(N_LOOKAHEAD_EVAL, ana_id, t), batch_size)

        # timer for current simulation until batch_size acceptances
        this_sim_start = time()
        # collect accepted particles
        accepted_samples = []
        # whether any particle in this iteration is preliminary
        any_prel = False

        # make batch_size attempts
        for n_batched in range(batch_size):
            # increase evaluation counter
            internal_counter += 1
            try:
                # simulate
                new_sim = simulate_one()
            except Exception as e:
                logger.warning(f"Redis worker number {n_worker} failed. "
                               f"Error message is: {e}")
                # increment the failure counter
                redis.incr(idfy(N_FAIL, ana_id, t), 1)
                if not catch:
                    raise e
                continue

            # append to current sample
            sample.append(new_sim)
            # check for acceptance
            if new_sim.accepted:
                # The order of the IDs is reversed, but this does not
                #  matter. Important is only that the IDs are specified
                #  before the simulation starts

                # append to accepted list
                accepted_samples.append(
                    pickle.dumps((particle_max_id - n_batched, sample)))
                any_prel = any_prel or any_particle_preliminary(sample)
                # initialize new sample
                sample = sample_factory(is_look_ahead=is_look_ahead)

        # update total simulation-specific time
        cumulative_simulation_time += time() - this_sim_start

        # push to pipeline if at least one sample got accepted
        if len(accepted_samples) > 0:
            # new pipeline
            pipeline = redis.pipeline()
            # update particles counter if nothing is preliminary,
            #  otherwise final acceptance is done by the sampler
            if not any_prel:
                pipeline.incr(idfy(N_ACC, ana_id, t), len(accepted_samples))
            # note: samples are appended 1-by-1
            pipeline.rpush(idfy(QUEUE, ana_id, t), *accepted_samples)
            # execute all commands
            pipeline.execute()

        # update collection of active indices
        discard_ix_from_active_set(redis=redis,
                                   ana_id=ana_id,
                                   t=t,
                                   ix=particle_max_id)

    # end of sampling loop

    # notify quit
    redis.decr(idfy(N_WORKER, ana_id, t))
    kill_handler.exit = True
    population_total_time = time() - population_start_time
    logger.info(f"Finished generation {t}, did {internal_counter} samples. "
                f"Simulation time: {cumulative_simulation_time:.2f}s, "
                f"total time {population_total_time:.2f}.")
rediss://[:password]@host:port/db   # Redis TCP+SSL connection
unix://[:password]@/path/to/socket.sock?db=db    # Redis Unix socket connection
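Such URLs can be handed to redis-py directly; a minimal sketch (credentials and host are placeholders):

from redis import StrictRedis

r = StrictRedis.from_url("rediss://:mypassword@example.com:6379/0")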

String operations
    Method                          Purpose                                                                     Example                                                           Example result
set(name, value)                Set name to value                                                           redis.set('name', 'Bob')                                          True
get(name)                       Return the string value stored at key name                                  redis.get('name')                                                 b'Bob'
getset(name, value)             Set name to value and return the previous value                             redis.getset('name', 'Mike')                                      b'Bob'
mget(keys, *args)               Return the values for multiple keys                                         redis.mget(['name', 'nickname'])                                  [b'Mike', b'Miker']
setnx(name, value)              Set the value only if the key does not exist                                redis.setnx('newname', 'James')                                   True the first time, False the second
setex(name, time, value)        Set a string value together with an expiry time                             redis.setex('name', 1, 'James')                                   True
setrange(name, offset, value)   Overwrite part of the string value starting at offset                       redis.set('name', 'Hello') redis.setrange('name', 6, 'World')    11, the length of the modified string
mset(mapping)                   Set multiple key-value pairs at once                                        redis.mset({'name1': 'Durant', 'name2': 'James'})                 True
msetnx(mapping)                 Set multiple key-value pairs only if none of the keys exist                 redis.msetnx({'name3': 'Smith', 'name4': 'Curry'})                True
incr(name, amount=1)            Increment the value of name, default 1; a missing key is created as amount  redis.incr('age', 1)                                              1, the value after the increment
decr(name, amount=1)            Decrement the value of name, default 1; a missing key is created as -amount redis.decr('age', 1)                                              -1, the value after the decrement
append(key, value)              Append value to the string stored at key                                    redis.append('nickname', 'OK')                                    13, the length of the modified string
substr(name, start, end=-1)     Return a substring of the string stored at name                             redis.substr('name', 1, 4)                                        b'ello'
getrange(key, start, end)       Return the substring of key's value from start to end                       redis.getrange('name', 1, 4)                                      b'ello'
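A few of these commands as a runnable sketch (assumes a local server and a StrictRedis instance named redis, as in the snippets below):

redis = StrictRedis(host='localhost', port=6379, db=0)
redis.set('name', 'Hello')
print(redis.getrange('name', 1, 4))   # b'ello'
print(redis.incr('age', 1))           # 1 on a fresh key
print(redis.getset('name', 'Mike'))   # b'Hello'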

# Source code
def set(self, name, value, ex=None, px=None, nx=False, xx=False):
        """
        Set the value at key ``name`` to ``value``
        ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds.
        ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.
        ``nx`` if set to True, set the value at key ``name`` to ``value`` only
            if it does not exist.
        ``xx`` if set to True, set the value at key ``name`` to ``value`` only
            if it already exists.
        """
예제 #19
0
파일: views.py 프로젝트: xinluh/takeNote
def stream_frames(stream, pafy_video = None):
    r = StrictRedis('localhost')
    try:
        r.incr('counter') # keep track of how many processes are running
        demo_diff = 0
        video_length = pafy_video.length if pafy_video else (5412-demo_diff if 'rubakov1' in stream else 5000)
        if pafy_video:
            yield server_event_msg({'video_length': pafy_video.length,
                                    'video_title': pafy_video.title,
                                    'video_desc': pafy_video.description,
                                    'video_author': pafy_video.author,
                                    'video_url': pafy_video.url},
                                   'onstart')
        else:
            if 'rubakov1' in stream:
                demo_diff = 4*60 # the demo video is four min in
                yield server_event_msg({"video_author": "Galileo Galilei",
                                        "video_length": 5412-demo_diff,
                                        "video_title": "Early Universe - V. Rubakov - lecture 1/9",
                                        "video_url": "https://www.youtube.com/watch?v=XsqtPhra2f0",
                                        "video_desc": "GGI lectures on the theory of fundamental interactions, January 2015\nhttp://heidi.pd.infn.it/html/GGI/index.php"},
                                       'onstart')
            else:
                yield server_event_msg({'video_length': 5000,'video_title': stream }, 'onstart')
                
     
        hist = defaultdict(float)
        it = utils.find_text_in_video(
                 utils.get_frames_from_stream(stream,3),
                 lambda frame,base_frames: utils.find_text_in_frame(frame, base_frames, proba_threshold=0.5))
     
        for dtype, data in it:
            if dtype == 'new_frame':
                yield server_event_msg({'sec': int(data[0])},'onprogress')
            elif dtype == 'new_blob':
                yield server_event_msg({'img': utils.img_to_base64_bytes(data['blob']), #utils.img_to_base64_bytes(255-np.nan_to_num(abs(blob))),
                                                 'sec': int(data['sec']+demo_diff),
                                                 'proba': round(data['proba'],2),
                                                 'left_corner': data['left_corner'],
                                                 'size': data['blob'].shape,
                                                 'n_sameblobs': data['n_sameblobs'],
                                                 # 'frame': utils.img_to_base64_bytes(data['frame'])
                                             })
                if 'blob_bw' not in data: data['blob_bw'] = img_proc_utils.otsu_thresholded(data['blob'])
                hist[(int(data['sec']+demo_diff)/60)] += np.count_nonzero(data['blob_bw'][data['blob_bw']>0])
                
                # print hist, {'hist': [{'x': k, 'y': v} for k,v in hist.iteritems()]}
                # yield server_event_msg({'hist': [{'x': k, 'y': int(v/10.)} for k,v in hist.iteritems()]}, 'onhist')
                yield server_event_msg({'hist': [{'x': i, 'y':  hist.get(i,0)} for i in xrange(video_length/60)]}, 'onhist')
            elif dtype == "erased_blob":
                yield server_event_msg({'sec': int(data['sec']+demo_diff),
                                        'removed_sec': int(data['removed_at_sec']+demo_diff),
                                        'left_corner': data['left_corner']},
                                       'onerasure')
                hist[(int(data['removed_at_sec']+demo_diff)/60)] -= np.count_nonzero(data['blob_bw'][data['blob_bw']>0])
                yield server_event_msg({'hist': [{'x': i, 'y':  hist.get(i,0)} for i in xrange(video_length/60)]}, 'onhist')
     
        yield server_event_msg({'end':True}, 'onend')
        raise StopIteration
    finally:
        r.decr('counter')        
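The counter bookkeeping above follows a simple pattern: increment on entry and decrement in a finally block so an exception cannot leak the count. A minimal sketch of just that pattern:

r = StrictRedis('localhost')

def tracked_work():
    r.incr('counter')   # register this process
    try:
        pass            # do the actual streaming work here
    finally:
        r.decr('counter')   # always deregister, even on errors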
예제 #20
0
# print(redis.flushdb())
# delete all keys in every database
# print(redis.flushall())
# set a value and return the previous value
print(redis.getset('nickname', 'newname'))
# return the values for multiple keys
print(redis.mget(['nickname', 'name2']))
# set multiple keys at once
print(redis.mset({'n': 1, 'x': 2}))
# set the value only if the key does not exist (insert only, no update); the second call has no effect
print(redis.setnx('ni', 'wo'))
print(redis.setnx('ni', '2'))
# batch version: set values only if the keys do not exist (insert only, no update); the second call has no effect
print(redis.msetnx({'nix': 1, 'wox': 2}))
print(redis.msetnx({'nix': 3, 'wox': 4}))
# set a key's value together with an expiry time; works for both new and existing keys
print(redis.setex('wo', 120, 'Lucy'))
# write a string into the value at an offset; a missing key is created, and gaps before
# the offset are zero-padded (the value becomes binary); an offset of 0 is fine
print(redis.setrange('ni', 2, 'hao'))
print(redis.setrange('nini', 2, 'hao'))
# increment, default 1
print(redis.incr('nix', 10))
# decrement, default 1
print(redis.decr('nix', 10))
# append to the end of the value
print(redis.append('nickname', 'nickname'))
# get a substring given start and end; end defaults to -1
print(redis.substr('nickname', 3))
# get a substring given start and end; end is required here (it can also be -1)
print(redis.getrange('nickname', 4, 5))
예제 #21
0
class LibRedis:

    # default prefix for all keys
    key_prefix = 'RAWE_'

    # redis connection object
    obj_redis = None

    # default expiration time: 3 days
    DEFAULT_EXPIRE = 259200

    def __init__(self, host='127.0.0.1', port=6379, db=0, prefix=None, charset='utf-8'):
        """
        Initialize
        """
        if not host or not port:
            return None

        if prefix:
            self.key_prefix = prefix.strip()
        # construct
        self.obj_redis = StrictRedis(
            host=host, port=port, db=db, charset=charset)

    def key_make(self, keyname=None):
        """
        Process every key by adding the prefix.
        If no prefix was set at instantiation, the default prefix is used.
        """
        if not keyname:
            return None

        return self.key_prefix + str(keyname).strip()

    def set_expire(self, keyname=None):
        """
        Set the key's expiration time; invoked by the decorator
        """
        if not keyname:
            return None

        return self.obj_redis.expire(self.key_make(keyname), self.DEFAULT_EXPIRE)

    # --------------------------------------------------------
    # String
    # --------------------------------------------------------

    @wraps_set_expire
    def set(self, keyname=None, value=None):
        """
        Set the value of the given key.
        If the key already holds another value, SET overwrites it regardless of type.
        return:
        OK only when the set operation completes successfully
        """
        if not keyname or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        if isinstance(value, str):
            value = value.strip()

        return self.obj_redis.set(keyname, value)

    def get(self, keyname=None):
        """
        Get the value of the given key.
        return:
        the value of the key
        nil (None) if the key does not exist
        an error if the stored value is not a string
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.get(keyname)

        return None if not result else bytes.decode(result)

    def delete(self, keyname=None):
        """
        Delete an existing key; keys that do not exist are ignored
        return:
        number of keys deleted
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.delete(keyname)

    @wraps_set_expire
    def append(self, keyname=None, value=None):
        """
        Append a value to the given keyname.
        If keyname already exists and is a string,
        APPEND appends value to the end of its current value.
        If keyname does not exist,
        APPEND simply sets keyname to value, just like SET keyname value
        return:
        length of the string at keyname after the append
        """
        if not keyname or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        if isinstance(value, str):
            value = value.strip()
        else:
            value = str(value)

        return self.obj_redis.append(keyname, value)

    @wraps_set_expire
    def incr(self, keyname=None, expire=None):
        """
        Increment the number stored at keyname by one.
        If keyname does not exist, it is initialized to 0 before the INCR.
        An error is returned if the value has the wrong type or cannot be represented as a number.
        The operation is limited to 64-bit signed integers.
        return:
        the value of the key after the INCR
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.incr(keyname, 1)

    @wraps_set_expire
    def incrBy(self, keyname=None, amount=1):
        """
        Add the given increment to the number stored at keyname.
        If keyname does not exist, it is initialized to 0 before the INCRBY.
        An error is returned if the value has the wrong type or cannot be represented as a number.
        The operation is limited to 64-bit signed integers.
        return:
        the value of the key after adding the increment
        """
        if not keyname or not amount:
            return None

        keyname = self.key_make(keyname.strip())

        if isinstance(amount, int):
            amount = max(0, amount)
        else:
            amount = 1

        return self.obj_redis.incrby(keyname, amount)

    @wraps_set_expire
    def decr(self, keyname=None):
        """
        Decrement the number stored at the key by one.
        If the key does not exist, it is initialized to 0 before the DECR.
        An error is returned if the value has the wrong type or cannot be represented as a number.
        The operation is limited to 64-bit signed integers.
        return:
        the value of the key after the command
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.decr(keyname, 1)

    @wraps_set_expire
    def decrBy(self, keyname=None, amount=1):
        """
        Subtract the given decrement from the value stored at keyname.
        If keyname does not exist, it is initialized to 0 before the DECRBY.
        An error is returned if the value has the wrong type or cannot be represented as a number.
        The operation is limited to 64-bit signed integers
        """
        if not keyname or not amount:
            return None

        keyname = self.key_make(keyname.strip())
        amount = int(amount)
        return self.obj_redis.decr(keyname, amount)

    # --------------------------------------------------------
    # Hash
    # A mapping of string fields to values; hashes are well suited for storing objects
    # Each hash can hold 2^32 - 1 field-value pairs (over 4 billion)
    # --------------------------------------------------------

    @wraps_set_expire
    def hSet(self, keyname=None, key=None, value=None):
        """
        Set field key of the hash named keyname to value.
        If the hash does not exist, a new hash is created before the hset.
        If the field already exists in the hash, the old value is overwritten.
        Returns FALSE on error.
        Returns 1 if the field is new and was set successfully.
        Returns 0 if the field already existed and its value was overwritten.
        """
        if not keyname or not key or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        key = key.strip()
        return self.obj_redis.hset(keyname, key, value)

    @wraps_set_expire
    def hGet(self, keyname=None, key=None):
        """
        Get the value of the given field in the hash.
        Returns the field's value, or None if the field or the key does not exist
        """
        if not keyname or not key:
            return None

        keyname = self.key_make(keyname.strip())
        key = key.strip()

        result = self.obj_redis.hget(keyname, key)
        if not result:
            return None

        # bytes to str
        return bytes.decode(result)

    @wraps_set_expire
    def hLen(self, keyname=None):
        """
        Get the number of fields in the hash.
        Returns 0 when keyname does not exist
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.hlen(keyname)

    @wraps_set_expire
    def hKeys(self, keyname=None):
        """
        Get all fields of the hash.
        Returns the list of field names,
        or an empty list when the key does not exist
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.hkeys(keyname)
        if not result:
            return None

        # bytes to str
        ret_list = list()
        for v in result:
            ret_list.append(bytes.decode(v))

        return ret_list

    @wraps_set_expire
    def hVals(self, keyname=None):
        """
        Get the values of all fields in the hash.
        Returns the list of field values,
        or an empty list when the key does not exist
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.hvals(keyname)
        if not result:
            return None

        # bytes to str
        ret_list = list()
        for v in result:
            ret_list.append(bytes.decode(v))

        return ret_list

    @wraps_set_expire
    def hGetAll(self, keyname=None):
        """
        Get all fields and values of the hash stored at keyname.
        Returns every field and its value; in the raw reply each field name
        is followed by its value, so the reply is twice the size of the hash
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.hgetall(keyname)
        if not result:
            return None

        # bytes to str
        ret_dict = dict()
        for k, v in result.items():
            ret_dict[bytes.decode(k)] = bytes.decode(v)

        return ret_dict

    def hExists(self, keyname=None, key=None):
        """
        Check whether the hash keyname contains the field key.
        Returns True if the hash contains the field,
        False if keyname or the field does not exist
        """
        if not keyname or key is None:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.hexists(keyname, key)

    def hDel(self, keyname=None, *keys):
        """
        Remove one or more fields from the hash; missing fields are ignored
        Returns
        the number of fields actually removed, not counting ignored ones;
        0 if keyname or the field does not exist
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.hdel(keyname, *keys)

    # --------------------------------------------------------
    # List: the left is the head, the right is the tail
    # A list can contain up to 2^32 - 1 elements (4294967295, over 4 billion per list)
    # --------------------------------------------------------

    @wraps_set_expire
    def lPush(self, keyname=None, *values):
        """
        Insert one or more values at the head of the list; returns the list length afterwards.
        If the key does not exist, an empty list is created before the LPUSH.
        An error is returned when the key exists but is not a list.
        Note: before Redis 2.4, LPUSH accepted only a single value
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.lpush(keyname, *values)

    @wraps_set_expire
    def lPop(self, keyname=None):
        """
        Pop the head of the queue: remove and return the first element of the list.
        Returns the first element, or None when the list does not exist
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.lpop(keyname)

    @wraps_set_expire
    def rPush(self, keyname=None, *values):
        """
        Insert one or more values at the tail (right end) of the list; returns the list length afterwards.
        If the list does not exist, an empty list is created before the RPUSH.
        An error is returned when the key exists but is not a list.
        Note: before Redis 2.4, RPUSH accepted only a single value.

        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.rpush(keyname, *values)

    @wraps_set_expire
    def rPop(self, keyname=None):
        """
        Remove and return the last element of the list.
        Returns the last element, or None when the list does not exist
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.rpop(keyname)
        if not result:
            return None
        # bytes to str
        return bytes.decode(result)

    @wraps_set_expire
    def lLen(self, keyname=None):
        """
        Get the list length.
        If the list does not exist it is treated as empty and 0 is returned;
        an error is returned if the key is not a list
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.llen(keyname)

    @wraps_set_expire
    def lTrim(self, keyname=None, start=0, end=-1):
        """
        Keep only the elements within the given range; everything outside it is removed.
        Index 0 is the first element, 1 the second;
        -1 is the last element, -2 the second to last.
        Returns True
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.ltrim(keyname, start, end)

    @wraps_set_expire
    def lGetRange(self, keyname=None, start=0, end=-1):
        """
        Return the elements of the list within the range given by START and END.
        Index 0 is the first element, 1 the second;
        -1 is the last element, -2 the second to last.
        Returns a list containing the elements in the range
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.lrange(keyname, start, end)
        if not result:
            return None
        # bytes to str
        ret_list = list()
        for v in result:
            ret_list.append(bytes.decode(v))

        return ret_list

    @wraps_set_expire
    def lRemove(self, keyname=None, value=None, count=1):
        """
        Remove elements equal to VALUE from the list, according to COUNT:
        count > 0: search from head to tail and remove COUNT matching elements.
        count < 0: search from tail to head and remove |COUNT| matching elements.
        count = 0: remove all elements equal to VALUE.
        Returns the number of removed elements; 0 if the list or element does not exist
        """
        if not keyname or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.lrem(keyname, count, value)

    # --------------------------------------------------------
    # Set: an unordered collection
    # A Set is an unordered collection of unique String members.
    # Sets are implemented with hash tables, so add, remove and lookup are all O(1)
    # A set can hold up to 2^32 - 1 members (4294967295, over 4 billion per set)
    # --------------------------------------------------------

    @wraps_set_expire
    def sAdd(self, keyname=None, *values):
        """
        Add one or more members to the set; members already present are ignored.
        If the set does not exist, a new set containing the added members is created.
        An error is returned when the key is not a set.
        Note: before Redis 2.4, SADD accepted only a single member.
        """
        if not keyname:
            return None
        keyname = self.key_make(keyname.strip())
        return self.obj_redis.sadd(keyname, *values)

    @wraps_set_expire
    def sCard(self, keyname=None):
        """
        Get the number of elements in the set.
        Returns the cardinality, or 0 when the set does not exist
        """
        if not keyname:
            return None
        keyname = self.key_make(keyname.strip())
        return self.obj_redis.scard(keyname)

    def sDiff(self, keyname=None, *keys):
        """
        Difference
        Return the set obtained by subtracting the given sets, i.e. the set difference.
        Sets that do not exist are treated as empty.
        Note the order: the later sets are subtracted from the first one.
        Returns the members of the difference
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        result = self.obj_redis.sdiff(keyname, *other_keys)
        if not result:
            return None

        # bytes to str
        ret_set = set()
        for v in result:
            ret_set.add(bytes.decode(v))

        return ret_set

    @wraps_set_expire
    def sDiffStore(self, store_key=None, key=None, *keys):
        """
        Difference with store
        Compute the difference of all given sets and store it in store_key.
        If store_key already exists, it is overwritten.
        Returns the number of elements in the resulting store_key set
        """
        if not store_key or not key:
            return None

        store_key = self.key_make(store_key.strip())
        key = self.key_make(key.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        return self.obj_redis.sdiffstore(store_key, key, *other_keys)

    def sInter(self, keyname=None, *keys):
        """
        Intersection
        Return the intersection of all given sets; sets that do not exist are treated as empty.
        If any of the given sets is empty or missing, the result is also empty (by set algebra).
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        result = self.obj_redis.sinter(keyname, *other_keys)
        if not result:
            return None

        # bytes to str
        ret_set = set()
        for v in result:
            ret_set.add(bytes.decode(v))

        return ret_set

    @wraps_set_expire
    def sInterStore(self, store_key=None, key=None, *keys):
        """
        Intersection with store
        Store the intersection of the given sets in the set store_key.
        If store_key already exists, it is overwritten.
        Returns the number of elements in the stored intersection
        """
        if not store_key or not key:
            return None

        store_key = self.key_make(store_key.strip())
        key = self.key_make(key.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        return self.obj_redis.sinterstore(store_key, key, *other_keys)

    def sUnion(self, keyname=None, *keys):
        """
        Union
        Return the union of all given sets; sets that do not exist are treated as empty.
        Returns the members of the union
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        result = self.obj_redis.sunion(keyname, *other_keys)
        if not result:
            return None

        # bytes to str
        ret_set = set()
        for v in result:
            ret_set.add(bytes.decode(v))

        return ret_set

    @wraps_set_expire
    def sUnionStore(self, store_key=None, key=None, *keys):
        """
        Union with store
        Store the union of the given sets in the set store_key.
        If store_key already exists, it is overwritten.
        Returns the number of elements in the stored union
        """
        if not store_key or not key:
            return None

        store_key = self.key_make(store_key.strip())
        key = self.key_make(key.strip())

        other_keys = list()
        for k in keys:
            other_keys.append(self.key_make(k))

        return self.obj_redis.sunionstore(store_key, key, *other_keys)

    @wraps_set_expire
    def sIsMember(self, keyname=None, value=None):
        """
        Check whether the element is a member of the set.
        Returns True if it is a member,
        False if it is not or the key does not exist
        """
        if not keyname or value is None:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.sismember(keyname, value)

    @wraps_set_expire
    def sMembers(self, keyname=None):
        """
        Return all members of the set.
        A set that does not exist is treated as empty
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.smembers(keyname)

        if not result:
            return None
        # bytes to str
        ret_set = set()
        for v in result:
            ret_set.add(bytes.decode(v))

        return ret_set

    @wraps_set_expire
    def sRem(self, keyname=None, *values):
        """
        Remove one or more members from the set; members that do not exist are ignored.
        An error is returned when the key is not a set.
        Before Redis 2.4, SREM accepted only a single member.
        Returns the number of members actually removed, not counting ignored ones
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.srem(keyname, *values)

    @wraps_set_expire
    def sPop(self, keyname=None):
        """
        Remove and return a random element from the set.
        Returns the removed element,
        or None when the set does not exist or is empty
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.spop(keyname)

        # bytes to str
        return None if not result else bytes.decode(result)

    @wraps_set_expire
    def sRandMember(self, keyname=None, count=1):
        """
        Return random elements from the set without modifying it.
        Since Redis 2.6, SRANDMEMBER accepts an optional count argument:
        if count is positive and smaller than the set size,
            an array of count distinct elements is returned;
        if count is greater than or equal to the set size, the whole set is returned;
        if count is negative, an array whose length is the absolute value of count
            is returned, and elements may repeat.
        Returns: a list of random elements
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())

        if isinstance(count, int):
            count = max(0, count)
        else:
            count = 1

        result = self.obj_redis.srandmember(keyname, count)

        if not result:
            return None

        # bytes to str
        ret_list = list()
        for v in result:
            ret_list.append(bytes.decode(v))

        return ret_list

    # --------------------------------------------------------
    # Zset (sorted set)
    # Like a set, a sorted set holds unique string members, but each member
    # carries a score, and scores may repeat across members.
    # Member lookup is backed by a hash table (O(1) per member); ordering is
    # maintained separately, so ordered operations cost O(log N).
    # The maximum number of members per set is 2^32 - 1
    # (4,294,967,295, i.e. more than 4 billion members).
    # --------------------------------------------------------
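    # A minimal usage sketch of the kwargs-style ZADD used by the methods
    # below (assumes a reachable local Redis and redis-py < 3.0; the key
    # name "leaderboard" is only an illustration):
    #
    #   r = StrictRedis()
    #   r.zadd("leaderboard", alice=10, bob=10)   # duplicate scores allowed
    #   r.zadd("leaderboard", alice=12)           # re-adding updates the score
    #   r.zrange("leaderboard", 0, -1, withscores=True)
    #   # -> [(b'bob', 10.0), (b'alice', 12.0)]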

    @wraps_set_expire
    def zAdd(self, keyname=None, **kwargs):
        """
        将一个或多个成员元素及其分数值加入到有序集当中。
        如果某个成员已经是有序集的成员,那么更新这个成员的分数值,
        并通过重新插入这个成员元素,来保证该成员在正确的位置上。
        如果有序集合 key 不存在,则创建一个空的有序集并执行 ZADD 操作。
        当 key 存在但不是有序集类型时,返回一个错误。
        返回:
            被成功添加的新成员的数量,不包括那些被更新的、已经存在的成员。
        注意: 在 Redis 2.4 版本以前, ZADD 每次只能添加一个元素
        **kwargs: name1=score1, name2=score2
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.zadd(keyname, **kwargs)

    def zRangeByScore(self, keyname=None, min=None, max=None, withscores=False):
        """
        分数值正序
        返回有序集中指定分数区间内的所有的成员。
        有序集成员按分数值递减(从大到小)的次序排列。
        具有相同分数值的成员按字典序的逆序(reverse lexicographical order )排列
        返回;
        指定区间内,带有分数值(可选)的有序集成员的列表
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.zrangebyscore(
            keyname, min, max, withscores=withscores)

        if not result:
            return None

        # bytes to str
        if not withscores:
            # return list
            zret = list()
            for field in result:
                zret.append(bytes.decode(field))
        else:
            # return dict
            zret = list()
            for field, score in result:
                zret.append((bytes.decode(field), score))
            zret = dict(zret)

        return zret

    def zRevRangeByScore(self, keyname=None, max=None, min=None, withscores=False):
        """
        分数值逆序
        返回有序集中指定分数区间内的所有的成员。
        有序集成员按分数值递减(从大到小)的次序排列。
        具有相同分数值的成员按字典序的逆序(reverse lexicographical order )排列。
        返回;
        指定区间内,带有分数值(可选)的有序集成员的列表
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.zrevrangebyscore(
            keyname, max, min, withscores=withscores)

        if not result:
            return None

        # bytes to str
        if not withscores:
            # return list
            zret = list()
            for field in result:
                zret.append(bytes.decode(field))
        else:
            # return dict
            zret = list()
            for field, score in result:
                zret.append((bytes.decode(field), score))
            zret = dict(zret)

        return zret

    def zRank(self, keyname=None, member=None):
        """
        排名正序
        返回有序集中指定成员的排名。
        其中有序集成员按分数值递增(从小到大)顺序排列
        如果成员是有序集 key 的成员,返回 member 的排名。
        如果成员不是有序集 key 的成员,返回 None 。
        """
        if not keyname or member is None:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.zrank(keyname, member)

    def zRevRank(self, keyname=None, member=None):
        """
        排名逆序
        返回有序集中指定成员的排名。
        其中有序集成员按分数值递减(从大到小)排序
        如果成员是有序集 key 的成员,返回 member 的排名。
        如果成员不是有序集 key 的成员,返回 None 。
        """
        if not keyname or member is None:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.zrevrank(keyname, member)

    def zRange(self, keyname=None, start=None, end=None, withscores=False):
        """
        位置正序
        返回有序集中,指定区间内的成员。
        其中成员的位置按分数值递增(从小到大)来排序。
        具有相同分数值的成员按字典序(lexicographical order )来排列
        返回指定区间内,带有分数值(可选)的有序集成员的列表
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.zrange(
            keyname, start, end, withscores=withscores)

        if not result:
            return None

        # bytes to str
        if not withscores:
            # return list
            zret = list()
            for field in result:
                zret.append(bytes.decode(field))
        else:
            # return dict
            zret = list()
            for field, score in result:
                zret.append((bytes.decode(field), score))
            zret = dict(zret)

        return zret

    def zRevrange(self, keyname=None, start=None, end=None, withscores=False):
        """
        位置逆序
        返回有序集中,指定区间内的成员。
        其中成员的位置按分数值递减(从大到小)来排列。
        具有相同分数值的成员按字典序的逆序(reverse lexicographical order)排列
        返回指定区间内,带有分数值(可选)的有序集成员的列表
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        result = self.obj_redis.zrevrange(
            keyname, start, end, withscores=withscores)

        if not result:
            return None

        # bytes to str
        if not withscores:
            # return list
            zret = list()
            for field in result:
                zret.append(bytes.decode(field))
        else:
            # return dict
            zret = list()
            for field, score in result:
                zret.append((bytes.decode(field), score))
            zret = dict(zret)

        return zret

    def zRem(self, keyname, *member):
        """
        移除有序集中的一个或多个成员,不存在的成员将被忽略。
        当 key 存在但不是有序集类型时,返回一个错误。
        注意: 在 Redis 2.4 版本以前, ZREM 每次只能删除一个元素。
        返回被成功移除的成员的数量,不包括被忽略的成员0
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.zrem(keyname, *member)

    def zRemRangeByRank(self, keyname=None, min=None, max=None):
        """
        删除正序
        移除有序集中,指定排名(rank)区间内的所有成员
        返回被移除成员的数量
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.zremrangebyrank(keyname, min, max)

    def zRemrangebyscore(self, keyname=None, min=None, max=None):
        """
        删除正序
        移除有序集中,指定分数(score)区间内的所有成员
        返回被移除成员的数量
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.zremrangebyscore(keyname, min, max)

    def zCard(self, keyname=None):
        """
        计算集合中元素的数量
        当 key 存在且是有序集类型时,返回有序集的基数。
        当 key 不存在时,返回 0
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.zcard(keyname)

    def zCount(self, keyname=None, min=None, max=None):
        """
        计算有序集合中指定分数区间的成员数量
        返回分数值在 min 和 max 之间的成员的数量
        """
        if not keyname:
            return None

        keyname = self.key_make(keyname.strip())
        return self.obj_redis.zcount(keyname, min, max)
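
A minimal usage sketch of the sorted-set helpers above. The wrapper class name `RedisWrapper` and its constructor are hypothetical; only the method names come from the code above, and a reachable local Redis is assumed:

if __name__ == "__main__":
    wrapper = RedisWrapper()                     # hypothetical constructor
    wrapper.zAdd("ranking", tom=3, amy=5, joe=5)
    print(wrapper.zRange("ranking", 0, -1, withscores=True))
    # -> {'tom': 3.0, 'amy': 5.0, 'joe': 5.0}
    print(wrapper.zRank("ranking", "tom"))       # 0: lowest score
    print(wrapper.zRevRank("ranking", "tom"))    # 2: last when sorted high-to-low
    wrapper.zRemRangeByRank("ranking", 0, 0)     # drop the lowest-ranked member
    print(wrapper.zCard("ranking"))              # 2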
예제 #22
0
class RedisUtils(object):
    def __init__(self, host, port, db, password=None):
        if password is not None:
            self._client = StrictRedis(host=host,
                                       port=port,
                                       db=db,
                                       password=password)
        else:
            self._client = StrictRedis(host=host, port=port, db=db)

    def common_utils(self):
        """
        Generic key operations.
        :return:
        """
        pass

    def collection_utils(self):
        """
        Statistics / counting operations.
        :return:
        """
        pass

    def string_utils(self):
        """
        String operations.
        :return:
        """
        str_name = "str"
        str_value = "str_value"
        int_name = "number"
        int_value = 25
        int_str_name = "num_to_str"
        int_str_value = 10000
        int_max_name = "max_number"
        int_max_value = 2444449999999999999999999
        int_min_name = "min_number"
        int_min_value = -1111111111111111111111111
        float_name = "float_number"
        float_value = 23.45
        result = self._client.set(str_name, str_value)
        result_len = self._client.strlen(str_name)
        print("String Set key:", str_name, " value:", str_value, " result:",
              result, " len:", result_len)
        result = self._client.set(int_name, int_value)
        result_len = self._client.strlen(int_name)
        print("String Set key:", int_name, " value:", int_value, " result:",
              result, " len:", result_len)
        result = self._client.set(int_str_name, int_str_value)
        result_len = self._client.strlen(int_str_name)
        print("String Set key:", int_str_name, " value:", int_str_value,
              " result:", result, " len:", result_len)
        result = self._client.set(int_max_name, int_max_value)
        result_len = self._client.strlen(int_max_name)
        print("String Set key:", int_max_name, " value:", int_max_value,
              " result:", result, " len:", result_len)
        result = self._client.set(int_min_name, int_min_value)
        result_len = self._client.strlen(int_min_name)
        print("String Set key:", int_min_name, " value:", int_min_value,
              " result:", result, " len:", result_len)
        result = self._client.set(float_name, float_value)
        result_len = self._client.strlen(float_name)
        print("String Set key:", float_name, " value:", float_value,
              " result:", result, " len:", result_len)

        str_dict = {"str_key": "keyvalue", "int_key": 234, "float_key": 234.09}
        result = self._client.mset(str_dict)
        print("String MSet dict:", str_dict, " result:", result)

        null_str_name = ""
        null_str_value = self._client.get(null_str_name)
        print("String Get key:", null_str_name, " value:", null_str_value)
        dest_str_value = self._client.get(str_name)
        print("String Get key:", str_name, " value", dest_str_value, " type:",
              type(dest_str_value))
        dest_int_value = self._client.get(int_name)
        print("String Get key:", int_name, " value", dest_int_value, " type:",
              type(dest_int_value))
        multi_get_name = ["str_key", "int_key", "float_key"]
        result = self._client.mget(multi_get_name)
        result_dict = dict(zip(multi_get_name, result))
        print("String MGet key:", multi_get_name, " values:", result, " type:",
              type(result), " dict:", result_dict)

        ### INCR / DECR: increment and decrement; negative amounts are supported
        incr_name = "incr_name"
        incr_count = 2
        result = self._client.incr(incr_name, incr_count)
        print("String Incr key:", incr_name, "incr_count:", incr_count,
              " result:", result)
        incr_count = -2
        result = self._client.incr(incr_name, incr_count)
        print("String Incr key:", incr_name, "incr_count:", incr_count,
              " result:", result)
        decr_name = "decr_name"
        decr_count = -2
        result = self._client.decr(decr_name, decr_count)
        print("String Decr key:", decr_name, "incr_count:", decr_count,
              " result:", result)
        decr_count = 2
        result = self._client.decr(decr_name, decr_count)
        print("String Decr key:", decr_name, "incr_count:", decr_count,
              " result:", result)

    def list_utils(self):
        not_existed_list_name = "not exist list name"
        list_name = "list_key"
        list_str_value = "list value"
        list_int_value = 234
        # O(1)
        result = self._client.rpush(list_name, list_str_value, list_int_value)
        print("List RPUSH key:", list_name, "value:", list_int_value,
              list_str_value, "result:", result)
        list_value = ("list_value_1", "list_value_2", 234)
        # O(1)
        result = self._client.rpush(list_name, *list_value)
        print("List RPUSH key:", list_name, "value:", list_value, "result:",
              result)
        # O(1); RPUSHX takes a single value and only pushes if the key exists
        result = self._client.rpushx(list_name, list_str_value)
        print("List RPUSHX key:", list_name, "value:", list_str_value,
              "result:", result)
        result = self._client.rpushx(not_existed_list_name, list_str_value)
        print("List RPUSHX key:", not_existed_list_name, "value:",
              list_str_value, "result:", result)

        # O(1)
        result = self._client.lpop(list_name)
        print("List LPOP key:", list_name, " value:", result)
        # O(1)
        result = self._client.rpop(list_name)
        print("List RPOP key:", list_name, " value:", result)

        # O(n)
        result = self._client.lrem(list_name, 1, 231)
        print("List LREM key:", list_name, "result:", result)
        # O(n)
        lstart = 0
        lend = 2
        result = self._client.ltrim(list_name, lstart, lend)
        print("List LTRIM key:", list_name, " result:", result)

        # O(1)
        result = self._client.llen(list_name)
        print("List LLEN key:", list_name, " len:", result)

        # O(N)
        result = self._client.linsert(list_name, "before", 234, "Insert Value")
        print("List LINSERT key:", list_name, " result:", result)

        lindex = 2  # Start As 0
        # O(N)
        result = self._client.lindex(list_name, lindex)
        print("List LINDEX key:", list_name, " result:", result)

    def set_utils(self):
        """
        集合操作
        分类 社交 标签
        :return:
        """
        set_name = "set_name"
        set_value = ("set_1", "set_2", 3, 4)
        # O(1) per added member
        result = self._client.sadd(set_name, *set_value)
        print("Set SADD key:", set_name, "value:", set_value, " result:",
              result)
        # O(1)
        result = self._client.scard(set_name)
        print("Set SCARD key:", set_name, " result:", result)
        s_find_value = "set_1"
        # O(1)
        result = self._client.sismember(set_name, s_find_value)
        print("Set SISMEMBER key:", set_name, " find_value:", s_find_value,
              " result:", result)

        random_count = 2
        # O(count)
        result = self._client.srandmember(set_name, number=random_count)
        print("Set SRANDMEMBER key:", set_name, "result:", result)

        # O(1)
        result = self._client.spop(set_name)
        print("Set SPOP key:", set_name, " result:", result)

        # O(1) per removed member
        result = self._client.srem(set_name, *set_value)
        print("Set SREM key:", set_name, "value:", set_value, " result:",
              result)

        set_a_name = "set_a"
        set_a_value = [
            "set_value_1", "set_value_3", "set_value_6", "set_value_9",
            "set_value_10"
        ]
        set_b_name = "set_b"
        set_b_value = [
            "set_value_1", "set_value_3", "set_value_6", "set_value_8",
            "set_value_0"
        ]
        self._client.sadd(set_a_name, *set_a_value)
        self._client.sadd(set_b_name, *set_b_value)
        # O(N*M) worst case (N = size of smallest set, M = number of sets)
        result = self._client.sinter(set_a_name, set_b_name)
        print("Set SINTER key:", set_a_name, " key:", set_b_name, " result:",
              result)
        # O(N), N = total number of elements across the sets
        result = self._client.sunion(set_a_name, set_b_name)
        print("Set SUNION key:", set_a_name, " key:", set_b_name, " result:",
              result)
        # O(N), N = total number of elements across the sets
        result = self._client.sdiff(set_a_name, set_b_name)
        print("Set SDIFF key:", set_a_name, " key:", set_b_name, " result:",
              result)
        self._client.delete(set_a_name)
        self._client.delete(set_b_name)

    def zset_utils(self):
        """
        集合操作
        应用排行榜
        :return:
        """
        zset_name = "zset_name"
        zset_value = {
            "zset_1": 23,
            "zset_2": 23.56,
            "zset_3": 23,
            "zset_4": -4,
            "zset_5": 0
        }
        # kwargs-style ZADD (redis-py < 3.0); redis-py >= 3.0 expects a mapping dict
        result = self._client.zadd(zset_name, **zset_value)
        print("zset ZADD key:", zset_name, " result:", result)
        result = self._client.zcard(zset_name)
        print("zset ZCARD key:", zset_name, " result:", result)
        zset_score_value = "zset_4"
        result = self._client.zscore(zset_name, zset_score_value)
        print("zset ZSCORE key:", zset_name, "value:", zset_score_value,
              " result:", result)
        result = self._client.zrank(zset_name, zset_score_value)
        print("zset ZRANK key:", zset_name, "value:", zset_score_value,
              " result:", result)
        result = self._client.zrevrank(zset_name, zset_score_value)
        print("zset ZREVRANK key:", zset_name, "value:", zset_score_value,
              " result:", result)
        zset_cursor = 0
        index, result = self._client.zscan(zset_name, zset_cursor)
        print("zset ZSCAN key:", zset_name, " result:", result, "type:",
              type(result))

        zset_min = 0  # Start From 0
        zset_max = 2
        zset_num = 2
        result = self._client.zrange(zset_name, zset_min, zset_max)
        print("zset ZRANGE key:", zset_name, "min:", zset_min, " max:",
              zset_max, " result:", result)
        # self._client.zrangebylex(zset_name, 0, 2, num=2)
        # self._client.zrangebyscore(zset_name, 0, 2, num=2)
        self._client.delete(zset_name)

    def hash_utils(self):
        hash_name = "hash_name"
        hash_key = "hask_key"
        hash_value = "hash_value"
        result = self._client.hset(hash_name, hash_key, hash_value)
        print("hash HSET key:", hash_name, " field:", hash_key, " value:",
              hash_value, " result:", result)
        hash_content = {"name": "lee", "age": 34, "birth": 2009}
        result = self._client.hmset(hash_name, hash_content)
        print("hash HMSET content:", hash_content, " result:", result)
        result = self._client.hlen(hash_name)
        print("hash HLEN key:", hash_name, " result:", result)
        result = self._client.hexists(hash_name, hash_key)
        print("hash HEXISTS key:", hash_name, " field:", hash_key, " result:",
              result)
        result = self._client.hget(hash_name, hash_key)
        print("hash HGET key:", hash_name, " field:", hash_key, " result:",
              result)
        hash_keys = ("name", "age")
        result = self._client.hmget(hash_name, *hash_keys)
        print("hash HMGET key:", hash_name, " field:", hash_keys, " result:",
              result)
        hash_cursor = 0
        result = self._client.hscan(hash_name, hash_cursor)
        print("hash HSCAN key:", hash_name, " result:", result)
        result = self._client.hkeys(hash_name)
        print("hash HKEYS key:", hash_name, "result:", result)
        result = self._client.hdel(hash_name, hash_key)
        print("hash HDEL key:", hash_name, " field:", hash_key, " result:",
              result)
        result = self._client.hdel(hash_name, *hash_keys)
        print("hash HDEL key:", hash_name, " field:", hash_key, " result:",
              result)
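
A minimal sketch for driving the RedisUtils demos above (the host, port and db values are assumptions for a local test instance):

if __name__ == "__main__":
    # Connection parameters below are assumptions for a local Redis instance.
    utils = RedisUtils(host="127.0.0.1", port=6379, db=0)
    utils.string_utils()
    utils.list_utils()
    utils.set_utils()
    utils.zset_utils()
    utils.hash_utils()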