Example #1
class RedisManager(NoSqlManager):
    def __init__(self,
                 namespace,
                 url=None,
                 data_dir=None,
                 lock_dir=None,
                 **params):
        self.db = params.pop('db', None)
        self.dbpass = params.pop('password', None)
        self.connection_pool = params.get('redis_connection_pool', None)
        self.expires = params.get('expires', params.get('expiretime', None))
        NoSqlManager.__init__(self,
                              namespace,
                              url=url,
                              data_dir=data_dir,
                              lock_dir=lock_dir,
                              **params)

    def open_connection(self, host, port, **params):
        if not self.connection_pool:
            self.connection_pool = ConnectionPool(host=host, port=port, db=self.db,
                    password=self.dbpass)
        self.db_conn = StrictRedis(connection_pool=self.connection_pool, **params)
    
    def __contains__(self, key):
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value, expiretime=None):
        key = self._format_key(key)
        # beaker.container.Value.set_value calls NamespaceManager.set_value
        # however it (until version 1.6.4) never sets expiretime param.
        #
        # Checking "type(value) is tuple" is a compromise
        # because Manager class can be instantiated outside container.py (See: session.py)
        if (expiretime is None) and (type(value) is tuple):
            expiretime = value[1]
        # If the machinery above fails, then pickup the expires time from the
        # init params.
        if not expiretime and self.expires is not None:
            expiretime = self.expires
        # Set or setex, according to whether we got an expires time or not.
        if expiretime:
            self.db_conn.setex(key, expiretime, pickle.dumps(value, 2))
        else:
            self.db_conn.set(key, pickle.dumps(value, 2))

    def __delitem__(self, key):
        self.db_conn.delete(self._format_key(key))

    def _format_key(self, key):
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))

    def _format_pool_key(self, host, port, db):
        return '{0}:{1}:{2}'.format(host, port, self.db)

    def do_remove(self):
        self.db_conn.flush()

    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
Example #2
File: redisdb.py  Project: blackusagi/blog
class RedisQueue:
    def __init__(self):
        """
        初始化Redis
        """
        self.db = StrictRedis(host=REDIS_HOST,
                              port=REDIS_PORT,
                              password=REDIS_PASSWORD)

    def add(self, request):
        """
        向队列添加序列化后的Request
        :param request: 请求对象
        :param fail_time: 失败次数
        :return: 添加结果
        """
        if isinstance(request, Requests):
            return self.db.rpush(REDIS_KEY, dumps(request))
        return False

    def pop(self):
        """
        取出下一个Request并反序列化
        :return: Request or None
        """
        if self.db.llen(REDIS_KEY):
            return loads(self.db.lpop(REDIS_KEY))
        else:
            return False

    def clear(self):
        self.db.delete(REDIS_KEY)

    def empty(self):
        return self.db.llen(REDIS_KEY) == 0
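A minimal usage sketch (not part of the original project) for the RedisQueue above, assuming the surrounding module provides REDIS_HOST, REDIS_PORT, REDIS_PASSWORD, REDIS_KEY, the Requests class, and pickle-based dumps/loads:

# Hypothetical usage; the Requests constructor arguments are illustrative only.
queue = RedisQueue()
queue.clear()                                    # remove any stale entries under REDIS_KEY
queue.add(Requests(url='https://example.com'))   # RPUSH a pickled request
while not queue.empty():
    request = queue.pop()                        # LPOP + deserialize (FIFO order)
    print(request)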
Example #3
class RedisQueue:
    def __init__(self):
        """
        初始化Redis
        """
        self.db = StrictRedis(host=REDIS_HOST,
                              port=REDIS_PORT,
                              password=REDIS_PASSWORD)

    def put(self, myrequest):
        """
        向队列添加序列化后的MyRequest
        :param myrequest: 请求对象
        :return: 添加结果
        """
        if isinstance(myrequest, MyRequest):
            return self.db.rpush(REDIS_KEY, pickle.dumps(myrequest))
        return False

    def get(self):
        """
        取出下一个MyRequest并反序列化
        :return: MyRequest对象
        """
        if self.db.llen(REDIS_KEY):
            return pickle.loads(self.db.lpop(REDIS_KEY))
        else:
            return None

    def clear(self):
        self.db.delete(REDIS_KEY)

    def empty(self):
        return self.db.llen(REDIS_KEY) == 0
Example #4
class RedisTestCase(SimpleTestCase):

    indices = {
        'my-app:ed54cda':
        '<html><head><meta name="my-app/config/environment" content="%7B%22baseURL%22%3A%22/%22%7D"></head><body><h1>my-app</h1><h2>ed54cda</h2></body></html>',
        'my-app:d696248':
        '<html><head><meta name="my-app/config/environment" content="%7B%22baseURL%22%3A%22/%22%7D"></head><body><h1>my-app</h1><h2>d696248</h2></body></html>',
        'my-app:7fabf72':
        '<html><head><meta name="my-app/config/environment" content="%7B%22baseURL%22%3A%22/%22%7D"></head><body><h1>my-app</h1><h2>7fabf72</h2></body></html>',
        'my-app:current':
        '<html><head><meta name="my-app/config/environment" content="%7B%22baseURL%22%3A%22/%22%7D"></head><body><h1>my-app</h1><h2>7fabf72</h2></body></html>',
        'other-app:fd54cda':
        '<html><head><base href="/" /><meta name="other-app/config/environment" content="%7B%22baseURL%22%3A%22/other-app/%22%7D"></head><body><h1>other-app</h1><h2>fd54cda</h2></body></html>',
        'other-app:e696248':
        '<html><head><base href="/" /><meta name="other-app/config/environment" content="%7B%22baseURL%22%3A%22/other-app/%22%7D"></head><body><h1>other-app</h1><h2>e696248</h2></body></html>',
        'other-app:8fabf72':
        '<html><head><base href="/" /><meta name="other-app/config/environment" content="%7B%22baseURL%22%3A%22/other-app/%22%7D"></head><body><h1>other-app</h1><h2>8fabf72</h2></body></html>',
        'other-app:current':
        '<html><head><base href="/" /><meta name="other-app/config/environment" content="%7B%22baseURL%22%3A%22/other-app/%22%7D"></head><body><h1>other-app</h1><h2>8fabf72</h2></body></html>'
    }

    def setUp(self):
        self.client = Client()
        self.redis = StrictRedis()

        for key, value in self.indices.items():
            self.redis.set(key, value)

    def tearDown(self):
        self.redis.delete(*self.indices.keys())
Example #5
File: redis.py  Project: baverman/cachel
class RedisCache(object):
    def __init__(self, url=None, client=None):
        if url:  # pragma: no cover
            self.client = StrictRedis.from_url(url)
        elif client:  # pragma: no cover
            self.client = client
        else:
            self.client = StrictRedis()

    def get(self, key):
        return self.client.get(key)

    def mget(self, keys):
        return self.client.mget(keys)

    def delete(self, key):
        self.client.delete(key)

    def mdelete(self, keys):
        self.client.delete(*keys)

    def set(self, key, value, ttl):
        self.client.set(key, value, ttl)

    def mset(self, items, ttl):
        p = self.client.pipeline(transaction=False)
        for key, value in items:
            p.set(key, value, ttl)
        p.execute()
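A short usage sketch for RedisCache (not from the original project); it assumes a reachable Redis on localhost:6379 and shows the pipelined mset paired with mget. The third positional argument of redis-py's set() is ex, so ttl here is a lifetime in seconds:

cache = RedisCache()                                   # falls back to StrictRedis() on localhost
cache.mset([(b'k1', b'v1'), (b'k2', b'v2')], ttl=60)   # one round trip via the pipeline
print(cache.mget([b'k1', b'k2']))                      # [b'v1', b'v2'] until the TTL expires
cache.mdelete([b'k1', b'k2'])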
Example #6
class RedisQueue():
    # Initialize
    def __init__(self):
        self.db = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)

    # Add a serialized Request to the queue
    def add(self, request):
        # Serialize the request before pushing
        if isinstance(request, WeixinRequest):
            return self.db.rpush(REDIS_KEY, dumps(request))
        return False

    # Pop the next Request and deserialize it
    def pop(self):
        # Deserialize
        if self.db.llen(REDIS_KEY):
            return loads(self.db.lpop(REDIS_KEY))
        else:
            return False

    def clear(self):
        self.db.delete(REDIS_KEY)

    def empty(self):
        return self.db.llen(REDIS_KEY) == 0
Example #7
File: tokens.py  Project: fterdalpdx/finti
	def post_updates(self, updates, log_index):
		'''
			Update the cache with CRUD changes
		'''
		cache = StrictRedis(db=config.tokens_cache_redis_db)
		
		self.log.info('post_updates(): posting updates to local storage')
		for update in updates:			# TODO: could re-add the Redis "Pipelines" feature to combine Redis requests for better performance when available
			(user, token, date, action) = update
			if action == 'add':
				cache.hset('general', token, user)	# future method - user-by-token -- really just existence of a token
				cache.hset('users', user, token)	# future-method - token-by-user: allow lookup of previous token on token changes
				cache.set(token, user)	# Current method
				self.log.info('post_updates(): added token for user: ' + user)
			elif action == 'delete':
				cache.hdel('general', token)	# future method - disables the ability to authenticate
				cache.hdel('users', user)	# future method - removes history of token
				cache.delete(token)
				self.log.info('post_updates(): deleted token for user: ' + user)
			elif action == 'update':
				prev_token = cache.hget('users', user)
				cache.hdel('general', prev_token)	# future method - disables the ability to authenticate with previous token
				cache.hset('general', token, user)		# future method - set the new token for the user
				cache.hset('users', user, token)		# future method - set the user as possessing the new token
				cache.set(token, user)
				self.log.info('post_updates(): updated token for user: ' + user)
			else:
				self.log.error('post_updates(): unexpected change type: ' + action)

		if len(updates) > 0:	# don't set if there is nothing to do and also don't set if there are errors
			cache.set('log_index', log_index)
Example #8
File: redis.py  Project: bvujnovac/breakbot
class RedisStorage(MachineBaseStorage):
    def __init__(self, settings):
        super().__init__(settings)
        self._key_prefix = settings.get('REDIS_KEY_PREFIX', 'SM')
        redis_config = gen_config_dict(settings)
        self._redis = StrictRedis(**redis_config)

    def _prefix(self, key):
        return "{}:{}".format(self._key_prefix, key)

    def has(self, key):
        return self._redis.exists(self._prefix(key))

    def get(self, key):
        return self._redis.get(self._prefix(key))

    def set(self, key, value, expires=None):
        self._redis.set(self._prefix(key), value, expires)

    def delete(self, key):
        self._redis.delete(self._prefix(key))

    def size(self):
        info = self._redis.info('memory')
        return info['used_memory']
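A hedged usage sketch for RedisStorage; the settings dict below is hypothetical, and gen_config_dict is the project's helper that turns it into StrictRedis keyword arguments. The expires argument maps onto redis-py's ex parameter (a TTL in seconds):

# Illustrative settings; a real deployment would include host/port entries as well.
storage = RedisStorage({'REDIS_KEY_PREFIX': 'SM'})
storage.set('state:42', b'payload', expires=120)   # SET SM:state:42 ... EX 120
print(storage.has('state:42'), storage.get('state:42'))
storage.delete('state:42')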
Example #9
class RedisSessionInterface(SessionInterface):
    def __init__(self, prefix="session:"):
        self.redis = StrictRedis(host=config.REDIS_HOST,
                                 port=config.REDIS_PORT,
                                 db=config.REDIS_DB)

        self.prefix = prefix

    def _generate_session_id(self):
        return str(uuid4())

    def open_session(self, app, request):
        """ Flask requires implementaion of open_session() and save_session()
            when creating custom session interface.
        """
        session_id = request.cookies.get(app.session_cookie_name)
        if not session_id:
            session_id = self._generate_session_id()

            return RedisSession(session_id=session_id, is_new=True)

        serialized_data = self.redis.get(self.prefix + session_id)
        if serialized_data is not None:
            data = pickle.loads(serialized_data)

            return RedisSession(data, session_id=session_id)

        return RedisSession(session_id=session_id, is_new=True)

    def _get_expiration_time_redis(self, app, session):
        if session.permanent:
            return app.permanent_session_lifetime

        return timedelta(days=1)

    def save_session(self, app, session, response):
        domain = self.get_cookie_domain(app)
        """ When session does not contain any meaningful information.
            (i.e. dict(session) -> {})
        """
        if not session:
            self.redis.delete(self.prefix + session.session_id)
            if session.modified:
                response.delete_cookie(app.session_cookie_name, domain=domain)

            return

        expiration_time_redis = self._get_expiration_time_redis(app, session)
        expiration_time_cookie = self.get_expiration_time(app, session)
        serialized_data = pickle.dumps(dict(session))

        self.redis.setex(self.prefix + session.session_id,
                         int(expiration_time_redis.total_seconds()),
                         serialized_data)

        response.set_cookie(app.session_cookie_name,
                            session.session_id,
                            expires=expiration_time_cookie,
                            httponly=True,
                            domain=domain)
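To plug the interface above into a Flask application, the usual pattern is to assign it to app.session_interface; this sketch assumes the RedisSession class referenced above and the same config module are importable:

from flask import Flask

app = Flask(__name__)
app.session_interface = RedisSessionInterface(prefix="session:")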
Example #10
    def test_group_chord_group_chain(self, manager):
        from celery.five import bytes_if_py2

        if not manager.app.conf.result_backend.startswith('redis'):
            raise pytest.skip('Requires redis result backend.')
        redis_connection = StrictRedis()
        redis_connection.delete('redis-echo')
        before = group(redis_echo.si('before {}'.format(i)) for i in range(3))
        connect = redis_echo.si('connect')
        after = group(redis_echo.si('after {}'.format(i)) for i in range(2))

        result = (before | connect | after).delay()
        result.get(timeout=TIMEOUT)
        redis_messages = list(map(
            bytes_if_py2,
            redis_connection.lrange('redis-echo', 0, -1)
        ))
        before_items = \
            set(map(bytes_if_py2, (b'before 0', b'before 1', b'before 2')))
        after_items = set(map(bytes_if_py2, (b'after 0', b'after 1')))

        assert set(redis_messages[:3]) == before_items
        assert redis_messages[3] == b'connect'
        assert set(redis_messages[4:]) == after_items
        redis_connection.delete('redis-echo')
Example #11
class RedisJobStore(JobStore):
    def __init__(self, db=0, key_prefix="jobs.", pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        self.jobs = []
        self.pickle_protocol = pickle_protocol
        self.key_prefix = key_prefix

        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not key_prefix:
            raise ValueError('The "key_prefix" parameter must not be empty')

        self.redis = StrictRedis(db=db, **connect_args)

    def add_job(self, job):
        job.id = str(uuid4())
        job_state = job.__getstate__()
        job_dict = {
            "job_state": pickle.dumps(job_state, self.pickle_protocol),
            "runs": "0",
            "next_run_time": job_state.pop("next_run_time").isoformat(),
        }
        self.redis.hmset(self.key_prefix + job.id, job_dict)
        self.jobs.append(job)

    def remove_job(self, job):
        self.redis.delete(self.key_prefix + job.id)
        self.jobs.remove(job)

    def load_jobs(self):
        jobs = []
        keys = self.redis.keys(self.key_prefix + "*")
        pipeline = self.redis.pipeline()
        for key in keys:
            pipeline.hgetall(key)
        results = pipeline.execute()

        for job_dict in results:
            job_state = {}
            try:
                job = Job.__new__(Job)
                job_state = pickle.loads(job_dict["job_state".encode()])
                job_state["runs"] = long(job_dict["runs".encode()])
                dateval = job_dict["next_run_time".encode()].decode()
                job_state["next_run_time"] = datetime.strptime(dateval, "%Y-%m-%dT%H:%M:%S")
                job.__setstate__(job_state)
                jobs.append(job)
            except Exception:
                job_name = job_state.get("name", "(unknown)")
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = jobs

    def update_job(self, job):
        attrs = {"next_run_time": job.next_run_time.isoformat(), "runs": job.runs}
        self.redis.hmset(self.key_prefix + job.id, attrs)

    def close(self):
        self.redis.connection_pool.disconnect()

    def __repr__(self):
        return "<%s>" % self.__class__.__name__
Example #12
def gdelete(request):
    groupname = request.POST.get('name', 0)
    rd = StrictRedis()
    pref = settings.MY_PREFIX
    prefg = pref + ":" + groupname
    user = str(request.user)
    print "received request for deleting", groupname, "from", user
    ismember = rd.sismember(pref + ":groups", groupname)
    if not ismember:
        return JsonResponse({
            'done': False,
            'reason': 'No such group name'
        })
    # now check whether the requesting user is the one who created the group
    d = rd.hgetall(prefg + ":hash")
    if d['owner'] != user:
        return JsonResponse({
            'done': False,
            'reason': 'Only group owner can delete the group'
        })
    rd.srem(pref + ":groups", groupname)
    rd.delete(prefg + ":hash", pref + ":" + groupname)
    rd.delete(pref + ":" + groupname)
    redis_publisher = RedisPublisher(facility=pref, broadcast=True)
    redis_publisher.publish_message(
        RedisMessage(json.dumps({
            "type": "group_delete",
            "name": groupname
        })))
    return JsonResponse({'done': True})
Example #13
def deploy_async(t_id: int, user: User, self_vars: dict):
    task = Task.objects.get(id=t_id)
    sls = task.project.sls.replace(settings.SALT_STATE_DIRECTORY, '')
    hosts = get_all_hosts(task)
    values = {
        **{
            value.configure.name: value.value
            for value in Vars.objects.filter(inventory=task.inventory)
        },
        **self_vars
    }
    pipe = StrictRedis(host=settings.REDIS_HOST_SERVER,
                       port=settings.REDIS_HOST_PORT,
                       db=settings.REDIS_DB).pipeline()
    for hostname in hosts:
        pipe.delete(hostname)
        pipe.hmset(hostname, values)
        responses = pipe.execute()
        print("sync values to redis for hostname[%s], result: %s" %
              (hostname, str(responses)))
    print("project %s deployed on hosts; %s" % (task.project.name, hosts))
    p = Pepper().login()
    result = p.local_async(tgt=hosts,
                           fun='state.sls',
                           arg=[sls],
                           tgt_type='list')
    # {'return': [{'jid': '20190301083447106122', 'minions': ['074cda43674f']}]}
    task.status = TASK_STATUS[1][0]
    job_id = result['return'][0]['jid']
    task.barn = result['return'][0]['minions']
    task.occupy = job_id
    task.operator = user
    task.save()
    return job_id
Example #14
class redis_op(object):
    """
        确保单例模式
    """
    def __new__(cls, *arg, **kw):
        if not hasattr(cls, "_instance"):
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, redis_key):
        self.rc = StrictRedis(
            connection_pool=ConnectionPool(host="127.0.0.1", port=6379, db=0))
        self._lock = 0
        self._lock_key = f"{redis_key}_lock_key"

    def __getattr__(self, name):
        return getattr(self.rc, name)

    def __enter__(self):
        while not self._lock:
            time_out = time.time() + 10 + 1
            self._lock = self.rc.setnx(self._lock_key, time_out)
            if self._lock or (time.time() > float(self.rc.get(self._lock_key))
                              and time.time() > float(
                                  self.rc.getset(self._lock_key, time_out))):
                return self
            else:
                time.sleep(0.3)

    def __exit__(self, exc_type, exc_val, exc_tb):
        if time.time() < float(self.rc.get(self._lock_key)):
            self.rc.delete(self._lock_key)
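A hedged usage sketch for redis_op: __enter__ implements a simple SETNX-based lock with a roughly 10-second timeout, so the body of the with block runs while the lock key is held, and __getattr__ proxies everything else to the underlying StrictRedis. The key names below are illustrative:

op = redis_op("inventory")              # singleton; the lock key becomes "inventory_lock_key"
with op as locked:
    locked.set("inventory:count", 42)   # proxied to StrictRedis.set via __getattr__
    print(locked.get("inventory:count"))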
Example #15
File: api.py  Project: sleeka/Textman
    def get(self):
        args = get_parser.parse_args()
        # We've passed parameter validation.
        redis_conn = StrictRedis(connection_pool=pool)

        if not redis_conn.exists(args.username):
            return 'Username not found', 404  # NOT FOUND

        # Get all id's linked to username
        username_ids = redis_conn.lrange(args.username, 0, -1)

        # Delete username key
        redis_conn.delete(args.username)

        # Response from redis (messages)
        db_response = redis_conn.mget(*username_ids)
        # print(f'HERER {db_response}', file=sys.stderr)

        if not db_response:
            # No valid text messages for user args.username
            return 'No text messages', 404  # NOT FOUND
        # Create a JSON array
        api_response = [{
            'id': int(_id),
            'text': text.decode('utf-8')
        } for _id, text in zip(username_ids, db_response)]
        # I want to try searching and removing expired entries every 5min
        # print(f'HERER {api_response}', file=sys.stderr)
        return api_response, 200
Example #16
def init_redis_for_testing_cruncher(game_file):
    buffer = Buffer(**REDIS_PARAM)
    buffer.delete(TO_CRUNCHER)
    buffer.delete(TO_USERCRUNCHER)
    with open(game_file, "r") as f:
        for game_id in f.readlines():
            buffer.lpush(TO_CRUNCHER, game_id.strip())
Example #17
class Cache():
    def __init__(self):
        self.redis = StrictRedis(settings.REDIS['host'],
                                 settings.REDIS['port'], settings.REDIS['db'])

    def set(self, key, data, time=None):
        _data = pickle.dumps(data)
        if time:
            self.redis.setex(key, time, _data)
        else:
            self.redis.set(key, _data)

    def get(self, key, delete=False):
        value = self.redis.get(key)
        if delete:
            self.delete(key)
        try:
            return pickle.loads(value)
        except:
            return value

    def delete(self, key):
        self.redis.delete(key)

    def get_token(self, data):
        token = gen_token()
        self.set(token, data)
        return token

    def get_pin(self, data):
        pin = gen_pin()
        self.set(pin, data)
        return pin
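A minimal usage sketch for the Cache wrapper above, assuming settings.REDIS points at a reachable server; any picklable object can be stored:

cache = Cache()
cache.set("profile:42", {"name": "Ada", "admin": True}, time=300)   # SETEX with a 5-minute TTL
print(cache.get("profile:42"))              # unpickled dict
token = cache.get_token({"user_id": 42})    # stores the payload under a generated token
print(cache.get(token, delete=True))        # read once, then delete the key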
Example #18
def test_big_chunk():
    rclient = StrictRedis()
    rclient.delete('boo')
    cmd = rw.compile('SET', bytes, bytes)
    chunk = [cmd(b'boo', b'boo') for _ in range(10000)]

    rw.execute(chunk, client=rclient)
Example #19
def zset_score_min_max_example():
    """ Example sorted set with min/max score pagination.
    """
    from uuid import uuid4
    from redis import StrictRedis
    from zato.redis_paginator import ZSetPaginator

    conn = StrictRedis()
    key = 'paginator:{}'.format(uuid4().hex)

    # 97-114 is 'a' to 'r' in ASCII
    for x in range(1, 18):
        conn.zadd(key, x, chr(96 + x))

    p = ZSetPaginator(conn, key, 2, score_min=5, score_max=13)

    print(p.count)  # 9
    print(p.num_pages)  # 5
    print(p.page_range)  # [1, 2, 3, 4, 5]

    page = p.page(3)
    print(page)  # <Page 3 of 5>
    print(page.object_list)  # ['i', 'j']

    conn.delete(key)
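Note that zadd is called here with the redis-py 2.x positional signature (score, then member); on redis-py 3.0 and later the same loop would pass a member-to-score mapping instead, roughly:

# Equivalent population loop for redis-py >= 3.0.
for x in range(1, 18):
    conn.zadd(key, {chr(96 + x): x})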
Example #21
class Redis_DB(object):
    def __init__(self, host='localhost', port=6666):
        self.host = host
        self.port = port
        self.redis = StrictRedis(host=self.host,
                                 port=self.port,
                                 db=0,
                                 password='')

    def write_data(self, key, value):
        self.redis.set(key, value)

    def get_data(self, key):
        value = self.redis.get(key)
        return value

    def get_all_data(self):
        all_keys = []
        if self.redis.keys():
            for i in self.redis.keys():
                key = i.decode('ascii')
                value = self.get_data(i).decode('ascii')
                all_keys.append({key: value})
        else:
            all_keys = None
        print('find total items:\n{}'.format(all_keys))
        return all_keys

    def delete_data(self, key):
        self.redis.delete(key)
        print('delete the key: {}'.format(key))
Example #22
def test_send_event(redis_client: redis.StrictRedis):

    logger.remove()
    logger.add(
        sink,
        level=logging.INFO,
        filter=lambda record: "event" in record["extra"],
    )

    bind = {
        "event": "test_event",
        "kwargs": {
            "arg1": 1,
            "arg2": [2],
            "arg3": "你好",
        },
    }
    redis_client.delete(KEY)
    logger.bind(**bind).info("logger info")
    assert redis_client.llen(KEY) == 1
    e = redis_client.lpop(KEY)
    data = msgpack.loads(e, raw=False)
    for key, value in bind.items():
        assert data[key] == value
    assert data["@metadata"] == {"beat": "py_logging", "version": "dev"}
    assert data["msg"] == "logger info"
Example #23
def zset_score_min_max_example():
    """ Example sorted set with min/max score pagination.
    """
    from uuid import uuid4
    from redis import StrictRedis
    from zato.redis_paginator import ZSetPaginator
    
    conn = StrictRedis()
    key = 'paginator:{}'.format(uuid4().hex)
    
    # 97-114 is 'a' to 'r' in ASCII
    for x in range(1, 18):
        conn.zadd(key, x, chr(96 + x))
        
    p = ZSetPaginator(conn, key, 2, score_min=5, score_max=13)
    
    print(p.count)      # 9
    print(p.num_pages)  # 5
    print(p.page_range) # [1, 2, 3, 4, 5]
    
    page = p.page(3)
    print(page)             # <Page 3 of 5>
    print(page.object_list) # ['i', 'j']
        
    conn.delete(key)
Example #24
File: user.py  Project: timetraq/tt-server
    def __admin_handler(self, endpoint: bytes):
        """
        Handle Admin Request

        :param bytes endpoint: Endpoint (in bytes!)
        :return: jsonified answer data
        """
        json_data = request.get_json()
        if json_data is None:
            raise BadRequest()
        if 'admin_token' not in json_data:
            raise BadRequest()
        admin_token = json_data['admin_token']
        if not isinstance(admin_token, str):
            raise BadRequest()
        if not RULE_TOKEN.match(admin_token):
            raise BadRequest()
        redis = StrictRedis(connection_pool=self.api_pool)
        ep_key = 'ADMIN_TOKEN:{:s}'.format(admin_token)
        should_endpoint = redis.get(ep_key)
        if should_endpoint is None:
            raise BadRequest()
        redis.delete(ep_key)
        if should_endpoint != endpoint:
            raise BadRequest()
        if 'data' not in json_data:
            raise BadRequest()
        data = json_data['data']
        if not isinstance(data, dict):
            raise BadRequest()
        return jsonify(self.queue_dispatcher({
            '_': 'admin:{:s}'.format(endpoint.decode('utf-8')),
            'data': data,
        }))
Example #25
def clearRedis(keys):
    pool = ConnectionPool(host='localhost', port='6379', db=0)
    redis = StrictRedis(connection_pool=pool)
    for key in keys:
        if redis.exists(key):
            redis.delete(key)
    pool.disconnect()
Example #26
class RedisPipelinedClient:
    def __init__(self, host='127.0.0.1', port=6379, max_async=2):
        self.r = StrictRedis(host=host, port=port,
                             db=0).pipeline(transaction=False)
        self.max_async = max_async
        self.count = 0

    def _pipe_check(self):
        self.count += 1
        if self.count % self.max_async == 0:
            self.r.execute()

    def get(self, key):
        self.r.get(key)
        self._pipe_check()

    def put(self, key, value):
        self.r.set(key, value, nx=True)
        self._pipe_check()

    def update(self, key, value):
        self.r.set(key, value, xx=True)
        self._pipe_check()

    def remove(self, key):
        self.r.delete(key)
        self._pipe_check()
Example #27
class RedisDB(DBSpec):
    def __init__(self, port=12000, prefix=None, sync_epoch=False):
        self._server = StrictRedis(port=port)
        self._prefix = "" if prefix is None else prefix

        self._index = 0
        self._epoch = 0
        self._sync_epoch = sync_epoch

    @property
    def num_trajectories(self) -> int:
        num_trajectories = self._server.llen("trajectories") - 1
        return num_trajectories

    def set_sample_flag(self, sample: bool):
        self._server.set("sample_flag", int(sample))

    def get_sample_flag(self) -> bool:
        flag = int(self._server.get("sample_flag") or -1) == int(1)
        return flag

    def push_trajectory(self, trajectory):
        trajectory = {"trajectory": trajectory, "epoch": self._epoch}
        trajectory = pack(trajectory)
        self._server.rpush("trajectories", trajectory)

    def get_trajectory(self, index=None):
        index = index if index is not None else self._index
        trajectory = self._server.lindex("trajectories", index)
        if trajectory is not None:
            self._index = index + 1

            trajectory = unpack(trajectory)
            trajectory, trajectory_epoch = \
                trajectory["trajectory"], trajectory["epoch"]
            if self._sync_epoch and self._epoch != trajectory_epoch:
                trajectory = None

        return trajectory

    def clean_trajectories(self):
        self._server.delete("trajectories")
        self._index = 0

    def dump_weights(self, weights, prefix, epoch):
        self._epoch = epoch
        weights = {"weights": weights, "epoch": self._epoch}
        weights = pack(weights)
        self._server.set(f"{self._prefix}_{prefix}_weights", weights)

    def load_weights(self, prefix):
        weights = self._server.get(f"{self._prefix}_{prefix}_weights")
        if weights is None:
            return None
        weights = unpack(weights)
        self._epoch = weights.get("epoch")
        return weights["weights"]

    def clean_weights(self, prefix):
        self._server.delete(f"{self._prefix}_{prefix}_weights")
Example #28
class RedisManager(NoSqlManager):
    def __init__(self,
                 namespace,
                 url=None,
                 data_dir=None,
                 lock_dir=None,
                 **params):
        self.db = params.pop('db', None)
        self.connection_pools = {}
        NoSqlManager.__init__(self,
                              namespace,
                              url=url,
                              data_dir=data_dir,
                              lock_dir=lock_dir,
                              **params)

    def open_connection(self, host, port, **params):
        pool_key = self._format_pool_key(host, port, self.db)
        if pool_key not in self.connection_pools:
            self.connection_pools[pool_key] = ConnectionPool(host=host,
                                                             port=port,
                                                             db=self.db)
        self.db_conn = StrictRedis(connection_pool=self.connection_pools[pool_key],
                                   **params)

    def __contains__(self, key):
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value, expiretime=None):
        key = self._format_key(key)

        #
        # beaker.container.Value.set_value calls NamespaceManager.set_value
        # however it (until version 1.6.4) never sets expiretime param.
        #
        # Checking "type(value) is tuple" is a compromise
        # because Manager class can be instantiated outside container.py (See: session.py)
        #
        if (expiretime is None) and (type(value) is tuple):
            expiretime = value[1]

        if expiretime:
            self.db_conn.setex(key, expiretime, pickle.dumps(value, 2))
        else:
            self.db_conn.set(key, pickle.dumps(value, 2))

    def __delitem__(self, key):
        self.db_conn.delete(self._format_key(key))

    def _format_key(self, key):
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))

    def _format_pool_key(self, host, port, db):
        return '{0}:{1}:{2}'.format(host, port, self.db)

    def do_remove(self):
        self.db_conn.flush()

    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
Example #29
File: cache.py  Project: microamp/nanodata
class CacheHelper(object):
    """A thin wrapper on top of Redis."""
    def __init__(self, host="localhost", port=6379, db=0):
        self.host = host
        self.port = port
        self.db = db

    def __enter__(self):
        self.r = StrictRedis(host=self.host, port=self.port, db=self.db)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            if any((exc_type, exc_value, traceback,)):
                raise
        finally:
            pass

    def get(self, key):
        return self.r.get(key) if key in self.r.keys() else None

    def set(self, key, json):
        self.r.set(key, json)

    def keys(self):
        return self.r.keys()

    def reset(self):
        for key in self.keys():
            self.r.delete(key)
Example #30
File: tests.py  Project: reedboat/dcron
class JobStatsTest(TestCase):
    def setUp(self):
        self.redis = StrictRedis(**settings.REDISES['default'])
        self.stats = JobStats(self.redis)

    def tearDown(self):
        self.redis.delete('job_stats.1')

    def testIncr(self):
        job_id = 1
        field = 'success'
        count = self.stats.get(job_id, field)
        self.assertEqual(0, count)

        self.stats.incr(job_id, field, 1)
        count = self.stats.get(job_id, field)
        self.assertEqual(1, count)

        self.stats.incr(job_id, field, 2)
        count = self.stats.get(job_id, field)
        self.assertEqual(3, count)

        self.stats.incr(job_id, 'failed')
        self.stats.get(job_id, field)
        self.assertEqual(3, count)

    def testGet(self):
        job_id = 1
        self.stats.incr(job_id, 'failed', 1)
        self.stats.incr(job_id, 'success', 2)
        counts = self.stats.get(job_id)
        self.assertEqual(1, counts['failed'])
        self.assertEqual(2, counts['success'])
Example #31
File: db.py  Project: Jianfu-She/PySpider
class RedisQueue(object):
    def __init__(self):
        self.db = StrictRedis(host=REDIS_HOST,
                              port=REDIS_PORT,
                              password=REDIS_PASSWORD)

    def add(self, request):
        """
        向队列添加序列化后的Request
        :param request: 请求对象
        :return:
        """
        if isinstance(request, WeixinRequest):
            return self.db.rpush(REDIS_KEY, dumps(request))
        return False

    def pop(self):
        """
        取出下一个Request并反序列化
        :return:
        """
        if self.db.llen(REDIS_KEY):
            return loads(self.db.lpop(REDIS_KEY))
        else:
            return False

    def clear(self):
        self.db.delete(REDIS_KEY)

    def empty(self):
        return self.db.llen(REDIS_KEY) == 0
Example #32
class RedisManager(NoSqlManager):
    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
        self.connection_pool = params.pop('connection_pool', None)
        NoSqlManager.__init__(self, namespace, url=url, data_dir=data_dir, lock_dir=lock_dir, **params)

    def open_connection(self, host, port, **params):
        self.db_conn = StrictRedis(host=host, port=int(port), connection_pool=self.connection_pool, **params)

    def __contains__(self, key):
        log.debug('%s contained in redis cache (as %s) : %s'%(key, self._format_key(key), self.db_conn.exists(self._format_key(key))))
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value, expiretime=None):
        key = self._format_key(key)

        #XXX: beaker.container.Value.set_value calls NamespaceManager.set_value
        # however it(until version 1.6.3) never sets expiretime param. Why?

        if expiretime:
            self.db_conn.setex(key, expiretime, pickle.dumps(value))
        else:
            self.db_conn.set(key, pickle.dumps(value))

    def __delitem__(self, key):
        self.db_conn.delete(self._format_key(key))

    def _format_key(self, key):
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))

    def do_remove(self):
        self.db_conn.flush()

    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
Example #33
def zset_example():
    """ Example sorted set pagination.
    """
    from uuid import uuid4
    from redis import StrictRedis
    from zato.redis_paginator import ZSetPaginator

    conn = StrictRedis()
    key = 'paginator:{}'.format(uuid4().hex)

    # 97-114 is 'a' to 'r' in ASCII
    for x in range(1, 18):
        conn.zadd(key, x, chr(96 + x))

    p = ZSetPaginator(conn, key, 6)

    print(p.count)  # 17
    print(p.num_pages)  # 3
    print(p.page_range)  # [1, 2, 3]

    page = p.page(3)
    print(page)  # <Page 3 of 3>
    print(page.object_list)  # ['m', 'n', 'o', 'p', 'q']

    conn.delete(key)
Example #34
def main():
    # Create a StrictRedis object and connect to Redis
    try:
        strict_redis = StrictRedis(host='114.67.89.253', port=6379, db=9, password="******")
        # String type: set a key/value pair
        # set: creates the key if it does not exist, otherwise overwrites the value
        result = strict_redis.set(name="name", value="tom")
        print(result)
        # Get the value
        name = strict_redis.get("name")
        print("name: %s" % name)
        # Delete the key and its value
        result = strict_redis.delete("name")
        print("delete: %d" % result)
        # Delete multiple keys and their values
        strict_redis.set(name="name", value="tom")
        strict_redis.set(name="name1", value="tom1")
        result = strict_redis.delete("name", "name1")
        print("delete: %d" % result)

        # Get all keys; the pattern defaults to *
        strict_redis.set(name="name", value="tom")
        strict_redis.set(name="name1", value="tom1")
        result = strict_redis.keys(pattern="*")
        print(result)
        pass
    except Exception as e:
        print(e)
        pass

    pass
Example #35
class RedisHandler:
    def __init__(self):
        self._config = CacheConfig()
        self._redis = StrictRedis(
            host=self._config.host,
            port=self._config.port,
            password=self._config.password,
            db=self._config.db,
        )

    @property
    def redis(self):
        return self._redis

    @trace_service("Redis (save)", open_tracing)
    def save(self, key: str, value: str):
        self._redis.set(key, value)

    @trace_service("Redis (find)", open_tracing)
    def find_by_key(self, key: str) -> Optional[str]:
        try:
            value = self._redis.get(key).decode()
        except:
            value = None
        return value

    @trace_service("Redis (delete)", open_tracing)
    def delete_by_key(self, key: str):
        self._redis.delete(key)
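A brief usage sketch for RedisHandler (not part of the original code), assuming CacheConfig resolves to a reachable server:

handler = RedisHandler()
handler.save("greeting", "hello")
print(handler.find_by_key("greeting"))   # "hello", or None if the key is missing
handler.delete_by_key("greeting")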
Example #36
def zset_example():
    """ Example sorted set pagination.
    """
    from uuid import uuid4
    from redis import StrictRedis
    from zato.redis_paginator import ZSetPaginator
    
    conn = StrictRedis()
    key = 'paginator:{}'.format(uuid4().hex)
    
    # 97-114 is 'a' to 'r' in ASCII
    for x in range(1, 18):
        conn.zadd(key, x, chr(96 + x))
        
    p = ZSetPaginator(conn, key, 6)
    
    print(p.count)      # 17
    print(p.num_pages)  # 3
    print(p.page_range) # [1, 2, 3]
    
    page = p.page(3)
    print(page)             # <Page 3 of 3>
    print(page.object_list) # ['m', 'n', 'o', 'p', 'q']
        
    conn.delete(key)
Example #37
def check_with_ehall(jasdm: str, day: int, jc: str, zylxdm: str):
    redis = StrictRedis(connection_pool=__Application.redis_pool)
    lock = Lock(redis, "Spider")
    if lock.acquire():
        try:
            save_cookies(), save_time()
            cookies = json.loads(redis.hget("Spider", "cookies"))
            time_info = json.loads(redis.hget("Spider", "time_info"))
            redis.delete("Spider")
            res = requests.post(
                url=
                "http://ehallapp.nnu.edu.cn/jwapp/sys/jsjy/modules/jsjysq/cxyzjskjyqk.do",
                cookies=cookies,
                data={
                    'XNXQDM': time_info['XNXQDM'][0],
                    'ZC': time_info['ZC'],
                    'JASDM': jasdm
                }).json()
            kcb = json.loads(
                res['datas']['cxyzjskjyqk']['rows'][0]['BY1'])[(day + 6) % 7]
            for row in kcb:
                if jc in row['JC'].split(',') and row['ZYLXDM'] in (zylxdm,
                                                                    ''):
                    return True  # data matches; pending correction
            return False  # data differs; pending update

        finally:
            lock.release()
Example #38
def main():
    r = StrictRedis(unix_socket_path=get_socket_path('cache'))
    r.delete('cache_loaded')
    website_dir = get_homedir() / 'website'
    ip = get_config('generic', 'website_listen_ip')
    port = get_config('generic', 'website_listen_port')
    try:
        p = Popen([
            'gunicorn', '-w', '10', '--graceful-timeout', '2', '--timeout',
            '300', '-b', f'{ip}:{port}', '--log-level', 'info', 'web:app'
        ],
                  cwd=website_dir)
        set_running('website')
        while True:
            if shutdown_requested() or p.poll() is not None:
                break
            time.sleep(1)
    except KeyboardInterrupt:
        print('Website killed by user.')
    finally:
        print('Shutting down website.')
        try:
            # Killing everything if possible.
            p.send_signal(signal.SIGWINCH)
            p.send_signal(signal.SIGTERM)
        except Exception:
            pass
        unset_running('website')
Example #39
class RedisDataStore(DataStore):
    """Redis-backed datastore object."""

    def __init__(self, number=0):
        redis_host = os.environ.get('REDIS_PORT_6379_TCP_ADDR')
        redis_port = os.environ.get('REDIS_PORT_6379_TCP_PORT')
        self.redis_conn = StrictRedis(host=redis_host, port=redis_port,
                                      db=number)

    def __setitem__(self, k, v):
        self.redis_conn.set(k, v)

    def __getitem__(self, k):
        return self.redis_conn.get(k)

    def __delitem__(self, k):
        self.redis_conn.delete(k)

    def get(self, k):
        return self.redis_conn.get(k)

    def __contains__(self, k):
        return self.redis_conn.exists(k)

    def todict(self):
        #TODO(tvoran): use paginate
        #TODO(tvoran): do something besides multiple gets
        data = {}
        for key in self.redis_conn.keys():
            data[key] = self.get(key)
        return data

    def clear_all(self):
        self.redis_conn.flushdb()
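A short usage sketch for RedisDataStore, which reads the Docker-style REDIS_PORT_6379_TCP_ADDR / REDIS_PORT_6379_TCP_PORT environment variables for its connection details:

store = RedisDataStore(number=0)
store['greeting'] = 'hello'
print('greeting' in store, store.get('greeting'))
del store['greeting']
store.clear_all()    # FLUSHDB on the selected database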
Example #40
    def verify(self, delete_cache=False):
        redis_cli = StrictRedis(connection_pool=redis_pool)
        code = redis_cli.get(name=self.phone) or b''
        if code.decode('utf8') != self.verify_code:
            return False
        if delete_cache:
            redis_cli.delete(self.phone)
        return True
Example #41
File: rediscli.py  Project: gamdwk/seaflow
def verify_code(email, code):
    cli = StrictRedis(connection_pool=pool)
    e_code = cli.get(email)
    if e_code == code:
        cli.delete(email)
        return True
    else:
        return False
Example #42
def answer(data, module):
    # Extract msg
    msg = receive.parse_xml(data)
    msg_type = msg.MsgType
    to_user = msg.FromUserName
    from_user = msg.ToUserName
    if isinstance(msg, receive.TextMsg):
        msg_content = msg.Content.decode('utf-8')
    elif isinstance(msg, receive.EventMsg):
        msg_content = msg.Event.decode('utf-8')
    else:
        msg_content = 'default'

    # Initialize environment
    global dialog_module
    dialog_module = module
    global hkey
    hkey = dialog_module.REDIS_KEY % {'open_id': to_user}
    global redis_db
    redis_db = StrictRedis(host=dialog_module.REDIS_HOST,
                           port=dialog_module.REDIS_PORT,
                           password=dialog_module.REDIS_PASSWORD,
                           db=0)

    hkey = dialog_module.REDIS_KEY % {'open_id': to_user}
    hist = redis_db.get(hkey)
    # New session, or the old one timed out: create a new dialog
    if not hist:
        dialog = _new_dialog(msg_type, msg_content, to_user)
        logger.debug('new_dialog')
    # Session history exists: replay the context
    else:
        logger.debug('replay_dialog')
        try:
            dialog = _replay_dialog(hist, to_user)
        except StopIteration:
            logger.error('Corrupted session history; recreating the dialog...')
            dialog = _new_dialog(msg_type, msg_content, to_user)
    # Send the message
    while True:
        try:
            type, msg = _redis_send(hkey, dialog, msg_content)
            break
        except StopIteration as e:
            # The dialog has finished; remove its record from Redis
            type, msg = e.value
            redis_db.delete(hkey)
            break
        except UnexpectAnswer:
            # Raised when the user sends an invalid reply;
            # the bot assumes the user wants to start a new dialog
            redis_db.delete(hkey)
            dialog = _new_dialog(msg_type, msg_content, to_user)
            continue

    wechat_reply = getattr(reply, type)
    print(wechat_reply(to_user, from_user, msg).format())
    return wechat_reply(to_user, from_user, msg)
Example #43
class RedisManager(NoSqlManager):

    connection_pools = {}

    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
        self.db = params.pop("db", None)
        self.dbpass = params.pop("password", None)
        NoSqlManager.__init__(self, namespace, url=url, data_dir=data_dir, lock_dir=lock_dir, **params)

    def open_connection(self, host, port, **params):
        pool_key = self._format_pool_key(host, port, self.db)
        if pool_key not in self.connection_pools:
            self.connection_pools[pool_key] = ConnectionPool(host=host, port=port, db=self.db, password=self.dbpass)
        self.db_conn = StrictRedis(connection_pool=self.connection_pools[pool_key], **params)

    def __contains__(self, key):
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value, expiretime=None):
        key = self._format_key(key)

        #
        # beaker.container.Value.set_value calls NamespaceManager.set_value
        # however it (until version 1.6.4) never sets expiretime param.
        #
        # Checking "type(value) is tuple" is a compromise
        # because Manager class can be instantiated outside container.py (See: session.py)
        #
        if (expiretime is None) and (type(value) is tuple):
            expiretime = value[1]

        if self.serializer == "json":
            serialized_value = json.dumps(value, ensure_ascii=True)
        else:
            serialized_value = pickle.dumps(value, 2)

        if expiretime:
            self.db_conn.setex(key, expiretime, serialized_value)
        else:
            self.db_conn.set(key, serialized_value)

    def __delitem__(self, key):
        self.db_conn.delete(self._format_key(key))

    def _format_key(self, key):
        return "beaker:%s:%s" % (self.namespace, key.replace(" ", "\302\267"))

    def _format_pool_key(self, host, port, db):
        return "{0}:{1}:{2}".format(host, port, self.db)

    def do_remove(self):
        for key in self.keys():
            self.db_conn.delete(key)

    def keys(self):
        return self.db_conn.keys("beaker:%s:*" % self.namespace)
Example #44
def do_deletes():
    """DELETE"""
    db_pipe = StrictRedis().pipeline()

    print 'performing a few deletes...'
    del_keys = (260, 271, 285, 300, 310)
    for key in del_keys:
        db_pipe.delete(key)

    print 'db_pipe.command_stack:\n{}'.format(db_pipe.command_stack)
    db_pipe.execute()
Example #45
File: views.py  Project: henyhollar/tPi
    def post(self, request):
        key = request.data['course_code']

        r = StrictRedis(host='localhost', port=6379, db=0)
        r.delete(key)

        redis_can_take_course = RedisList(key)
        can_take_course = set(request.data['can_take_course'])
        for item in can_take_course:
            redis_can_take_course.append(item.upper())

        return Response({'success': 'can_take_course'})
Example #46
File: views.py  Project: henyhollar/tPi
    def get(self, request):
        r = StrictRedis(host='localhost', port=6379, db=0)
        r.delete(r.keys('active_class:*')[0])

        try:
            staff = User.objects.get(is_staff=True)
            Token.objects.filter(~Q(user=staff)).delete()
        except MultipleObjectsReturned:
            Token.objects.all().delete()
            # warn the admin of multiple staff entries, possibly due to a password breach

        return Response('Class ends successfully')
Example #47
File: storage.py  Project: calinf/pushy
class RedisStorage(object):
    def __init__(self, host, port):
        self.connection = StrictRedis(host=host, port=port)

    def get(self, item_id):
        return self.connection.get(item_id)

    def set(self, item_id, item_value):
        self.connection.rpush(item_id, item_value)
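        # NOTE: rpush appends to a Redis list, while get() above issues a plain GET,
        # so a value written here cannot be read back through get() (WRONGTYPE error).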

    def rm(self, item_id):
        self.connection.delete(item_id)
Example #48
class RedisDict(MutableMapping):
    """ RedisDict - a dictionary-like interface for ``redis`` key-stores
    """
    def __init__(self, namespace, collection_name='redis_dict_data',
                 connection=None):
        """
        The actual key name on the redis server will be
        ``namespace``:``collection_name``

        In order to deal with how redis stores data/keys,
        everything, i.e. keys and data, must be pickled.

        :param namespace: namespace to use
        :param collection_name: name of the hash map stored in redis
                                (default: redis_dict_data)
        :param connection: ``redis.StrictRedis`` instance.
                           If it's ``None`` (default), a new connection with
                           default options will be created

        """
        if connection is not None:
            self.connection = connection
        else:
            self.connection = Redis()
        self._self_key = ':'.join([namespace, collection_name])

    def __getitem__(self, key):
        result = self.connection.hget(self._self_key, pickle.dumps(key))
        if result is None:
            raise KeyError
        return pickle.loads(bytes(result))

    def __setitem__(self, key, item):
        self.connection.hset(self._self_key, pickle.dumps(key),
                             pickle.dumps(item))

    def __delitem__(self, key):
        if not self.connection.hdel(self._self_key, pickle.dumps(key)):
            raise KeyError

    def __len__(self):
        return self.connection.hlen(self._self_key)

    def __iter__(self):
        for v in self.connection.hkeys(self._self_key):
            yield pickle.loads(bytes(v))

    def clear(self):
        self.connection.delete(self._self_key)

    def __str__(self):
        return str(dict(self.items()))
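A usage sketch for RedisDict (assuming a local Redis reachable with default settings): because it subclasses MutableMapping, the ordinary dict protocol applies, and both keys and values may be arbitrary picklable objects:

d = RedisDict('myapp')               # backed by the hash key "myapp:redis_dict_data"
d[('user', 42)] = {'name': 'Ada'}    # key and value are both pickled
print(len(d), d[('user', 42)])
for k in d:                          # iterates over unpickled keys
    print(k)
d.clear()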
Example #49
class TestRedisHashDict(TestCase):
    def setUp(self):
        self.redis = StrictRedis()
        self.redis.delete('foobar')
        self.dict = RedisHashDict(self.redis, 'foobar')

    def test_items(self):
        self.dict[b'foo'] = b'bar'
        self.dict[b'bob'] = b'alice'

        truth = (b'foo', b'bar'), (b'bob', b'alice')

        for item in self.dict.items():
            self.assertIn(item, truth)
Example #50
    def test_Redis(self):
        host = "localhost"
        r = StrictRedis(host, socket_timeout=60)
        msg = "Hello world"

        r.delete("channel")
        r.rpush("channel", msg)
        m = r.lpop("channel")
        print m
        assert(m == msg)

        r.rpush("channel", [1, 2, 3])
        m = ast.literal_eval(r.lpop("channel"))
        print m, type(m)
Example #51
File: test_rmodels.py  Project: 0-T-0/copr
class TestRModels(object):

    def setup_method(self, method):
        self.rc = StrictRedis()
        self.disabled = False
        try:
            self.rc.ping()
        except ConnectionError:
            self.disabled = True
        self.prefix = "copr:test:r_models"

        self.time_now = time.time()

    def teardown_method(self, method):
        if self.disabled:
            return

        keys = self.rc.keys('{}*'.format(self.prefix))
        if keys:
            self.rc.delete(*keys)

    def test_timed_stats_events(self):
        if self.disabled:
            return

        TimedStatEvents.add_event(self.rc, name="foobar", prefix=self.prefix,
                                  timestamp=self.time_now, )

        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,) == 1
        TimedStatEvents.add_event(self.rc, name="foobar", prefix=self.prefix,
                                  timestamp=self.time_now, count=2)

        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,) == 3

        TimedStatEvents.add_event(self.rc, name="foobar", prefix=self.prefix,
                                  timestamp=self.time_now - 1000000, count=2)
        TimedStatEvents.add_event(self.rc, name="foobar", prefix=self.prefix,
                                  timestamp=self.time_now - 3000000, count=3)

        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,) == 3
        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,
                                         day_min=self.time_now - 2000000) == 5
        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,
                                         day_min=self.time_now - 5000000) == 8

        TimedStatEvents.trim_before(self.rc, name="foobar",
                                    prefix=self.prefix, threshold_timestamp=self.time_now - 200000)

        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,
                                         day_min=self.time_now - 5000000) == 3
Example #52
class TaskCache(ITaskCache):
    """
    `self.user_store': {'user_id': <list of task_ids>}
    `self.task_store`: {'task_id`: <AsyncResult>}
    `self.progress_store`: {`task_id`: `progress_value(int)`}
    """
    def __init__(self, user_kwargs, task_kwargs, progress_kwargs):
        self.user_store = StrictRedis(**user_kwargs)
        self.task_store = StrictRedis(**task_kwargs)
        self.progress_store = LingvodocRedisClient(**progress_kwargs)

    def get(self, user, remove_finished=False):
        result = dict()
        tasks = self.user_store.get(user.id)
        if tasks is None:
            return {}
        tasks = pickle.loads(tasks)
        remained_tasks = list()
        for t in tasks:
            val = self.task_store.get(t)
            if val is None:
                continue
            async_result = pickle.loads(val)
            progress = self.progress_store.get(t)
            # Redis client returns byte array. We need to decode it
            if progress is not None:
                progress = int(progress.decode())
            result[t] = {'finished': async_result.ready(),
                         'progress': progress}
            if remove_finished:
                if async_result.ready():
                    self.task_store.delete(t)
                else:
                    remained_tasks.append(t)
        if remove_finished:
            self.user_store.set(user.id, pickle.dumps(remained_tasks))
        return result


    # TODO: add try/catch handlers.
    # we should remove the task from caches (and queue?) if exception is raised
    def set(self, user, task_key, async_task):
        self.task_store.set(task_key, pickle.dumps(async_task))
        cached = self.user_store.get(user.id)
        if cached is None:
            tmp_tasks = [task_key]
        else:
            tmp_tasks = pickle.loads(cached)
            tmp_tasks.append(task_key)
        self.user_store.set(user.id, pickle.dumps(tmp_tasks))
Example #53
class Repository:

    def __init__(self, host, port):
        self.r = StrictRedis(host=host, port=port)

# store dictionary
#    redis.hmset(hkey, dict_to_store)
# retrieve dictionary
#    redis.hmget(hkey)
# lists
#    redis.lappend(hkey, string)
#    redis.llen

    def get_item(self, key_store, index):
        length = self.r.llen(key_store)
        if length == 0:
            return None
        if index >= length:
            raise Exception('Index out of range.')
        item_json = self.r.lindex(key_store, length - (index + 1))
        item_dict = json.loads(item_json)
        return item_dict

    def append_item(self, key_store, item):
        item_json = json.dumps(item)
        if not self.r.lpush(key_store, item_json):
            raise Exception('Unable to write to key_store [%s]: %s' % (key_store, item_json))

    def fetch_all(self, key_store):
        q = []
        length = self.r.llen(key_store)
        for i in range(length):
            item_json = self.r.lindex(key_store, length - (i + 1))
            item_dict = json.loads(item_json)
            q.append(item_dict)
        return q

    def set(self, key_store, items):
        self.r.delete(key_store)
        for item_dict in items:
            item_json = json.dumps(item_dict)
            if not self.r.lpush(key_store, item_json):
                raise Exception('Unable to write to key_store [%s]: %s' % (key_store, item_json))

    def delete(self, key_store):
        self.r.delete(key_store)

    def info(self):
        return self.r.info()
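
# A hedged usage sketch for the Repository above: items go in with LPUSH and are read back
# with LINDEX from the tail, so fetch_all returns them in insertion order. The host, port and
# key name are assumptions, and a running Redis plus the json/StrictRedis imports the snippet
# relies on are required.
repo = Repository(host='localhost', port=6379)
repo.set('jobs', [{'id': 1}, {'id': 2}])            # replace the whole list
repo.append_item('jobs', {'id': 3})                 # append a third item
assert repo.get_item('jobs', 0) == {'id': 1}        # oldest item first
assert repo.fetch_all('jobs') == [{'id': 1}, {'id': 2}, {'id': 3}]
repo.delete('jobs')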
예제 #54
0
def delete(obj, key=None):
    """
    Delete a single key if specified, or the whole namespace if key is None
    :param obj: settings object
    :param key: key to delete from store location
    :return: None
    """
    client = StrictRedis(**obj.REDIS_FOR_DYNACONF)
    holder = "DYNACONF_%s" % obj.DYNACONF_NAMESPACE
    if key:
        client.hdel(holder.upper(), key.upper())
        obj.unset(key)
    else:
        keys = client.hkeys(holder.upper())
        client.delete(holder.upper())
        obj.unset_all(keys)
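
# A hedged sketch of calling the delete() helper above, assuming the StrictRedis import the
# snippet relies on. The settings object here is a stand-in exposing only the attributes the
# function touches (REDIS_FOR_DYNACONF, DYNACONF_NAMESPACE, unset, unset_all); it is not the
# real dynaconf settings API.
class FakeSettings:
    REDIS_FOR_DYNACONF = {'host': 'localhost', 'port': 6379, 'db': 0}
    DYNACONF_NAMESPACE = 'myapp'

    def unset(self, key):
        pass

    def unset_all(self, keys):
        pass

delete(FakeSettings(), key='debug')   # remove one field from the DYNACONF_MYAPP hash
delete(FakeSettings())                # drop the whole namespace hash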
예제 #55
0
class VisualDebugger:

    def __init__(self):
        self.available_buckets = config["visual_debugger"]["available_buckets"]
        self.bucket_generator = itertools.cycle(self.available_buckets)

        self.redis_client = StrictRedis(**config["redis"])
        self.clear_image_data()

    def store_image_data(self, image_data, image_shape, bucket="debug"):
        self.redis_client.lpush(f"{config['visual_debugger']['redis_key_prefix']}:{bucket}:SHAPE", pickle.dumps(image_shape))
        self.redis_client.lpush(f"{config['visual_debugger']['redis_key_prefix']}:{bucket}", image_data.tobytes())

    def retrieve_image_data(self):
        bucket = next(self.bucket_generator)
        bucket_key = f"{config['visual_debugger']['redis_key_prefix']}:{bucket}"

        response = self.redis_client.rpop(bucket_key)

        if response is not None:
            bucket = bucket_key.split(":")[-1]

            image_shape = self.redis_client.rpop(f"{config['visual_debugger']['redis_key_prefix']}:{bucket}:SHAPE")
            image_shape = pickle.loads(image_shape)

            image_data = np.frombuffer(response, dtype="uint8").reshape(image_shape)

            return bucket, image_data

        return None

    def save_image_data(self, bucket, image_data):
        if bucket in self.available_buckets:
            if image_data.dtype == "bool" or (image_data.dtype == "uint8" and 1 in np.unique(image_data)):
                image_data = image_data.astype("uint8") * 255

            skimage.io.imsave(f"{bucket}.png", image_data)

    def clear_image_data(self):
        visual_debugger_keys = self.redis_client.keys(f"{config['visual_debugger']['redis_key_prefix']}*")

        for key in visual_debugger_keys:
            self.redis_client.delete(key.decode("utf-8"))

    def get_bucket_queue_length(self, bucket):
        return self.redis_client.llen(f"{config['visual_debugger']['redis_key_prefix']}:{bucket}")
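
# A self-contained sketch of the round trip the class above relies on: the array shape is
# pickled into a companion list while the raw bytes go into the main list, and frombuffer
# (the non-deprecated replacement for fromstring) restores the array on the way out. Key
# names here are assumptions for illustration.
import pickle
import numpy as np
from redis import StrictRedis

rc = StrictRedis(host='localhost', port=6379, db=0)
image = np.zeros((4, 4, 3), dtype='uint8')

rc.lpush('debug:SHAPE', pickle.dumps(image.shape))
rc.lpush('debug', image.tobytes())

shape = pickle.loads(rc.rpop('debug:SHAPE'))
restored = np.frombuffer(rc.rpop('debug'), dtype='uint8').reshape(shape)
assert (restored == image).all()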
예제 #56
0
파일: cache.py 프로젝트: Esiravegna/domus
class RedisCache(object):

    def __init__(self, params=None):
        self._validate(params or {})

        if not self.server:
            raise Exception('Redis Server Not Defined')

        try:
            log.debug('Connecting to redis at [%s], db [%s]' % (self.server, self.database))
            self.cache = StrictRedis(self.server, port=self.port, db=self.database)
        except ConnectionError as ex:
            raise Exception("Unable to connect to Redis", ex)

    def get(self, key):
        """
        Fetch a given key from the cache. Returns None if the key does not exist.
        """
        ckey = self._create_key(key)
        log.debug("Getting the cache key [%s]" % ckey)
        return self.cache.get(ckey)

    def ping(self):
        """
        This command is often used to test if the cache is still alive, or to measure latency.
        """
        log.debug("Ping to the cache")
        return self.cache.ping()

    def store(self, key, value, expires=None):
        """
        Set a value in the cache. If expires is given, it is used as the key's
        TTL in seconds; otherwise the key is stored without an expiry.
        """
        ckey = self._create_key(key)
        log.debug("Storing the cache key [%s]" % ckey)
        return self.cache.set(ckey, value, ex=expires)

    def delete(self, key):
        """
        Delete a key from the cache, failing silently.
        """
        ckey = self._create_key(key)
        log.debug("Deleting the cache key [%s]" % ckey)
        return self.cache.delete(ckey)

    def _validate(self, params):
        """
        Initialize all the needed parameters
        """
        self.server = params.get('server', 'localhost')
        self.port = params.get('port', 6379)
        self.database = params.get('database', 2)
        self.key_prefix = params.get('key_prefix', 'domus')

    def _create_key(self, key):
        return "%s.%s" % (self.key_prefix, key)
예제 #57
0
class RedisManager(NoSqlManager):
    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
        self.expiretime = params.pop('expiretime', None)
        NoSqlManager.__init__(self, namespace, url=url, data_dir=data_dir, lock_dir=lock_dir, **params)

    def open_connection(self, host, port, **params):
        self.db_conn = StrictRedis(host=host, port=int(port), **params)

    def __getitem__(self, key):
        return pickle.loads(self.db_conn.hget(self._format_key(key), 'data'))

    def __contains__(self, key):
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value, expiretime=None):
        key = self._format_key(key)

        #
        # beaker.container.Value.set_value calls NamespaceManager.set_value
        # however it (until version 1.6.4) never sets expiretime param.
        #
        # Checking "type(value) is tuple" is a compromise
        # because Manager class can be instantiated outside container.py (See: session.py)
        #
        if (expiretime is None) and (type(value) is tuple):
            expiretime = value[1]

        self.db_conn.hset(key, 'data', pickle.dumps(value))
        # redis-py only accepts bytes, strings and numbers, so store timestamps as strings
        self.db_conn.hset(key, 'accessed', str(datetime.now()))
        self.db_conn.hsetnx(key, 'created', str(datetime.now()))

        if expiretime or self.expiretime:
            self.db_conn.expire(key, expiretime or self.expiretime)

    def __delitem__(self, key):
        self.db_conn.delete(self._format_key(key))

    def _format_key(self, key):
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))

    def do_remove(self):
        self.db_conn.flushdb()

    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
예제 #58
0
파일: redismq.py 프로젝트: ezbake/redisMQ
class RedisConsumer(object):
    def __init__(self, timeout, group_id, hostname='localhost', port=6379):
        self.group_id = group_id
        self.timeout = timeout
        log.debug("Initializing RedisConsumer with hostname of %s and port %s" % (hostname, port))
        self.r = StrictRedis(host=hostname, port=port)

    def poll(self, topic):
        result = None
        current_index_key = get_next_index_for_topic_key(topic)
        end_millis = time.time() * 1000 + self.timeout
        log.debug("Polling topic %s" % topic)

        while time.time() * 1000 < end_millis:
            if self.r.exists(current_index_key):
                current_index = int(self.r.get(current_index_key))
                consumer_index_key = get_next_index_for_group_id_key(topic, self.group_id)
                pl = self.r.pipeline()

                pl.watch(consumer_index_key)
                consumer_index = 0
                if self.r.exists(consumer_index_key):
                    consumer_index = int(self.r.get(consumer_index_key))

                if current_index > consumer_index:
                    try:
                        pl.multi()
                        pl.incr(consumer_index_key)

                        incr_result = pl.execute()

                        if incr_result is not None and len(incr_result) > 0:
                            consumer_index = int(incr_result[0])
                            key = get_message_key(topic, consumer_index)
                            if self.r.exists(key):
                                result = self.r.get(key)
                                break
                    except WatchError:
                        log.debug("Redis keys changed for topic %s and group %s, trying again" % (topic, self.group_id))

        return result

    def unsubscribe_from_topic(self, topic):
        self.r.delete(get_next_index_for_group_id_key(topic, self.group_id))
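
# A self-contained sketch of the optimistic-locking pattern poll() relies on: WATCH the
# consumer index, read both indexes, then INCR inside MULTI; a WatchError means another
# consumer raced us and we retry. Key names and the helper are illustrative assumptions.
from redis import StrictRedis, WatchError

r = StrictRedis(host='localhost', port=6379)

def claim_next_index(consumer_index_key, producer_index_key):
    # Returns the next index this consumer owns, or None if nothing new was produced.
    while True:
        pipe = r.pipeline()
        try:
            pipe.watch(consumer_index_key)                 # switches the pipeline to immediate mode
            consumed = int(pipe.get(consumer_index_key) or 0)
            produced = int(r.get(producer_index_key) or 0)
            if produced <= consumed:
                return None                                # nothing to claim yet
            pipe.multi()                                   # start buffering the transaction
            pipe.incr(consumer_index_key)
            return int(pipe.execute()[0])                  # claim succeeded
        except WatchError:
            continue                                       # the watched key changed; try again
        finally:
            pipe.reset()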
예제 #59
0
class SnapshotWriterTest(TestCase):
    def setUp(self):
        self.snapshot_queue = Queue()
        self.db = StrictRedis(host=REDIS['host'], port=REDIS['port'], db=SNAPSHOTS_DB)
        self.writer = SnapshotWriter(self.snapshot_queue)

    def test_should_read_snapshots_from_queue_periodically_and_write_them_to_redis_db(self):
        self.writer.start()

        snapshot_one = {'id': 1, 'other_details': 'Vehicle data', 'time': 1.0}
        snapshot_two = {'id': 2, 'other_details': 'Vehicle data', 'time': 1.0}
        for item in [snapshot_one, snapshot_two]:
            self.snapshot_queue.put(item)

        sleep(0.3)
        self.writer.shutdown()

        snapshots_in_db = [self.db.hgetall(key) for key in ['vehicle:1:1.000000', 'vehicle:2:1.000000']]
        expected_snapshots = [self.redisify_snapshot(snapshot) for snapshot in [snapshot_one, snapshot_two]]

        self.assertListEqual(snapshots_in_db, expected_snapshots)

    def test_should_write_snapshot_with_key_a_concatenation_of_vehicle_id_and_time_stamp_to_six_decimal_places(self):
        self.writer.start()

        snapshot = {'id': 1, 'other_details': 'Vehicle data', 'time': 1.123459881213}
        self.snapshot_queue.put(snapshot)

        sleep(0.1)
        self.writer.shutdown()

        self.assertTrue('vehicle:1:1.123460' in self.db.keys("*"))

    def test_should_stop_reading_snapshots_if_shutdown_is_requested(self):
        writer = SnapshotWriter(self.snapshot_queue)
        writer.start()
        writer.shutdown()
        self.assertTrue(True)  # Tests that we get here

    def redisify_snapshot(self, snapshot):
        snapshot.update({'id': str(snapshot['id']), 'time': "%.1f" % snapshot['time']})
        return snapshot

    def tearDown(self):
        for key in self.db.keys("*"):
            self.db.delete(key)
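
# A minimal sketch of what a SnapshotWriter compatible with these tests might look like:
# a thread that drains the queue and writes each snapshot as a Redis hash keyed by
# "vehicle:<id>:<time to six decimal places>". This is an assumption for illustration;
# the real implementation is not shown here.
from queue import Empty
from threading import Thread
from redis import StrictRedis

class SnapshotWriterSketch(Thread):
    def __init__(self, snapshot_queue, host='localhost', port=6379, db=0):
        super().__init__(daemon=True)
        self.queue = snapshot_queue
        self.db = StrictRedis(host=host, port=port, db=db)
        self.running = True

    def run(self):
        while self.running:
            try:
                snapshot = self.queue.get(timeout=0.05)
            except Empty:
                continue
            key = 'vehicle:%s:%.6f' % (snapshot['id'], snapshot['time'])
            self.db.hset(key, mapping=snapshot)   # redis-py >= 3.5 accepts a mapping here

    def shutdown(self):
        self.running = False
        self.join()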