Exemplo n.º 1
0
    def test_fix(self):
        """fix_migrating repairs half-finished slot migration states.

        NOTE(review): relies on the external `comm`/`base` helpers and a
        live local cluster on ports 7100/7101 — integration test.
        """
        def migrate_one_slot(nodes, _):
            # Balance plan handed to join_cluster; always moves from the
            # 7100 node to the other node (tuple presumably
            # (source, target, slot_count) — confirm against comm docs).
            if nodes[0].port == 7100:
                source, target = nodes
            else:
                target, source = nodes
            return [(source, target, 1)]

        comm.create([('127.0.0.1', 7100)])
        rc = StrictRedisCluster(startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7100
        }],
                                decode_responses=True)
        comm.join_cluster('127.0.0.1',
                          7100,
                          '127.0.0.1',
                          7101,
                          balance_plan=migrate_one_slot)

        # A key must survive fixing the migration state.
        rc.set('h-893', 'I am in slot 0')
        comm.fix_migrating('127.0.0.1', 7100)
        self.assertEqual('I am in slot 0', rc.get('h-893'))

        t7100 = Connection('127.0.0.1', 7100)
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))

        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        # Leave slot 0 dangling in "importing" state, then fix it.
        t7100.execute('cluster', 'setslot', 0, 'importing', n7101.node_id)

        comm.fix_migrating('127.0.0.1', 7100)
        self.assertEqual('I am in slot 0', rc.get('h-893'))

        # After the fix, 7100 holds all 16384 slots and 7101 none.
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        self.assertEqual(16384, len(n7100.assigned_slots))
        self.assertEqual(0, len(n7101.assigned_slots))

        t7101 = Connection('127.0.0.1', 7101)
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        self.assertEqual(16384, len(n7100.assigned_slots))
        self.assertEqual(0, len(n7101.assigned_slots))

        # Same repair, but from a dangling "migrating" state this time.
        t7100.execute('cluster', 'setslot', 0, 'migrating', n7101.node_id)
        comm.fix_migrating('127.0.0.1', 7100)
        self.assertEqual('I am in slot 0', rc.get('h-893'))

        comm.quit_cluster('127.0.0.1', 7101)
        rc.delete('h-893')
        comm.shutdown_cluster('127.0.0.1', 7100)

        t7100.close()
        t7101.close()
Exemplo n.º 2
0
    def test_start_with_max_slots_set(self):
        """Clusters created with max_slots=7000 still serve reads/writes."""
        host = '127.0.0.1'

        def new_client(port):
            # Fresh cluster client pointed at the given node.
            return StrictRedisCluster(
                startup_nodes=[{'host': host, 'port': port}],
                decode_responses=True)

        def check_round_trip(client):
            # A plain set/get/delete cycle must succeed.
            client.set('key', 'value')
            self.assertEqual('value', client.get('key'))
            client.delete('key')

        # Single-node cluster created with the slot cap.
        comm.create([(host, 7100)], max_slots=7000)
        check_round_trip(new_client(7100))
        comm.shutdown_cluster(host, 7100)

        # Two-node cluster started with the same slot cap.
        comm.start_cluster_on_multi(
            [(host, 7100), (host, 7101)], max_slots=7000)
        check_round_trip(new_client(7100))
        comm.quit_cluster(host, 7101)
        comm.shutdown_cluster(host, 7100)
Exemplo n.º 3
0
def file_write_redis():
    '''
    将源文件中的内容写入redis中
    :return:
    '''
    gaode_file_path = file_exists()  #
    # fanyule_two_game_file_path = r'/ftp_samba/112/file_4spider/dmn_fanyule2_game/'    #
    startup_nodes = [{'host': 'redis1', 'port': '6379'}]
    r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)

    # 高德
    gaode_file = open(gaode_file_path, 'r')
    gaode_keyword_length = r.llen(
        'spider:python:gaode:keyword')  # redis中gaode的数据量
    print 'redis中gaode_keyword列表长度:', gaode_keyword_length
    if gaode_keyword_length != 0:
        r.delete('spider:python:gaode:keyword')
        print 'redis中gaode_keyword列表长度不为0, 删除后的列表长度:', r.llen(
            'spider:python:gaode:keyword')
    for line in gaode_file:
        new_line = line.strip()
        if new_line:
            r.rpush('spider:python:gaode:keyword', new_line)
    gaode_keyword_length = r.llen('spider:python:gaode:keyword')
    print '重新写入后redis中gaode_keyword列表长度:', gaode_keyword_length
Exemplo n.º 4
0
def redis_cluster():
    """Connect to the cluster and delete every key matching ``rc.item.ft.*``.

    Prints the number of matched keys; exits the process with status 1
    when the cluster cannot be reached.
    """
    redis_nodes = [{
        'host': '172.20.4.91',
        'port': 7000
    }, {
        'host': '172.20.4.91',
        'port': 7001
    }, {
        'host': '172.20.4.92',
        'port': 7002
    }, {
        'host': '172.20.4.92',
        'port': 7003
    }, {
        'host': '172.20.4.95',
        'port': 7004
    }, {
        'host': '172.20.4.95',
        'port': 7005
    }]
    try:
        redisconn = StrictRedisCluster(startup_nodes=redis_nodes)
    except Exception as e:
        # Include the reason — the original swallowed the exception detail.
        print("Connect Error! %s" % e)
        sys.exit(1)

    keys = redisconn.keys("rc.item.ft.*")
    for key in keys:
        redisconn.delete(key)
    print(len(keys))
Exemplo n.º 5
0
    def test_quit_problems(self):
        """Quitting is refused while a master has slaves or on the last node.

        NOTE(review): Python-2 test (`xrange`, `assertRaisesRegexp`);
        relies on the external `comm`/`base` helpers and local nodes
        7100-7102 — integration test.
        """
        comm.start_cluster('127.0.0.1', 7100)
        comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
        # 7102 replicates 7100, making 7100 a master with a slave.
        comm.replicate('127.0.0.1', 7100, '127.0.0.1', 7102)
        time.sleep(1)  # let the cluster state settle

        rc = StrictRedisCluster(startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7100
        }],
                                decode_responses=True)

        for i in xrange(20):
            rc.set('key_%s' % i, 'value_%s' % i)
        for i in xrange(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        # Slots are split evenly: 7101 gets 0-8191, 7100 keeps the rest.
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(3, len(nodes))
        self.assertEqual(range(8192),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(range(8192, 16384),
                         nodes[('127.0.0.1', 7100)].assigned_slots)
        for i in xrange(20):
            rc.delete('key_%s' % i)

        # A master with an attached slave must refuse to quit.
        self.assertRaisesRegexp(ValueError, '^The master still has slaves$',
                                comm.quit_cluster, '127.0.0.1', 7100)
        comm.quit_cluster('127.0.0.1', 7102)
        comm.quit_cluster('127.0.0.1', 7101)
        # The last remaining node must refuse to quit as well.
        self.assertRaisesRegexp(ValueError, '^This is the last node',
                                comm.quit_cluster, '127.0.0.1', 7100)
        comm.shutdown_cluster('127.0.0.1', 7100)
Exemplo n.º 6
0
class RedisEngine:
    """Thin wrapper around a single redis instance or a redis cluster.

    A single host (a string, or a one-element list) yields a
    ``StrictRedis`` backed by a connection pool; a multi-host list
    yields a ``StrictRedisCluster``.
    """

    @property
    def engine(self):
        # Expose the underlying client for callers needing raw access.
        return self.redis_eng

    def __init__(self, redis_host, redis_port, redis_password, db=0):
        """Build the appropriate client.

        redis_host: hostname string, or list of hostnames.
        redis_port: int (shared port) or list parallel to redis_host.
        redis_password: password forwarded to the client.
        db: database index (single-instance mode only).

        Raises TypeError when redis_host is a list but redis_port is
        neither an int nor a list (the original fell through and raised
        a confusing NameError on ``startup_nodes``).
        """
        # A one-element host list is treated as a single host.
        if isinstance(redis_host, list) and len(redis_host) == 1:
            redis_host = redis_host[0]
        if isinstance(redis_host, str):
            conn_pool = ConnectionPool(host=redis_host,
                                       port=redis_port,
                                       db=db,
                                       password=redis_password)
            self.redis_eng = StrictRedis(
                connection_pool=conn_pool,
                max_connections=10,
            )
        elif isinstance(redis_host, list):
            if isinstance(redis_port, int):
                # One shared port for every host.
                startup_nodes = [{
                    "host": host,
                    "port": redis_port
                } for host in redis_host]
            elif isinstance(redis_port, list):
                # Hosts and ports paired positionally.
                startup_nodes = [{
                    "host": host,
                    "port": port
                } for host, port in zip(redis_host, redis_port)]
            else:
                raise TypeError(
                    'redis_port must be an int or a list when '
                    'redis_host is a list')
            self.redis_eng = StrictRedisCluster(startup_nodes=startup_nodes,
                                                password=redis_password)

    def lpush(self, key, val):
        """Prepend *val* to list *key*."""
        self.redis_eng.lpush(key, val)

    def lpop(self, key):
        """Pop and return the head of list *key*."""
        return self.redis_eng.lpop(key)

    def rpush(self, key, val):
        """Append *val* to list *key*."""
        self.redis_eng.rpush(key, val)

    def rpop(self, key):
        """Pop and return the tail of list *key*."""
        return self.redis_eng.rpop(key)

    def add_task(self, key, val, **kwargs):
        """Enqueue a task at the head of list *key*."""
        self.redis_eng.lpush(key, val)

    def get_task(self, key):
        """Dequeue a task from the tail of list *key* (FIFO with add_task)."""
        return self.rpop(key)

    def save_result(self, key, val, expire_time):
        """Store *val* under *key* with a millisecond TTL (SET PX)."""
        res = self.redis_eng.set(key, val, nx=False, px=expire_time)
        if not res:
            print("id : %s has exists!" % key)

    def get_result(self, key):
        """Fetch the value under *key* and delete it if present."""
        res = self.redis_eng.get(key)
        if res is not None:
            self.redis_eng.delete(key)
        return res
Exemplo n.º 7
0
def delete_state_model(session_id):
    """ Deletes the state model from storage.

    session_id (str): ID of the model to delete

    """
    LOGGER.info('deleting_state_model | lambda_progress=in-progress')
    nodes = [{"host": REDIS_HOST, "port": REDIS_PORT}]
    # One-shot connection; the model lives under the session id key.
    conn = StrictRedisCluster(startup_nodes=nodes,
                              decode_responses=True,
                              skip_full_coverage_check=True)
    conn.delete(session_id)
    LOGGER.info('deleted_state_model | lambda_progress=in-progress')
Exemplo n.º 8
0
class myRedisCluster(object):
    """Minimal helper around a redis cluster connection."""

    conn = ''  # replaced with a StrictRedisCluster in __init__
    redis_nodes = [
        {
            'host': '192.168.128.128',
            'port': 6379
        },
    ]

    def __init__(self):
        try:
            self.conn = StrictRedisCluster(startup_nodes=self.redis_nodes)
        except Exception as e:
            print(e)
            sys.exit(1)

    def add(self, key, value):
        """Store *value* under *key*."""
        self.conn.set(key, value)

    def get(self, key):
        """Return the value stored under *key*."""
        # Bug fix: the original dropped the value and always returned None.
        return self.conn.get(key)

    def rem(self, key):
        """Delete *key*; return 1 when something was removed, else 0."""
        result = self.conn.delete(key)
        if result != 0:
            return 1
        else:
            return 0
Exemplo n.º 9
0
class redis_cluster():
    """Helper bound to the 172.16.0.33 test cluster; bulk-deletes keys."""

    def __init__(self):
        redis_nodes = [{
            'host': '172.16.0.33',
            'port': "9001"
        }, {
            'host': '172.16.0.33',
            'port': "9002"
        }, {
            'host': '172.16.0.33',
            'port': "9003"
        }, {
            'host': '172.16.0.33',
            'port': "9004"
        }, {
            'host': '172.16.0.33',
            'port': "9005"
        }, {
            'host': '172.16.0.33',
            'port': "9006"
        }]
        try:
            self.redisconn = StrictRedisCluster(startup_nodes=redis_nodes,
                                                decode_responses=True,
                                                password='******')
        except Exception as e:
            print("Connect Error!  reason is %s" % (e))
            sys.exit(1)

    def flushdb(self, params):
        """Delete every key matching each pattern in *params*.

        Prints the number of keys removed per pattern (0 when nothing
        matched — the original crashed with a TypeError on an empty
        match because ``delete()`` requires at least one key).
        """
        for pattern in params:
            keys = self.redisconn.keys(pattern=pattern)
            if not keys:
                print(0)
                continue
            result = self.redisconn.delete(*keys)
            print(result)
        return
Exemplo n.º 10
0
class RedisHandle(object):
    """Small convenience wrapper over a cluster client."""

    def __init__(self, host, port, db=0):
        startup = [{"host": host, "port": port}]
        self.redis_client = StrictRedisCluster(startup_nodes=startup,
                                               decode_responses=True)
        # self.redis_client = redis.StrictRedis(host=host, port=port, db=db)

    def get(self, redis_key):
        """Fetch the value stored under *redis_key*."""
        logging.info("start to get value of redis_key: %s" % (redis_key))
        return self.redis_client.get(redis_key)

    def delete(self, redis_key):
        """Remove every key whose name starts with *redis_key*."""
        logging.info("start to match redis_key: %s" % (redis_key))
        for matched in self.redis_client.keys(redis_key + "*"):
            logging.info("start to delete redis_key: %s" % (matched))
            self.redis_client.delete(matched)
Exemplo n.º 11
0
    def test_api(self):
        """Slots redistribute on join and return to one node after quit.

        NOTE(review): integration test — needs the external `comm`/`base`
        helpers and local nodes 7100-7102.
        """
        comm.start_cluster('127.0.0.1', 7100)
        comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
        # 7102 becomes a slave of 7100.
        comm.replicate('127.0.0.1', 7100, '127.0.0.1', 7102)
        time.sleep(1)  # let the cluster state settle

        rc = StrictRedisCluster(
            startup_nodes=[{
                'host': '127.0.0.1',
                'port': 7100
            }],
            decode_responses=True)

        for i in range(20):
            rc.set('key_%s' % i, 'value_%s' % i)
        for i in range(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        # After joining, slots are split evenly between the two masters.
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(3, len(nodes))
        self.assertEqual(
            list(range(8192)), nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(
            list(range(8192, 16384)), nodes[('127.0.0.1',
                                             7100)].assigned_slots)

        comm.quit_cluster('127.0.0.1', 7101)

        # The quitting node's slots fall back to 7100.
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(
            list(range(16384)), nodes[('127.0.0.1', 7100)].assigned_slots)

        # Data written before the quit must still be readable.
        for i in range(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        for i in range(20):
            rc.delete('key_%s' % i)

        comm.quit_cluster('127.0.0.1', 7102)
        comm.shutdown_cluster('127.0.0.1', 7100)
Exemplo n.º 12
0
class RedisManager:
    """Facade over a redis cluster: string, hash, list, set and zset helpers."""

    def __init__(self, redis_server, pw=None):
        #self.redis_client_ = redis.Redis(host='127.0.0.1', port=9966)
        # Build the pool once; pass a password only when one was given.
        pool_kwargs = {
            'startup_nodes': redis_server,
            'skip_full_coverage_check': True,
            'decode_responses': True,
        }
        if pw:
            pool_kwargs['password'] = pw
        pool = ClusterConnectionPool(**pool_kwargs)
        self.redis_client_ = StrictRedisCluster(connection_pool=pool)

    def _expire_if(self, k, expire):
        # Apply a TTL only when the caller requested one.
        if expire:
            self.redis_client_.expire(k, expire)

    def Set(self, k, v, is_str, expire=None):
        """Store *v* under *k*; non-string payloads are JSON-encoded first."""
        payload = v if is_str else json.dumps(v)
        return self.redis_client_.set(k, payload, ex=expire)

    def Get(self, k):
        """Return the raw value stored under *k*."""
        return self.redis_client_.get(k)

    def Delete(self, k):
        """Remove *k*."""
        return self.redis_client_.delete(k)

    def HashMultiSet(self, k, d, expire=None):
        """Write mapping *d* into hash *k*, optionally with a TTL."""
        self.redis_client_.hmset(k, d)
        self._expire_if(k, expire)

    def HashGetAll(self, k):
        """Return the whole hash stored at *k*."""
        return self.redis_client_.hgetall(k)

    def Pushback(self, k, v, expire=None):
        """Append *v* to list *k*, optionally with a TTL."""
        self.redis_client_.rpush(k, v)
        self._expire_if(k, expire)

    def SetList(self, k, l, expire=None):
        """Append every element of *l* to list *k*, optionally with a TTL."""
        self.redis_client_.rpush(k, *l)
        self._expire_if(k, expire)

    def SetSet(self, k, v, expire=None):
        """Add *v* to set *k*, optionally with a TTL."""
        self.redis_client_.sadd(k, v)
        self._expire_if(k, expire)

    def SortSetSet(self, k, v, expire=None):
        """Add a (score, member) pair *v* to sorted set *k*.

        NOTE(review): positional ``zadd(name, score, member)`` is the
        legacy redis-py calling convention — confirm against the
        installed client version.
        """
        self.redis_client_.zadd(k, v[0], v[1])
        self._expire_if(k, expire)

    def Handle(self):
        """Expose the underlying cluster client."""
        return self.redis_client_
Exemplo n.º 13
0
class RedisProcess(object):
    """Connects to a redis cluster and bulk-deletes keys by pattern."""

    cluster_nodes = []   # startup nodes; overwritten per instance
    redisconn = None     # live cluster connection, set by connect()

    def __init__(self, cluster_nodes):
        self.cluster_nodes = cluster_nodes
        self.connect()

    def connect(self):
        """Open the cluster connection; exit(1) when it is unreachable."""
        try:
            self.redisconn = StrictRedisCluster(
                startup_nodes=self.cluster_nodes)
        except Exception as e:
            # The original caught `e` but never showed it, hiding the cause.
            print("Connect Error! %s" % e)
            sys.exit(1)

    def clear(self, pattern):
        """Delete every key matching *pattern*, logging each removal."""
        print('Clean keys with pattern: ' + pattern)
        print('started')
        if self.redisconn:
            for key in self.redisconn.keys(pattern):
                self.redisconn.delete(key)
                print(str(key) + ' removed')
Exemplo n.º 14
0
    def test_start_with_max_slots_set(self):
        """max_slots=7000 clusters must still serve basic reads and writes."""
        # Case 1: single-node cluster created with the slot cap.
        comm.create([('127.0.0.1', 7100)], max_slots=7000)
        client = StrictRedisCluster(
            startup_nodes=[{'host': '127.0.0.1', 'port': 7100}],
            decode_responses=True)
        client.set('key', 'value')
        self.assertEqual('value', client.get('key'))
        client.delete('key')
        comm.shutdown_cluster('127.0.0.1', 7100)

        # Case 2: two-node cluster started with the same slot cap.
        comm.start_cluster_on_multi(
            [('127.0.0.1', 7100), ('127.0.0.1', 7101)], max_slots=7000)
        client = StrictRedisCluster(
            startup_nodes=[{'host': '127.0.0.1', 'port': 7100}],
            decode_responses=True)
        client.set('key', 'value')
        self.assertEqual('value', client.get('key'))
        client.delete('key')
        comm.quit_cluster('127.0.0.1', 7101)
        comm.shutdown_cluster('127.0.0.1', 7100)
Exemplo n.º 15
0
    def test_join_no_load(self):
        """join_no_load adds nodes without moving any slots to them.

        NOTE(review): integration test — needs the external `comm`/`base`
        helpers and local nodes 7100-7102.
        """
        comm.create([('127.0.0.1', 7100)])

        rc = StrictRedisCluster(
            startup_nodes=[{
                'host': '127.0.0.1',
                'port': 7100
            }],
            decode_responses=True)
        # Hash-tagged keys {h-893} land in the same slot as 'h-893'.
        rc.set('x-{h-893}', 'y')
        rc.set('y-{h-893}', 'zzZ')
        rc.set('z-{h-893}', 'w')
        rc.incr('h-893')

        # A no-load join leaves every slot on the original node.
        comm.join_no_load('127.0.0.1', 7100, '127.0.0.1', 7101)
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]

        self.assertEqual(16384, len(n7100.assigned_slots))
        self.assertEqual(0, len(n7101.assigned_slots))

        # Explicitly migrating slot 0 moves it (and its keys) to 7101.
        comm.join_no_load('127.0.0.1', 7100, '127.0.0.1', 7102)
        comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101, [0])

        nodes = base.list_nodes('127.0.0.1', 7102)
        self.assertEqual(3, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        n7102 = nodes[('127.0.0.1', 7102)]

        self.assertEqual(16383, len(n7100.assigned_slots))
        self.assertEqual(1, len(n7101.assigned_slots))
        self.assertEqual(0, len(n7102.assigned_slots))

        try:
            # The migrated keys must be readable directly from 7101.
            t = n7101.get_conn()
            m = t.execute('get', 'h-893')
            self.assertEqual('1', m)

            m = t.execute('get', 'y-{h-893}')
            self.assertEqual('zzZ', m)

            comm.quit_cluster('127.0.0.1', 7102)
            comm.quit_cluster('127.0.0.1', 7101)
            t = n7100.get_conn()

            rc.delete('x-{h-893}')
            rc.delete('y-{h-893}')
            rc.delete('z-{h-893}')
            rc.delete('h-893')
            comm.shutdown_cluster('127.0.0.1', 7100)
        finally:
            # Always release the raw node connections.
            n7100.close()
            n7101.close()
Exemplo n.º 16
0
    def test_quit_problems(self):
        """Quitting is refused while a master has slaves or on the last node.

        Python-3/six variant of the same scenario tested elsewhere in
        this file. NOTE(review): integration test — needs the external
        `comm`/`base` helpers and local nodes 7100-7102.
        """
        comm.start_cluster('127.0.0.1', 7100)
        comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
        # 7102 replicates 7100, making 7100 a master with a slave.
        comm.replicate('127.0.0.1', 7100, '127.0.0.1', 7102)
        time.sleep(1)  # let the cluster state settle

        rc = StrictRedisCluster(
            startup_nodes=[{
                'host': '127.0.0.1',
                'port': 7100
            }],
            decode_responses=True)

        for i in range(20):
            rc.set('key_%s' % i, 'value_%s' % i)
        for i in range(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        # Slots are split evenly: 7101 gets 0-8191, 7100 keeps the rest.
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(3, len(nodes))
        self.assertEqual(
            list(range(8192)), nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(
            list(range(8192, 16384)), nodes[('127.0.0.1',
                                             7100)].assigned_slots)
        for i in range(20):
            rc.delete('key_%s' % i)

        # A master with an attached slave must refuse to quit.
        six.assertRaisesRegex(self, ValueError,
                              '^The master still has slaves$',
                              comm.quit_cluster, '127.0.0.1', 7100)
        comm.quit_cluster('127.0.0.1', 7102)
        comm.quit_cluster('127.0.0.1', 7101)
        # The last remaining node must refuse to quit as well.
        six.assertRaisesRegex(self, ValueError, '^This is the last node',
                              comm.quit_cluster, '127.0.0.1', 7100)
        comm.shutdown_cluster('127.0.0.1', 7100)
Exemplo n.º 17
0
    def test_join_no_load(self):
        """join_no_load adds nodes without moving any slots to them.

        Duplicate of the variant above with different client formatting.
        NOTE(review): integration test — needs the external `comm`/`base`
        helpers and local nodes 7100-7102.
        """
        comm.create([('127.0.0.1', 7100)])

        rc = StrictRedisCluster(startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7100
        }],
                                decode_responses=True)
        # Hash-tagged keys {h-893} land in the same slot as 'h-893'.
        rc.set('x-{h-893}', 'y')
        rc.set('y-{h-893}', 'zzZ')
        rc.set('z-{h-893}', 'w')
        rc.incr('h-893')

        # A no-load join leaves every slot on the original node.
        comm.join_no_load('127.0.0.1', 7100, '127.0.0.1', 7101)
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]

        self.assertEqual(16384, len(n7100.assigned_slots))
        self.assertEqual(0, len(n7101.assigned_slots))

        # Explicitly migrating slot 0 moves it (and its keys) to 7101.
        comm.join_no_load('127.0.0.1', 7100, '127.0.0.1', 7102)
        comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101, [0])

        nodes = base.list_nodes('127.0.0.1', 7102)
        self.assertEqual(3, len(nodes))
        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        n7102 = nodes[('127.0.0.1', 7102)]

        self.assertEqual(16383, len(n7100.assigned_slots))
        self.assertEqual(1, len(n7101.assigned_slots))
        self.assertEqual(0, len(n7102.assigned_slots))

        try:
            # The migrated keys must be readable directly from 7101.
            t = n7101.get_conn()
            m = t.execute('get', 'h-893')
            self.assertEqual('1', m)

            m = t.execute('get', 'y-{h-893}')
            self.assertEqual('zzZ', m)

            comm.quit_cluster('127.0.0.1', 7102)
            comm.quit_cluster('127.0.0.1', 7101)
            t = n7100.get_conn()

            rc.delete('x-{h-893}')
            rc.delete('y-{h-893}')
            rc.delete('z-{h-893}')
            rc.delete('h-893')
            comm.shutdown_cluster('127.0.0.1', 7100)
        finally:
            # Always release the raw node connections.
            n7100.close()
            n7101.close()
Exemplo n.º 18
0
class RedisClusterClient:
    """Tiny key-value facade over a redis cluster."""

    def __init__(self, host='127.0.0.1', port=6379):
        nodes = [{"host": host, "port": port}]
        self.r = StrictRedisCluster(startup_nodes=nodes,
                                    decode_responses=True,
                                    skip_full_coverage_check=True)

    def get(self, key):
        """Return the value stored under *key*."""
        return self.r.get(key)

    def put(self, key, value):
        """Create *key* only when it does not exist yet (SET NX)."""
        return self.r.set(key, value, nx=True)

    def update(self, key, value):
        """Overwrite *key* only when it already exists (SET XX)."""
        return self.r.set(key, value, xx=True)

    def remove(self, key):
        """Delete *key*."""
        return self.r.delete(key)

    def remove_all(self):
        """Flush the whole database."""
        return self.r.flushdb()
Exemplo n.º 19
0
class TcRedis():
    """Redis helper that selects single-node or cluster mode from *nodelist*.

    - no nodes: localhost:6379, db 0
    - one "host:port" string: that single instance
    - several "host:port" strings: a StrictRedisCluster over all of them
    """

    def __init__(self, *nodelist):
        self.redisinst = None
        if len(nodelist) == 0:
            self.redisinst = redis.StrictRedis(host='localhost',
                                               port=6379,
                                               db=0)
        elif len(nodelist) == 1:
            host, port = nodelist[0].split(':')
            self.redisinst = redis.StrictRedis(host=host, port=int(port))
        elif len(nodelist) > 1:
            redis_nodes = []
            for node in nodelist:
                host, port = node.split(':')
                redis_nodes.append({"host": host, "port": int(port)})
            self.redisinst = StrictRedisCluster(startup_nodes=redis_nodes,
                                                decode_responses=True)

    def set(self, key, value):
        """Store *value* under *key*."""
        return self.redisinst.set(key, value)

    def get(self, key):
        """Return the value stored under *key*."""
        return self.redisinst.get(key)

    def delete(self, key):
        """Remove *key*."""
        return self.redisinst.delete(key)

    def ttl(self, key):
        """Return the remaining time-to-live of *key*."""
        return self.redisinst.ttl(key)

    def scan(self, cursor=0, match=None, count=2000):
        """Collect all keys matching *match* by following the SCAN cursor.

        Bug fix: the original reset *cursor* to 0 and ignored *count*;
        both parameters are now honoured. At most 1000 round trips are
        made as a safety cap against a cursor that never returns to 0.
        """
        depth = 1000
        result = []
        for _ in range(depth):
            cursor, batch = self.redisinst.scan(cursor, match=match,
                                                count=count)
            if len(batch) != 0:
                result = result + batch
            if cursor == 0:
                break
        return result

    def hset(self, key, name, value):
        """Set field *name* of hash *key* to *value*."""
        return self.redisinst.hset(key, name, value)

    def hget(self, key, name):
        """Return field *name* of hash *key*."""
        return self.redisinst.hget(key, name)

    def lpush(self, key, *values):
        """Prepend *values* to list *key*."""
        return self.redisinst.lpush(key, *values)

    def lpop(self, key):
        """Pop and return the head of list *key*."""
        return self.redisinst.lpop(key)

    def sadd(self, key, *values):
        """Add *values* to set *key*."""
        return self.redisinst.sadd(key, *values)

    def smembers(self, key):
        """Return all members of set *key*."""
        return self.redisinst.smembers(key)

    def zadd(self, key, *args, **kwargs):
        """Add members to sorted set *key* (arguments passed through)."""
        return self.redisinst.zadd(key, *args, **kwargs)

    def zrange(self, key, start, end):
        """Return members of sorted set *key* in rank range [start, end]."""
        return self.redisinst.zrange(key, start, end)
Exemplo n.º 20
0
      if len(k):
        u = randint(0,len(k)-1)
        #Insert new value
        start = current_milli_time()
        r.set(name + str(k[u]),sessions[k[u]] + 1)
        end = current_milli_time()
        print(str(end-start)+'ms upd ' + str(k[u]))
        sessions[k[u]] = sessions[k[u]] + 1 
    else:
      #Delete
      k = list(sessions.keys())
      if len(k):
        u = randint(0,len(k)-1)
        #Insert into redis
        start = current_milli_time()
        r.delete(name + str(k[u]))
        end = current_milli_time()
        print(str(end-start)+'ms del ' + str(k[u]))
        deletes.append(k[u])
        del(sessions[k[u]])
  except (ClusterError, ConnectionError):
    print('Cluster Timeout') 


#Compare redis sessions to
for key in sessions.keys():
  a =  sessions[key]
  result = r.get(name + str(key))
  if not result:
    print('error missing ' + str(key))
  else:
Exemplo n.º 21
0
#!/usr/bin/env python
# _*_coding:utf-8_*_
# Author: create by yang.hong
# Time: 2018-08-17 11:06
"""
集群模式下,匹配方式删除多个key
"""

from rediscluster import StrictRedisCluster

redis_cluster = [{'host': '172.28.246.12', 'port': '28000'},
                 {'host': '172.28.246.12', 'port': '28001'},
                 {'host': '172.28.246.12', 'port': '28002'},
                 {'host': '172.28.246.12', 'port': '28003'},
                 {'host': '172.28.246.12', 'port': '28004'},
                 {'host': '172.28.246.12', 'port': '28005'},
                 ]

r = StrictRedisCluster(startup_nodes=redis_cluster, decode_responses=True)
print r.keys(pattern="*mc_payChannel_router*")
r.delete(*r.keys(pattern="*mc_payChannel_router*"))
Exemplo n.º 22
0
class Session(object):
    """Redis-cluster-backed session store with reconnect-and-retry.

    Every redis operation goes through ``_safe_way``, which retries
    after reconnecting, up to ``_max_retry`` attempts.
    """

    _uui = None       # last uuid passed to set(); used by delete()
    _max_retry = 5    # max reconnect attempts before giving up
    _retry_num = 0    # reconnect attempts made so far

    def __init__(self, **options):
        # Caller-supplied options (must include 'startup_nodes')
        # override the defaults.
        self.options = {
            'key_prefix': 'session',
            'expire': 7200,
        }
        self.options.update(options)
        self.logger = Logger('session').get()
        self.connect()

    def connect(self):
        """Open (or reopen) the cluster connection through the retry path."""
        def func():
            self.redis = StrictRedisCluster(
                startup_nodes=self.options['startup_nodes'])
        self._safe_way(func)

    def get(self, uui):
        """Return the session stored under *uui*, or None when absent."""
        self.logger.debug('Try to get session')

        def _get():
            return self._get_session(uui)
        return self._safe_way(_get)

    def set(self, uui):
        """Create/refresh the session for *uui*, remembering it for delete()."""
        self.logger.debug('Try to set new session: '
                          'uuid {name}'.format(name=uui))
        self._uui = uui

        def _set():
            self._set_session(uui,
                              str(datetime.datetime.now().time()))
        self._safe_way(_set)

    def _safe_way(self, func):
        """Run *func*; on failure reconnect and retry, stopping the loop
        after ``_max_retry`` attempts."""
        try:
            return func()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            if self._max_retry == self._retry_num:
                self.logger.debug('Max try to reconnect. Closed')
                tornado.ioloop.IOLoop.instance().stop()
            else:
                self._retry_num += 1
                self.logger.debug('Fail to connect.'
                                  'Try to reconnect after 6 sec')
                time.sleep(6)
                self.connect()
                return func()

    def delete(self):
        """Remove the session most recently set on this instance."""
        self.logger.debug('Try to delete session')
        self.redis.delete(self._prefixed(self._uui))

    def _prefixed(self, sid):
        # Namespace the raw session id with the configured prefix.
        return '%s:%s' % (self.options['key_prefix'], sid)

    def _get_session(self, uui):
        """Load and unpickle the 'user' field of the session hash."""
        try_to_get = self._prefixed(uui)
        self.logger.debug('Get session data: {data}'.format(data=try_to_get))
        data = self.redis.hget(try_to_get, 'user')
        # SECURITY: pickle.loads on data read back from redis — only
        # safe while redis is fully trusted; do not expose it.
        session = pickle.loads(data) if data else None
        self.logger.debug('Got: %s' % session)
        return session

    def _set_session(self, uui, session_data):
        """Pickle *session_data* into the session hash and apply the TTL."""
        expiry = self.options['expire']
        try_to_set = self._prefixed(uui)
        self.logger.debug('Set session data: {data}'.format(data=try_to_set))
        self.redis.hset(try_to_set,
                        'user',
                        pickle.dumps(session_data))
        if expiry:
            self.redis.expire(self._prefixed(uui), expiry)
Exemplo n.º 23
0
    def test_api(self):
        """End-to-end slot migration, quit and shutdown error paths.

        NOTE(review): integration test — needs the external `comm`/`base`
        helpers and local nodes 7100-7102.
        """
        comm.create([('127.0.0.1', 7100)])
        rc = StrictRedisCluster(startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7100
        }],
                                decode_responses=True)
        rc.set('key', 'value')
        self.assertEqual('value', rc.get('key'))

        comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
        for i in range(20):
            rc.set('key_%s' % i, 'value_%s' % i)

        for i in range(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        nodes = base.list_nodes('127.0.0.1', 7100)

        # Joining splits the slots evenly between the two nodes.
        self.assertEqual(2, len(nodes))
        self.assertEqual(list(range(8192)),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(list(range(8192, 16384)),
                         nodes[('127.0.0.1', 7100)].assigned_slots)

        # Migrating a single slot shifts the boundary by one.
        comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101, [8192])

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        self.assertEqual(list(range(8193)),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(list(range(8193, 16384)),
                         nodes[('127.0.0.1', 7100)].assigned_slots)

        # Migrating several slots at once works the same way.
        comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101,
                           [8193, 8194, 8195])

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        self.assertEqual(list(range(8196)),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(list(range(8196, 16384)),
                         nodes[('127.0.0.1', 7100)].assigned_slots)

        # Migrating a slot the source no longer holds must fail.
        six.assertRaisesRegex(self, ValueError, 'Not all slot held by',
                              comm.migrate_slots, '127.0.0.1', 7100,
                              '127.0.0.1', 7101, [8192])

        six.assertRaisesRegex(self, ValueError, 'Not all slot held by',
                              comm.migrate_slots, '127.0.0.1', 7100,
                              '127.0.0.1', 7101, [8195, 8196])

        # 7102 never joined, so cross-cluster migration is rejected.
        six.assertRaisesRegex(self, ValueError,
                              'Two nodes are not in the same cluster',
                              comm.migrate_slots, '127.0.0.1', 7100,
                              '127.0.0.1', 7102, [8196])

        comm.quit_cluster('127.0.0.1', 7100)

        # After quitting, 7100 stands alone with no slots and 7101
        # holds the full range.
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(1, len(nodes))
        self.assertEqual(0, len(nodes[('127.0.0.1', 7100)].assigned_slots))
        nodes = base.list_nodes('127.0.0.1', 7101)
        self.assertEqual(1, len(nodes))
        self.assertEqual(list(range(16384)),
                         nodes[('127.0.0.1', 7101)].assigned_slots)
        rc = StrictRedisCluster(startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7101
        }],
                                decode_responses=True)
        for i in range(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))
        self.assertEqual('value', rc.get('key'))

        # Shutting down a node that still holds keys must be refused.
        six.assertRaisesRegex(self, RedisStatusError, 'still contains keys',
                              comm.shutdown_cluster, '127.0.0.1', 7101)

        rc.delete('key', *['key_%s' % i for i in range(20)])
        comm.shutdown_cluster('127.0.0.1', 7101)

        # With the cluster gone, client queries fail with uncovered slots.
        six.assertRaisesRegex(
            self, RedisClusterException,
            'All slots are not covered after query all startup_nodes. .*',
            rc.get, 'key')
Exemplo n.º 24
0
class RedisMiddleware(object):
    """
    Task manager backed by a Redis cluster.

    Handles task bookkeeping: de-duplication (via a Bloom filter) and
    reading/writing the crawl task lists and sets.
    """
    def __init__(self, redis_params):
        # redis_params must provide 'startup_nodes' (list of host/port dicts)
        # and 'password' for the cluster connection.
        self.redis_cli = StrictRedisCluster(
            startup_nodes=redis_params.get('startup_nodes', ''),
            password=redis_params.get('password', ''))
        # URL de-dup filter, sharded over 5 blocks (default 512M of memory).
        self.bloom_filter = BloomFilter(
            self.redis_cli, blockNum=5,
            key='bloomfilter_weibo')

    def redis_del(self, key=None):
        """
        Delete the given Redis key.

        Used when looping a crawl: clears the listing-URL queue so each
        listing URL is fetched only once per cycle.

        :param key: key to delete; a falsy key is a no-op.
        :return: number of keys removed, or None when no key was given.
        """
        if not key:
            return
        res = self.redis_cli.delete(key)
        return res

    def redis_rpush(self, name, data):
        """
        Append data to the tail of the named task list (RPUSH).

        :param name: list key.
        :param data: a single value or a list of values.
        :return: None; errors are deliberately swallowed (best-effort).
        """
        try:
            if isinstance(data, list):
                for each in data:
                    self.redis_cli.rpush(name, each)
            else:
                # BUGFIX: this branch previously called lpush, which put a
                # single value at the head of the list instead of the tail.
                self.redis_cli.rpush(name, data)
        except Exception:
            return

    def redis_lpush(self, name, data):
        """
        Prepend data to the head of the named task list (LPUSH).

        :param name: list key.
        :param data: a single value or a list of values.
        :return: None; errors are deliberately swallowed (best-effort).
        """
        try:
            if isinstance(data, list):
                for each in data:
                    self.redis_cli.lpush(name, each)
            else:
                self.redis_cli.lpush(name, data)
        except Exception:
            return

    def redis_rpop(self, name):
        """
        Pop one item from the tail of the named task list (RPOP).

        :return: the popped value, or None on error / empty list.
        """
        try:
            res = self.redis_cli.rpop(name)
            return res
        except Exception:
            return

    def redis_lpop(self, name):
        """
        Pop one item from the head of the named task list (LPOP).

        :return: the popped value, or None on error / empty list.
        """
        try:
            res = self.redis_cli.lpop(name)
            return res
        except Exception:
            return

    def redis_brpop(self, name, timeout=1):
        """
        Blocking pop from the tail of the named task list (BRPOP).

        :return: the popped value, or None on timeout / error.
        """
        try:
            unuse, res = self.redis_cli.brpop(name, timeout=timeout)
            return res
        except Exception as e:
            print(e)
            return

    def redis_query(self, name):
        """
        Return the length of the named task list (LLEN), or None on error.
        """
        try:
            res = self.redis_cli.llen(name)
            return res
        except Exception:
            return

    def redis_sadd(self, name, data):
        """
        Add data (a single value, or a list/set of values) to a Redis set.
        """
        try:
            if isinstance(data, (list, set)):
                for each in data:
                    self.redis_cli.sadd(name, each)
            else:
                self.redis_cli.sadd(name, data)
        except Exception:
            return

    def redis_sismember(self, name, data):
        """
        Return whether *data* is a member of the set *name*.
        """
        return self.redis_cli.sismember(name, data)

    def redis_scard(self, name):
        """
        Return the number of members in the set *name*.
        """
        return int(self.redis_cli.scard(name))

    def redis_spop(self, name):
        """
        Remove and return a random member of the set *name*.
        """
        return self.redis_cli.spop(name)

    def redis_srem(self, name, data):
        """
        Remove the member *data* from the set *name*.
        """
        self.redis_cli.srem(name, data)
from rediscluster import StrictRedisCluster

import sys

# Usage: python script.py host1:port1,host2:port2 keyfile
# Deletes every key listed in `keyfile` (one key per line) from the cluster.
if len(sys.argv) < 3:
    raise ValueError("参数太少了!%d" % len(sys.argv))

address_array = sys.argv[1].split(",")
filename = sys.argv[2]

if len(address_array) < 1:
    raise ValueError("参数格式不对了!")

startup_nodes = []
for address in address_array:
    address_str = address.split(":")
    host = address_str[0]
    port = address_str[1]
    startup_nodes.append({"host": host, "port": port})

rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)

# Delete each non-blank key named in the file.
with open(filename, "r") as f:
    for s in f:
        s = s.strip()
        if s == "":
            continue
        # BUGFIX: was a Python 2 `print` statement, a SyntaxError under the
        # Python 3 interpreter the rest of this codebase targets.
        print("delete: %s %s " % (str(rc.delete(s)), s))
Exemplo n.º 26
0
class RedisCluster:
    """Convenience facade over a StrictRedisCluster connection.

    Groups the raw client calls by data type (key / string / hash / list /
    set) so call sites read more naturally; every method delegates to the
    underlying client ``self.rc``.
    """

    def __init__(self):
        try:
            self.rc = StrictRedisCluster(startup_nodes=StartupNodesServer,
                                         decode_responses=True)
        except:
            traceback.print_exc()

    # -- generic key operations -------------------------------------------

    def count_keys(self):
        """Total number of keys in the current database (DBSIZE)."""
        return self.rc.dbsize()

    def exists_key(self, key):
        """Whether *key* exists."""
        return self.rc.exists(key)

    def delete_key(self, key):
        """Remove *key* (no error when absent)."""
        self.rc.delete(key)

    def rename_key(self, key1, key2):
        """Rename *key1* to *key2*."""
        self.rc.rename(key1, key2)

    # -- string operations ------------------------------------------------

    def set_key_value(self, key, value):
        """SET *key* to *value*."""
        self.rc.set(key, value)

    def get_key_value(self, key):
        """GET *key*; None when the key does not exist."""
        return self.rc.get(key)

    # -- hash operations --------------------------------------------------

    def set_hash(self, key, mapping):
        """HMSET the dict *mapping* into hash *key*; existing fields are
        overwritten."""
        self.rc.hmset(key, mapping)

    def delete_hash_field(self, key, field):
        """HDEL *field* from hash *key*, whether or not it exists."""
        self.rc.hdel(key, field)

    def exists_hash_field(self, key, field):
        """Whether *field* exists in hash *key*."""
        return self.rc.hexists(key, field)

    def get_hash_field(self, key, field):
        """Value of *field* in hash *key*; None when missing."""
        return self.rc.hget(key, field)

    def get_hash_all_field(self, key):
        """All field/value pairs of hash *key* as a dict (empty if absent)."""
        return self.rc.hgetall(key)

    def increase_hash_field(self, key, field,
                            increment):
        """HINCRBY: add the integer *increment* to *field* of hash *key*."""
        self.rc.hincrby(key, field, increment)

    # -- list operations --------------------------------------------------

    def rpush_into_lst(self, key, value):
        """Append *value* at the tail of list *key*."""
        self.rc.rpush(key, value)

    def lpush_into_lst(self, key, value):
        """Prepend *value* at the head of list *key*."""
        self.rc.lpush(key, value)

    def lpop_lst_item(self, key):
        """Pop the first element of list *key*; None when empty."""
        return self.rc.lpop(key)

    def blpop_lst_item(
            self, key):
        """Blocking pop of the first element with a 1s timeout; returns a
        (key, value) tuple, or None on timeout."""
        return self.rc.blpop(key, timeout=1)

    def rpop_lst_item(self, key):
        """Pop the last element of list *key*; None when empty."""
        return self.rc.rpop(key)

    def brpop_lst_item(
            self,
            key):
        """Blocking pop of the last element with a 1s timeout; returns a
        (key, value) tuple, or None on timeout."""
        return self.rc.brpop(key, timeout=1)

    # -- set operations ---------------------------------------------------

    def add_set(self, key, value):
        """Add *value* to set *key*."""
        self.rc.sadd(key, value)

    def is_member(self, key, value):
        """Whether *value* is a member of set *key*."""
        return self.rc.sismember(key, value)

    def pop_member(self, key):
        """Remove and return a random member; None when the set is empty."""
        return self.rc.spop(key)

    def pop_members(self, key, num):
        """Return *num* random members without removing them; [] if empty."""
        return self.rc.srandmember(key, num)

    def remove_member(self, key, value):
        """Remove *value* from set *key*."""
        self.rc.srem(key, value)

    def get_all_members(self, key):
        """All members of set *key* (nothing is removed)."""
        return self.rc.smembers(key)

    def remove_into(self, key1, key2, value):
        """Move *value* from set *key1* into set *key2* (SMOVE)."""
        self.rc.smove(key1, key2, value)

    def count_members(self, key):
        """Number of members in set *key* (SCARD)."""
        return self.rc.scard(key)
Exemplo n.º 27
0
 def delete(self, key):
     """Delete *key* by opening a fresh cluster connection.

     NOTE(review): opens a new StrictRedisCluster per call instead of
     reusing a shared client — presumably intentional, confirm.
     """
     redis_conn = StrictRedisCluster(startup_nodes=self.nodes,
                                     decode_responses=True)
     redis_conn.delete(key)
     # NOTE(review): `conn`, `name` and `key_name` are undefined here and
     # `type` is the builtin — this line raises NameError if reached; it
     # looks like leftover from a dispatch-table style delete().
     return self.type_dict[type]["DEL"](conn, name, key_name)
Exemplo n.º 28
0
    def test_api(self):
        """End-to-end exercise of the cluster admin API: create, join,
        migrate slots (including error cases), quit, and shutdown.

        Requires live redis nodes on 127.0.0.1:7100-7102.
        """
        comm.create([('127.0.0.1', 7100)])
        rc = StrictRedisCluster(
            startup_nodes=[{
                'host': '127.0.0.1',
                'port': 7100
            }],
            decode_responses=True)
        rc.set('key', 'value')
        self.assertEqual('value', rc.get('key'))

        # Join a second node; slots are rebalanced 8192 / 8192.
        comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
        for i in range(20):
            rc.set('key_%s' % i, 'value_%s' % i)

        for i in range(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

        nodes = base.list_nodes('127.0.0.1', 7100)

        self.assertEqual(2, len(nodes))
        self.assertEqual(
            list(range(8192)), nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(
            list(range(8192, 16384)), nodes[('127.0.0.1',
                                             7100)].assigned_slots)

        # Migrate a single slot, then a batch, from 7100 to 7101.
        comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101, [8192])

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        self.assertEqual(
            list(range(8193)), nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(
            list(range(8193, 16384)), nodes[('127.0.0.1',
                                             7100)].assigned_slots)

        comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101,
                           [8193, 8194, 8195])

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        self.assertEqual(
            list(range(8196)), nodes[('127.0.0.1', 7101)].assigned_slots)
        self.assertEqual(
            list(range(8196, 16384)), nodes[('127.0.0.1',
                                             7100)].assigned_slots)

        # Error cases: migrating slots the source no longer holds, and
        # migrating to a node outside the cluster.
        six.assertRaisesRegex(self, ValueError, 'Not all slot held by',
                              comm.migrate_slots, '127.0.0.1', 7100,
                              '127.0.0.1', 7101, [8192])

        six.assertRaisesRegex(self, ValueError, 'Not all slot held by',
                              comm.migrate_slots, '127.0.0.1', 7100,
                              '127.0.0.1', 7101, [8195, 8196])

        six.assertRaisesRegex(
            self, ValueError, 'Two nodes are not in the same cluster',
            comm.migrate_slots, '127.0.0.1', 7100, '127.0.0.1', 7102, [8196])

        # 7100 leaves the cluster; all slots end up on 7101 and the data
        # remains reachable through the surviving node.
        comm.quit_cluster('127.0.0.1', 7100)

        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(1, len(nodes))
        self.assertEqual(0, len(nodes[('127.0.0.1', 7100)].assigned_slots))
        nodes = base.list_nodes('127.0.0.1', 7101)
        self.assertEqual(1, len(nodes))
        self.assertEqual(
            list(range(16384)), nodes[('127.0.0.1', 7101)].assigned_slots)
        rc = StrictRedisCluster(
            startup_nodes=[{
                'host': '127.0.0.1',
                'port': 7101
            }],
            decode_responses=True)
        for i in range(20):
            self.assertEqual('value_%s' % i, rc.get('key_%s' % i))
        self.assertEqual('value', rc.get('key'))

        # Shutdown refuses while keys remain, then succeeds once emptied.
        six.assertRaisesRegex(self, RedisStatusError, 'still contains keys',
                              comm.shutdown_cluster, '127.0.0.1', 7101)

        rc.delete('key', *['key_%s' % i for i in range(20)])
        comm.shutdown_cluster('127.0.0.1', 7101)

        six.assertRaisesRegex(
            self, RedisClusterException,
            'All slots are not covered after query all startup_nodes. .*',
            rc.get, 'key')
Exemplo n.º 29
0
class RedisDB():
    """Redis helper supporting both a single node and a cluster.

    The connection mode is chosen from the number of entries in ``ip_ports``:
    more than one address -> cluster mode (non-transactional pipeline),
    exactly one -> plain redis.Redis (transactional pipeline).
    """
    def __init__(self, ip_ports = IP_PORTS, db = DB, user_pass = USER_PASS):
        # super(RedisDB, self).__init__()

        # Connect only once per instance.
        if not hasattr(self,'_redis'):
            self._is_redis_cluster = False

            try:
                if len(ip_ports) > 1:
                    # Multiple addresses -> redis cluster mode.
                    startup_nodes = []
                    for ip_port in ip_ports:
                        ip, port = ip_port.split(':')
                        startup_nodes.append({"host":ip, "port":port})

                    self._redis = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)
                    self._pipe = self._redis.pipeline(transaction=False)

                    self._is_redis_cluster = True

                else:
                    ip, port = ip_ports[0].split(':')
                    self._redis = redis.Redis(host = ip, port = port, db = db, password = user_pass, decode_responses=True) # default redis port is 6379
                    self._pipe = self._redis.pipeline(transaction=True) # redis-py opens/returns a pooled connection per command; a pipeline batches several commands into one request, atomic by default.

            except Exception as e:
                raise
            else:
                log.info('连接到redis数据库 %s'%(tools.dumps_json(ip_ports)))

    def sadd(self, table, values):
        '''
        @summary: store values in an (unordered) set, de-duplicated
        ---------
        @param table:
        @param values: a single value or a list of values
        ---------
        @result: 0 when already present, otherwise stored and returns 1.
            Batch add returns None
        '''

        if isinstance(values, list):
            if not self._is_redis_cluster: self._pipe.multi()
            for value in values:
                self._pipe.sadd(table, value)
            self._pipe.execute()

        else:
            return self._redis.sadd(table, values)

    def sget(self, table, count = 0, is_pop = True):
        # Fetch up to `count` members: SPOP (destructive) when is_pop,
        # otherwise a non-destructive SRANDMEMBER sample.
        datas = []
        if is_pop:
            # Clamp count to the set's cardinality.
            count = count if count <= self.sget_count(table) else self.sget_count(table)
            if count:
                if count > 1:
                    if not self._is_redis_cluster: self._pipe.multi()
                    while count:
                        self._pipe.spop(table)
                        count -= 1
                    datas = self._pipe.execute()

                else:
                    datas.append(self._redis.spop(table))

        else:
            datas =  self._redis.srandmember(table, count)

        return datas

    def sget_count(self, table):
        # SCARD: number of members in the set.
        return self._redis.scard(table)

    def sdelete(self, table):
        '''
        @summary: delete a large set key (a table holding much data)
        Walks the set with SSCAN and removes members with SREM; a plain
        DELETE on a huge key would block Redis and could cause failover
        or application crashes.
        ---------
        @param table:
        ---------
        @result:
        '''
        # A cursor of 0 starts a new iteration; the server returning a
        # cursor of 0 signals that the iteration is complete.
        cursor = '0'
        while cursor != 0:
            cursor, data = self._redis.sscan(table, cursor = cursor, count = 10000)
            for item in data:
                # self._redis.srem(table,item)
                self._pipe.srem(table, item)

            # print('sdelete %s data size %s'%(table, len(data)))
            self._pipe.execute()



    def zadd(self, table, values,  prioritys = 0):
        '''
        @summary: store values in a sorted set, de-duplicated (existing
            values get their score updated)
        ---------
        @param table:
        @param values: a single value or a list of values
        @param prioritys: score(s); double, list or single value; items sort
            by this field, smaller = higher priority; defaults to 0
        ---------
        @result: 0 when already present, otherwise stored and returns 1.
            Batch add returns None
        '''

        if isinstance(values, list):
            if not isinstance(prioritys, list):
                prioritys = [prioritys] * len(values)
            else:
                assert len(values) == len(prioritys), 'values值要与prioritys值一一对应'

            if not self._is_redis_cluster: self._pipe.multi()
            for value, priority in zip(values, prioritys):
                # NOTE(review): argument order differs between the cluster
                # and plain clients here — presumably matching
                # redis-py-cluster vs an older redis-py zadd signature;
                # confirm against the pinned client versions.
                if self._is_redis_cluster:
                    self._pipe.zadd(table, priority, value)
                else:
                    self._pipe.zadd(table, value, priority)
            self._pipe.execute()

        else:
            if self._is_redis_cluster:
                return self._redis.zadd(table, prioritys, values)
            else:
                return self._redis.zadd(table, values, prioritys)

    def zget(self, table, count = 0, is_pop = True):
        '''
        @summary: fetch members from the sorted set
        ---------
        @param table:
        @param count: number of members
        @param is_pop: whether to remove the fetched members (default yes)
        ---------
        @result: list of members
        '''
        start_pos = 0 # inclusive
        end_pos = 0 if count == 0 else count - 1 # inclusive

        if not self._is_redis_cluster: self._pipe.multi() # start the transaction; ref http://www.runoob.com/redis/redis-transactions.html
        self._pipe.zrange(table, start_pos, end_pos) # fetch
        if is_pop: self._pipe.zremrangebyrank(table, start_pos, end_pos) # remove
        results, count = self._pipe.execute()
        return results

    def zget_count(self, table, priority_min = None, priority_max = None):
        '''
        @summary: number of members in the table
        ---------
        @param table:
        @param priority_min: lower score bound (inclusive)
        @param priority_max: upper score bound (inclusive)
        ---------
        @result:
        '''

        if priority_min != None and priority_max != None:
            return self._redis.zcount(table, priority_min, priority_max)
        else:
            return self._redis.zcard(table)

    def lpush(self, table, values):
        # NOTE(review): despite the name this RPUSHes (appends at the
        # tail); combined with lpop below that yields FIFO order —
        # presumably intentional, confirm.
        if isinstance(values, list):
            if not self._is_redis_cluster: self._pipe.multi()
            for value in values:
                self._pipe.rpush(table, value)
            self._pipe.execute()

        else:
            return self._redis.rpush(table, values)

    def lpop(self, table, count = 1):
        '''
        @summary: pop up to `count` elements from the head of the list
        ---------
        @param table:
        @param count:
        ---------
        @result: a list of elements
        '''
        datas = []

        # Clamp count to the current list length.
        count = count if count <= self.lget_count(table) else self.lget_count(table)

        if count:
            if count > 1:
                if not self._is_redis_cluster: self._pipe.multi()
                while count:
                    data = self._pipe.lpop(table)  # queued on the pipe; `data` is unused
                    count -= 1
                datas = self._pipe.execute()

            else:
                datas.append(self._redis.lpop(table))

        return datas

    def lget_count(self, table):
        # LLEN: current length of the list.
        return self._redis.llen(table)

    def setbit(self, table, offset, value):
        # Set the bit at `offset` of key `table` to `value` (0/1).
        self._redis.setbit(table, offset, value)

    def getbit(self, table, offset):
        # Read the bit at `offset` of key `table`.
        return self._redis.getbit(table, offset)

    def clear(self, table):
        # Best-effort DELETE of the whole key; failures are only logged.
        try:
            self._redis.delete(table)
        except Exception as e:
            log.error(e)
Exemplo n.º 30
0
class RedisMiddleware(object):
    """
    Task manager backed by a Redis cluster: checks whether tasks are new
    and reads/writes the crawl task lists.
    """
    def __init__(self, taskname, redis_params):
        # Cluster client; redis_params supplies 'startup_nodes' and 'password'.
        self.redis_cli = StrictRedisCluster(
            startup_nodes=redis_params.get('startup_nodes', ''),
            password=redis_params.get('password', ''))
        # Two Bloom filters:
        # URL filter, sharded over 6 blocks (default 512M of memory).
        self.bloom_urls = BloomFilter(
            self.redis_cli, blockNum=6,
            key='bloomfilter_pub')
        # Listing-page filter, a single block of 32M.
        self.bloom_list = BloomFilter(self.redis_cli,
                                      key='{}:redis_list'.format(taskname),
                                      bit_size=1 << 28)

    def redis_del(self, key=None):
        """
        Delete the given Redis key; used to clear the listing-URL queue
        between crawl cycles. A falsy key is a no-op.
        """
        if not key:
            return
        return self.redis_cli.delete(key)

    def redis_push(self, name, data):
        """
        Prepend data (a single value or a list of values) onto the named
        task list (LPUSH). Errors are swallowed (best-effort).
        """
        try:
            items = data if isinstance(data, list) else [data]
            for item in items:
                self.redis_cli.lpush(name, item)
        except:
            return

    def redis_pop(self, name):
        """
        Pop one task from the tail of the named list (RPOP); None on error.
        """
        try:
            return self.redis_cli.rpop(name)
        except:
            return

    def redis_brpop(self, name, timeout=1):
        """
        Blocking pop from the tail of the named list (BRPOP); None on
        timeout or error.
        """
        try:
            _, value = self.redis_cli.brpop(name, timeout=timeout)
            return value
        except Exception as e:
            print(e)
            return

    def redis_query(self, name):
        """
        Length of the named task list (LLEN); None on error.
        """
        try:
            return self.redis_cli.llen(name)
        except:
            return
Exemplo n.º 31
0
class RedisClusterNode(object):
    """Exception-safe wrapper around a StrictRedisCluster connection.

    Mutating operations return True/False instead of raising; readers
    return the value or None. Failures are printed to stdout.
    """

    def __init__(self, node_list):
        self.node_list = node_list
        self.__connect()

    def __del__(self):
        # Destructor placeholder: nothing to release on the connection.
        if self.connect:
            pass

    def __connect(self):
        """Open the cluster connection; leaves self.connect = None on failure."""
        try:
            self.connect = StrictRedisCluster(startup_nodes=self.node_list)
        except Exception as e:
            self.connect = None
            print("Connect redisCluster node error!", e)
            return

    def set(self, key, value):
        """SET key -> value; True on success, False on failure."""
        try:
            self.connect.set(key, value)
        except Exception as e:
            print("RedisUtil excute set method failed!", e)
            return False
        return True

    def get(self, key):
        """GET key; the stored value, or None on failure."""
        try:
            return self.connect.get(key)
        except Exception as e:
            print("RedisUtil excute get method failed!", e)
            return None

    def delete(self, key):
        """DEL key; True on success, False on failure."""
        try:
            self.connect.delete(key)
        except Exception as e:
            print("RedisUtil excute delete method failed!", e)
            return False
        return True

    def mset(self, map):
        """MSET the given mapping; True on success, False on failure."""
        try:
            self.connect.mset(map)
        except Exception as e:
            print("RedisUtil excute mset method failed!", e)
            return False
        return True

    def mget(self, array_keys):
        """MGET the given keys; list of values, or None on failure."""
        try:
            return self.connect.mget(array_keys)
        except Exception as e:
            print("RedisUtil excute mget method failed!", e)
            return None

    def hset(self, key, field, value):
        """HSET field of hash `key`; True on success, False on failure."""
        try:
            self.connect.hset(key, field, value)
        except Exception as e:
            print("RedisUtil excute hset method failed!", e)
            return False
        return True

    def hget(self, key, field):
        """HGET field of hash `key`; the value, or None on failure."""
        try:
            return self.connect.hget(key, field)
        except Exception as e:
            print("RedisUtil excute hget method failed!", e)
            return None
Exemplo n.º 32
0
class RedisDB:
    def __init__(self,
                 ip_ports=None,
                 db=None,
                 user_pass=None,
                 url=None,
                 decode_responses=True,
                 service_name=None,
                 max_connections=32,
                 **kwargs):
        """
        Redis connection wrapper (single node / cluster / sentinel).
        Args:
            ip_ports: "ip:port" pairs; a list or a comma-separated string,
                e.g. "ip1:port1,ip2:port2" or ["ip1:port1", "ip2:port2"]
            db: database index (single-node / sentinel modes)
            user_pass: password
            url: redis:// URL; when given it takes precedence over ip_ports
            decode_responses: decode replies to str
            service_name: master service name, for redis sentinel mode
        """

        # setting values may change at runtime, so defaults are resolved
        # here instead of in the signature.
        if ip_ports is None:
            ip_ports = setting.REDISDB_IP_PORTS
        if db is None:
            db = setting.REDISDB_DB
        if user_pass is None:
            user_pass = setting.REDISDB_USER_PASS
        if service_name is None:
            service_name = setting.REDISDB_SERVICE_NAME

        self._is_redis_cluster = False

        try:
            if not url:
                # Normalize to a list of "ip:port" strings.
                ip_ports = (ip_ports if isinstance(ip_ports, list) else
                            ip_ports.split(","))
                if len(ip_ports) > 1:
                    startup_nodes = []
                    for ip_port in ip_ports:
                        ip, port = ip_port.split(":")
                        startup_nodes.append({"host": ip, "port": port})

                    if service_name:
                        # Sentinel mode: multiple addresses + service name.
                        log.debug("使用redis哨兵模式")
                        hosts = [(node["host"], node["port"])
                                 for node in startup_nodes]
                        sentinel = Sentinel(hosts, socket_timeout=3, **kwargs)
                        self._redis = sentinel.master_for(
                            service_name,
                            password=user_pass,
                            db=db,
                            redis_class=redis.StrictRedis,
                            decode_responses=decode_responses,
                            max_connections=max_connections,
                            **kwargs)

                    else:
                        # Cluster mode: multiple addresses, no service name.
                        log.debug("使用redis集群模式")
                        self._redis = StrictRedisCluster(
                            startup_nodes=startup_nodes,
                            decode_responses=decode_responses,
                            password=user_pass,
                            max_connections=max_connections,
                            **kwargs)

                    self._is_redis_cluster = True
                else:
                    # Single-node mode.
                    ip, port = ip_ports[0].split(":")
                    self._redis = redis.StrictRedis(
                        host=ip,
                        port=port,
                        db=db,
                        password=user_pass,
                        decode_responses=decode_responses,
                        max_connections=max_connections,
                        **kwargs)
            else:
                self._redis = redis.StrictRedis.from_url(
                    url, decode_responses=decode_responses)

        except Exception as e:
            # Re-raised unchanged; `e` only bound for debugger inspection.
            raise
        else:
            if not url:
                log.debug("连接到redis数据库 %s db%s" % (ip_ports, db))
            else:
                log.debug("连接到redis数据库 %s" % (url))

        self._ip_ports = ip_ports
        self._db = db
        self._user_pass = user_pass
        self._url = url

    def __repr__(self):
        if self._url:
            return "<Redisdb url:{}>".format(self._url)

        return "<Redisdb ip_ports: {} db:{} user_pass:{}>".format(
            self._ip_ports, self._db, self._user_pass)

    @classmethod
    def from_url(cls, url):
        """Alternate constructor: build a RedisDB from a redis:// URL."""
        return cls(url=url)

    def sadd(self, table, values):
        """
        @summary: store values in an (unordered) set, de-duplicated
        ---------
        @param table:
        @param values: a single value or a list of values
        ---------
        @result: 0 when already present, otherwise stored and returns 1.
            Batch add returns None
        """

        if isinstance(values, list):
            pipe = self._redis.pipeline(
                transaction=True
            )  # a pipeline batches several commands into one request and is
            # atomic by default (redis-py otherwise acquires/returns a pooled
            # connection for every single command).

            if not self._is_redis_cluster:
                pipe.multi()
            for value in values:
                pipe.sadd(table, value)
            pipe.execute()

        else:
            return self._redis.sadd(table, values)

    def sget(self, table, count=1, is_pop=True):
        """
        Return a list such as ['1'], or [] when the set is empty.
        @param table:
        @param count: number of members to fetch
        @param is_pop: when True members are removed (SPOP); otherwise a
            non-destructive random sample is returned (SRANDMEMBER)
        @return: list of members
        """
        datas = []
        if is_pop:
            # Clamp count to the set's cardinality.
            count = count if count <= self.sget_count(
                table) else self.sget_count(table)
            if count:
                if count > 1:
                    pipe = self._redis.pipeline(
                        transaction=True
                    )  # batch the SPOPs into a single atomic round trip

                    if not self._is_redis_cluster:
                        pipe.multi()
                    while count:
                        pipe.spop(table)
                        count -= 1
                    datas = pipe.execute()

                else:
                    datas.append(self._redis.spop(table))

        else:
            datas = self._redis.srandmember(table, count)

        return datas

    def srem(self, table, values):
        """
        @summary: remove the given member(s) from the set
        ---------
        @param table:
        @param values: a single value or a list of values
        ---------
        @result:
        """
        if isinstance(values, list):
            pipe = self._redis.pipeline(
                transaction=True
            )  # batch the SREMs into a single atomic round trip

            if not self._is_redis_cluster:
                pipe.multi()
            for value in values:
                pipe.srem(table, value)
            pipe.execute()
        else:
            self._redis.srem(table, values)

    def sget_count(self, table):
        """Return the cardinality (SCARD) of the set *table*."""
        return self._redis.scard(table)

    def sdelete(self, table):
        """
        @summary: delete a large set key (a table holding much data)
        Walks the set with SSCAN (500 members per scan) and removes them
        with SREM; a plain DELETE on a huge key would block Redis and could
        cause failover or application crashes.
        ---------
        @param table:
        ---------
        @result:
        """
        # A cursor of 0 starts a new iteration; the server returning a
        # cursor of 0 signals that the iteration is complete.
        cursor = "0"
        while cursor != 0:
            cursor, data = self._redis.sscan(table, cursor=cursor, count=500)
            for item in data:
                # pipe.srem(table, item)
                self._redis.srem(table, item)

            # pipe.execute()

    def sismember(self, table, key):
        """Return a boolean indicating if *key* is a member of set *table*."""
        return self._redis.sismember(table, key)

    def zadd(self, table, values, prioritys=0):
        """
        @summary: store values in a sorted set, de-duplicated (existing
            values get their score updated)
        ---------
        @param table:
        @param values: a single value or a list of values
        @param prioritys: score(s); double, a list or a single value.
            Entries sort by this field, smaller = higher priority.
            Defaults to 0.
        ---------
        @result: 0 when already present, otherwise stored and returns 1.
            Batch add returns [0, 1 ...]
        """
        if isinstance(values, list):
            if not isinstance(prioritys, list):
                prioritys = [prioritys] * len(values)
            else:
                assert len(values) == len(prioritys), "values值要与prioritys值一一对应"

            pipe = self._redis.pipeline(transaction=True)

            if not self._is_redis_cluster:
                pipe.multi()
            for value, priority in zip(values, prioritys):
                # NOTE(review): zadd(name, score, member) is the redis-py
                # 2.x calling convention; redis-py >= 3.0 expects a mapping
                # ({member: score}) — confirm the pinned client version.
                pipe.zadd(table, priority, value)
            return pipe.execute()

        else:
            return self._redis.zadd(table, prioritys, values)

    def zget(self, table, count=1, is_pop=True):
        """
        @summary: fetch members from a sorted set, smallest score
            (highest priority) first
        ---------
        @param table:
        @param count: number of members; -1 returns everything
        @param is_pop: when True, remove the fetched members from the set
        ---------
        @result: list of members
        """
        start_pos = 0  # inclusive
        # count > 0 -> zero-based inclusive end index; count <= 0 (e.g. -1)
        # -> a negative index, i.e. through the last element.
        end_pos = count - 1 if count > 0 else count

        pipe = self._redis.pipeline(
            transaction=True
        )  # batch fetch + remove into one atomic round trip

        if not self._is_redis_cluster:
            pipe.multi(
            )  # marks the start of the transaction; ref http://www.runoob.com/redis/redis-transactions.html
        pipe.zrange(table, start_pos, end_pos)  # fetch
        if is_pop:
            pipe.zremrangebyrank(table, start_pos, end_pos)  # remove
        results, *count = pipe.execute()
        return results

    def zremrangebyscore(self, table, priority_min, priority_max):
        """
        Remove members whose score lies in the closed interval
        [priority_min, priority_max].
        @param table:
        @param priority_min:
        @param priority_max:
        @return: number of members removed
        """
        return self._redis.zremrangebyscore(table, priority_min, priority_max)

    def zrangebyscore(self,
                      table,
                      priority_min,
                      priority_max,
                      count=None,
                      is_pop=True):
        """
        @summary: return members whose score lies in the closed interval
            [priority_min, priority_max]
        ---------
        @param table:
        @param priority_min: smaller score = higher priority
        @param priority_max:
        @param count: max number of members; None fetches the whole interval
        @param is_pop: whether to remove the fetched members
        ---------
        @result:
        """

        # A Lua script keeps fetch + delete atomic on the server.
        # NOTE: the table name is passed both as KEYS[1] and as ARGV[1],
        # so min/max/is_pop/count start at ARGV[2].
        lua = """
            local key = KEYS[1]
            local min_score = ARGV[2]
            local max_score = ARGV[3]
            local is_pop = ARGV[4]
            local count = ARGV[5]

            -- 取值
            local datas = nil
            if count then
                datas = redis.call('zrangebyscore', key, min_score, max_score, 'limit', 0, count)
            else
                datas = redis.call('zrangebyscore', key, min_score, max_score)
            end

            -- 删除redis中刚取到的值
            if (is_pop) then
                for i=1, #datas do
                    redis.call('zrem', key, datas[i])
                end
            end


            return datas

        """
        cmd = self._redis.register_script(lua)
        if count:
            res = cmd(keys=[table],
                      args=[table, priority_min, priority_max, is_pop, count])
        else:
            res = cmd(keys=[table],
                      args=[table, priority_min, priority_max, is_pop])

        return res

    def zrangebyscore_increase_score(self,
                                     table,
                                     priority_min,
                                     priority_max,
                                     increase_score,
                                     count=None):
        """
        @summary: Return members whose scores fall in the closed interval
        [priority_min, priority_max], shifting their scores at the same time.
        ---------
        @param table: sorted-set key
        @param priority_min: lower score bound
        @param priority_max: upper score bound
        @param increase_score: score delta; positive adds, negative subtracts
        @param count: max number of items; None means every member in range
        ---------
        @result: list of members
        """

        # The fetch and the score update run atomically inside one Lua script.
        lua = """
            local key = KEYS[1]
            local min_score = ARGV[1]
            local max_score = ARGV[2]
            local increase_score = ARGV[3]
            local count = ARGV[4]

            -- 取值
            local datas = nil
            if count then
                datas = redis.call('zrangebyscore', key, min_score, max_score, 'limit', 0, count)
            else
                datas = redis.call('zrangebyscore', key, min_score, max_score)
            end

            --修改优先级
            for i=1, #datas do
                redis.call('zincrby', key, increase_score, datas[i])
            end

            return datas

        """
        script = self._redis.register_script(lua)
        script_args = [priority_min, priority_max, increase_score]
        if count:
            script_args.append(count)
        return script(keys=[table], args=script_args)

    def zrangebyscore_set_score(self,
                                table,
                                priority_min,
                                priority_max,
                                score,
                                count=None):
        """
        @summary: Return members whose scores fall in the closed interval
        [priority_min, priority_max], resetting each member's score to `score`.
        ---------
        @param table: sorted-set key
        @param priority_min: lower score bound
        @param priority_max: upper score bound
        @param score: new score assigned to every returned member
        @param count: max number of items; None means every member in range
        ---------
        @result: list of members (scores stripped)
        """

        # Atomic fetch + rescore via Lua. zincrby by (target - current)
        # is equivalent to setting the score to `target`.
        lua = """
            local key = KEYS[1]
            local min_score = ARGV[1]
            local max_score = ARGV[2]
            local set_score = ARGV[3]
            local count = ARGV[4]

            -- 取值
            local datas = nil
            if count then
                datas = redis.call('zrangebyscore', key, min_score, max_score, 'withscores','limit', 0, count)
            else
                datas = redis.call('zrangebyscore', key, min_score, max_score, 'withscores')
            end

            local real_datas = {} -- 数据
            --修改优先级
            for i=1, #datas, 2 do
               local data = datas[i]
               local score = datas[i+1]

               table.insert(real_datas, data) -- 添加数据

               redis.call('zincrby', key, set_score - score, datas[i])
            end

            return real_datas

        """
        script = self._redis.register_script(lua)
        script_args = [priority_min, priority_max, score]
        if count:
            script_args.append(count)
        return script(keys=[table], args=script_args)

    def zget_count(self, table, priority_min=None, priority_max=None):
        """
        @summary: Count entries in the sorted set.
        ---------
        @param table: sorted-set key
        @param priority_min: lower score bound (inclusive); optional
        @param priority_max: upper score bound (inclusive); optional
        ---------
        @result: total member count, or — when BOTH bounds are given — the
                 number of members inside [priority_min, priority_max]
        """
        # `is not None` instead of `!= None`: identity is the Python idiom
        # and is immune to odd custom __eq__ implementations.
        if priority_min is not None and priority_max is not None:
            return self._redis.zcount(table, priority_min, priority_max)
        return self._redis.zcard(table)

    def zrem(self, table, values):
        """
        @summary: Remove the given member(s) from the sorted set.
        ---------
        @param table: sorted-set key
        @param values: a single value or a list of values
        ---------
        @result: None
        """
        # Single value: plain command, no pipeline needed.
        if not isinstance(values, list):
            self._redis.zrem(table, values)
            return

        # Batch removals into one pipeline round-trip; pipelines also avoid
        # the per-command connection checkout redis-py does by default.
        pipe = self._redis.pipeline(transaction=True)
        if not self._is_redis_cluster:
            pipe.multi()  # explicit MULTI is unsupported on cluster pipelines
        for member in values:
            pipe.zrem(table, member)
        pipe.execute()

    def zexists(self, table, values):
        """
        Use zscore to test membership (zscore returns None for a missing
        member).
        @param table: sorted-set key
        @param values: a single value or a list of values
        @return: for a list input, a list of 1/0 flags in the same order;
                 for a single value, 1 if present else 0
        """
        if isinstance(values, list):
            pipe = self._redis.pipeline(
                transaction=True
            )  # one pipeline round-trip instead of one request per value
            # FIX: guard multi() with the cluster flag like every other
            # pipelined helper here — cluster pipelines do not support MULTI.
            if not self._is_redis_cluster:
                pipe.multi()
            for value in values:
                pipe.zscore(table, value)
            scores = pipe.execute()
            # `is not None` (identity) instead of `!= None`.
            return [1 if score is not None else 0 for score in scores]

        return 1 if self._redis.zscore(table, values) is not None else 0

    def lpush(self, table, values):
        # NOTE(review): despite its name this method issues RPUSH, so paired
        # with lpop() the list behaves as a FIFO queue — presumably
        # intentional; confirm with callers before "fixing" the mismatch.
        if isinstance(values, list):
            pipe = self._redis.pipeline(
                transaction=True
            )  # By default redis-py checks a connection out of the pool per
            # command; a pipeline sends several commands in one request and
            # executes atomically by default.

            if not self._is_redis_cluster:
                pipe.multi()  # explicit MULTI is unsupported on cluster pipelines
            for value in values:
                pipe.rpush(table, value)
            pipe.execute()
            # NOTE(review): the list branch returns None while the
            # single-value branch returns the rpush result — inconsistent;
            # confirm whether any caller relies on the return value.
        else:
            return self._redis.rpush(table, values)

    def lpop(self, table, count=1):
        """
        @summary: Pop up to `count` items from the head of the list.
        ---------
        @param table: list key
        @param count: number of items wanted
        ---------
        @result: a single value when count == 1, a list when count > 1,
                 None when the list is empty
        """
        # FIX: the old code called lget_count() twice per invocation; clamp
        # to the current list length with a single llen round-trip.
        count = min(count, self.lget_count(table))
        if not count:
            return None

        # Single item (or nonsensical negative count, as before): plain lpop.
        if count <= 1:
            return self._redis.lpop(table)

        pipe = self._redis.pipeline(
            transaction=True
        )  # batch the lpop calls into one atomic round-trip
        if not self._is_redis_cluster:
            pipe.multi()  # explicit MULTI is unsupported on cluster pipelines
        for _ in range(count):
            pipe.lpop(table)
        return pipe.execute()

    def rpoplpush(self, from_table, to_table=None):
        """
        Pop the tail element of `from_table`, push it onto the head of
        `to_table`, and return it. When `to_table` is omitted (or equals
        `from_table`) the operation rotates the list.
        @param from_table: source list key
        @param to_table: destination list key; defaults to `from_table`
        @return: the moved element
        """
        destination = to_table or from_table
        return self._redis.rpoplpush(from_table, destination)

    def lget_count(self, table):
        """Return the length of the list stored at `table`."""
        return self._redis.llen(table)

    def lrem(self, table, value, num=0):
        # Remove occurrences of `value` from the list `table`.
        # NOTE(review): redis-py >= 3.0 defines lrem(name, count, value);
        # this call passes (table, value, num), which matches only the legacy
        # pre-3.0 signature — confirm the pinned redis-py version.
        return self._redis.lrem(table, value, num)

    def hset(self, table, key, value):
        """
        @summary: Set field `key` to `value` inside the hash `table`.
        Creates the hash when it does not exist; overwrites an existing field.
        ---------
        @param table: hash key
        @param key: field name
        @param value: field value
        ---------
        @result: 1 when a new field was created, 0 when it was overwritten
        """
        return self._redis.hset(table, key, value)

    def hset_batch(self, table, datas):
        """
        Insert many hash fields in one pipeline round-trip.
        Args:
            table: hash key
            datas: iterable of [key, value] pairs
        Returns:
            list with one hset result per pair
        """
        pipe = self._redis.pipeline(transaction=True)
        if not self._is_redis_cluster:
            pipe.multi()  # explicit MULTI is unsupported on cluster pipelines
        for field, field_value in datas:
            pipe.hset(table, field, field_value)
        return pipe.execute()

    def hincrby(self, table, key, increment):
        """Increment hash field `key` by `increment`; return the new value."""
        return self._redis.hincrby(table, key, increment)

    def hget(self, table, key, is_pop=False):
        """
        Read one field from the hash `table`.
        @param table: hash key
        @param key: field name
        @param is_pop: when True, atomically delete the field after reading
        @return: the field value (None when missing)
        """
        # Plain read: no script needed.
        if not is_pop:
            return self._redis.hget(table, key)

        # Lua keeps the read-then-delete atomic.
        lua = """
                local key = KEYS[1]
                local field = ARGV[1]

                -- 取值
                local datas = redis.call('hget', key, field)
                -- 删除值
                redis.call('hdel', key, field)

                return datas

                    """
        script = self._redis.register_script(lua)
        return script(keys=[table], args=[key])

    def hgetall(self, table):
        """Return every field/value pair of the hash `table` as a dict."""
        return self._redis.hgetall(table)

    def hexists(self, table, key):
        """Return whether field `key` exists in the hash `table`."""
        return self._redis.hexists(table, key)

    def hdel(self, table, *keys):
        """
        @summary: Delete one or more fields from the hash `table`.
        ---------
        @param table:
        @param *keys: field names to delete
        ---------
        @result: number of fields actually removed
        """
        # FIX: propagate redis' return value instead of discarding it —
        # consistent with the other hash helpers (hset/hget/hincrby).
        return self._redis.hdel(table, *keys)

    def hget_count(self, table):
        """Return the number of fields stored in the hash `table`."""
        return self._redis.hlen(table)

    def setbit(self, table, offsets, values):
        """
        Set bit(s) of the string stored at `table`; return the previous
        value(s).
        @param table: string key
        @param offsets: a single offset or a list of offsets
        @param values: a single bit value, or a list matching `offsets`
        @return: list of previous bits for a list input, single bit otherwise
        """
        # Single offset: plain command.
        if not isinstance(offsets, list):
            return self._redis.setbit(table, offsets, values)

        if not isinstance(values, list):
            values = [values] * len(offsets)
        else:
            assert len(offsets) == len(values), "offsets值要与values值一一对应"

        pipe = self._redis.pipeline(
            transaction=True
        )  # batch all setbit calls into one round-trip
        # FIX: guard multi() with the cluster flag like every other pipelined
        # helper in this class — cluster pipelines do not support MULTI.
        if not self._is_redis_cluster:
            pipe.multi()
        for offset, value in zip(offsets, values):
            pipe.setbit(table, offset, value)
        return pipe.execute()

    def getbit(self, table, offsets):
        """
        Read bit(s) of the string stored at `table`.
        @param table: string key
        @param offsets: a single offset or a list of offsets
        @return: list of bits for a list input, single bit otherwise
        """
        # Single offset: plain command.
        if not isinstance(offsets, list):
            return self._redis.getbit(table, offsets)

        pipe = self._redis.pipeline(
            transaction=True
        )  # batch all getbit calls into one round-trip
        # FIX: guard multi() with the cluster flag like every other pipelined
        # helper in this class — cluster pipelines do not support MULTI.
        if not self._is_redis_cluster:
            pipe.multi()
        for offset in offsets:
            pipe.getbit(table, offset)
        return pipe.execute()

    def bitcount(self, table):
        """Return the number of set bits in the string stored at `table`."""
        return self._redis.bitcount(table)

    def strset(self, table, value, **kwargs):
        """SET the string key `table`; kwargs pass through (ex, px, nx, ...)."""
        return self._redis.set(table, value, **kwargs)

    def str_incrby(self, table, value):
        """INCRBY the integer stored at `table`; return the new value."""
        return self._redis.incrby(table, value)

    def strget(self, table):
        """GET the string value stored at `table` (None when missing)."""
        return self._redis.get(table)

    def strlen(self, table):
        """Return the length of the string stored at `table`."""
        return self._redis.strlen(table)

    def getkeys(self, regex):
        """Return all keys matching the KEYS glob pattern `regex`."""
        return self._redis.keys(regex)

    def exists_key(self, key):
        """Return whether `key` exists in redis."""
        return self._redis.exists(key)

    def set_expire(self, key, seconds):
        """
        @summary: Set a time-to-live on `key`.
        ---------
        @param key: redis key
        @param seconds: TTL in seconds
        ---------
        @result: None
        """
        self._redis.expire(key, seconds)

    def clear(self, table):
        """Delete `table`; failures are logged instead of raised."""
        try:
            self._redis.delete(table)
        except Exception as e:
            log.error(e)

    def get_redis_obj(self):
        """Expose the underlying redis client object."""
        return self._redis
Exemplo n.º 33
0
class RedisQueue(object):
    """Thin helper around a redis / redis-cluster connection with JSON
    (de)serialization for single-key get/put."""

    def __init__(self, res):
        """
        @param res: tuple of (connection_kwargs, redis_type); redis_type
                    'cluster' selects StrictRedisCluster, anything else a
                    pooled redis.Redis.
        """
        con, redis_type = res[0], res[1]
        if redis_type == "cluster":
            try:
                self.rc = StrictRedisCluster(**con)
            except Exception as e:
                print(e)
                print("Connect Error!")
        else:
            try:
                self.pool = redis.ConnectionPool(**con)
                self.rc = redis.Redis(connection_pool=self.pool)
            except Exception as e:
                print(e)
                print("Connect Error!")

    def get_all(self, key, block=True, timeout=None):
        """
        Scan every key whose name contains `key` and fetch its value with
        the type-appropriate command; keep the value of the LAST match.
        Returns list(vals[0]) of that value (pre-existing behavior) or []
        when nothing matched — previously an unmatched scan crashed with
        UnboundLocalError on `vals`.
        NOTE(review): `block` and `timeout` are unused — confirm callers.
        """
        vals = None  # FIX: initialize so a no-match scan cannot crash
        for candidate in self.rc.keys():
            if key in str(candidate):
                # `value_type` (was `type`) avoids shadowing the builtin.
                value_type = self.rc.type(candidate)
                if value_type == 'string':
                    vals = self.rc.get(candidate)
                elif value_type == 'list':
                    vals = self.rc.lrange(candidate, 0, -1)
                elif value_type == 'set':
                    vals = self.rc.smembers(candidate)
                elif value_type == 'zset':
                    vals = self.rc.zrange(candidate, 0, -1)
                elif value_type == "hash":
                    vals = self.rc.hgetall(candidate)
                else:
                    print(value_type, candidate)
        if vals is None:
            return []
        # NOTE(review): returning only the first element looks suspicious
        # but is the original behavior — confirm before changing.
        return list(vals[0])

    def keys(self):
        """Return every key in the database."""
        return self.rc.keys()

    def iskey(self, key):
        """Return 1 when `key` exists, else 0."""
        return 1 if self.rc.exists(key) else 0

    def get(self, key):
        """Fetch `key` and JSON-decode its value."""
        return json.loads(self.rc.get(key))

    def put(self, key, value):
        """JSON-encode `value` and store it at `key`."""
        self.rc.set(key, json.dumps(value))

    def delall(self, key):
        """Delete `key`."""
        self.rc.delete(key)


# print(getredis('ak_sit'))
#RedisQueue(getredis('ak_cluster'))
Exemplo n.º 34
0
class SharQ(object):
    """The SharQ object is the core of this queue.
    SharQ does the following.

        1. Accepts a configuration file.
        2. Initializes the queue.
        3. Exposes functions to interact with the queue.
    """
    def __init__(self, config_path):
        """Construct a SharQ object by doing the following.
            1. Read the configuration path.
            2. Load the config.
            3. Initialized SharQ.
        """
        self.config_path = config_path
        self._load_config()
        self._initialize()

    def _initialize(self):
        """Read the SharQ configuration and set appropriate
        variables. Open a redis connection pool and load all
        the Lua scripts.
        """
        self._key_prefix = self._config.get('redis', 'key_prefix')
        self._job_expire_interval = int(
            self._config.get('sharq', 'job_expire_interval'))
        self._default_job_requeue_limit = int(
            self._config.get('sharq', 'default_job_requeue_limit'))

        # initalize redis
        redis_connection_type = self._config.get('redis', 'conn_type')
        db = self._config.get('redis', 'db')
        if redis_connection_type == 'unix_sock':
            self._r = redis.StrictRedis(db=db,
                                        unix_socket_path=self._config.get(
                                            'redis', 'unix_socket_path'))
        elif redis_connection_type == 'tcp_sock':
            # Clustered redis needs the cluster-aware client; one startup
            # node is enough for topology discovery.
            if self._config.getboolean('redis', 'clustered', fallback=False):
                startup_nodes = [{
                    "host": self._config.get('redis', 'host'),
                    "port": self._config.get('redis', 'port')
                }]
                self._r = StrictRedisCluster(startup_nodes=startup_nodes,
                                             decode_responses=True,
                                             skip_full_coverage_check=True)
            else:
                self._r = redis.StrictRedis(
                    db=db,
                    host=self._config.get('redis', 'host'),
                    port=self._config.get('redis', 'port'))
        self._load_lua_scripts()

    def _load_config(self):
        """Read the configuration file and load it into memory."""
        # NOTE(review): SafeConfigParser via the `ConfigParser` module is the
        # Python 2 spelling (deprecated alias on Python 3) — confirm target
        # interpreter; the `fallback=` kwarg used above is Python 3 only.
        self._config = ConfigParser.SafeConfigParser()
        self._config.read(self.config_path)

    def reload_config(self, config_path=None):
        """Reload the configuration from the new config file if provided
        else reload the current config file.
        """
        if config_path:
            self.config_path = config_path
        self._load_config()

    def _load_lua_scripts(self):
        """Loads all lua scripts required by SharQ."""
        # load lua scripts
        # Scripts live next to this module under scripts/lua; each one is
        # registered with redis so later calls only send the script's SHA.
        lua_script_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'scripts/lua')
        with open(os.path.join(lua_script_path, 'enqueue.lua'),
                  'r') as enqueue_file:
            self._lua_enqueue_script = enqueue_file.read()
            self._lua_enqueue = self._r.register_script(
                self._lua_enqueue_script)

        with open(os.path.join(lua_script_path, 'dequeue.lua'),
                  'r') as dequeue_file:
            self._lua_dequeue_script = dequeue_file.read()
            self._lua_dequeue = self._r.register_script(
                self._lua_dequeue_script)

        with open(os.path.join(lua_script_path, 'finish.lua'),
                  'r') as finish_file:
            self._lua_finish_script = finish_file.read()
            self._lua_finish = self._r.register_script(self._lua_finish_script)

        with open(os.path.join(lua_script_path, 'interval.lua'),
                  'r') as interval_file:
            self._lua_interval_script = interval_file.read()
            self._lua_interval = self._r.register_script(
                self._lua_interval_script)

        with open(os.path.join(lua_script_path, 'requeue.lua'),
                  'r') as requeue_file:
            self._lua_requeue_script = requeue_file.read()
            self._lua_requeue = self._r.register_script(
                self._lua_requeue_script)

        with open(os.path.join(lua_script_path, 'metrics.lua'),
                  'r') as metrics_file:
            self._lua_metrics_script = metrics_file.read()
            self._lua_metrics = self._r.register_script(
                self._lua_metrics_script)

    def reload_lua_scripts(self):
        """Lets user reload the lua scripts in run time."""
        self._load_lua_scripts()

    def enqueue(self,
                payload,
                interval,
                job_id,
                queue_id,
                queue_type='default',
                requeue_limit=None):
        """Enqueues the job into the specified queue_id
        of a particular queue_type
        """
        # validate all the input
        if not is_valid_interval(interval):
            raise BadArgumentException('`interval` has an invalid value.')

        if not is_valid_identifier(job_id):
            raise BadArgumentException('`job_id` has an invalid value.')

        if not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        if requeue_limit is None:
            requeue_limit = self._default_job_requeue_limit

        if not is_valid_requeue_limit(requeue_limit):
            raise BadArgumentException('`requeue_limit` has an invalid value.')

        try:
            serialized_payload = serialize_payload(payload)
        except TypeError as e:
            # NOTE(review): e.message is Python 2 only — on Python 3 this
            # raises AttributeError; confirm target interpreter.
            raise BadArgumentException(e.message)

        timestamp = str(generate_epoch())

        keys = [self._key_prefix, queue_type]

        # The payload is stored wrapped in double quotes; dequeue() strips
        # them again via payload[1:-1].
        args = [
            timestamp, queue_id, job_id,
            '"%s"' % serialized_payload, interval, requeue_limit
        ]

        self._lua_enqueue(keys=keys, args=args)

        response = {'status': 'queued'}
        return response

    def dequeue(self, queue_type='default'):
        """Dequeues a job from any of the ready queues
        based on the queue_type. If no job is ready,
        returns a failure status.
        """
        if not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        timestamp = str(generate_epoch())

        keys = [self._key_prefix, queue_type]
        args = [timestamp, self._job_expire_interval]

        dequeue_response = self._lua_dequeue(keys=keys, args=args)

        # The script returns fewer than 4 items when no job is ready.
        if len(dequeue_response) < 4:
            response = {'status': 'failure'}
            return response

        queue_id, job_id, payload, requeues_remaining = dequeue_response
        # strip the surrounding double quotes added by enqueue()
        payload = deserialize_payload(payload[1:-1])

        response = {
            'status': 'success',
            'queue_id': queue_id,
            'job_id': job_id,
            'payload': payload,
            'requeues_remaining': int(requeues_remaining)
        }

        return response

    def finish(self, job_id, queue_id, queue_type='default'):
        """Marks any dequeued job as *completed successfully*.
        Any job which gets a finish will be treated as complete
        and will be removed from the SharQ.
        """
        if not is_valid_identifier(job_id):
            raise BadArgumentException('`job_id` has an invalid value.')

        if not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        keys = [self._key_prefix, queue_type]

        args = [queue_id, job_id]

        response = {'status': 'success'}

        # the script returns 0 when the job was not found / already finished
        finish_response = self._lua_finish(keys=keys, args=args)
        if finish_response == 0:
            # the finish failed.
            response.update({'status': 'failure'})

        return response

    def interval(self, interval, queue_id, queue_type='default'):
        """Updates the interval for a specific queue_id
        of a particular queue type.
        """
        # validate all the input
        if not is_valid_interval(interval):
            raise BadArgumentException('`interval` has an invalid value.')

        if not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        # generate the interval key
        interval_hmap_key = '%s:interval' % self._key_prefix
        interval_queue_key = '%s:%s' % (queue_type, queue_id)
        keys = [interval_hmap_key, interval_queue_key]

        args = [interval]
        interval_response = self._lua_interval(keys=keys, args=args)
        if interval_response == 0:
            # the queue with the id and type does not exist.
            response = {'status': 'failure'}
        else:
            response = {'status': 'success'}

        return response

    def requeue(self):
        """Re-queues any expired job (one which does not get an expire
        before the job_expiry_interval) back into their respective queue.
        This function has to be run at specified intervals to ensure the
        expired jobs are re-queued back.
        """
        timestamp = str(generate_epoch())
        # get all queue_types and requeue one by one.
        # not recommended to do this entire process
        # in lua as it might take long and block other
        # enqueues and dequeues.
        active_queue_type_list = self._r.smembers('%s:active:queue_type' %
                                                  self._key_prefix)
        for queue_type in active_queue_type_list:
            # requeue all expired jobs in all queue types.
            keys = [self._key_prefix, queue_type]

            args = [timestamp]
            # the script returns jobs that exhausted their requeue limit
            job_discard_list = self._lua_requeue(keys=keys, args=args)
            # discard the jobs if any
            for job in job_discard_list:
                queue_id, job_id = job.split(':')
                # explicitly finishing a job
                # is nothing but discard.
                self.finish(job_id=job_id,
                            queue_id=queue_id,
                            queue_type=queue_type)

    def metrics(self, queue_type=None, queue_id=None):
        """Provides a way to get statistics about various parameters like,
        * global enqueue / dequeue rates per min.
        * per queue enqueue / dequeue rates per min.
        * queue length of each queue.
        * list of queue ids for each queue type.
        """
        if queue_id is not None and not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if queue_type is not None and not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        response = {'status': 'failure'}
        if not queue_type and not queue_id:
            # return global stats.
            # list of active queue types (ready + active)
            active_queue_types = self._r.smembers('%s:active:queue_type' %
                                                  self._key_prefix)
            ready_queue_types = self._r.smembers('%s:ready:queue_type' %
                                                 self._key_prefix)
            all_queue_types = active_queue_types | ready_queue_types
            # global rates for past 10 minutes
            timestamp = str(generate_epoch())
            keys = [self._key_prefix]
            args = [timestamp]

            enqueue_details, dequeue_details = self._lua_metrics(keys=keys,
                                                                 args=args)

            enqueue_counts = {}
            dequeue_counts = {}
            # the length of enqueue & dequeue details are always same.
            # details arrive as a flattened [name, count, name, count, ...]
            # list, hence the stride-2 iteration.
            # NOTE(review): xrange is Python 2 only.
            for i in xrange(0, len(enqueue_details), 2):
                enqueue_counts[str(enqueue_details[i])] = int(
                    enqueue_details[i + 1] or 0)
                dequeue_counts[str(dequeue_details[i])] = int(
                    dequeue_details[i + 1] or 0)

            response.update({
                'status': 'success',
                'queue_types': list(all_queue_types),
                'enqueue_counts': enqueue_counts,
                'dequeue_counts': dequeue_counts
            })
            return response
        elif queue_type and not queue_id:
            # return list of queue_ids.
            # get data from two sorted sets in a transaction
            pipe = self._r.pipeline()
            pipe.zrange('%s:%s' % (self._key_prefix, queue_type), 0, -1)
            pipe.zrange('%s:%s:active' % (self._key_prefix, queue_type), 0, -1)
            ready_queues, active_queues = pipe.execute()
            # extract the queue_ids from the queue_id:job_id string
            active_queues = [i.split(':')[0] for i in active_queues]
            all_queue_set = set(ready_queues) | set(active_queues)
            response.update({
                'status': 'success',
                'queue_ids': list(all_queue_set)
            })
            return response
        elif queue_type and queue_id:
            # return specific details.
            active_queue_types = self._r.smembers('%s:active:queue_type' %
                                                  self._key_prefix)
            ready_queue_types = self._r.smembers('%s:ready:queue_type' %
                                                 self._key_prefix)
            all_queue_types = active_queue_types | ready_queue_types
            # queue specific rates for past 10 minutes
            timestamp = str(generate_epoch())
            keys = ['%s:%s:%s' % (self._key_prefix, queue_type, queue_id)]
            args = [timestamp]

            enqueue_details, dequeue_details = self._lua_metrics(keys=keys,
                                                                 args=args)

            enqueue_counts = {}
            dequeue_counts = {}
            # the length of enqueue & dequeue details are always same.
            for i in xrange(0, len(enqueue_details), 2):
                enqueue_counts[str(enqueue_details[i])] = int(
                    enqueue_details[i + 1] or 0)
                dequeue_counts[str(dequeue_details[i])] = int(
                    dequeue_details[i + 1] or 0)

            # get the queue length for the job queue
            queue_length = self._r.llen(
                '%s:%s:%s' % (self._key_prefix, queue_type, queue_id))

            response.update({
                'status': 'success',
                'queue_length': int(queue_length),
                'enqueue_counts': enqueue_counts,
                'dequeue_counts': dequeue_counts
            })
            return response
        elif not queue_type and queue_id:
            raise BadArgumentException(
                '`queue_id` should be accompanied by `queue_type`.')

        return response

    def clear_queue(self, queue_type=None, queue_id=None, purge_all=False):
        """Clear all entries in the queue with a particular queue_id
        and queue_type. Takes an optional argument,
        purge_all: if True, also remove the related resources
        from redis.
        """
        if queue_id is None or not is_valid_identifier(queue_id):
            raise BadArgumentException('`queue_id` has an invalid value.')

        if queue_type is None or not is_valid_identifier(queue_type):
            raise BadArgumentException('`queue_type` has an invalid value.')

        response = {'status': 'Failure', 'message': 'No queued calls found'}
        # remove from the primary sorted set
        primary_set = '{}:{}'.format(self._key_prefix, queue_type)
        queued_status = self._r.zrem(primary_set, queue_id)
        if queued_status:
            response.update({
                'status': 'Success',
                'message': 'Successfully removed all queued calls'
            })
        # do a full cleanup of reources
        # although this is not necessary as we don't remove resources
        # while dequeue operation
        job_queue_list = '{}:{}:{}'.format(self._key_prefix, queue_type,
                                           queue_id)
        if queued_status and purge_all:
            job_list = self._r.lrange(job_queue_list, 0, -1)
            pipe = self._r.pipeline()
            # clear the payload data for job_uuid
            for job_uuid in job_list:
                if job_uuid is None:
                    continue
                payload_set = '{}:payload'.format(self._key_prefix)
                job_payload_key = '{}:{}:{}'.format(queue_type, queue_id,
                                                    job_uuid)
                pipe.hdel(payload_set, job_payload_key)
            # clear jobrequest interval
            interval_set = '{}:interval'.format(self._key_prefix)
            job_interval_key = '{}:{}'.format(queue_type, queue_id)
            pipe.hdel(interval_set, job_interval_key)
            # clear job_queue_list
            pipe.delete(job_queue_list)
            pipe.execute()
            response.update({
                'status':
                'Success',
                'message':
                'Successfully removed all queued calls and purged related resources'
            })
        else:
            # always delete the job queue list
            self._r.delete(job_queue_list)
        return response
# Exemplo n.º 35 — score: 0
    lines = f.readlines()

startup_nodes = []
for ip in lines:
    startup_nodes.append({"host": ip[:-1], "port": "6379"})

rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)

print "Starting timer..."
startTotal = start = time.time()

key_pair_values= {}
for i in range(operations):
    key_pair_values[string_generator(10)] = string_generator(90)

for key in key_pair_values:
    rc.set(key, key_pair_values[key])
print "Insert Time:",time.time() - start,"seconds"

start = time.time()
for key in key_pair_values:
    rc.get(key)
print "Lookup Time:",time.time() - start,"seconds"

start = time.time()
for key in key_pair_values:
    rc.delete(key)
print "Delete Time:",time.time() - start,"seconds"

print "Overall Time:",time.time() - startTotal,"seconds"
# Exemplo n.º 36 — score: 0
    def test_fix(self):
        """fix_migrating must repair half-open IMPORTING/MIGRATING slot
        states without losing data, leaving every slot on the original
        master (7100) and none on the joined node (7101)."""
        def plan_single_slot(nodes, _):
            # Custom balance plan: always migrate exactly one slot from
            # 7100 to 7101, regardless of node ordering.
            if nodes[0].port == 7100:
                src, dst = nodes
            else:
                dst, src = nodes
            return [(src, dst, 1)]

        def check_slots_intact():
            # Re-read the topology and verify slot ownership is unchanged:
            # all 16384 slots on 7100, none on 7101. Returns both nodes.
            nodes = base.list_nodes('127.0.0.1', 7100)
            self.assertEqual(2, len(nodes))
            master = nodes[('127.0.0.1', 7100)]
            joined = nodes[('127.0.0.1', 7101)]
            self.assertEqual(16384, len(master.assigned_slots))
            self.assertEqual(0, len(joined.assigned_slots))
            return master, joined

        comm.create([('127.0.0.1', 7100)])
        rc = StrictRedisCluster(
            startup_nodes=[{'host': '127.0.0.1', 'port': 7100}],
            decode_responses=True)
        comm.join_cluster(
            '127.0.0.1', 7100, '127.0.0.1', 7101,
            balance_plan=plan_single_slot)

        # 'h-893' hashes to slot 0, the slot the plan migrates.
        rc.set('h-893', 'I am in slot 0')
        comm.fix_migrating('127.0.0.1', 7100)
        self.assertEqual('I am in slot 0', rc.get('h-893'))

        t7100 = Connection('127.0.0.1', 7100)
        nodes = base.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))

        n7100 = nodes[('127.0.0.1', 7100)]
        n7101 = nodes[('127.0.0.1', 7101)]
        # Fabricate a dangling IMPORTING state on slot 0, then repair it.
        t7100.execute('cluster', 'setslot', 0, 'importing', n7101.node_id)

        comm.fix_migrating('127.0.0.1', 7100)
        self.assertEqual('I am in slot 0', rc.get('h-893'))

        n7100, n7101 = check_slots_intact()

        t7101 = Connection('127.0.0.1', 7101)
        n7100, n7101 = check_slots_intact()

        # Fabricate a dangling MIGRATING state on slot 0 and repair again.
        t7100.execute('cluster', 'setslot', 0, 'migrating', n7101.node_id)
        comm.fix_migrating('127.0.0.1', 7100)
        self.assertEqual('I am in slot 0', rc.get('h-893'))

        # Teardown: detach 7101, remove the key, shut the cluster down.
        comm.quit_cluster('127.0.0.1', 7101)
        rc.delete('h-893')
        comm.shutdown_cluster('127.0.0.1', 7100)

        t7100.close()
        t7101.close()