Пример #1
0
    def test_access_correct_slave_with_readonly_mode_client(self, sr):
        """
        A readonly-mode client whose slot lookup resolves to the correct
        slave must serve reads normally, without ever consulting the
        master connection pool.
        """
        # Seed two keys (assumed to hash to the slot served by
        # 127.0.0.1:7001) and give replication a moment to propagate.
        sr.set('foo87', 'foo')
        sr.set('foo88', 'bar')
        import time
        time.sleep(1)

        slave_node = {
            'name': '127.0.0.1:7004',
            'host': '127.0.0.1',
            'port': 7004,
            'server_type': 'slave',
        }
        master_node = {'host': '127.0.0.1', 'name': '127.0.0.1:7001', 'port': 7001, 'server_type': 'master'}

        with patch.object(ClusterReadOnlyConnectionPool, 'get_node_by_slot') as return_slave_mock:
            return_slave_mock.return_value = slave_node
            with patch.object(
                    ClusterConnectionPool,
                    'get_master_node_by_slot',
                    return_value=master_node) as return_master_mock:
                readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True)
                with readonly_client.pipeline() as readonly_pipe:
                    assert readonly_pipe.get('foo88').get('foo87').execute() == [b('bar'), b('foo')]
                    # The master pool must not have been used at all.
                    assert return_master_mock.call_count == 0
Пример #2
0
    def test_access_correct_slave_with_readonly_mode_client(self, sr):
        """
        With readonly mode enabled and the slot lookup resolving to the
        correct slave, pipelined reads must return the stored values.
        """
        # These keys are assumed to hash to the slot owned by 127.0.0.1:7001.
        sr.set('foo87', 'foo')
        sr.set('foo88', 'bar')
        # Give replication a moment to reach the slave.
        import time
        time.sleep(1)

        slave = {
            'name': '127.0.0.1:7004',
            'host': '127.0.0.1',
            'port': 7004,
            'server_type': 'slave',
        }
        master = {'host': '127.0.0.1', 'name': '127.0.0.1:7001', 'port': 7001, 'server_type': 'master'}
        expected = [b('bar'), b('foo')]

        with patch.object(ClusterReadOnlyConnectionPool, 'get_node_by_slot',
                          return_value=slave):
            with patch.object(ClusterConnectionPool, 'get_master_node_by_slot',
                              return_value=master):
                readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True)
                with readonly_client.pipeline() as readonly_pipe:
                    assert readonly_pipe.get('foo88').get('foo87').execute() == expected
Пример #3
0
 def test_moved_redirection_on_slave_with_readonly_mode_client(self):
     """
     Same MOVED-redirection-on-slave scenario, exercised through a
     READONLY-mode client backed by the readonly connection pool.
     """
     client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1)
     self.assert_moved_redirection_on_slave(ClusterReadOnlyConnectionPool, client)
Пример #4
0
 def test_moved_redirection_on_slave_with_default(self):
     """
     With a default (non-readonly) client, a pipeline hitting data that
     has completely moved is redirected once and finally served by the
     master.
     """
     client = StrictRedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1)
     self.assert_moved_redirection_on_slave(ClusterConnectionPool, client)
Пример #5
0
def test_pubsub_thread_publish():
    """
    Smoke-test the connection pool under concurrent publishers.

    Ten threads each publish 50 rounds of messages through one shared
    StrictRedisCluster client.  The test has no assertions; its value is
    exercising the thread-safety of the pool and the publish path.
    """
    startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]

    r = StrictRedisCluster(
        startup_nodes=startup_nodes,
        decode_responses=True,
        max_connections=16,
        max_connections_per_node=16,
    )

    def t_run(rc):
        # Each round publishes to 8 distinct channels so the traffic is
        # spread over several slots/nodes.
        for _ in range(50):
            rc.publish('foo', 'bar')
            rc.publish('bar', 'foo')
            rc.publish('asd', 'dsa')
            rc.publish('dsa', 'asd')
            rc.publish('qwe', 'bar')
            rc.publish('ewq', 'foo')
            rc.publish('wer', 'dsa')
            rc.publish('rew', 'asd')

        # Use this for debugging
        # print(rc.connection_pool._available_connections)
        # print(rc.connection_pool._in_use_connections)
        # print(rc.connection_pool._created_connections)

    threads = []
    try:
        for _ in range(10):
            t = threading.Thread(target=t_run, args=(r, ))
            threads.append(t)
            t.start()
    except Exception:
        print("Error: unable to start thread")
    finally:
        # Bug fix: the original never joined its workers, so the test could
        # return (and the process exit pytest) while threads were still
        # publishing.  Wait for every successfully started thread.
        for t in threads:
            t.join()
Пример #6
0
}, {
    'host': '114.116.43.125',
    'port': 9003
}, {
    'host': '114.116.43.125',
    'port': 9004
}, {
    'host': '114.116.43.125',
    'port': 9005
}, {
    'host': '114.116.43.125',
    'port': 9006
}]
cluster_client = StrictRedisCluster(
    startup_nodes=nodes,
    max_connections=20,
    skip_full_coverage_check=True,
)
# NOTE: establishing the connection here is very slow (almost 4-5 minutes).
# Only one or two startup nodes need to be listed; omitting the rest is fine.
# The library caches all cluster slots, looping inside initialize() in
# nodemanager.py (around line 212).  On the internal network it is very fast,
# so this is likely a cluster/network environment issue, not a real problem.
tmp = cluster_client.get('zillo')
print(tmp)
print(cluster_client.cluster_info())
'''
Json.cn
在线解析 什么是JSON JSON解析代码 JSON组件


{"172.16.0.71:9001": {"cluster_stats_messages_pong_sent": 337918, "cluster_stats_messages_meet_received": 5, "cluster_state": "ok", "cluster_slots_assigned": 16384, "cluster_known_nodes": 6, "cluster_stats_messages_pong_received": 332547, "cluster_slots_fail": 0, "cluster_stats_messages_received": 670465, "cluster_stats_messages_ping_sent": 332547, "cluster_stats_messages_ping_received": 337913, "cluster_size": 3, "cluster_current_epoch": 6, "cluster_stats_messages_sent": 670465, "cluster_slots_pfail": 0, "cluster_my_epoch": 1, "cluster_slots_ok": 16384}, "172.16.0.71:9002": {"cluster_stats_messages_pong_sent": 327570, "cluster_stats_messages_meet_received": 2, "cluster_state": "ok", "cluster_slots_assigned": 16384, "cluster_stats_messages_meet_sent": 3, "cluster_known_nodes": 6, "cluster_stats_messages_pong_received": 332304, "cluster_slots_fail": 0, "cluster_stats_messages_received": 659874, "cluster_stats_messages_ping_sent": 332301, "cluster_stats_messages_ping_received": 327568, "cluster_size": 3, "cluster_current_epoch": 6, "cluster_stats_messages_sent": 659874, "cluster_slots_pfail": 0, "cluster_my_epoch": 2, "cluster_slots_ok": 16384}, "172.16.0.71:9003": {"cluster_stats_messages_pong_sent": 335109, "cluster_stats_messages_meet_received": 2, "cluster_state": "ok", "cluster_slots_assigned": 16384, "cluster_stats_messages_meet_sent": 4, "cluster_known_nodes": 6, "cluster_stats_messages_pong_received": 332674, "cluster_slots_fail": 0, "cluster_stats_messages_received": 667783, "cluster_stats_messages_ping_sent": 332670, "cluster_stats_messages_ping_received": 335107, "cluster_size": 3, "cluster_current_epoch": 6, "cluster_stats_messages_sent": 667783, "cluster_slots_pfail": 0, "cluster_my_epoch": 3, "cluster_slots_ok": 16384}, "172.16.0.71:9004": {"cluster_stats_messages_pong_sent": 330755, "cluster_stats_messages_meet_received": 2, "cluster_state": "ok", "cluster_slots_assigned": 16384, "cluster_stats_messages_meet_sent": 4, "cluster_known_nodes": 6, 
"cluster_stats_messages_pong_received": 332622, "cluster_slots_fail": 0, "cluster_stats_messages_received": 663377, "cluster_stats_messages_ping_sent": 332618, "cluster_stats_messages_ping_received": 330753, "cluster_size": 3, "cluster_current_epoch": 6, "cluster_stats_messages_sent": 663377, "cluster_slots_pfail": 0, "cluster_my_epoch": 2, "cluster_slots_ok": 16384}, "172.16.0.71:9005": {"cluster_stats_messages_pong_sent": 340213, "cluster_stats_messages_meet_received": 4, "cluster_state": "ok", "cluster_slots_assigned": 16384, "cluster_stats_messages_meet_sent": 1, "cluster_known_nodes": 6, "cluster_stats_messages_pong_received": 332575, "cluster_slots_fail": 0, "cluster_stats_messages_received": 672788, "cluster_stats_messages_ping_sent": 332574, "cluster_stats_messages_ping_received": 340209, "cluster_size": 3, "cluster_current_epoch": 6, "cluster_stats_messages_sent": 672788, "cluster_slots_pfail": 0, "cluster_my_epoch": 3, "cluster_slots_ok": 16384}, "172.16.0.71:9006": {"cluster_stats_messages_pong_sent": 323682, "cluster_stats_messages_meet_received": 2, "cluster_state": "ok", "cluster_slots_assigned": 16384, "cluster_stats_messages_meet_sent": 5, "cluster_known_nodes": 6, "cluster_stats_messages_pong_received": 332525, "cluster_slots_fail": 0, "cluster_stats_messages_received": 656207, "cluster_stats_messages_ping_sent": 332520, "cluster_stats_messages_ping_received": 323680, "cluster_size": 3, "cluster_current_epoch": 6, "cluster_stats_messages_sent": 656207, "cluster_slots_pfail": 0, "cluster_my_epoch": 1, "cluster_slots_ok": 16384}}
      
{
Пример #7
0
import rediscluster
from rediscluster.client import StrictRedisCluster

# Any reachable node is enough for the client to discover the whole cluster.
startup_nodes = [{"host": "192.168.31.229", "port": "7000"}]
rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True)

# Round-trip one value through the cluster.
rc.set('foo', 'bar')
value = rc.get('foo')
# Bug fix: `print value` is Python 2 statement syntax and a SyntaxError on
# Python 3; the function form works on both interpreters.
print(value)
    
Пример #8
0
    "retry_on_timeout": True,
}
# NOTE(review): this fragment continues a script whose earlier setup
# (ams_cluster, cluster_parameters, and the lua_* script source strings)
# is defined above this excerpt — confirm against the full file.
redis_batch_size = 100  # presumably keys processed per batch — TODO confirm
pipeline_size = 200  # presumably commands buffered per pipeline — TODO confirm
ttl_sec = None  # passed to the scripts below; None presumably means no expiry

# Sample (user_id, token) pair fed to the evalsha call at the bottom.
params = ('3e7c4e68-ddaf-4c19-e874-ea4412c33afb', 'NPI_30')

#with CassandraClusterUtils.get_cluster(targetdc) as cluster:
#    with cluster.connect() as session:
#        segment_iter = self.get_segment_iterator()

#cluster = UserStoreClusterUtils.get_cluster(self.targetdc)

# cluster = StrictRedisCluster(startup_nodes=startup_nodes, **cluster_parameters)
# Connect to the AMS redis cluster with the parameters built earlier.
cluster = StrictRedisCluster(startup_nodes=ams_cluster, **cluster_parameters)
# Pre-load each Lua script; script_load returns the digest that EVALSHA
# uses to invoke it without resending the source.
add_token_if_exists_sha_256 = cluster.script_load(
    lua_append_tokens_for_existing_user)
add_token_and_create_if_not_exists_sha_256 = cluster.script_load(
    lua_append_tokens_and_create_user_if_not_exists)
lua_append_s6_graph_sha_256 = cluster.script_load(lua_append_s6_graph)
lua_remove_s6_graph_sha_256 = cluster.script_load(lua_remove_s6_graph)

# import ipdb; ipdb.set_trace()
#redis_users_dao = RedisUsersDAO(_cluster)
#pipeline = cluster.pipeline()
#pipeline.evalsha(add_token_and_create_if_not_exists_sha_256, 1, params[0], params[1], ttl_sec)
#pipeline.evalsha(add_token_if_exists_sha_256, 1, params[0], params[1], ttl_sec)
# res = cluster.evalsha(add_token_if_exists_sha_256, 1, params[0], params[1], ttl_sec)
# Run the "append token, creating the user if needed" script on one key.
res = cluster.evalsha(add_token_and_create_if_not_exists_sha_256, 1, params[0],
                      params[1], ttl_sec)
Пример #9
0
from rediscluster.client import StrictRedisCluster

if __name__ == '__main__':

    # Startup nodes: a subset of the cluster is enough for discovery.
    cluster_nodes = [
        {'host': '192.168.210.174', 'port': 7001},
        {'host': '192.168.210.174', 'port': 7002},
        {'host': '192.168.210.173', 'port': 7004},
    ]

    # Build a client connected to the whole cluster.
    client = StrictRedisCluster(startup_nodes=cluster_nodes,
                                decode_responses=True)

    # Round-trip a value through the cluster and show it.
    client.set('username', 'redis')
    fetched = client.get('username')
    print(fetched)
Пример #10
0
    print 'Bad argument!!!!!!!'
    sys.exit(1)

# Map the command-line mode flag to the Redis key prefix for the SMS code.
# (Dispatch table replaces the original 6-branch if/elif chain; the
# resulting keys are byte-identical.)
_SMSCODE_PREFIXES = {
    'r': 'SOA:MYACCOUNT:SMSCODE:1001:',
    'f': 'SOA:MYACCOUNT:SMSCODE:1002:',
    'm': 'SOA:MYACCOUNT:SMSCODE:1005:',
    'l': 'SOA:MYACCOUNT:SMSCODE:1006:',
    'c': 'SOA:MYACCOUNT:SMSCODE:1007:',
    'e': 'SOA:MYACCOUNT:SMSCODE:1008:',
}

flag = sys.argv[3].lower()
if flag in _SMSCODE_PREFIXES:
    smscode = _SMSCODE_PREFIXES[flag] + sys.argv[2]
else:
    # Bug fix: `print '...'` is Python 2 statement syntax — a SyntaxError
    # on Python 3; use the print() function throughout.
    print('Bad argument!!!!!!!')
    sys.exit(1)

try:
    # target_redis is presumably a startup-node list built above this
    # excerpt — TODO confirm against the full file.
    redisconn = StrictRedisCluster(startup_nodes=target_redis)
    sms = redisconn.mget(smscode)
    if sms[0]:
        print("smscode is:", sms)
    else:
        print("Cannot find the smscode")

except Exception as e:  # Bug fix: `except Exception, e` is Python 2 only.
    print("Connect Redis node error:", e)
    sys.exit(1)