def test_start_multi(self):
    comm.start_cluster_on_multi([('127.0.0.1', 7100), ('127.0.0.1', 7101)])
    nodes = base.list_nodes('127.0.0.1', 7100)
    # Two masters split the 16384 slots evenly.
    self.assertEqual(2, len(nodes))
    self.assertEqual(8192, len(nodes[('127.0.0.1', 7100)].assigned_slots))
    self.assertEqual(8192, len(nodes[('127.0.0.1', 7101)].assigned_slots))

    comm.quit_cluster('127.0.0.1', 7100)
    comm.shutdown_cluster('127.0.0.1', 7101)

    comm.start_cluster_on_multi([('127.0.0.1', 7100), ('127.0.0.1', 7101),
                                 ('127.0.0.1', 7102)])
    nodes = base.list_nodes('127.0.0.1', 7100)
    # Three masters get 5462 + 5461 + 5461 = 16384 slots.
    self.assertEqual(3, len(nodes))
    self.assertEqual(5462, len(nodes[('127.0.0.1', 7100)].assigned_slots))
    self.assertEqual(5461, len(nodes[('127.0.0.1', 7101)].assigned_slots))
    self.assertEqual(5461, len(nodes[('127.0.0.1', 7102)].assigned_slots))

    comm.quit_cluster('127.0.0.1', 7100)
    comm.quit_cluster('127.0.0.1', 7101)
    comm.shutdown_cluster('127.0.0.1', 7102)

    # A duplicated address in the start list should not create an extra node.
    comm.start_cluster_on_multi([('127.0.0.1', 7100), ('127.0.0.1', 7101),
                                 ('127.0.0.1', 7100), ('127.0.0.1', 7102)])
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(3, len(nodes))
    self.assertEqual(5462, len(nodes[('127.0.0.1', 7100)].assigned_slots))
    self.assertEqual(5461, len(nodes[('127.0.0.1', 7101)].assigned_slots))
    self.assertEqual(5461, len(nodes[('127.0.0.1', 7102)].assigned_slots))

    comm.quit_cluster('127.0.0.1', 7100)
    comm.quit_cluster('127.0.0.1', 7101)
    comm.shutdown_cluster('127.0.0.1', 7102)

def test_fix(self):
    def migrate_one_slot(nodes, _):
        if nodes[0].port == 7100:
            source, target = nodes
        else:
            target, source = nodes
        return [(source, target, 1)]

    comm.create([('127.0.0.1', 7100)])
    rc = StrictRedisCluster(startup_nodes=[{
        'host': '127.0.0.1',
        'port': 7100
    }], decode_responses=True)
    comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101,
                      balance_plan=migrate_one_slot)
    rc.set('h-893', 'I am in slot 0')
    comm.fix_migrating('127.0.0.1', 7100)
    self.assertEqual('I am in slot 0', rc.get('h-893'))

    t7100 = Connection('127.0.0.1', 7100)
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    n7100 = nodes[('127.0.0.1', 7100)]
    n7101 = nodes[('127.0.0.1', 7101)]
    # Leave slot 0 in a dangling "importing" state on 7100, then fix it.
    t7100.execute('cluster', 'setslot', 0, 'importing', n7101.node_id)
    comm.fix_migrating('127.0.0.1', 7100)
    self.assertEqual('I am in slot 0', rc.get('h-893'))

    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    n7100 = nodes[('127.0.0.1', 7100)]
    n7101 = nodes[('127.0.0.1', 7101)]
    self.assertEqual(16384, len(n7100.assigned_slots))
    self.assertEqual(0, len(n7101.assigned_slots))

    t7101 = Connection('127.0.0.1', 7101)
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    n7100 = nodes[('127.0.0.1', 7100)]
    n7101 = nodes[('127.0.0.1', 7101)]
    self.assertEqual(16384, len(n7100.assigned_slots))
    self.assertEqual(0, len(n7101.assigned_slots))

    # Leave slot 0 in a dangling "migrating" state on 7100, then fix it.
    t7100.execute('cluster', 'setslot', 0, 'migrating', n7101.node_id)
    comm.fix_migrating('127.0.0.1', 7100)
    self.assertEqual('I am in slot 0', rc.get('h-893'))

    comm.quit_cluster('127.0.0.1', 7101)
    rc.delete('h-893')
    comm.shutdown_cluster('127.0.0.1', 7100)
    t7100.close()
    t7101.close()

def test_api(self):
    comm.start_cluster('127.0.0.1', 7100)
    comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
    # 7102 becomes a slave of 7100.
    comm.replicate('127.0.0.1', 7100, '127.0.0.1', 7102)
    time.sleep(1)

    rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
    for i in xrange(20):
        rc.set('key_%s' % i, 'value_%s' % i)
    for i in xrange(20):
        self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(3, len(nodes))
    self.assertEqual(range(8192), nodes[('127.0.0.1', 7101)].assigned_slots)
    self.assertEqual(range(8192, 16384),
                     nodes[('127.0.0.1', 7100)].assigned_slots)

    # After 7101 quits, 7100 holds all the slots again.
    comm.quit_cluster('127.0.0.1', 7101)
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(range(16384), nodes[('127.0.0.1', 7100)].assigned_slots)
    for i in xrange(20):
        self.assertEqual('value_%s' % i, rc.get('key_%s' % i))
    for i in xrange(20):
        rc.delete('key_%s' % i)

    comm.quit_cluster('127.0.0.1', 7102)
    comm.shutdown_cluster('127.0.0.1', 7100)

def test_start_with_max_slots_set(self):
    comm.create([('127.0.0.1', 7100)], max_slots=7000)
    rc = StrictRedisCluster(
        startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7100
        }], decode_responses=True)
    rc.set('key', 'value')
    self.assertEqual('value', rc.get('key'))
    rc.delete('key')
    comm.shutdown_cluster('127.0.0.1', 7100)

    comm.start_cluster_on_multi(
        [('127.0.0.1', 7100), ('127.0.0.1', 7101)], max_slots=7000)
    rc = StrictRedisCluster(
        startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7100
        }], decode_responses=True)
    rc.set('key', 'value')
    self.assertEqual('value', rc.get('key'))
    rc.delete('key')
    comm.quit_cluster('127.0.0.1', 7101)
    comm.shutdown_cluster('127.0.0.1', 7100)

def test_quit_problems(self):
    comm.start_cluster('127.0.0.1', 7100)
    comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
    # 7102 becomes a slave of 7100.
    comm.replicate('127.0.0.1', 7100, '127.0.0.1', 7102)
    time.sleep(1)

    rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
    for i in xrange(20):
        rc.set('key_%s' % i, 'value_%s' % i)
    for i in xrange(20):
        self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(3, len(nodes))
    self.assertEqual(range(8192), nodes[('127.0.0.1', 7101)].assigned_slots)
    self.assertEqual(range(8192, 16384),
                     nodes[('127.0.0.1', 7100)].assigned_slots)
    for i in xrange(20):
        rc.delete('key_%s' % i)

    # A master may not quit while it still has a slave.
    self.assertRaisesRegexp(ValueError, '^The master still has slaves$',
                            comm.quit_cluster, '127.0.0.1', 7100)
    comm.quit_cluster('127.0.0.1', 7102)
    comm.quit_cluster('127.0.0.1', 7101)
    # The last remaining node may not quit its own cluster.
    self.assertRaisesRegexp(ValueError, '^This is the last node',
                            comm.quit_cluster, '127.0.0.1', 7100)
    comm.shutdown_cluster('127.0.0.1', 7100)

def test_join_no_load(self):
    comm.create([('127.0.0.1', 7100)])
    rc = StrictRedisCluster(
        startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7100
        }], decode_responses=True)
    # 'h-893' and the keys tagged with {h-893} all hash to slot 0.
    rc.set('x-{h-893}', 'y')
    rc.set('y-{h-893}', 'zzZ')
    rc.set('z-{h-893}', 'w')
    rc.incr('h-893')

    # 7101 joins without taking over any slots.
    comm.join_no_load('127.0.0.1', 7100, '127.0.0.1', 7101)
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    n7100 = nodes[('127.0.0.1', 7100)]
    n7101 = nodes[('127.0.0.1', 7101)]
    self.assertEqual(16384, len(n7100.assigned_slots))
    self.assertEqual(0, len(n7101.assigned_slots))

    comm.join_no_load('127.0.0.1', 7100, '127.0.0.1', 7102)
    comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101, [0])
    nodes = base.list_nodes('127.0.0.1', 7102)
    self.assertEqual(3, len(nodes))
    n7100 = nodes[('127.0.0.1', 7100)]
    n7101 = nodes[('127.0.0.1', 7101)]
    n7102 = nodes[('127.0.0.1', 7102)]
    self.assertEqual(16383, len(n7100.assigned_slots))
    self.assertEqual(1, len(n7101.assigned_slots))
    self.assertEqual(0, len(n7102.assigned_slots))

    try:
        # The keys in slot 0 should have moved to 7101 along with the slot.
        t = n7101.get_conn()
        m = t.execute('get', 'h-893')
        self.assertEqual('1', m)
        m = t.execute('get', 'y-{h-893}')
        self.assertEqual('zzZ', m)

        comm.quit_cluster('127.0.0.1', 7102)
        comm.quit_cluster('127.0.0.1', 7101)
        t = n7100.get_conn()
        rc.delete('x-{h-893}')
        rc.delete('y-{h-893}')
        rc.delete('z-{h-893}')
        rc.delete('h-893')
        comm.shutdown_cluster('127.0.0.1', 7100)
    finally:
        n7100.close()
        n7101.close()

def test_api(self):
    conf_file = '/tmp/redis_cluster_node_7103.conf'
    redis_server = Popen([
        os.environ['REDIS_SERVER'],
        '--cluster-enabled', 'yes',
        '--cluster-config-file', conf_file,
        '--save', '',
        '--appendonly', 'no',
        '--port', '7103',
    ])
    time.sleep(1)
    try:
        comm.create([
            ('127.0.0.1', 7101),
            ('127.0.0.1', 7102),
            ('127.0.0.1', 7103),
        ])
        nodes = base.list_nodes('127.0.0.1', 7101)
        slots = nodes[('127.0.0.1', 7103)].assigned_slots

        # Kill 7103 so that its slots become uncovered.
        redis_server.terminate()
        redis_server = None
        # wait for process stop
        try:
            while True:
                time.sleep(3)
                base.list_nodes('127.0.0.1', 7103)
        except RedisIOError as e:
            # redis cluster-node-timeout + 1
            time.sleep(6)
            # Bring in a fresh node (7100) to take over the dead node's slots.
            comm.rescue_cluster('127.0.0.1', 7102, '127.0.0.1', 7100)

            nodes = base.list_nodes('127.0.0.1', 7100)
            self.assertEqual(slots, nodes[('127.0.0.1', 7100)].assigned_slots)
            nodes = base.list_nodes('127.0.0.1', 7101)
            self.assertEqual(slots, nodes[('127.0.0.1', 7100)].assigned_slots)
            nodes = base.list_nodes('127.0.0.1', 7102)
            self.assertEqual(slots, nodes[('127.0.0.1', 7100)].assigned_slots)

            comm.quit_cluster('127.0.0.1', 7100)
            comm.quit_cluster('127.0.0.1', 7101)
            comm.shutdown_cluster('127.0.0.1', 7102, ignore_failed=True)
    finally:
        if redis_server is not None:
            redis_server.kill()
        os.remove(conf_file)

def test_execution(self):
    with self.app.test_client() as client:
        r = client.post('/nodes/add', data={
            'host': '127.0.0.1',
            'port': '7100',
            'mem': '1048576',
        })
        self.assertReqStatus(200, r)

        r = client.post('/cluster/add', data={
            'descr': 'lazy dog',
        })
        self.assertReqStatus(200, r)
        cluster_id = r.data

        r = client.post('/cluster/launch', data={
            'cluster_id': cluster_id,
            'host': '127.0.0.1',
            'port': 7100,
        })
        self.assertReqStatus(200, r)

        # Queue a task that joins 7101 and then migrates slots 0 and 1 to it.
        task = ClusterTask(cluster_id=int(cluster_id), task_type=0)
        task.add_step('join', cluster_id=cluster_id, cluster_host='127.0.0.1',
                      cluster_port=7100, newin_host='127.0.0.1',
                      newin_port=7101)
        task.add_step('migrate', src_host='127.0.0.1', src_port=7100,
                      dst_host='127.0.0.1', dst_port=7101, slots=[0, 1])
        self.db.session.add(task)
        self.db.session.commit()
        self.exec_all_tasks()

        nodes, node_7100 = comm.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        self.assertEqual(range(2, 16384), sorted(node_7100.assigned_slots))

        tasks = list(self.db.session.query(ClusterTask).all())
        self.assertEqual(1, len(tasks))
        t = tasks[0]
        self.assertIsNotNone(t.completion)
        self.assertIsNone(t.exec_error)
        self.assertIsNone(t.acquired_lock())

        comm.quit_cluster('127.0.0.1', 7101)
        comm.shutdown_cluster('127.0.0.1', 7100)

def test_fix(self):
    def migrate_one_slot(nodes, _):
        if nodes[0].port == 7100:
            source, target = nodes
        else:
            target, source = nodes
        return [(source, target, 1)]

    comm.start_cluster('127.0.0.1', 7100)
    rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
    comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101,
                      balance_plan=migrate_one_slot)
    rc.set('h-893', 'I am in slot 0')
    comm.fix_migrating('127.0.0.1', 7100)
    self.assertEqual('I am in slot 0', rc.get('h-893'))

    t7100 = Talker('127.0.0.1', 7100)
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    n7100 = nodes[('127.0.0.1', 7100)]
    n7101 = nodes[('127.0.0.1', 7101)]
    # Leave slot 0 in a dangling "importing" state on 7100, then fix it.
    t7100.talk('cluster', 'setslot', 0, 'importing', n7101.node_id)
    comm.fix_migrating('127.0.0.1', 7100)
    self.assertEqual('I am in slot 0', rc.get('h-893'))

    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    n7100 = nodes[('127.0.0.1', 7100)]
    n7101 = nodes[('127.0.0.1', 7101)]
    self.assertEqual(16384, len(n7100.assigned_slots))
    self.assertEqual(0, len(n7101.assigned_slots))

    t7101 = Talker('127.0.0.1', 7101)
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    n7100 = nodes[('127.0.0.1', 7100)]
    n7101 = nodes[('127.0.0.1', 7101)]
    self.assertEqual(16384, len(n7100.assigned_slots))
    self.assertEqual(0, len(n7101.assigned_slots))

    # Leave slot 0 in a dangling "migrating" state on 7100, then fix it.
    t7100.talk('cluster', 'setslot', 0, 'migrating', n7101.node_id)
    comm.fix_migrating('127.0.0.1', 7100)
    self.assertEqual('I am in slot 0', rc.get('h-893'))

    comm.quit_cluster('127.0.0.1', 7101)
    rc.delete('h-893')
    comm.shutdown_cluster('127.0.0.1', 7100)
    t7100.close()
    t7101.close()

def test_start_with_max_slots_set(self):
    comm.start_cluster('127.0.0.1', 7100, max_slots=7000)
    rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
    rc.set('key', 'value')
    self.assertEqual('value', rc.get('key'))
    rc.delete('key')
    comm.shutdown_cluster('127.0.0.1', 7100)

    comm.start_cluster_on_multi([('127.0.0.1', 7100), ('127.0.0.1', 7101)],
                                max_slots=7000)
    rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
    rc.set('key', 'value')
    self.assertEqual('value', rc.get('key'))
    rc.delete('key')
    comm.quit_cluster('127.0.0.1', 7101)
    comm.shutdown_cluster('127.0.0.1', 7100)

def test_start_with_max_slots_set(self):
    comm.create([('127.0.0.1', 7100)], max_slots=7000)
    rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
    rc.set('key', 'value')
    self.assertEqual('value', rc.get('key'))
    rc.delete('key')
    comm.shutdown_cluster('127.0.0.1', 7100)

    comm.start_cluster_on_multi([('127.0.0.1', 7100), ('127.0.0.1', 7101)],
                                max_slots=7000)
    rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
    rc.set('key', 'value')
    self.assertEqual('value', rc.get('key'))
    rc.delete('key')
    comm.quit_cluster('127.0.0.1', 7101)
    comm.shutdown_cluster('127.0.0.1', 7100)

def test_start_multi(self):
    comm.start_cluster_on_multi([('127.0.0.1', 7100), ('127.0.0.1', 7101)])
    nodes = base.list_nodes('127.0.0.1', 7100)
    # Two masters split the 16384 slots evenly.
    self.assertEqual(2, len(nodes))
    self.assertEqual(8192, len(nodes[('127.0.0.1', 7100)].assigned_slots))
    self.assertEqual(8192, len(nodes[('127.0.0.1', 7101)].assigned_slots))

    comm.quit_cluster('127.0.0.1', 7100)
    comm.shutdown_cluster('127.0.0.1', 7101)

    comm.start_cluster_on_multi([('127.0.0.1', 7100), ('127.0.0.1', 7101),
                                 ('127.0.0.1', 7102)])
    nodes = base.list_nodes('127.0.0.1', 7100)
    possible_slots_number = (5461, 5462)
    ports = (7100, 7101, 7102)
    self.assertEqual(3, len(nodes))
    # Each of the three masters gets either 5461 or 5462 slots,
    # and together they cover all 16384 slots.
    for port in ports:
        self.assertIn(len(nodes[('127.0.0.1', port)].assigned_slots),
                      possible_slots_number, msg='port=%s' % port)
    self.assertEqual(
        sum(len(nodes[('127.0.0.1', port)].assigned_slots)
            for port in ports), 16384)

    comm.quit_cluster('127.0.0.1', 7100)
    comm.quit_cluster('127.0.0.1', 7101)
    comm.shutdown_cluster('127.0.0.1', 7102)

    # A duplicated address in the start list should not create an extra node.
    comm.start_cluster_on_multi([('127.0.0.1', 7100), ('127.0.0.1', 7101),
                                 ('127.0.0.1', 7100), ('127.0.0.1', 7102)])
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(3, len(nodes))
    for port in ports:
        self.assertIn(len(nodes[('127.0.0.1', port)].assigned_slots),
                      possible_slots_number, msg='port=%s' % port)
    self.assertEqual(
        sum(len(nodes[('127.0.0.1', port)].assigned_slots)
            for port in ports), 16384)

    comm.quit_cluster('127.0.0.1', 7100)
    comm.quit_cluster('127.0.0.1', 7101)
    comm.shutdown_cluster('127.0.0.1', 7102)

def test_execution_failed(self):
    with self.app.test_client() as client:
        r = client.post('/redis/add', data={
            'host': '127.0.0.1',
            'port': '7100',
        })
        self.assertReqStatus(200, r)

        r = client.post('/cluster/add', data={
            'descr': 'lazy dog',
        })
        self.assertReqStatus(200, r)
        cluster_id = r.data

        r = client.post('/task/launch', data=json.dumps({
            'cluster': cluster_id,
            'nodes': [{
                'host': '127.0.0.1',
                'port': 7100,
            }],
        }))
        self.assertReqStatus(200, r)
        self.exec_all_tasks()

        task = ClusterTask(cluster_id=int(cluster_id), task_type=0)
        task.add_step('join', cluster_id=cluster_id,
                      cluster_host='127.0.0.1', cluster_port=7100,
                      newin_host='127.0.0.1', newin_port=7101)
        task.add_step('migrate', src_host='127.0.0.1', src_port=7100,
                      dst_host='127.0.0.1', dst_port=7101, slots=[0, 1])
        # This step fails: slots 0 and 1 were already migrated away.
        task.add_step('migrate', src_host='127.0.0.1', src_port=7100,
                      dst_host='127.0.0.1', dst_port=7101, slots=[0, 1])
        # This step never runs because the previous one failed.
        task.add_step('migrate', src_host='127.0.0.1', src_port=7100,
                      dst_host='127.0.0.1', dst_port=7101, slots=[2, 3])
        self.db.session.add(task)
        self.db.session.commit()
        self.exec_all_tasks()

        nodes, node_7100 = comm.list_nodes('127.0.0.1', 7100)
        self.assertEqual(2, len(nodes))
        self.assertEqual(range(2, 16384), sorted(node_7100.assigned_slots))

        tasks = list(
            self.db.session.query(ClusterTask).order_by(
                ClusterTask.id.asc()).all())
        self.assertEqual(2, len(tasks))
        t = tasks[1]
        self.assertIsNotNone(t.completion)
        self.assertIsNotNone(t.exec_error)
        self.assertIsNone(t.acquired_lock())

        steps = t.all_steps
        self.assertEqual(4, len(steps))
        step = steps[0]
        self.assertTrue(step.completed)
        self.assertIsNone(step.exec_error)
        step = steps[1]
        self.assertTrue(step.completed)
        self.assertIsNone(step.exec_error)
        step = steps[2]
        self.assertTrue(step.completed)
        self.assertIsNotNone(step.exec_error)
        step = steps[3]
        self.assertFalse(step.started)
        self.assertFalse(step.completed)
        self.assertIsNone(step.exec_error)

        comm.quit_cluster('127.0.0.1', 7101)
        comm.shutdown_cluster('127.0.0.1', 7100)

def test_api(self):
    comm.create([('127.0.0.1', 7100)])
    rc = StrictRedisCluster(
        startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7100
        }], decode_responses=True)
    rc.set('key', 'value')
    self.assertEqual('value', rc.get('key'))

    comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
    for i in range(20):
        rc.set('key_%s' % i, 'value_%s' % i)
    for i in range(20):
        self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    self.assertEqual(list(range(8192)),
                     nodes[('127.0.0.1', 7101)].assigned_slots)
    self.assertEqual(list(range(8192, 16384)),
                     nodes[('127.0.0.1', 7100)].assigned_slots)

    comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101, [8192])
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    self.assertEqual(list(range(8193)),
                     nodes[('127.0.0.1', 7101)].assigned_slots)
    self.assertEqual(list(range(8193, 16384)),
                     nodes[('127.0.0.1', 7100)].assigned_slots)

    comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101,
                       [8193, 8194, 8195])
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    self.assertEqual(list(range(8196)),
                     nodes[('127.0.0.1', 7101)].assigned_slots)
    self.assertEqual(list(range(8196, 16384)),
                     nodes[('127.0.0.1', 7100)].assigned_slots)

    # Migrating slots the source does not hold, or involving a node
    # outside the cluster, raises an error.
    six.assertRaisesRegex(self, ValueError, 'Not all slot held by',
                          comm.migrate_slots, '127.0.0.1', 7100,
                          '127.0.0.1', 7101, [8192])
    six.assertRaisesRegex(self, ValueError, 'Not all slot held by',
                          comm.migrate_slots, '127.0.0.1', 7100,
                          '127.0.0.1', 7101, [8195, 8196])
    six.assertRaisesRegex(self, ValueError,
                          'Two nodes are not in the same cluster',
                          comm.migrate_slots, '127.0.0.1', 7100,
                          '127.0.0.1', 7102, [8196])

    comm.quit_cluster('127.0.0.1', 7100)
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(1, len(nodes))
    self.assertEqual(0, len(nodes[('127.0.0.1', 7100)].assigned_slots))
    nodes = base.list_nodes('127.0.0.1', 7101)
    self.assertEqual(1, len(nodes))
    self.assertEqual(list(range(16384)),
                     nodes[('127.0.0.1', 7101)].assigned_slots)

    rc = StrictRedisCluster(
        startup_nodes=[{
            'host': '127.0.0.1',
            'port': 7101
        }], decode_responses=True)
    for i in range(20):
        self.assertEqual('value_%s' % i, rc.get('key_%s' % i))
    self.assertEqual('value', rc.get('key'))

    # A cluster that still contains keys refuses to shut down.
    six.assertRaisesRegex(self, RedisStatusError, 'still contains keys',
                          comm.shutdown_cluster, '127.0.0.1', 7101)
    rc.delete('key', *['key_%s' % i for i in range(20)])
    comm.shutdown_cluster('127.0.0.1', 7101)
    six.assertRaisesRegex(
        self, RedisClusterException,
        'All slots are not covered after query all startup_nodes. .*',
        rc.get, 'key')

def test_api(self):
    comm.start_cluster('127.0.0.1', 7100)
    rc = RedisCluster([{'host': '127.0.0.1', 'port': 7100}])
    rc.set('key', 'value')
    self.assertEqual('value', rc.get('key'))

    comm.join_cluster('127.0.0.1', 7100, '127.0.0.1', 7101)
    for i in xrange(20):
        rc.set('key_%s' % i, 'value_%s' % i)
    for i in xrange(20):
        self.assertEqual('value_%s' % i, rc.get('key_%s' % i))

    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    self.assertEqual(range(8192), nodes[('127.0.0.1', 7101)].assigned_slots)
    self.assertEqual(range(8192, 16384),
                     nodes[('127.0.0.1', 7100)].assigned_slots)

    comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101, [8192])
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    self.assertEqual(range(8193), nodes[('127.0.0.1', 7101)].assigned_slots)
    self.assertEqual(range(8193, 16384),
                     nodes[('127.0.0.1', 7100)].assigned_slots)

    comm.migrate_slots('127.0.0.1', 7100, '127.0.0.1', 7101,
                       [8193, 8194, 8195])
    nodes = base.list_nodes('127.0.0.1', 7100)
    self.assertEqual(2, len(nodes))
    self.assertEqual(range(8196), nodes[('127.0.0.1', 7101)].assigned_slots)
    self.assertEqual(range(8196, 16384),
                     nodes[('127.0.0.1', 7100)].assigned_slots)

    self.assertRaisesRegexp(ValueError, 'Not all slot held by',
                            comm.migrate_slots, '127.0.0.1', 7100,
                            '127.0.0.1', 7101, [8192])
    self.assertRaisesRegexp(ValueError, 'Not all slot held by',
                            comm.migrate_slots, '127.0.0.1', 7100,
                            '127.0.0.1', 7101, [8195, 8196])
    self.assertRaisesRegexp(ValueError,
                            'Two nodes are not in the same cluster',
                            comm.migrate_slots, '127.0.0.1', 7100,
                            '127.0.0.1', 7102, [8196])

    comm.quit_cluster('127.0.0.1', 7100)
    for i in xrange(20):
        self.assertEqual('value_%s' % i, rc.get('key_%s' % i))
    self.assertEqual('value', rc.get('key'))
    nodes = base.list_nodes('127.0.0.1', 7101)
    self.assertEqual(1, len(nodes))
    self.assertEqual(range(16384), nodes[('127.0.0.1', 7101)].assigned_slots)

    self.assertRaisesRegexp(RedisStatusError, 'Cluster containing keys',
                            comm.shutdown_cluster, '127.0.0.1', 7101)
    rc.delete('key', *['key_%s' % i for i in xrange(20)])
    comm.shutdown_cluster('127.0.0.1', 7101)
    self.assertRaisesRegexp(ResponseError, 'CLUSTERDOWN .*', rc.get, 'key')