def test_random_pgs_del_and_add(self):
    util.print_frame()

    # start load generator
    util.log("start load_generator")
    for i in range(self.max_load_generator):
        ip, port = util.get_rand_gateway(self.cluster)
        self.load_gen_thrd_list[i] = load_generator.LoadGenerator(i, ip, port)
        self.load_gen_thrd_list[i].start()
    util.log("started load_generator")

    servers = self.cluster['servers']
    gw_list = []
    for server in servers:
        gw = {}
        gw['mgmt'] = telnetlib.Telnet(server['ip'], server['gateway_port'] + 1)
        gw['normal'] = telnetlib.Telnet(server['ip'], server['gateway_port'])
        gw_list.append(gw)

    count = 10
    while count > 0:
        c = random.choice(servers)
        for gw in gw_list:
            gw['mgmt'].write("pgs_del %d %d\r\n" % (c['id'], c['pg_id']))
            gw['mgmt'].read_until("+OK\r\n")

        gw_list[0]['mgmt'].write("cluster_info\r\nping\r\n")
        print gw_list[0]['mgmt'].read_until("+PONG\r\n")

        for gw in gw_list:
            gw['mgmt'].write("pgs_add %d %d %s %d\r\n"
                             % (c['id'], c['pg_id'], c['ip'], c['redis_port']))
            gw['mgmt'].read_until("+OK\r\n")

        for gw in gw_list:
            while True:
                gw['normal'].write("info gateway\r\n")
                ret = gw['normal'].read_until("\r\n\r\n")
                if "gateway_disconnected_redis:0\r\n" in ret:
                    break

        count -= 1

    # check consistency of load_generator
    for i in range(len(self.load_gen_thrd_list)):
        self.load_gen_thrd_list[i].quit()
    for i in range(len(self.load_gen_thrd_list)):
        self.load_gen_thrd_list[i].join()
        self.assertTrue(self.load_gen_thrd_list[i].isConsistent(),
                        'Inconsistent after gateway_mgmt test')
def setUpClass(cls):
    cls.conf_checker = default_cluster.initialize_starting_up_smr_before_redis(
        cls.cluster)
    assert cls.conf_checker != None, 'failed to initialize cluster'

    slave = util.get_server_by_role(cls.cluster['servers'], 'slave')
    for i in range(cls.max_load_generator):
        cls.load_gen_thrd_list[i] = load_generator.LoadGenerator(
            i, slave['ip'], slave['redis_port'])
        cls.load_gen_thrd_list[i].start()
def setUpClass(cls):
    ret = default_cluster.initialize_starting_up_smr_before_redis(
        cls.cluster)
    if ret != 0:
        default_cluster.finalize(cls.cluster)
        return -1

    slave = util.get_server_by_role(cls.cluster['servers'], 'slave')
    for i in range(cls.max_load_generator):
        cls.load_gen_thrd_list[i] = load_generator.LoadGenerator(
            i, slave['ip'], slave['redis_port'])
        cls.load_gen_thrd_list[i].start()
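# The matching teardown is not shown in this section. A minimal sketch of what a
# tearDownClass counterpart could look like, assuming the same class-wide
# cls.load_gen_thrd_list and the default_cluster.finalize() helper used above
# (the real teardown in the suite may differ):
@classmethod
def tearDownClass(cls):
    # stop the load generators started in setUpClass
    for i in range(cls.max_load_generator):
        cls.load_gen_thrd_list[i].quit()
    for i in range(cls.max_load_generator):
        cls.load_gen_thrd_list[i].join()
    # release the test cluster
    default_cluster.finalize(cls.cluster)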
def test_migrate_all(self):
    util.print_frame()
    migration_count = 10

    # start load generator
    load_gen_thrd_list = {}
    util.log("start load_generator")
    for i in range(self.max_load_generator):
        ip, port = util.get_rand_gateway(self.cluster)
        load_gen_thrd_list[i] = load_generator.LoadGenerator(i, ip, port,
                                                             ops_limit=500)
        load_gen_thrd_list[i].start()
    time.sleep(5)  # generate load for 5 sec

    # start migration
    for i in range(migration_count):
        # pg0 -> pg1
        ret = util.migration(self.cluster, 0, 1, 4096, 8191, 40000)
        self.assertEqual(True, ret, 'Migration Fail')

        # pg0 <- pg1
        ret = util.migration(self.cluster, 1, 0, 4096, 8191, 40000)
        self.assertEqual(True, ret, 'Migration Fail')

        ok = True
        for j in range(len(load_gen_thrd_list)):
            if load_gen_thrd_list[j].isConsistent() == False:
                ok = False
                break
        if not ok:
            break

        time.sleep(5)  # generate load for 5 sec

    # check consistency of load_generator
    for i in range(len(load_gen_thrd_list)):
        load_gen_thrd_list[i].quit()
    for i in range(len(load_gen_thrd_list)):
        load_gen_thrd_list[i].join()
        self.assertTrue(load_gen_thrd_list[i].isConsistent(),
                        'Inconsistent after migration')
def put_some_data(self):
    # start load generator
    max_load_generator = 100
    load_gen_thrd_list = {}
    util.log('start load_generator')
    for i in range(max_load_generator):
        ip, port = util.get_rand_gateway(self.cluster)
        load_gen_thrd_list[i] = load_generator.LoadGenerator(i, ip, port)
        load_gen_thrd_list[i].start()

    time.sleep(10)  # generate some load
    util.log('end load_generator')

    # check consistency of load_generator
    for i in range(len(load_gen_thrd_list)):
        load_gen_thrd_list[i].quit()
    for i in range(len(load_gen_thrd_list)):
        load_gen_thrd_list[i].join()
        self.assertTrue(load_gen_thrd_list[i].isConsistent(),
                        'Data are inconsistent.')
    return 0
def test_migration_with_expire_command(self): util.print_frame() util.log("start load_generator") load_gen_thrd_list = {} for i in range(1): ip, port = util.get_rand_gateway(self.cluster) load_gen_thrd_list[i] = load_generator.LoadGenerator(i, ip, port) load_gen_thrd_list[i].start() time.sleep(5) # generate load for 5 sec tps = 20000 src_pg_id = 0 dst_pg_id = 1 leader_cm = self.cluster['servers'][0] src_master = util.get_server_by_role_and_pg(self.cluster['servers'], 'master', src_pg_id) dst_master = util.get_server_by_role_and_pg(self.cluster['servers'], 'master', dst_pg_id) smr = smr_mgmt.SMR(src_master['id']) ret = smr.connect(src_master['ip'], src_master['smr_mgmt_port']) if ret != 0: util.log('failed to connect to smr(source master)') return False src_redis = redis_mgmt.Redis(src_master['id']) ret = src_redis.connect(src_master['ip'], src_master['redis_port']) self.assertEquals(ret, 0, 'failed to connect to redis') dst_redis = redis_mgmt.Redis(dst_master['id']) ret = dst_redis.connect(dst_master['ip'], dst_master['redis_port']) self.assertEquals(ret, 0, 'failed to connect to redis') ts = time.time() self.setExpireKey(src_redis, 'beforeCheckpoint~beforeCheckpoint:expired', 10) self.setExpireKey(src_redis, 'beforeCheckpoint~beforeCheckpoint:persist', 20) self.setExpireS3Key(src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:expired', 10) self.setExpireS3Key(src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:persist', 20) self.setExpireS3Key(src_redis, 'S3:PermanentKey', 0) util.log(">>> sleep until 15 sec pass") self.assertFalse(time.time() - ts >= 15) time.sleep(15 - (time.time() - ts)) res = self.persistKey(src_redis, 'beforeCheckpoint~beforeCheckpoint:persist') self.assertEquals(res, ":1\r\n") res = self.persistKey(src_redis, 'beforeCheckpoint~beforeCheckpoint:expired') self.assertEquals(res, ":0\r\n") res = self.persistS3Key( src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:persist') self.assertEquals(res, ":1\r\n") res = self.persistS3Key( src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:expired') self.assertEquals(res, ":0\r\n") util.log(">>> migrate test with expire command start(%s), ts:%d" % (time.asctime(), ts)) ts = time.time() self.setExpireKey(src_redis, 'beforeCheckpoint~afterCheckpoint:expired', 10) self.setExpireKey(src_redis, 'beforeCheckpoint~afterCheckpoint:persist', 20) self.setExpireS3Key(src_redis, 'S3:beforeCheckpoint~afterCheckpoint:expired', 10) self.setExpireS3Key(src_redis, 'S3:beforeCheckpoint~afterCheckpoint:persist', 20) # notify dst_redis of migration start util.log(">>> notify dst_redis of migration start (%s)" % time.asctime()) cmd = 'migconf migstart %d-%d\r\n' % (0, 8191) dst_redis.write(cmd) res = dst_redis.read_until('\r\n') self.assertEquals(res, '+OK\r\n') # remote partial checkpoint util.log(">>> start remote checkpoint and load (%s)" % time.asctime()) cmd = "./cluster-util --getandplay %s %d %s %d %d-%d %d" % ( src_master['ip'], src_master['redis_port'], dst_master['ip'], dst_master['redis_port'], 0, 8191, tps) p = util.exec_proc_async(util.cluster_util_dir(src_master['id']), cmd, True, None, subprocess.PIPE, None) ret = p.wait() for line in p.stdout: if line.find("Checkpoint Sequence Number:") != -1: util.log("seqnumber : " + line[line.rfind(":") + 1:]) seq = int(line[line.rfind(":") + 1:]) util.log(">>>" + str(line.rstrip())) self.assertEqual(0, ret) util.log(">>> end remote checkpoint and load (%s)" % time.asctime()) util.log(">>> sleep until 15 sec pass") self.assertFalse(time.time() - ts >= 15) time.sleep(15 - (time.time() - ts)) res = 
self.persistKey(src_redis, 'beforeCheckpoint~afterCheckpoint:persist') self.assertEquals(res, ":1\r\n") res = self.persistKey(src_redis, 'beforeCheckpoint~afterCheckpoint:expired') self.assertEquals(res, ":0\r\n") res = self.persistS3Key(src_redis, 'S3:beforeCheckpoint~afterCheckpoint:persist') self.assertEquals(res, ":1\r\n") res = self.persistS3Key(src_redis, 'S3:beforeCheckpoint~afterCheckpoint:expired') self.assertEquals(res, ":0\r\n") # bgsave for testing later about recovery during migration util.log( ">>> bgsave for testing later about recovery during migration (%s)" % time.asctime()) cmd = 'bgsave\r\n' dst_redis.write(cmd) res = dst_redis.read_until('\r\n') self.assertEquals(res, '+Background saving started\r\n') ts = time.time() self.setExpireKey(src_redis, 'afterCheckpoint~afterCheckpoint:expired', 10) self.setExpireKey(src_redis, 'afterCheckpoint~afterCheckpoint:persist', 20) self.setExpireS3Key(src_redis, 'S3:afterCheckpoint~afterCheckpoint:expired', 10) self.setExpireS3Key(src_redis, 'S3:afterCheckpoint~afterCheckpoint:persist', 20) util.log(">>> sleep until 15 sec pass") self.assertFalse(time.time() - ts >= 15) time.sleep(15 - (time.time() - ts)) res = self.persistKey(src_redis, 'afterCheckpoint~afterCheckpoint:persist') self.assertEquals(res, ":1\r\n") res = self.persistKey(src_redis, 'afterCheckpoint~afterCheckpoint:expired') self.assertEquals(res, ":0\r\n") res = self.persistS3Key(src_redis, 'S3:afterCheckpoint~afterCheckpoint:persist') self.assertEquals(res, ":1\r\n") res = self.persistS3Key(src_redis, 'S3:afterCheckpoint~afterCheckpoint:expired') self.assertEquals(res, ":0\r\n") ts = time.time() self.setExpireKey(src_redis, 'afterCheckpoint~duringCatchup:expired', 10) self.setExpireKey(src_redis, 'afterCheckpoint~duringCatchup:persist', 100) self.setExpireS3Key(src_redis, 'S3:afterCheckpoint~duringCatchup:expired', 10) self.setExpireS3Key(src_redis, 'S3:afterCheckpoint~duringCatchup:persist', 100) # remote catchup (smr log migration) util.log(">>> start remote catchup (%s)" % time.asctime()) dst_host = dst_master['ip'] dst_smr_port = dst_master['smr_base_port'] rle = '1 8192' num_part = 8192 smr.write('migrate start %s %d %d %d %d %s\r\n' % (dst_host, dst_smr_port, seq, tps, num_part, rle)) response = smr.read_until('\r\n') if response[:3] != '+OK': util.log('failed to execute migrate start command, response:%s' % response) return False while True: smr.write('migrate info\r\n') response = smr.read_until('\r\n') seqs = response.split() logseq = int(seqs[1].split(':')[1]) mig = int(seqs[2].split(':')[1]) util.log('migrate info: %s' % response) if (logseq - mig < 500000): util.log('Remote catchup almost done. 
try mig2pc') break time.sleep(1) util.log(">>> sleep until 90 sec pass") self.assertFalse(time.time() - ts >= 90) time.sleep(90 - (time.time() - ts)) res = self.persistKey(src_redis, 'afterCheckpoint~duringCatchup:persist') self.assertEquals(res, ":1\r\n") res = self.persistKey(src_redis, 'afterCheckpoint~duringCatchup:expired') self.assertEquals(res, ":0\r\n") res = self.persistS3Key(src_redis, 'S3:afterCheckpoint~duringCatchup:persist') self.assertEquals(res, ":1\r\n") res = self.persistS3Key(src_redis, 'S3:afterCheckpoint~duringCatchup:expired') self.assertEquals(res, ":0\r\n") ts = time.time() self.setExpireKey(src_redis, 'duringCatchup~duringCatchup:expired', 10) self.setExpireKey(src_redis, 'duringCatchup~duringCatchup:persist', 20) self.setExpireS3Key(src_redis, 'S3:duringCatchup~duringCatchup:expired', 10) self.setExpireS3Key(src_redis, 'S3:duringCatchup~duringCatchup:persist', 20) util.log(">>> sleep until 15 sec pass") self.assertFalse(time.time() - ts >= 15) time.sleep(15 - (time.time() - ts)) res = self.persistKey(src_redis, 'duringCatchup~duringCatchup:persist') self.assertEquals(res, ":1\r\n") res = self.persistKey(src_redis, 'duringCatchup~duringCatchup:expired') self.assertEquals(res, ":0\r\n") res = self.persistS3Key(src_redis, 'S3:duringCatchup~duringCatchup:persist') self.assertEquals(res, ":1\r\n") res = self.persistS3Key(src_redis, 'S3:duringCatchup~duringCatchup:expired') self.assertEquals(res, ":0\r\n") ts = time.time() self.setExpireKey(src_redis, 'duringCatchup~afterMig2pc:expired', 10) self.setExpireKey(src_redis, 'duringCatchup~afterMig2pc:persist', 20) self.setExpireS3Key(src_redis, 'S3:duringCatchup~afterMig2pc:expired', 10) self.setExpireS3Key(src_redis, 'S3:duringCatchup~afterMig2pc:persist', 20) util.log(">>> remote catchup phase almost done (%s)" % time.asctime()) # mig2pc util.log(">>> start mig2pc (%s)" % time.asctime()) cmd = 'mig2pc %s %d %d %d %d' % (self.cluster['cluster_name'], src_pg_id, dst_pg_id, 0, 8191) result = util.cm_command(leader_cm['ip'], leader_cm['cm_port'], cmd) util.log('mig2pc result : ' + result) if not result.startswith('{"state":"success","msg":"+OK"}\r\n'): util.log('failed to execute mig2pc command, result:%s' % result) return False util.log(">>> sleep until 15 sec pass") self.assertFalse(time.time() - ts >= 15) time.sleep(15 - (time.time() - ts)) res = self.persistKey(dst_redis, 'duringCatchup~afterMig2pc:persist') self.assertEquals(res, ":1\r\n") res = self.persistKey(dst_redis, 'duringCatchup~afterMig2pc:expired') self.assertEquals(res, ":0\r\n") res = self.persistS3Key(dst_redis, 'S3:duringCatchup~afterMig2pc:persist') self.assertEquals(res, ":1\r\n") res = self.persistS3Key(dst_redis, 'S3:duringCatchup~afterMig2pc:expired') self.assertEquals(res, ":0\r\n") ts = time.time() self.setExpireKey(dst_redis, 'afterMig2pc~migrateEnd:expired', 10) self.setExpireKey(dst_redis, 'afterMig2pc~migrateEnd:persist', 20) self.setExpireS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:expired', 10) self.setExpireS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:persist', 20) # finish migration smr.write('migrate interrupt\r\n') response = smr.read_until('\r\n') util.log('migrate interrupt: %s' % response) smr.disconnect() # notify dst_redis of migration end util.log(">>> notify dst_redis of migration end (%s)" % time.asctime()) cmd = 'migconf migend\r\n' dst_redis.write(cmd) res = dst_redis.read_until('\r\n') self.assertEquals(res, '+OK\r\n') cmd = 'migconf clearstart %d-%d\r\n' % (0, 8191) src_redis.write(cmd) res = src_redis.read_until('\r\n') 
self.assertEquals(res, '+OK\r\n') util.log(">>> sleep until 15 sec pass") self.assertFalse(time.time() - ts >= 15) time.sleep(15 - (time.time() - ts)) res = self.persistKey(dst_redis, 'afterMig2pc~migrateEnd:persist') self.assertEquals(res, ":1\r\n") res = self.persistKey(dst_redis, 'afterMig2pc~migrateEnd:expired') self.assertEquals(res, ":0\r\n") res = self.persistS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:persist') self.assertEquals(res, ":1\r\n") res = self.persistS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:expired') self.assertEquals(res, ":0\r\n") ts = time.time() util.log(">>> sleep until 15 sec pass") self.assertFalse(time.time() - ts >= 15) time.sleep(15 - (time.time() - ts)) self.assertTrue( self.isExist(dst_redis, 'beforeCheckpoint~beforeCheckpoint:persist')) self.assertFalse( self.isExist(dst_redis, 'beforeCheckpoint~beforeCheckpoint:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~beforeCheckpoint:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~beforeCheckpoint:expired')) self.assertTrue( self.isExist(dst_redis, 'beforeCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isExist(dst_redis, 'beforeCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:afterCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:afterCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:persist')) self.assertFalse( self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:afterCheckpoint~duringCatchup:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:afterCheckpoint~duringCatchup:expired')) self.assertTrue( self.isExist(dst_redis, 'duringCatchup~duringCatchup:persist')) self.assertFalse( self.isExist(dst_redis, 'duringCatchup~duringCatchup:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:duringCatchup~duringCatchup:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:duringCatchup~duringCatchup:expired')) self.assertTrue( self.isExist(dst_redis, 'duringCatchup~afterMig2pc:persist')) self.assertFalse( self.isExist(dst_redis, 'duringCatchup~afterMig2pc:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:expired')) self.assertTrue( self.isExist(dst_redis, 'afterMig2pc~migrateEnd:persist')) self.assertFalse( self.isExist(dst_redis, 'afterMig2pc~migrateEnd:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:expired')) # remote partial checkpoint util.log(">>> start rangedel (%s)" % time.asctime()) cmd = "./cluster-util --rangedel %s %d %d-%d %d" % ( src_master['ip'], src_master['redis_port'], 0, 8191, tps) p = util.exec_proc_async(util.cluster_util_dir(src_master['id']), cmd, True, None, subprocess.PIPE, None) ret = p.wait() for line in p.stdout: util.log(">>>" + str(line.rstrip())) cmd = 'migconf clearend\r\n' src_redis.write(cmd) res = src_redis.read_until('\r\n') 
self.assertEqual(res, '+OK\r\n') time.sleep(5) # generate load for 5 sec # check consistency of load_generator for i in range(len(load_gen_thrd_list)): load_gen_thrd_list[i].quit() for i in range(len(load_gen_thrd_list)): load_gen_thrd_list[i].join() self.assertTrue(load_gen_thrd_list[i].isConsistent(), 'Inconsistent after migration') # kill dst_redis and recover from bgsave util.log(">>> kill dst_redis and recover from bgsave (%s)" % time.asctime()) dst_redis.disconnect() ret = testbase.request_to_shutdown_redis(dst_master) self.assertEquals(ret, 0, 'failed to shutdown redis') ret = testbase.request_to_shutdown_smr(dst_master) self.assertEquals(ret, 0, 'failed to shutdown smr') time.sleep(5) testbase.request_to_start_smr(dst_master) self.assertEqual(ret, 0, 'failed to start smr, server:%d' % dst_master['id']) ret = testbase.request_to_start_redis(dst_master) self.assertEqual(ret, 0, 'failed to start redis, server:%d' % dst_master['id']) ret = testbase.wait_until_finished_to_set_up_role(dst_master) self.assertEquals( ret, 0, 'failed to role change. server:%d' % (dst_master['id'])) dst_redis = redis_mgmt.Redis(dst_master['id']) ret = dst_redis.connect(dst_master['ip'], dst_master['redis_port']) self.assertEquals(ret, 0, 'failed to connect to redis') self.assertTrue( self.isExist(dst_redis, 'beforeCheckpoint~beforeCheckpoint:persist')) self.assertFalse( self.isExist(dst_redis, 'beforeCheckpoint~beforeCheckpoint:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~beforeCheckpoint:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~beforeCheckpoint:expired')) self.assertTrue( self.isExist(dst_redis, 'beforeCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isExist(dst_redis, 'beforeCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:afterCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:afterCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:persist')) self.assertFalse( self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:afterCheckpoint~duringCatchup:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:afterCheckpoint~duringCatchup:expired')) self.assertTrue( self.isExist(dst_redis, 'duringCatchup~duringCatchup:persist')) self.assertFalse( self.isExist(dst_redis, 'duringCatchup~duringCatchup:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:duringCatchup~duringCatchup:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:duringCatchup~duringCatchup:expired')) self.assertTrue( self.isExist(dst_redis, 'duringCatchup~afterMig2pc:persist')) self.assertFalse( self.isExist(dst_redis, 'duringCatchup~afterMig2pc:expired')) self.assertTrue( self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:expired')) self.assertTrue( self.isExist(dst_redis, 'afterMig2pc~migrateEnd:persist')) self.assertFalse( self.isExist(dst_redis, 'afterMig2pc~migrateEnd:expired')) self.assertTrue( self.isS3Exist(dst_redis, 
'S3:afterMig2pc~migrateEnd:persist')) self.assertFalse( self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:expired')) self.getS3TTL(dst_redis, 'S3:PermanentKey') # kill dst_slave redis and recover without dump file util.log(">>> kill dst_redis and recover without dump file (%s)" % time.asctime()) dst_slave = util.get_server_by_role_and_pg(self.cluster['servers'], 'slave', dst_pg_id) ret = testbase.request_to_shutdown_redis(dst_slave) self.assertEquals(ret, 0, 'failed to shutdown redis') ret = testbase.request_to_shutdown_smr(dst_slave) self.assertEquals(ret, 0, 'failed to shutdown smr') time.sleep(5) testbase.request_to_start_smr(dst_slave) self.assertEqual(ret, 0, 'failed to start smr, server:%d' % dst_slave['id']) ret = testbase.request_to_start_redis(dst_slave) self.assertEqual(ret, 0, 'failed to start redis, server:%d' % dst_slave['id']) ret = testbase.wait_until_finished_to_set_up_role(dst_slave) self.assertEquals( ret, 0, 'failed to role change. server:%d' % (dst_slave['id'])) dst_redis_slave = redis_mgmt.Redis(dst_slave['id']) ret = dst_redis_slave.connect(dst_slave['ip'], dst_slave['redis_port']) self.assertEquals(ret, 0, 'failed to connect to redis') self.assertTrue( self.isExist(dst_redis_slave, 'beforeCheckpoint~beforeCheckpoint:persist')) self.assertFalse( self.isExist(dst_redis_slave, 'beforeCheckpoint~beforeCheckpoint:expired')) self.assertTrue( self.isS3Exist(dst_redis_slave, 'S3:beforeCheckpoint~beforeCheckpoint:persist')) self.assertFalse( self.isS3Exist(dst_redis_slave, 'S3:beforeCheckpoint~beforeCheckpoint:expired')) self.assertTrue( self.isExist(dst_redis_slave, 'beforeCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isExist(dst_redis_slave, 'beforeCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isS3Exist(dst_redis_slave, 'S3:beforeCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isS3Exist(dst_redis_slave, 'S3:beforeCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isExist(dst_redis_slave, 'afterCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isExist(dst_redis_slave, 'afterCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isS3Exist(dst_redis_slave, 'S3:afterCheckpoint~afterCheckpoint:persist')) self.assertFalse( self.isS3Exist(dst_redis_slave, 'S3:afterCheckpoint~afterCheckpoint:expired')) self.assertTrue( self.isExist(dst_redis_slave, 'afterCheckpoint~duringCatchup:persist')) self.assertFalse( self.isExist(dst_redis_slave, 'afterCheckpoint~duringCatchup:expired')) self.assertTrue( self.isS3Exist(dst_redis_slave, 'S3:afterCheckpoint~duringCatchup:persist')) self.assertFalse( self.isS3Exist(dst_redis_slave, 'S3:afterCheckpoint~duringCatchup:expired')) self.assertTrue( self.isExist(dst_redis_slave, 'duringCatchup~duringCatchup:persist')) self.assertFalse( self.isExist(dst_redis_slave, 'duringCatchup~duringCatchup:expired')) self.assertTrue( self.isS3Exist(dst_redis_slave, 'S3:duringCatchup~duringCatchup:persist')) self.assertFalse( self.isS3Exist(dst_redis_slave, 'S3:duringCatchup~duringCatchup:expired')) self.assertTrue( self.isExist(dst_redis_slave, 'duringCatchup~afterMig2pc:persist')) self.assertFalse( self.isExist(dst_redis_slave, 'duringCatchup~afterMig2pc:expired')) self.assertTrue( self.isS3Exist(dst_redis_slave, 'S3:duringCatchup~afterMig2pc:persist')) self.assertFalse( self.isS3Exist(dst_redis_slave, 'S3:duringCatchup~afterMig2pc:expired')) self.assertTrue( self.isExist(dst_redis_slave, 'afterMig2pc~migrateEnd:persist')) self.assertFalse( self.isExist(dst_redis_slave, 
'afterMig2pc~migrateEnd:expired')) self.assertTrue( self.isS3Exist(dst_redis_slave, 'S3:afterMig2pc~migrateEnd:persist')) self.assertFalse( self.isS3Exist(dst_redis_slave, 'S3:afterMig2pc~migrateEnd:expired')) self.getS3TTL(dst_redis_slave, 'S3:PermanentKey') # Go back to initial configuration self.assertTrue( util.migration(self.cluster, dst_pg_id, src_pg_id, 0, 8191, 40000), 'failed to rollback migration')
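# Helper methods such as persistKey() and isExist() are used throughout the expire
# migration test above but are not shown in this section. A minimal sketch, assuming
# they simply send raw commands over the redis_mgmt.Redis connection and return the
# single-line RESP reply (which is why the test compares against ":1\r\n"/":0\r\n");
# the real implementations in the suite may differ:
def persistKey(self, redis, key):
    # returns the raw reply, e.g. ":1\r\n" if a TTL was removed, ":0\r\n" otherwise
    redis.write('persist %s\r\n' % key)
    return redis.read_until('\r\n')

def isExist(self, redis, key):
    # True if the key exists on this redis instance
    redis.write('exists %s\r\n' % key)
    return redis.read_until('\r\n') == ':1\r\n'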
def test_5_transfer_pgs_to_another_machine(self): util.print_frame() self.load_gen_list = {} # get gateway info ip, port = util.get_rand_gateway(self.cluster) gw = gateway_mgmt.Gateway(self.cluster['servers'][0]['id']) ret = gw.connect(ip, port) self.assertEqual(ret, 0, 'failed to connect to gateway, %s:%d' % (ip, port)) # incrase master generation number util.log('failover in order to increase master generation number.') max = 0 for i in range(5): key_base = 'key' for i in range(max, max + 10000): cmd = 'set %s%d %d\r\n' % (key_base, i, i) gw.write(cmd) res = gw.read_until('\r\n') self.assertEquals(res, '+OK\r\n') max = max + 10000 m = util.get_server_by_role(self.cluster['servers'], 'master') util.log('failover pgs%d' % m['id']) ret = util.failover(m, self.leader_cm) self.assertTrue(ret, 'failed to failover pgs%d' % m['id']) # start load generator util.log("start load_generator") for i in range(self.max_load_generator): ip, port = util.get_rand_gateway(self.cluster) self.load_gen_list[i] = load_generator.LoadGenerator(i, ip, port) self.load_gen_list[i].start() time.sleep(5) # generate load for 5 sec util.log("started load_generator") m, s1, s2 = util.get_mss(self.cluster) servers = [m, s1, s2] # bgsave for s in servers: ret = util.bgsave(s) self.assertTrue(ret, 'failed to bgsave. pgs%d' % s['id']) new_servers = [config.server4, config.server5] # add new slaves for s in new_servers: util.log('delete pgs%d`s check point.' % s['id']) util.del_dumprdb(s['id']) ret = util.cluster_util_getdump(s['id'], m['ip'], m['redis_port'], 'dump.rdb', 0, 8191) self.assertEqual( True, ret, 'failed : util.cluster_util_getdump returns false, src=%s:%d dest_pgsid=%d' % (m['ip'], m['redis_port'], s['id'])) ret = util.install_pgs(self.cluster, s, self.leader_cm, 0, rm_ckpt=False) self.assertEqual( True, ret, 'failed : util.pgs_add returns false, pgsid=%d' % s['id']) util.log('succeeeded : add a new slave, pgsid=%d' % s['id']) # check consistency ok = True for j in range(self.max_load_generator): if self.load_gen_list[j].isConsistent() == False: ok = False break if not ok: break for server_to_del in servers: for s in servers: util.pingpong(s['ip'], s['smr_mgmt_port']) for s in new_servers: util.pingpong(s['ip'], s['smr_mgmt_port']) self.__del_server(server_to_del) util.log('succeeded : delete pgs%d' % server_to_del['id']) new_m = util.get_server_by_role(new_servers, 'master') new_s = util.get_server_by_role(new_servers, 'slave') self.assertNotEqual(new_m, None, 'master is None.') self.assertNotEqual(new_s, None, 'slave is None.') for s in new_servers: util.pingpong(s['ip'], s['smr_mgmt_port']) time.sleep(5) # generate load for 5 sec # check consistency of load_generator for i in range(self.max_load_generator): self.load_gen_list[i].quit() for i in range(self.max_load_generator): self.load_gen_list[i].join() self.assertTrue(self.load_gen_list[i].isConsistent(), 'Inconsistent after migration') self.load_gen_list.pop(i, None) # Go back to initial configuration # recover pgs for s in servers: self.assertTrue( util.install_pgs(self.cluster, s, self.leader_cm, rm_ckpt=False), 'failed to recover pgs. (install_pgs)') # cleanup new slaves for s in new_servers: self.assertTrue( util.uninstall_pgs(self.cluster, s, self.leader_cm), 'failed to cleanup pgs. (uninstall_pgs)')
def test_4_role_change_with_failover(self): util.print_frame() loop_cnt = 0 while loop_cnt < 5: util.log('') util.log('Loop:%d' % loop_cnt) util.log("States (before role change)") util.log_server_state(self.cluster) target = random.choice(self.cluster['servers']) # bgsave ret = util.bgsave(target) self.assertTrue(ret, 'failed to bgsave. pgs:%d' % target['id']) # shutdown util.log('shutdown pgs%d(%s:%d)' % (target['id'], target['ip'], target['smr_base_port'])) ret = testbase.request_to_shutdown_smr(target) self.assertEqual(ret, 0, 'failed to shutdown smr') ret = testbase.request_to_shutdown_redis(target) self.assertEquals(ret, 0, 'failed to shutdown redis') r = '' expected = 'N' for fc_cnt in xrange(20): r = util.get_smr_role_of_cm(target, self.leader_cm) if r == expected: break time.sleep(0.5) self.assertEquals(r, expected, 'failure detection error.') running_servers = [] for s in self.cluster['servers']: if s != target: running_servers.append(s) # Get old timestamp old_timestamps = {} for s in running_servers: ts = util.get_timestamp_of_pgs(s) old_timestamps[s['id']] = ts # Start load generator self.load_gen_list = {} util.log('start load generator') for i in range(self.max_load_generator): ip, port = util.get_rand_gateway(self.cluster) load_gen = load_generator.LoadGenerator(i, ip, port) load_gen.start() self.load_gen_list[i] = load_gen m, s1, s2 = util.get_mss(self.cluster) self.assertNotEqual(m, None, 'master is None.') self.assertNotEqual(s1, None, 'slave1 is None.') # Role change master_id = util.role_change(self.leader_cm, self.cluster['cluster_name'], s1['id']) self.assertNotEqual(master_id, -1, 'role_change failed') util.log("States (after role change)") util.log_server_state(self.cluster) # Check - get new timestamp new_timestamps = {} for s in running_servers: ts = util.get_timestamp_of_pgs(s) new_timestamps[s['id']] = ts # Check - compare old timestamps and new timestamps for s in running_servers: old_ts = old_timestamps[s['id']] new_ts = new_timestamps[s['id']] self.assertNotEqual( old_ts, new_ts, 'Timestamp of a running server has not changed. %d->%d' % (old_ts, new_ts)) # Check quorum m = self.cluster['servers'][master_id] expected = 1 ok = self.__check_quorum(m, expected) self.assertTrue( ok, 'unexpected quorum(after role change). expected:%s' % (expected)) # recovery util.log('recovery pgs%d(%s:%d)' % (target['id'], target['ip'], target['smr_base_port'])) ret = testbase.request_to_start_smr(target) self.assertEqual(ret, 0, 'failed to start smr') util.log('start smr-replicator done') ret = testbase.request_to_start_redis(target, 60) self.assertEqual(ret, 0, 'failed to start redis') util.log('start redis-arc done') ret = testbase.wait_until_finished_to_set_up_role(target, max_try=300) self.assertEquals( ret, 0, 'failed to role change. smr_id:%d' % (target['id'])) util.log("States (after recovery)") util.log_server_state(self.cluster) # Check cluster state normal_state = False for i in xrange(20): normal_state = util.check_cluster(self.cluster['cluster_name'], self.leader_cm['ip'], self.leader_cm['cm_port'], check_quorum=True) if normal_state: break time.sleep(0.5) self.assertTrue(normal_state, "Unstable cluster state") # Check quorum expected = 2 ok = self.__check_quorum(m, expected) self.assertTrue( ok, 'unexpected quorum(after recovery). 
expected:%s' % (expected)) # Check Consistency util.log('stop load generator') for i in range(self.max_load_generator): self.load_gen_list[i].quit() for i in range(self.max_load_generator): self.load_gen_list[i].join() self.assertTrue(self.load_gen_list[i].isConsistent(), 'Inconsistent after migration') self.load_gen_list.pop(i, None) loop_cnt += 1 return 0
def role_change_with_hanging_pgs(self, hanging_servers, running_servers, target_id, master): util.log('hanging_servers:%s' % hanging_servers) util.log('running_servers:%s' % running_servers) util.log('target_id:%s' % target_id) # Initial data util.put_some_data(self.cluster, 3, 10) util.log("States (before role change)") util.log_server_state(self.cluster) # Get old timestamp old_timestamps = {} for s in self.cluster['servers']: ts = util.get_timestamp_of_pgs(s) old_timestamps[s['id']] = ts # hang for s in hanging_servers: smr = smr_mgmt.SMR(s['id']) ret = smr.connect(s['ip'], s['smr_mgmt_port']) self.assertEqual( ret, 0, 'failed to connect to master. %s:%d' % (s['ip'], s['smr_mgmt_port'])) util.log("PGS '%d' hang" % s['id']) smr.write('fi delay sleep 1 13000\r\n') reply = smr.read_until('\r\n', 1) if reply != None and reply.find('-ERR not supported') != -1: self.assertEqual( 0, 1, 'make sure that smr has compiled with gcov option.') smr.disconnect() # Role change master_id = util.role_change(self.leader_cm, self.cluster['cluster_name'], target_id) self.assertEqual(master_id, -1, 'We expected that role_change failed, but success') # Check rollback - check quorum if master not in hanging_servers: expected = 2 ok = self.__check_quorum(master, expected) self.assertTrue(ok, 'rollback quorum fail. expected:%s' % (expected)) # Check rollback - get new timestamp new_timestamps_in_hang = {} for s in running_servers: ts = util.get_timestamp_of_pgs(s) new_timestamps_in_hang[s['id']] = ts # Check rollback - compare old timestamps and new timestamps for s in running_servers: old_ts = old_timestamps[s['id']] new_ts = new_timestamps_in_hang[s['id']] self.assertEqual( old_ts, new_ts, 'Timestamp of a running server has changed. %d->%d' % (old_ts, new_ts)) time.sleep(16) util.log("States (after role change)") util.log_server_state(self.cluster) self.load_gen_list = {} # Start load generator for i in range(self.max_load_generator): ip, port = util.get_rand_gateway(self.cluster) load_gen = load_generator.LoadGenerator(i, ip, port) load_gen.start() self.load_gen_list[i] = load_gen # Check quorum if master in hanging_servers: m, s1, s2 = util.get_mss(self.cluster) self.assertNotEqual(m, None, 'master is None.') self.assertNotEqual(s1, None, 'slave1 is None.') self.assertNotEqual(s2, None, 'slave2 is None.') expected = 2 ok = self.__check_quorum(m, expected) self.assertTrue(ok, 'rollback quorum fail. expected:%s' % (expected)) # Check cluster state normal_state = False for i in xrange(20): normal_state = util.check_cluster(self.cluster['cluster_name'], self.leader_cm['ip'], self.leader_cm['cm_port'], check_quorum=True) if normal_state: break time.sleep(0.5) self.assertTrue(normal_state, "Unstable cluster state") # Cheeck Consistency for i in range(self.max_load_generator): self.load_gen_list[i].quit() for i in range(self.max_load_generator): self.load_gen_list[i].join() self.assertTrue(self.load_gen_list[i].isConsistent(), 'Inconsistent after migration') self.load_gen_list.pop(i, None)
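# __check_quorum() is referenced by the role-change tests above but not defined in
# this section. A minimal sketch, assuming it simply polls util.get_quorum() (the
# helper used directly by other tests below) until the expected value appears; the
# actual implementation in the suite may differ:
def __check_quorum(self, master, expected):
    for i in xrange(10):
        quorum = util.get_quorum(master)
        if quorum == expected:
            return True
        time.sleep(1)
    return False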
def test_1_role_change(self): util.print_frame() self.load_gen_list = {} # Start load generator util.log("Start load_generator") for i in range(self.max_load_generator): ip, port = util.get_rand_gateway(self.cluster) load_gen = load_generator.LoadGenerator(i, ip, port) load_gen.start() self.load_gen_list[i] = load_gen # Loop (smr: 3 copy) for i in range(30): target_server = util.get_server_by_role(self.cluster['servers'], 'slave') self.assertNotEquals(target_server, None, 'Get slave fail.') target = target_server['id'] print '' util.log("(3 copy) Loop:%d, target pgs:%d" % (i, target)) # Get old timestamp util.log_server_state(self.cluster) old_timestamp_list = [] for s in self.cluster['servers']: ts = util.get_timestamp_of_pgs(s) old_timestamp_list.append(ts) # Role change master = util.role_change(self.leader_cm, self.cluster['cluster_name'], target) self.assertNotEqual(master, -1, 'role_change error.') while target == master: target = (target + 1) % 3 util.log('Change role success.') # Wait until role change finished for s in self.cluster['servers']: max_try_cnt = 20 ok = False for try_cnt in range(max_try_cnt): try: pong = util.pingpong(s['ip'], s['redis_port']) if pong != None and pong == '+PONG\r\n': ok = True break except: pass time.sleep(0.2) self.assertTrue(ok, 'redis state error.') # Get new timestamp util.log_server_state(self.cluster) new_timestamp_list = [] for s in self.cluster['servers']: ts = util.get_timestamp_of_pgs(s) new_timestamp_list.append(ts) # Compare old timestamps and new timestamps for i in range(3): self.assertNotEqual( old_timestamp_list[i], new_timestamp_list[i], 'Timestamp is not changed. %d->%d' % (old_timestamp_list[i], new_timestamp_list[i])) # Cheeck Consistency for load_gen_id, load_gen in self.load_gen_list.items(): self.assertTrue(load_gen.isConsistent(), 'Data inconsistency after role_change') # Loop (smr: 2 copy) self.__del_server(self.cluster['servers'][0]) servers = [self.cluster['servers'][1], self.cluster['servers'][2]] normal_state = False for i in xrange(20): normal_state = util.check_cluster(self.cluster['cluster_name'], self.leader_cm['ip'], self.leader_cm['cm_port'], check_quorum=True) if normal_state: break time.sleep(0.5) self.assertTrue(normal_state, "Unstable cluster state") for i in range(30): print '' util.log("(2 copy) Loop:%d, target pgs:%d" % (i, target)) s = util.get_server_by_role(servers, 'slave') target = s['id'] # Get old timestamp util.log_server_state(self.cluster) old_timestamp_list = [] for s in servers: ts = util.get_timestamp_of_pgs(s) old_timestamp_list.append(ts) # Role change master = util.role_change(self.leader_cm, self.cluster['cluster_name'], target) self.assertNotEqual(master, -1, 'role_change error.') while target == master: target = (target) % 2 + 1 util.log('Change role success.') # Wait until role change finished for s in servers: max_try_cnt = 20 ok = False for try_cnt in range(max_try_cnt): pong = util.pingpong(s['ip'], s['redis_port']) if pong != None and pong == '+PONG\r\n': ok = True break time.sleep(0.1) self.assertTrue(ok, 'redis state error.') # Get new timestamp util.log_server_state(self.cluster) new_timestamp_list = [] for s in servers: ts = util.get_timestamp_of_pgs(s) new_timestamp_list.append(ts) # Compare old timestamps and new timestamps for i in range(2): self.assertNotEqual( old_timestamp_list[i], new_timestamp_list[i], 'Timestamp is not changed. 
%d->%d' % (old_timestamp_list[i], new_timestamp_list[i])) # Check Consistency for load_gen_id, load_gen in self.load_gen_list.items(): self.assertTrue(load_gen.isConsistent(), 'Data inconsistency after role_change') # Go back to initial configuration self.assertTrue( util.install_pgs(self.cluster, self.cluster['servers'][0], self.leader_cm, rm_ckpt=False), 'failed to recover pgs.')
def test_scaleout(self): util.print_frame() # start load generator util.log("start load_generator") for i in range(self.max_load_generator): ip, port = util.get_rand_gateway(self.cluster) self.load_gen_thrd_list[i] = load_generator.LoadGenerator( i, ip, port) self.load_gen_thrd_list[i].start() time.sleep(5) # generate load for 5 sec util.log("started load_generator") # servers for scale out servers = [config.server4, config.server5, config.server6] leader_cm = self.cluster['servers'][0] # start migration migration_count = 5 for i in range(migration_count): # Scale out cluster = config.clusters[0] ret = util.pg_add(cluster, servers, leader_cm) self.assertEqual(True, ret, 'Scale out fail. util.pg_add returns false') time.sleep(5) # pg0 -> pg1 cluster = config.clusters[1] ret = util.migration(cluster, 0, 1, 4096, 8191, 40000) self.assertEqual(True, ret, 'Migration Fail 0 -> 1') # pg0 <- pg1 cluster = config.clusters[1] ret = util.migration(cluster, 1, 0, 4096, 8191, 40000) self.assertEqual(True, ret, 'Migration Fail 1 <- 0') # Scale in #TODO Temporary #cluster = config.clusters[0] #for server in cluster['servers']: # if testbase.request_to_shutdown_hbc(server) is not 0: # util.log('scale in : failed to request to shutdown hbc') # self.assertFalse('scale in : failed to request to shutdown hbc') #time.sleep(5) ############### cluster = config.clusters[1] ret = util.pg_del(cluster, servers, leader_cm) self.assertEqual(True, ret, 'Scale in fail. util.pg_del returns false') #TODO Temporary #cluster = config.clusters[0] #for server in cluster['servers']: # if testbase.request_to_start_heartbeat_checker( server ) is not 0: # util.log('scale in : failed to start hbc') # self.assertFalse('scale in : failed to start hbc') #time.sleep(5) ############### # check consistency ok = True for j in range(len(self.load_gen_thrd_list)): if self.load_gen_thrd_list[j].isConsistent() == False: ok = False break if not ok: break time.sleep(5) # generate load for 5 sec # check consistency of load_generator for i in range(len(self.load_gen_thrd_list)): self.load_gen_thrd_list[i].quit() for i in range(len(self.load_gen_thrd_list)): self.load_gen_thrd_list[i].join() self.assertTrue(self.load_gen_thrd_list[i].isConsistent(), 'Inconsistent after migration')
def test_delete_smrlog_after_scaleout(self):
    util.print_frame()

    # start load generator
    util.log("start load_generator")
    for i in range(self.max_load_generator):
        ip, port = util.get_rand_gateway(self.cluster)
        self.load_gen_thrd_list[i] = load_generator.LoadGenerator(i, ip, port)
        self.load_gen_thrd_list[i].start()
    time.sleep(5)  # generate load for 5 sec
    util.log("started load_generator")

    # servers for scale out
    servers = [config.server4, config.server5, config.server6]
    leader_cm = self.cluster['servers'][0]

    # Scale out
    cluster = config.clusters[0]
    ret = util.pg_add(cluster, servers, leader_cm)
    self.assertEqual(True, ret, 'Scale out fail. util.pg_add returns false')
    time.sleep(5)

    # pg0 -> pg1
    cluster = config.clusters[1]
    ret = util.migration(cluster, 0, 1, 8000, 8191, 40000)
    self.assertEqual(True, ret, 'Migration Fail 0 -> 1')

    # get log files
    old_logs = {}
    for s in config.clusters[0]['servers']:
        parent_dir, log_dir = util.smr_log_dir(s['id'])
        path = '%s/%s' % (parent_dir, log_dir)
        old_logs[s['id']] = util.ls(path)

    # bgsave so that smrlogs can be deleted.
    for s in config.clusters[0]['servers']:
        bgsave_ret = util.bgsave(s)
        self.assertTrue(bgsave_ret, 'failed to bgsave. pgs%d' % s['id'])
        util.log('bgsave pgs%d is done.' % s['id'])

    # check consistency
    ok = True
    for j in range(len(self.load_gen_thrd_list)):
        self.assertTrue(self.load_gen_thrd_list[j].isConsistent(),
                        'Inconsistent after migration')

    # does smr-replicator delete smrlogs?
    i = 0
    while i < 20:
        i += 1
        # get current log files
        cur_logs = {}
        for s in config.clusters[0]['servers']:
            parent_dir, log_dir = util.smr_log_dir(s['id'])
            path = '%s/%s' % (parent_dir, log_dir)
            cur_logs[s['id']] = util.ls(path)

        # compare old and new
        temp_old_logs = copy.deepcopy(old_logs)
        for id, nl in cur_logs.items():
            ol = temp_old_logs.get(id)
            self.assertNotEqual(
                ol, None,
                "failed to check logfiles. old logs for smr-replicator '%d' do not exist." % id)
            for log in nl:
                if log in ol:
                    ol.remove(log)

        ok = True
        for id, ol in temp_old_logs.items():
            if len(ol) == 0:
                ok = False

        util.log('Loop %d ---------------------------------------------------------' % i)
        util.log('deleted smrlog files: %s' % util.json_to_str(temp_old_logs))

        if ok:
            break

        time.sleep(10)

    self.assertTrue(ok, 'smr-replicator does not delete smrlogs.')
    util.log('smr-replicator deletes smrlogs.')

    # check consistency of load_generator
    for i in range(len(self.load_gen_thrd_list)):
        self.load_gen_thrd_list[i].quit()
    for i in range(len(self.load_gen_thrd_list)):
        self.load_gen_thrd_list[i].join()
        self.assertTrue(self.load_gen_thrd_list[i].isConsistent(),
                        'Inconsistent after migration')
def test_random_migrate(self): util.print_frame() # start load generator load_gen_thrd_list = {} util.log("start load_generator") for i in range(self.max_load_generator): ip, port = util.get_rand_gateway(self.cluster) load_gen_thrd_list[i] = load_generator.LoadGenerator(i, ip, port) load_gen_thrd_list[i].start() ret = util.migration(self.cluster, 0, 1, 4096, 8191, 40000) self.assertEqual(True, ret, 'Migration Fail') leader_cm = self.cluster['servers'][0] cluster_name = self.cluster['cluster_name'] mapping = [-1] * 8192 count = 50 while count > 0: # get PN -> PG map cmd = 'cluster_info %s' % cluster_name result = util.cm_command(leader_cm['ip'], leader_cm['cm_port'], cmd) ret = json.loads(result) rle = ret['data']['cluster_info']['PN_PG_Map'] print "PN_PG_MAP = %s" % rle sp = rle.split() index = 0 for i in range(len(sp) / 2): for j in range(int(sp[i * 2 + 1])): mapping[index] = int(sp[i * 2]) index += 1 slot = random.randint(0, 8191) src_pgid = mapping[slot] dst_pgid = (src_pgid + 1) % 2 slot_end = slot while random.randint(0, 5) <= 4: if slot_end < 8191 and mapping[slot_end + 1] == src_pgid: slot_end += 1 else: break print "SLOT=%d, SRC_PGID=%d, DST_PGID=%d" % (slot, src_pgid, dst_pgid) ret = util.migration(self.cluster, src_pgid, dst_pgid, slot, slot_end, 40000) self.assertEqual(True, ret, 'Migration Fail') ok = True for j in range(len(load_gen_thrd_list)): if load_gen_thrd_list[j].isConsistent() == False: ok = False break if not ok: break count -= 1 # check consistency of load_generator for i in range(len(load_gen_thrd_list)): load_gen_thrd_list[i].quit() for i in range(len(load_gen_thrd_list)): load_gen_thrd_list[i].join() self.assertTrue(load_gen_thrd_list[i].isConsistent(), 'Inconsistent after migration') # Go back to initial configuration cinfo = util.cluster_info(leader_cm['ip'], leader_cm['cm_port'], cluster_name) for slot in util.get_slots(cinfo['cluster_info']['PN_PG_Map'], 1): self.assertTrue( util.migration(self.cluster, 1, 0, slot['begin'], slot['end'], 40000), 'failed to rollback migration')
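# test_random_migrate() above decodes the cluster's PN_PG_Map inline: the map is a
# run-length encoding of "pg_id count" pairs covering the 8192 partition slots. A
# standalone sketch of that decoding step, as a hypothetical helper (not part of the
# test suite), mirroring the inline logic:
def decode_pn_pg_map(rle, num_slots=8192):
    # e.g. "0 4096 1 4096" -> slots 0..4095 belong to pg0, 4096..8191 to pg1
    mapping = [-1] * num_slots
    sp = rle.split()
    index = 0
    for i in range(len(sp) / 2):
        for j in range(int(sp[i * 2 + 1])):
            mapping[index] = int(sp[i * 2])
            index += 1
    return mapping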
def __test_local_proxy(self, arch=64): util.print_frame() # Clean server log file p = util.exec_proc_async(util.capi_dir(0), 'rm capi_server-*', True, None, subprocess.PIPE, None) p.wait() # run test server _capi_server_conf = """ zookeeper 127.0.0.1:2181 cluster_name %s port 6200 daemonize no num_conn_per_gw 2 init_timeout_millis 10000 log_level INFO log_file_prefix "capi_server" max_fd 4096 conn_reconnect_millis 1000 zk_reconnect_millis 1000 zk_session_timeout_millis 10000 """ % self.cluster['cluster_name'] old_cwd = os.path.abspath(os.getcwd()) os.chdir(util.capi_dir(0)) f = open('capi_server.conf', 'w') f.write(_capi_server_conf) f.close() os.chdir(old_cwd) if arch is 32: cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER else: cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER capi_server = util.exec_proc_async(util.capi_dir(0), cmd, True, None, subprocess.PIPE, None) # ping check while True: try: t = telnetlib.Telnet('127.0.0.1', 6200) break except: time.sleep(1) continue t.write("ping\r\n") t.read_until('+PONG\r\n') t.close() # Start load generator load_gen_thrd_list = {} for i in range(self.max_load_generator): load_gen_thrd_list[i] = load_generator.LoadGenerator( i, 'localhost', 6200) load_gen_thrd_list[i].start() time.sleep(5) # Check reconfiguration by SIGHUP p = util.exec_proc_async( util.capi_dir(0), 'grep "Connected to the zookeeper" capi_server-* | wc -l', True, None, subprocess.PIPE, None) p.wait() wc = p.stdout.readline() print 'grep "Connected to the zookeeper" result : ' + wc self.assertEquals(wc.strip(), '1') capi_server.send_signal(signal.SIGHUP) time.sleep(5) p = util.exec_proc_async( util.capi_dir(0), 'grep "Connected to the zookeeper" capi_server-* | wc -l', True, None, subprocess.PIPE, None) p.wait() wc = p.stdout.readline() print 'grep "Connected to the zookeeper" result : ' + wc self.assertEquals(wc.strip(), '2') p = util.exec_proc_async( util.capi_dir(0), 'grep "Graceful shutdown caused by API" capi_server-* | wc -l', True, None, subprocess.PIPE, None) p.wait() wc = p.stdout.readline() print 'grep "Graceful shutdown caused by API" result : ' + wc self.assertEquals(wc.strip(), '1') # Check consistency after sending many SIGHUP signal for i in range(50): capi_server.send_signal(signal.SIGHUP) time.sleep(0.1) # check consistency of load_generator for i in range(len(load_gen_thrd_list)): load_gen_thrd_list[i].quit() for i in range(len(load_gen_thrd_list)): load_gen_thrd_list[i].join() self.assertTrue(load_gen_thrd_list[i].isConsistent(), 'Inconsistent after sending signal') # Terminate test server capi_server.send_signal(signal.SIGTERM) capi_server.wait()
def test_quorum_with_left_pgs( self ): util.print_frame() # start load generators load_gen_list = {} for i in range( len(self.cluster['servers']) ): server = self.cluster['servers'][i] load_gen = load_generator.LoadGenerator(server['id'], server['ip'], server['gateway_port']) load_gen.start() load_gen_list[i] = load_gen # get master, slave1, slave2 m, s1, s2 = util.get_mss( self.cluster ) self.assertNotEqual( m, None, 'master is None.' ) self.assertNotEqual( s1, None, 'slave1 is None.' ) self.assertNotEqual( s2, None, 'slave2 is None.' ) # detach pgs from cluster cmd = 'pgs_leave %s %d\r\n' % (m['cluster_name'], m['id']) ret = util.cm_command( self.leader_cm['ip'], self.leader_cm['cm_port'], cmd ) jobj = json.loads(ret) self.assertEqual( jobj['msg'], '+OK', 'failed : cmd="%s", reply="%s"' % (cmd[:-2], ret[:-2]) ) util.log( 'succeeded : cmd="%s", reply="%s"' % (cmd[:-2], ret[:-2]) ) # check if pgs is removed success = False for try_cnt in range( 10 ): redis = redis_mgmt.Redis( m['id'] ) ret = redis.connect( m['ip'], m['redis_port'] ) self.assertEquals( ret, 0, 'failed : connect to smr%d(%s:%d)' % (m['id'], m['ip'], m['redis_port']) ) util.log( 'succeeded : connect to smr%d(%s:%d)' % (m['id'], m['ip'], m['redis_port']) ) redis.write( 'info stats\r\n' ) for i in range( 6 ): redis.read_until( '\r\n' ) res = redis.read_until( '\r\n' ) self.assertNotEqual( res, '', 'failed : get reply of "info stats" from redis%d(%s:%d)' % (m['id'], m['ip'], m['redis_port']) ) util.log( 'succeeded : get reply of "info stats" from redis%d(%s:%d), reply="%s"' % (m['id'], m['ip'], m['redis_port'], res[:-2]) ) no = int( res.split(':')[1] ) if no <= 100: success = True break time.sleep( 1 ) self.assertEquals( success, True, 'failed : pgs does not removed.' ) util.log( 'succeeded : pgs is removed' ) # check states of all pgs in pg for s in self.cluster['servers']: real_role = util.get_role_of_server( s ) real_role = util.roleNumberToChar( real_role ) smr_info = util.get_smr_info( s, self.leader_cm ) cc_role = smr_info['smr_Role'] cc_hb = smr_info['hb'] if cc_hb == 'N': continue self.assertEqual( real_role, cc_role, 'failed : each role is difference, real=%s, cc=%s' % (real_role, cc_role) ) util.log( 'succeeded : a role of real pgs is the same with a role in cc, real=%s, cc=%s' % (real_role, cc_role) ) # check quorum policy quorum_of_haning_master = util.get_quorum( m ) self.assertEqual( self.quorum_policy[1], quorum_of_haning_master, 'invalid quorum of left master, expected:%d, but:%d' %( self.quorum_policy[1], quorum_of_haning_master) ) util.log( 'succeeded : quorum of left master=%d' % quorum_of_haning_master ) # 'role lconn' to master cmd = 'role lconn\r\n' ret = util.cmd_to_smr( m, cmd ) self.assertEqual( ret, '+OK\r\n', 'failed : cmd="%s", reply="%s"' % (cmd[:-2], ret[:-2]) ) util.log( 'succeeded : cmd="%s", reply="%s"' % (cmd[:-2], ret[:-2]) ) # wait for master election success = False new_master = None for i in range( 10 ): role = util.get_role_of_server( s1 ) if role == c.ROLE_MASTER: success = True new_master = s1 break role = util.get_role_of_server( s2 ) if role == c.ROLE_MASTER: success = True new_master = s2 break time.sleep( 1 ) self.assertEqual( success, True, 'failed to elect new master' ) util.log( 'succeeded : elect new master, master_id=%d' % new_master['id'] ) time.sleep( 1 ) # check the numbers of master, slave, and lconn cnt_master = 0 cnt_slave = 0 cnt_lconn = 0 for s in self.cluster['servers']: role = util.get_role_of_server( s ) if role == c.ROLE_MASTER: cnt_master = cnt_master + 1 elif role 
== c.ROLE_SLAVE: cnt_slave = cnt_slave + 1 elif role == c.ROLE_LCONN: cnt_lconn = cnt_lconn + 1 self.assertEqual( cnt_master, 1, 'failed : the number of master is %s, expected 1' % cnt_master ) self.assertEqual( cnt_slave, 1, 'failed : the number of slave is %s, expected 1' % cnt_slave ) self.assertEqual( cnt_lconn, 1, 'failed : the number of lconn is %s, expected 1' % cnt_lconn ) # check states of all pgs in pg for s in self.cluster['servers']: real_role = util.get_role_of_server( s ) real_role = util.roleNumberToChar( real_role ) smr_info = util.get_smr_info( s, self.leader_cm ) cc_role = smr_info['smr_Role'] cc_hb = smr_info['hb'] if cc_hb == 'N': continue self.assertEqual( real_role, cc_role, 'failed : each role is difference, real=%s, cc=%s' % (real_role, cc_role) ) util.log( 'succeeded : a role of real pgs is the same with a role in cc, real=%s, cc=%s' % (real_role, cc_role) ) # check quorum policy quorum_of_new_master = util.get_quorum( new_master ) self.assertNotEqual( None, quorum_of_new_master, 'failed : find new master' ) self.assertEqual( self.quorum_policy[1], quorum_of_new_master , 'invalid quorum of new master, expected:%d, but:%d' % (self.quorum_policy[1], quorum_of_new_master) ) util.log( 'succeeded : quorum of new master=%d' % quorum_of_new_master ) # shutdown load generators for i in range( len(load_gen_list) ): load_gen_list[i].quit() load_gen_list[i].join() return 0
def test_local_proxy(self, arch=64): util.print_frame() # Clean server log file p = util.exec_proc_async(util.capi_dir(0), 'rm capi_server-*', True, None, subprocess.PIPE, None) p.wait() # run test server capi_server = self.run_test_server(arch) # ping check while True: try: t = telnetlib.Telnet('127.0.0.1', 6200) break except: time.sleep(1) continue t.write("ping\r\n") t.read_until('+PONG\r\n') t.close() # Start load generator load_gen_thrd_list = {} for i in range(self.max_load_generator): load_gen_thrd_list[i] = load_generator.LoadGenerator( i, 'localhost', 6200) load_gen_thrd_list[i].start() time.sleep(5) # Check reconfiguration by SIGHUP p = util.exec_proc_async( util.capi_dir(0), 'grep "Connected to the zookeeper" capi_server-* | wc -l', True, None, subprocess.PIPE, None) p.wait() wc = p.stdout.readline() print 'grep "Connected to the zookeeper" result : ' + wc self.assertEquals(wc.strip(), '1') capi_server.send_signal(signal.SIGHUP) time.sleep(5) p = util.exec_proc_async( util.capi_dir(0), 'grep "Connected to the zookeeper" capi_server-* | wc -l', True, None, subprocess.PIPE, None) p.wait() wc = p.stdout.readline() print 'grep "Connected to the zookeeper" result : ' + wc self.assertEquals(wc.strip(), '2') p = util.exec_proc_async( util.capi_dir(0), 'grep "Graceful shutdown caused by API" capi_server-* | wc -l', True, None, subprocess.PIPE, None) p.wait() wc = p.stdout.readline() print 'grep "Graceful shutdown caused by API" result : ' + wc self.assertEquals(wc.strip(), '1') # Check consistency after sending many SIGHUP signal for i in range(50): capi_server.send_signal(signal.SIGHUP) time.sleep(0.1) # check consistency of load_generator for i in range(len(load_gen_thrd_list)): load_gen_thrd_list[i].quit() for i in range(len(load_gen_thrd_list)): load_gen_thrd_list[i].join() self.assertTrue(load_gen_thrd_list[i].isConsistent(), 'Inconsistent after sending signal') # Terminate test server self.stop_test_server(capi_server)
def test_rdb_backups(self): util.print_frame() bgsave_count = 50 org_path = os.getcwd() os.chdir(util.redis_dir(0)) server0 = self.cluster['servers'][0] redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port']) util.log("Starting load generator") for i in range(self.max_load_generator): ip, port = util.get_rand_gateway(self.cluster) self.load_gen_thrd_list[i] = load_generator.LoadGenerator( i, ip, port) self.load_gen_thrd_list[i].start() util.log("Set the number of rdb backups = 24") redis0.write("config set number-of-rdb-backups 24\r\n") redis0.read_until("+OK\r\n") util.log("Clear old rdb backups\r\n") for f in os.listdir('.'): if (f.endswith('.rdb')): os.remove(f) util.log( "Bgsaving continuously and counting the number of rdb backups") for i in range(bgsave_count): # Save current time before Bgsaving redis0.write('time\r\n') redis0.read_until('\r\n', 1) redis0.read_until('\r\n', 1) ret = redis0.read_until('\r\n', 1) redis_server_time = int(ret.strip()) redis0.read_until('\r\n', 1) redis0.read_until('\r\n', 1) time.sleep(1.1) redis0.write('time\r\n') redis0.read_until('\r\n', 1) redis0.read_until('\r\n', 1) ret = redis0.read_until('\r\n', 1) self.assertNotEqual(redis_server_time, int(ret.strip())) redis0.read_until('\r\n', 1) redis0.read_until('\r\n', 1) util.log("%d ~ %d" % (redis_server_time, int(ret.strip()))) # Bgsave redis0.write("bgsave\r\n") ret = redis0.read_until('\r\n', 1) self.assertEqual('+Background saving started\r\n', ret) # Wait finishing bgsave while True: redis0.write('lastsave\r\n') ret = redis0.read_until('\r\n', 1) lastsave_time = int(ret[1:].strip()) if lastsave_time > redis_server_time: break time.sleep(0.1) # Count the number of rdb backups rdb_list = [ name for name in os.listdir('.') if os.path.isfile(name) and name.startswith('dump') and name.endswith('.rdb') ] util.log(rdb_list) util.log("Iteration:%d, rdb Backups:%d" % (i + 1, len(rdb_list))) self.assertTrue(i + 1 > 24 and len(rdb_list) == 25 or len(rdb_list) == i + 1) self.assertTrue('dump.rdb' in rdb_list) util.log("\nSet the number of rdb backups = 5") redis0.write("config set number-of-rdb-backups 5\r\n") redis0.read_until("+OK\r\n") for i in range(3): # Save current time before Bgsaving redis0.write('time\r\n') redis0.read_until('\r\n', 1) redis0.read_until('\r\n', 1) ret = redis0.read_until('\r\n', 1) redis_server_time = int(ret.strip()) redis0.read_until('\r\n', 1) redis0.read_until('\r\n', 1) time.sleep(1.1) # Bgsave redis0.write("bgsave\r\n") ret = redis0.read_until('\r\n', 1) self.assertEqual('+Background saving started\r\n', ret) # Wait finishing bgsave while True: redis0.write('lastsave\r\n') ret = redis0.read_until('\r\n', 1) lastsave_time = int(ret[1:].strip()) if lastsave_time > redis_server_time: break time.sleep(0.1) # Count the number of rdb backups rdb_list = [ name for name in os.listdir('.') if os.path.isfile(name) and name.startswith('dump') and name.endswith('.rdb') ] util.log(rdb_list) util.log("Iteration:%d, rdb Backups:%d" % (i + 1, len(rdb_list))) self.assertTrue(len(rdb_list) == 6) self.assertTrue('dump.rdb' in rdb_list) # check consistency of load_generator for i in range(len(self.load_gen_thrd_list)): self.load_gen_thrd_list[i].quit() for i in range(len(self.load_gen_thrd_list)): self.load_gen_thrd_list[i].join() self.assertTrue(self.load_gen_thrd_list[i].isConsistent(), 'Inconsistent after gateway_mgmt test') os.chdir(org_path)
def start_load_generator(self, num):
    for i in range(num):
        ip, port = util.get_rand_gateway(self.cluster)
        self.load_gen_thrd_list[i] = load_generator.LoadGenerator(
            i, ip, port)
        self.load_gen_thrd_list[i].start()
def test_moving_pgs(self):
    util.print_frame()

    # start load generator
    util.log("start load_generator")
    for i in range(self.max_load_generator):
        ip, port = util.get_rand_gateway(self.cluster)
        self.load_gen_thrd_list[i] = load_generator.LoadGenerator(
            i, ip, port)
        self.load_gen_thrd_list[i].start()
    util.log("started load_generator")

    servers = self.cluster['servers']
    gw_list = []
    for server in servers:
        gw = {}
        gw['mgmt'] = telnetlib.Telnet(server['ip'],
                                      server['gateway_port'] + 1)
        gw['normal'] = telnetlib.Telnet(server['ip'],
                                        server['gateway_port'])
        gw_list.append(gw)

    n = 0
    step = 0
    iter = 30
    while iter > 0:
        if n == 0 or random.randint(0, 1) == 0:
            step = random.randint(1, 10)
        else:
            step = -1 * random.randint(1, n)

        print "<<< ITER = %d, PG%d -> PG%d, PG%d -> PG%d >>>" % (
            iter, n * 2, (n + step) * 2, n * 2 + 1, (n + step) * 2 + 1)

        gw = gw_list[0]

        self.pgs_del_server(gw['mgmt'], servers[0], n)
        self.pgs_del_server(gw['mgmt'], servers[1], n)
        self.pgs_del_server(gw['mgmt'], servers[5], n)

        gw['mgmt'].write("pg_add %d\r\n" % ((n + step) * 2))
        gw['mgmt'].read_until("+OK\r\n")
        gw['mgmt'].write("pg_add %d\r\n" % ((n + step) * 2 + 1))
        gw['mgmt'].read_until("+OK\r\n")

        self.pgs_add_server(gw['mgmt'], servers[0], n + step)
        self.pgs_add_server(gw['mgmt'], servers[1], n + step)
        self.pgs_add_server(gw['mgmt'], servers[5], n + step)

        while True:
            gw['normal'].write("info gateway\r\n")
            ret = gw['normal'].read_until("\r\n", 1)
            if "-ERR" in ret:
                continue
            ret = gw['normal'].read_until("\r\n\r\n", 1)
            #print ret
            if "gateway_disconnected_redis:0\r\n" in ret:
                break

        gw['mgmt'].write("delay 0 4095\r\n")
        gw['mgmt'].read_until("+OK\r\n")
        gw['mgmt'].write("delay 4096 8191\r\n")
        gw['mgmt'].read_until("+OK\r\n")
        gw['mgmt'].write("redirect 0 4095 %d\r\n" % ((n + step) * 2))
        gw['mgmt'].read_until("+OK\r\n")
        gw['mgmt'].write("redirect 4096 8191 %d\r\n" % ((n + step) * 2 + 1))
        gw['mgmt'].read_until("+OK\r\n")

        gw_list[0]['mgmt'].write("cluster_info\r\nping\r\n")
        print gw_list[0]['mgmt'].read_until("+PONG\r\n")

        self.pgs_del_server(gw['mgmt'], servers[2], n)
        self.pgs_del_server(gw['mgmt'], servers[3], n)
        self.pgs_del_server(gw['mgmt'], servers[4], n)

        self.pgs_add_server(gw['mgmt'], servers[2], n + step)
        self.pgs_add_server(gw['mgmt'], servers[3], n + step)
        self.pgs_add_server(gw['mgmt'], servers[4], n + step)

        while True:
            gw['normal'].write("info gateway\r\n")
            ret = gw['normal'].read_until("\r\n", 1)
            if "-ERR" in ret:
                continue
            ret = gw['normal'].read_until("\r\n\r\n", 1)
            #print ret
            if "gateway_disconnected_redis:0\r\n" in ret:
                break

        gw['mgmt'].write("pg_del %d\r\n" % (n * 2))
        gw['mgmt'].read_until("+OK\r\n")
        gw['mgmt'].write("pg_del %d\r\n" % (n * 2 + 1))
        gw['mgmt'].read_until("+OK\r\n")

        n += step

        gw_list[0]['mgmt'].write("cluster_info\r\nping\r\n")
        print gw_list[0]['mgmt'].read_until("+PONG\r\n")

        iter -= 1

    # check consistency of load_generator
    for i in range(len(self.load_gen_thrd_list)):
        self.load_gen_thrd_list[i].quit()
    for i in range(len(self.load_gen_thrd_list)):
        self.load_gen_thrd_list[i].join()
        self.assertTrue(self.load_gen_thrd_list[i].isConsistent(),
                        'Inconsistent after gateway_mgmt test')
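# Note: the two 'info gateway' polling loops above are identical. A minimal
# sketch of a shared helper is given below; wait_for_gateway_connected is a
# hypothetical name, not an existing utility in this code base.
def wait_for_gateway_connected(gw_normal):
    # Poll 'info gateway' on the gateway's normal port until it reports
    # that no redis connection is disconnected.
    while True:
        gw_normal.write("info gateway\r\n")
        ret = gw_normal.read_until("\r\n", 1)
        if "-ERR" in ret:
            continue
        ret = gw_normal.read_until("\r\n\r\n", 1)
        if "gateway_disconnected_redis:0\r\n" in ret:
            break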
def pgs_add_and_del(self, upgrade_server, type):
    util.print_frame()
    util.log('[start] add and del pgs%d. type:%s' %
             (upgrade_server['id'], type))
    util.log_server_state(self.cluster)

    # start load generator
    load_gen_list = {}
    for i in range(len(self.cluster['servers'])):
        server = self.cluster['servers'][i]
        load_gen = load_generator.LoadGenerator(server['id'], server['ip'],
                                                server['gateway_port'])
        load_gen.start()
        load_gen_list[i] = load_gen

    # detach pgs from cluster
    cmd = 'pgs_leave %s %d\r\n' % (upgrade_server['cluster_name'],
                                   upgrade_server['id'])
    ret = util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'],
                          cmd)
    jobj = json.loads(ret)
    self.assertEqual(jobj['msg'], '+OK',
                     'failed : cmd="%s", reply="%s"' % (cmd[:-2], ret[:-2]))
    util.log('succeeded : cmd="%s", reply="%s"' % (cmd[:-2], ret[:-2]))

    # set new values
    ip, port = util.get_rand_gateway(self.cluster)
    gw = gateway_mgmt.Gateway('0')
    gw.connect(ip, port)
    for i in range(0, 50):
        cmd = 'set %s%d %d\r\n' % (self.key_base, i, i)
        gw.write(cmd)
        res = gw.read_until('\r\n')
        self.assertEqual(
            res, '+OK\r\n',
            'failed to set values to gw(%s:%d). cmd:%s, res:%s' %
            (ip, port, cmd[:-2], res[:-2]))

    # attach pgs to cluster
    cmd = 'pgs_join %s %d\r\n' % (upgrade_server['cluster_name'],
                                  upgrade_server['id'])
    ret = util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'],
                          cmd)
    jobj = json.loads(ret)
    self.assertEqual(jobj['msg'], '+OK',
                     'failed : cmd="%s", reply="%s"' % (cmd[:-2], ret))
    util.log('succeeded : cmd="%s", reply="%s"' % (cmd[:-2], ret[:-2]))
    time.sleep(3)

    # check new values
    redis = redis_mgmt.Redis(upgrade_server['id'])
    ret = redis.connect(upgrade_server['ip'], upgrade_server['redis_port'])
    self.assertEquals(
        ret, 0, 'failed : connect to smr%d(%s:%d)' %
        (upgrade_server['id'], upgrade_server['ip'],
         upgrade_server['redis_port']))
    for i in range(0, 50):
        cmd = 'get %s%d\r\n' % (self.key_base, i)
        redis.write(cmd)
        redis.read_until('\r\n')
        res = redis.read_until('\r\n')
        self.assertEqual(
            res, '%d\r\n' % i,
            'failed to get values from redis%d. %s != %d' %
            (upgrade_server['id'], res, i))
    util.log('succeeded : check values with get operations on pgs%d.' %
             (upgrade_server['id']))

    # shutdown load generators
    for i in range(len(load_gen_list)):
        load_gen_list[i].quit()
        load_gen_list[i].join()

    util.log_server_state(self.cluster)

    return 0