Example #1
    def test_catdatadel(self):
        util.print_frame()
        test_limit_mb = 10

        # Make data
        server = self.cluster['servers'][0]
        redis = telnetlib.Telnet(server['ip'], server['redis_port'])
        util.log("Insert large key about 100MB")
        self.insertLargeKey(redis, "test_key")

        # Get dump
        util.log("Start getdump, start ts:%d" % time.time())
        cmd = "./cluster-util --getdump %s %d getdump.rdb 0-8191 %d" % (server['ip'], server['redis_port'], test_limit_mb)
        proc = util.exec_proc_async(util.cluster_util_dir(0), cmd, True, None, subprocess.PIPE, None)
        ret = proc.wait()
        self.assertEqual(0, ret)

        # Test --catdata sub command
        util.log("Start --catdata, start ts:%d" % time.time())
        cmd = "./cluster-util --catdata getdump.rdb > catdata.resp"
        proc = util.exec_proc_async(util.cluster_util_dir(0), cmd, True, None, subprocess.PIPE, None)
        ret = proc.wait()
        self.assertEqual(0, ret)

        # Test --deldata sub command
        util.log("Start --deldata, start ts:%d" % time.time())
        cmd = "./cluster-util --deldata getdump.rdb > deldata.resp"
        proc = util.exec_proc_async(util.cluster_util_dir(0), cmd, True, None, subprocess.PIPE, None)
        ret = proc.wait()
        self.assertEqual(0, ret)
Example #2
    def timedump_and_make_json_output(self, target_time):
        cmd = "./dump-util --dump %d ../smr0/log0 . out.rdb" % target_time
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
        ret = p.wait()
        self.assertTrue(ret == 0, p.stdout.readlines())

        cmd = "./dump-util --dump-iterator out.rdb ./dump2json_base32hex.so out.json"
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
        ret = p.wait()
        self.assertTrue(ret == 0, p.stdout.readlines())
Example #3
    def timedump_and_make_json_output(self, target_time):
        cmd = "./dump-util --dump %d ../smr0/log0 . out.rdb" % target_time
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
        ret = p.wait()
        self.assertTrue(ret == 0, p.stdout.readlines())

        cmd = "./dump-util --dump-iterator out.rdb ./dump2json_base32hex.so out.json"
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
        ret = p.wait()
        self.assertTrue(ret == 0, p.stdout.readlines())
Example #4
    def test_getdump_and_playdump(self):
        util.print_frame()
        self.setup_proxies()

        test_limit_mb = 10

        server = self.cluster['servers'][0]
        redis = telnetlib.Telnet(server['ip'], server['redis_port'])

        util.log("Insert large key about 100MB")
        self.insertLargeKey(redis, "test_key")

        # Test getdump
        start_time = time.time()
        util.log("Start getdump, start ts:%d" % start_time)

        cmd = "./cluster-util --getdump %s %d getdump.rdb 0-8191 %d" % (server['ip'], self.getdump_proxy_port, test_limit_mb)
        proc = util.exec_proc_async(util.cluster_util_dir(0), cmd, True, None, subprocess.PIPE, None)

        monitor_file = "%s/%s" % (util.cluster_util_dir(0), self.getdump_proxy_log)
        self.monitor_filesize_diff(proc, test_limit_mb, monitor_file)

        ret = proc.wait()
        self.assertEqual(0, ret)

        elapse_time = time.time() - start_time
        util.log("End getdump, elapsed:%d" % elapse_time)

        file_size = os.path.getsize("%s/%s" % (util.cluster_util_dir(0), self.getdump_proxy_log))
        util.log("File Size:%d, elapsed:%d, limit:%dMB/s, actual:%dMB/s" % (file_size,
                 elapse_time, test_limit_mb, file_size / elapse_time / 1024 / 1024))
        # With a limit of test_limit_mb MB/s, transferring file_size bytes must
        # take at least file_size / (test_limit_mb * 1024 * 1024) seconds.
        self.assertTrue(file_size / (test_limit_mb * 1024 * 1024) < elapse_time)

        # Test playdump
        start_time = time.time()
        util.log("Start playdump, start ts:%d" % start_time)

        cmd = "./cluster-util --playdump getdump.rdb %s %d 30000 %d" % (server['ip'], self.playdump_proxy_port, test_limit_mb)
        proc = util.exec_proc_async(util.cluster_util_dir(0), cmd, True, None, subprocess.PIPE, None)

        monitor_file = "%s/%s" % (util.cluster_util_dir(0), self.playdump_proxy_log)
        self.monitor_filesize_diff(proc, test_limit_mb, monitor_file)

        ret = proc.wait()
        self.assertEqual(0, ret)

        elapse_time = time.time() - start_time
        util.log("End playdump, elapsed:%d" % elapse_time)

        file_size = os.path.getsize("%s/%s" % (util.cluster_util_dir(0), self.playdump_proxy_log))
        util.log("File Size:%d, elapsed:%d, limit:%dMB/s, actual:%dMB/s" % (file_size,
                 elapse_time, test_limit_mb, file_size / elapse_time / 1024 / 1024))
        self.assertTrue(file_size / (test_limit_mb * 1024 * 1024) < elapse_time)
Example #5
    def test_getdump_and_playdump(self):
        util.print_frame()

        test_limit_mb = 10

        server = self.cluster['servers'][0]
        redis = telnetlib.Telnet(server['ip'], server['redis_port'])

        util.log("Insert large key about 100MB")
        self.insertLargeKey(redis, "test_key")

        # Test getdump
        start_time = time.time()
        util.log("Start getdump, start ts:%d" % start_time)

        cmd = "./cluster-util --getdump %s %d getdump.rdb 0-8191 %d" % (server['ip'], self.getdump_proxy_port, test_limit_mb)
        proc = util.exec_proc_async(util.cluster_util_dir(0), cmd, True, None, subprocess.PIPE, None)

        monitor_file = "%s/%s" % (util.cluster_util_dir(0), self.getdump_proxy_log)
        self.monitor_filesize_diff(proc, test_limit_mb, monitor_file)

        ret = proc.wait()
        self.assertEqual(0, ret)

        elapse_time = time.time() - start_time
        util.log("End getdump, elapsed:%d" % elapse_time)

        file_size = os.path.getsize("%s/%s" % (util.cluster_util_dir(0), self.getdump_proxy_log))
        util.log("File Size:%d, elapsed:%d, limit:%dMB/s, actual:%dMB/s" % (file_size,
                 elapse_time, test_limit_mb, file_size / elapse_time / 1024 / 1024))
        self.assertTrue(file_size / (test_limit_mb * 1024 * 1024) < elapse_time)

        # Test playdump
        start_time = time.time()
        util.log("Start playdump, start ts:%d" % start_time)

        cmd = "./cluster-util --playdump getdump.rdb %s %d 30000 %d" % (server['ip'], self.playdump_proxy_port, test_limit_mb)
        proc = util.exec_proc_async(util.cluster_util_dir(0), cmd, True, None, subprocess.PIPE, None)

        monitor_file = "%s/%s" % (util.cluster_util_dir(0), self.playdump_proxy_log)
        self.monitor_filesize_diff(proc, test_limit_mb, monitor_file)

        ret = proc.wait()
        self.assertEqual(0, ret)

        elapse_time = time.time() - start_time
        util.log("End playdump, elapsed:%d" % elapse_time)

        file_size = os.path.getsize("%s/%s" % (util.cluster_util_dir(0), self.playdump_proxy_log))
        util.log("File Size:%d, elapsed:%d, limit:%dMB/s, actual:%dMB/s" % (file_size,
                 elapse_time, test_limit_mb, file_size / elapse_time / 1024 / 1024))
        self.assertTrue(file_size / (test_limit_mb * 1024 * 1024) < elapse_time)
Example #6
    def setup_proxy_server(self, in_port, out_port, in_log, out_log, fifo_name):
        try:
            os.remove("%s/%s" % (util.cluster_util_dir(0), fifo_name))
        except Exception as e:
            pass
        fifo_cmd = "mkfifo %s" % fifo_name
        fifo_proc = util.exec_proc_async(util.cluster_util_dir(0), fifo_cmd, True, None, subprocess.PIPE, None)
        ret = fifo_proc.wait()
        self.assertEqual(0, ret)

        proxy_cmd = "nc -l %d < %s | tee %s | nc 127.0.0.1 %d | tee %s > %s" % (
            in_port, fifo_name, in_log, out_port, out_log, fifo_name)
        return  util.exec_proc_async(util.cluster_util_dir(0), proxy_cmd, True, None, subprocess.PIPE, None)
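The proxy command above forms a loop through the FIFO: nc -l serves the client on in_port while reading backend replies from the FIFO; the first tee logs client-to-backend bytes into in_log, and the second tee logs backend replies into out_log before writing them back into the FIFO. Sketched as a data-flow diagram (names as in the function):

    # client -> nc -l in_port -> tee in_log -> nc 127.0.0.1 out_port -> backend
    # backend replies -> tee out_log -> fifo -> read back by nc -l -> client

Because both directions pass through a tee'd log file, tests such as Examples #4 and #8 can sample log-file growth (monitor_filesize_diff) to check the MB/s limit.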
Example #7
    def test_error_log(self):
        util.print_frame()

        cluster = self.cluster

        # Start test-fiall
        p = util.exec_proc_async('%s/.obj%d' % (constant.ARCCI_DIR, self.arch),
                "./test-fiall -z localhost:2181 -c %s -s 10" % cluster['cluster_name'],
                subprocess.PIPE, subprocess.PIPE, subprocess.PIPE)

        # Set up arguments
        server = cluster['servers'][0]
        gw_id = server['id']
        gw_port = server['gateway_port']

        # Check load
        for i in range(20):
            ok = True
            for s in cluster['servers']:
                tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
                util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            time.sleep(1)

        # Check no error
        util.log(' ### BEGIN - ARCCI LOGS ### ')
        (stdout, stderr) = p.communicate()
        for line in stdout.split("\n"):
            util.log(line)
        util.log(' ### END - ARCCI LOGS ### ')
Example #8
    def test_getandplay(self):
        util.print_frame()

        test_limit_mb = 10

        server = self.cluster['servers'][0]
        redis = telnetlib.Telnet(server['ip'], server['redis_port'])

        util.log("Insert large key about 100MB")
        self.insertLargeKey(redis, "test_key")

        # Test getandplay
        start_time = time.time()
        util.log("Start getandplay, start ts:%d" % start_time)

        cmd = "./cluster-util --getandplay %s %d %s %d 0-8191 30000 %d" % (server['ip'], self.getdump_proxy_port,
                                                                        server['ip'], self.playdump_proxy_port, test_limit_mb)
        proc = util.exec_proc_async(util.cluster_util_dir(0), cmd, True, None, subprocess.PIPE, None)

        monitor_file1 = "%s/%s" % (util.cluster_util_dir(0), self.getdump_proxy_log)
        monitor_file2 = "%s/%s" % (util.cluster_util_dir(0), self.playdump_proxy_log)
        self.monitor_filesize_diff(proc, test_limit_mb, monitor_file1, monitor_file2)

        ret = proc.wait()
        self.assertEqual(0, ret)

        elapse_time = time.time() - start_time
        util.log("End getandplay, elapsed:%d" % elapse_time)

        dump_file_size = os.path.getsize("%s/%s" % (util.cluster_util_dir(0), self.getdump_proxy_log))
        play_file_size = os.path.getsize("%s/%s" % (util.cluster_util_dir(0), self.playdump_proxy_log))
        util.log("Dump File Size:%d, Play File Size:%d, elapsed:%d, limit:%dMB/s, actual:%dMB/s"
                 % (dump_file_size, play_file_size, elapse_time, test_limit_mb,
                    (dump_file_size + play_file_size) / elapse_time / 1024 / 1024))
        self.assertTrue((dump_file_size + play_file_size) / (test_limit_mb * 1024 * 1024) < elapse_time)
Example #9
    def run_test_server(self, arch=64):
        # run test server
        _capi_server_conf = """
zookeeper 127.0.0.1:2181
cluster_name %s
port 6200
daemonize no
num_conn_per_gw 2
init_timeout_millis 10000
log_level INFO
log_file_prefix "capi_server"
max_fd 4096
conn_reconnect_millis 1000
zk_reconnect_millis 1000
zk_session_timeout_millis 10000
local_proxy_query_timeout_millis 1000
        """ % self.cluster['cluster_name']
        old_cwd = os.path.abspath(os.getcwd())
        os.chdir(util.capi_dir(0))
        f = open('capi_server.conf', 'w')
        f.write(_capi_server_conf)
        f.close()
        os.chdir(old_cwd)

        if arch == 32:
            cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER
        else:
            cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER

        capi_server = util.exec_proc_async(util.capi_dir(0), cmd, True, None,
                                           subprocess.PIPE, None)

        return capi_server
Example #10
    def test_basic_op_smr(self):
        util.print_frame()
        f = open("%s/test_basicop_output_smr" % constant.logdir, 'w')
        p = util.exec_proc_async("../redis", "./runtest_smr --accurate", True, None, f, None)

        ret = p.wait()
        f.close()
        self.assertEquals(0, ret)
Example #11
    def test_basic_op_gateway(self):
        util.print_frame()
        ip, port = util.get_rand_gateway(self.cluster)
        f = open("%s/test_basicop_output_gw" % constant.logdir, 'w')
        p = util.exec_proc_async("../redis", "./runtest_gw --accurate --gw-port "+str(port), True, None, f, None)

        ret = p.wait()
        f.close()
        self.assertEquals(0, ret)
Example #12
    def test_basic_op_smr(self):
        util.print_frame()
        f = open("%s/test_basicop_output_smr" % constant.logdir, 'w')
        p = util.exec_proc_async("../redis-2.8.8",
                            "./runtest_smr --accurate",
                            True, None, f, None)

        ret = p.wait()
        f.close()
        self.assertEquals(0, ret)
Example #13
    def test_basic_op_gateway(self):
        util.print_frame()
        ip, port = util.get_rand_gateway(self.cluster)
        f = open("%s/test_basicop_output_gw" % constant.logdir, 'w')
        p = util.exec_proc_async("../redis-2.8.8",
                            "./runtest_gw --accurate --gw-port "+str(port),
                            True, None, f, None)

        ret = p.wait()
        f.close()
        self.assertEquals(0, ret)
Example #14
    def test_basic_op_capi(self):

        capi_server = self.run_capi_server()

        f = open("%s/test_basicop_output_capi%d" % (constant.logdir, self.arch), 'w')
        p = util.exec_proc_async("../redis", "./runtest_gw --accurate --gw-port 6200", True, None, f, None)

        ret = p.wait()
        f.close()
        self.assertEquals(0, ret)

        self.stop_process(capi_server)
Example #15
    def start(cls, mode=0):
        """
        mode: CLI_RESTART
        """
        with cls.__lock:
            if mode & CLI_RESTART:
                cls.stop()

            if cls.__cli_proc is not None:
                return

            args = 'java -jar %s -z localhost:2181' % c.ZK_CLI
            cls.__cli_proc = util.exec_proc_async(c.ZK_CLI_DIR, args.split(), False, subprocess.PIPE, subprocess.PIPE, subprocess.PIPE)
Example #16
    def __test_basic_op_capi(self, arch = 64):

        capi_server = self.run_capi_server(arch)

        f = open("%s/test_basicop_output_capi%d" % (constant.logdir, arch), 'w')
        p = util.exec_proc_async("../redis-2.8.8",
                            "./runtest_gw --accurate --gw-port 6200",
                            True, None, f, None)

        ret = p.wait()
        f.close()
        self.assertEquals(0, ret)

        self.stop_process(capi_server)
Example #17
    def test_dump_iterator_with_mig_conf_clearstart(self):
        util.print_frame()

        num_test = 100
        dict = {}
        server0 = self.cluster['servers'][0]
        redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])

        for i in xrange(num_test):
            key = self.string_gen(random.randint(1, 64))
            val = self.string_gen(random.randint(1, 64))
            dict[key] = val

            redis0.write('*3\r\n$3\r\nset\r\n')
            redis0.write('$%d\r\n%s\r\n' % (len(key), key))
            redis0.write('$%d\r\n%s\r\n' % (len(val), val))
            ret = redis0.read_until('\r\n', 1)
            self.assertEqual(ret, '+OK\r\n')

        self.clearstart(redis0, 4096, 8191)

        self.bgsave(redis0)

        cmd = "./dump-util --dump-iterator dump.rdb ./dump2json_base32hex.so out.json"
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None,
                                 subprocess.PIPE, None)
        ret = p.wait()
        self.assertTrue(ret == 0)

        count = 0
        f = file("%s/out.json" % util.dump_util_dir(0), "r")
        for line in f.readlines():
            count += 1
            data = json.loads(line.strip())
            key = self.b32hexdecode(data['key'])
            val = self.b32hexdecode(data['value'])
            self.assertEqual(dict[key], val)
            if ((crc16.crc16_buff(key, 0) % 8192) >= 4096):
                print key
                print val
                print crc16.crc16_buff(key, 0) % 8192
                self.assertTrue(
                    False, "dump-util doesn't recognize keys on migration")

        print "Total Count of json output = %d" % count
        f.close()

        # Go back to initial configuration
        self.clearend(redis0)
Example #18
    def start(self):
        # make zk directory
        util.local("mkdir -p %s" % self.working_dir)

        # start zk
        self.p = util.exec_proc_async(
                self.working_dir, 
                "java -jar %s/jar/zookeeper-3.4.6-fatjar.jar server %d data" % (os.getcwd(), self.port), 
                True, 
                out_handle=util.devnull)

        # wait warmup
        return util.await(10)(
                lambda reply: reply == 'imok',
                lambda : self.command('ruok'))
Example #19
    def start(self):
        # make zk directory
        util.local("mkdir -p %s" % self.working_dir)

        # start zk
        self.p = util.exec_proc_async(
            self.working_dir,
            "java -jar %s/jar/zookeeper-3.4.6-fatjar.jar server %d data" %
            (os.getcwd(), self.port),
            True,
            out_handle=util.devnull)

        # wait warmup
        return util.await(10)(lambda reply: reply == 'imok',
                              lambda: self.command('ruok'))
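Examples #18 and #19 gate ZooKeeper startup on util.await(10)(predicate, action). A minimal sketch of a retry helper with those assumed semantics is below; the real util.await may differ (the timeout unit and polling interval are assumptions):

    import time

    def await_helper(timeout_sec, interval=0.5):
        # Return a runner that polls action() until predicate(action()) holds,
        # giving up after roughly timeout_sec seconds. A sketch mirroring how
        # util.await is called above, not the suite's actual implementation.
        def runner(predicate, action):
            deadline = time.time() + timeout_sec
            while time.time() < deadline:
                try:
                    if predicate(action()):
                        return True
                except Exception:
                    pass
                time.sleep(interval)
            return False
        return runner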
Example #20
    def start(cls, mode=0):
        """
        mode: CLI_RESTART
        """
        with cls.__lock:
            if mode & CLI_RESTART:
                cls.stop()

            if cls.__cli_proc is not None:
                return

            args = 'java -jar %s -z localhost:2181' % c.ZK_CLI
            cls.__cli_proc = util.exec_proc_async(c.ZK_CLI_DIR, args.split(),
                                                  False, subprocess.PIPE,
                                                  subprocess.PIPE,
                                                  subprocess.PIPE)
Example #21
    def test_dump_iterator_with_mig_conf_clearstart(self):
        util.print_frame()

        num_test = 100
        dict = {}
        server0 = self.cluster['servers'][0]
        redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])

        for i in xrange(num_test):
            key = self.string_gen(random.randint(1, 64))
            val = self.string_gen(random.randint(1, 64))
            dict[key] = val

            redis0.write('*3\r\n$3\r\nset\r\n')
            redis0.write('$%d\r\n%s\r\n' % (len(key), key))
            redis0.write('$%d\r\n%s\r\n' % (len(val), val))
            ret = redis0.read_until('\r\n', 1)
            self.assertEqual(ret, '+OK\r\n')

        self.clearstart(redis0, 4096, 8191)

        self.bgsave(redis0)

        cmd = "./dump-util --dump-iterator dump.rdb ./dump2json_base32hex.so out.json"
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
        ret = p.wait()
        self.assertTrue(ret == 0)

        count = 0
        f = file("%s/out.json" % util.dump_util_dir(0), "r")
        for line in f.readlines():
            count += 1
            data = json.loads(line.strip())
            key = self.b32hexdecode(data['key'])
            val = self.b32hexdecode(data['value'])
            self.assertEqual(dict[key], val)
            if ((crc16.crc16_buff(key, 0) % 8192) >= 4096):
                print key
                print val
                print crc16.crc16_buff(key, 0) % 8192
                self.assertTrue(False, "dump-util doesn't recognize keys on migration")

        print "Total Count of json output = %d" % count
        f.close()

        # Go back to initial configuration
        self.clearend(redis0)
Example #22
    def testbase32hex_conversion(self):
        util.print_frame()

        count = 100
        dict = {}

        server0 = self.cluster['servers'][0]
        redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])

        while count > 0:
            count -= 1
            key = self.string_gen(random.randint(1, 1000))
            val = self.string_gen(random.randint(1, 1000))
            dict[key] = val
            redis0.write('*3\r\n$3\r\nset\r\n')
            redis0.write('$%d\r\n%s\r\n' % (len(key), key))
            redis0.write('$%d\r\n%s\r\n' % (len(val), val))
            ret = redis0.read_until('\r\n', 1)
            self.assertEqual(ret, '+OK\r\n')

        self.bgsave(redis0)

        cmd = "./dump-util --dump-iterator dump.rdb ./dump2json_base32hex.so out.json"
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None,
                                 subprocess.PIPE, None)

        ret = p.wait()
        self.assertTrue(ret == 0)

        f = file("%s/out.json" % util.dump_util_dir(0), "r")
        skip_line = 2
        for line in f.readlines():
            # skip first 2 lines (smr_seqnum, smr_mstime)
            if skip_line > 0:
                skip_line -= 1
                continue

            line = line.strip()
            key = self.b32hexdecode(json.loads(line)['key'])
            val = self.b32hexdecode(json.loads(line)['value'])

            self.assertTrue(key in dict.keys(),
                            'key(%s) is not in json output' % key)
            self.assertEqual(dict[key], val,
                             "val(%s) does not match %s" % (dict[key], val))

        f.close()
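The json emitted by dump2json_base32hex.so encodes keys and values in the base32hex alphabet (RFC 4648). A sketch of what b32hexdecode could look like, assuming it only remaps the base32hex alphabet onto the standard base32 alphabet before decoding; the suite's actual helper may differ:

    import base64
    import string

    _B32HEX = "0123456789ABCDEFGHIJKLMNOPQRSTUV"
    _B32STD = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
    _TRANS = string.maketrans(_B32HEX, _B32STD)

    def b32hexdecode(s):
        # Map each base32hex digit to the standard-alphabet digit at the same
        # index, then reuse base64.b32decode; '=' padding passes through.
        return base64.b32decode(s.upper().translate(_TRANS))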
Example #23
    def run_capi_server(self):
        # run capi test server
        _capi_server_conf = """
zookeeper 127.0.0.1:2181
cluster_name %s
port 6200
daemonize no
num_conn_per_gw 2
init_timeout_millis 10000
log_level INFO
log_file_prefix "capi_server"
max_fd 4096
conn_reconnect_millis 1000
zk_reconnect_millis 1000
zk_session_timeout_millis 10000
local_proxy_query_timeout_millis 10000
        """ % self.cluster['cluster_name']
        old_cwd = os.path.abspath( os.getcwd() )
        os.chdir(util.capi_dir(0))
        f = open('capi_server.conf', 'w')
        f.write(_capi_server_conf)
        f.close()
        os.chdir(old_cwd)

        if self.arch == 32:
            cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER
        else:
            cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER

        capi_server = util.exec_proc_async(util.capi_dir(0),
                            cmd, True, None, subprocess.PIPE, None)

        # ping check
        while True:
            try:
                t = telnetlib.Telnet('127.0.0.1', 6200)
                break
            except Exception:
                time.sleep(1)
                continue

        t.write("ping\r\n")
        t.read_until('+PONG\r\n')
        t.close()

        return capi_server
Example #24
    def run_capi_server(self, arch=64):
        # run capi test server
        _capi_server_conf = """
zookeeper 127.0.0.1:2181
cluster_name %s
port 6200
daemonize no
num_conn_per_gw 2
init_timeout_millis 10000
log_level INFO
log_file_prefix "capi_server"
max_fd 4096
conn_reconnect_millis 1000
zk_reconnect_millis 1000
zk_session_timeout_millis 10000
local_proxy_query_timeout_millis 10000
        """ % self.cluster['cluster_name']
        old_cwd = os.path.abspath( os.getcwd() )
        os.chdir(util.capi_dir(0))
        f = open('capi_server.conf', 'w')
        f.write(_capi_server_conf)
        f.close()
        os.chdir(old_cwd)

        if arch == 32:
            cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER
        else:
            cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER

        capi_server = util.exec_proc_async(util.capi_dir(0),
                            cmd, True, None, subprocess.PIPE, None)

        # ping check
        while True:
            try:
                t = telnetlib.Telnet('127.0.0.1', 6200)
                break
            except Exception:
                time.sleep(1)
                continue

        t.write("ping\r\n")
        t.read_until('+PONG\r\n')
        t.close()

        return capi_server
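The connect-then-PING loop above also appears in Examples #23, #26, and #28; factored out, it could look like this (a sketch, not an existing helper in the suite):

    import telnetlib
    import time

    def wait_until_pong(ip, port, interval=1):
        # Retry the TCP connect until the server accepts it, then require a
        # +PONG reply so we know the service is actually answering commands.
        while True:
            try:
                t = telnetlib.Telnet(ip, port)
                break
            except Exception:
                time.sleep(interval)
        t.write("ping\r\n")
        t.read_until('+PONG\r\n')
        t.close()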
Example #25
    def testbase32hex_conversion(self):
        util.print_frame()

        count = 100
        dict = {}

        server0 = self.cluster['servers'][0]
        redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])

        while count > 0:
            count -= 1
            key = self.string_gen(random.randint(1, 1000))
            val = self.string_gen(random.randint(1, 1000))
            dict[key] = val
            redis0.write('*3\r\n$3\r\nset\r\n')
            redis0.write('$%d\r\n%s\r\n' % (len(key), key))
            redis0.write('$%d\r\n%s\r\n' % (len(val), val))
            ret = redis0.read_until('\r\n', 1)
            self.assertEqual(ret, '+OK\r\n')

        self.bgsave(redis0)

        cmd = "./dump-util --dump-iterator dump.rdb ./dump2json_base32hex.so out.json"
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)

        ret = p.wait()
        self.assertTrue(ret == 0)

        f = file("%s/out.json" % util.dump_util_dir(0), "r")
        skip_line = 2
        for line in f.readlines():
            # skip first 2 lines (smr_seqnum, smr_mstime)
            if skip_line > 0:
                skip_line -= 1
                continue

            line = line.strip()
            key = self.b32hexdecode(json.loads(line)['key'])
            val = self.b32hexdecode(json.loads(line)['value'])

            self.assertTrue(key in dict.keys(), 'key(%s) is not in json output' % key)
            self.assertEqual(dict[key], val,
                             "val(%s) does not match %s" % (dict[key], val))

        f.close()
Example #26
    def __test_local_proxy(self, arch=64):
        util.print_frame()

        # Clean server log file
        p = util.exec_proc_async(util.capi_dir(0),
                'rm capi_server-*',
                True, None, subprocess.PIPE, None)

        p.wait()

        # run test server
        _capi_server_conf = """
zookeeper 127.0.0.1:2181
cluster_name %s
port 6200
daemonize no
num_conn_per_gw 2
init_timeout_millis 10000
log_level INFO
log_file_prefix "capi_server"
max_fd 4096
conn_reconnect_millis 1000
zk_reconnect_millis 1000
zk_session_timeout_millis 10000
        """ % self.cluster['cluster_name']
        old_cwd = os.path.abspath( os.getcwd() )
        os.chdir(util.capi_dir(0))
        f = open('capi_server.conf', 'w')
        f.write(_capi_server_conf)
        f.close()
        os.chdir(old_cwd)

        if arch == 32:
            cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER
        else:
            cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER

        capi_server = util.exec_proc_async(util.capi_dir(0),
                            cmd, True, None, subprocess.PIPE, None)

        # ping check
        while True:
            try:
                t = telnetlib.Telnet('127.0.0.1', 6200)
                break
            except Exception:
                time.sleep(1)
                continue

        t.write("ping\r\n")
        t.read_until('+PONG\r\n')
        t.close()

        # Start load generator
        load_gen_thrd_list = {}
        for i in range(self.max_load_generator):
            load_gen_thrd_list[i] = load_generator.LoadGenerator(i, 'localhost', 6200)
            load_gen_thrd_list[i].start()

        time.sleep(5)

        # Check reconfiguration by SIGHUP
        p = util.exec_proc_async(util.capi_dir(0),
                'grep "Connected to the zookeeper" capi_server-* | wc -l',
                True, None, subprocess.PIPE, None)

        p.wait()
        wc = p.stdout.readline()
        print 'grep "Connected to the zookeeper" result : ' + wc
        self.assertEquals(wc.strip(), '1')

        capi_server.send_signal(signal.SIGHUP)
        time.sleep(5)

        p = util.exec_proc_async(util.capi_dir(0),
                'grep "Connected to the zookeeper" capi_server-* | wc -l',
                True, None, subprocess.PIPE, None)

        p.wait()
        wc = p.stdout.readline()
        print 'grep "Connected to the zookeeper" result : ' + wc
        self.assertEquals(wc.strip(), '2')

        p = util.exec_proc_async(util.capi_dir(0),
                'grep "Graceful shutdown caused by API" capi_server-* | wc -l',
                True, None, subprocess.PIPE, None)

        p.wait()
        wc = p.stdout.readline()
        print 'grep "Graceful shutdown caused by API" result : ' + wc
        self.assertEquals(wc.strip(), '1')

        # Check consistency after sending many SIGHUP signal
        for i in range(50):
            capi_server.send_signal(signal.SIGHUP)
            time.sleep(0.1)

        # check consistency of load_generator
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].quit()
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].join()
            self.assertTrue(load_gen_thrd_list[i].isConsistent(), 'Inconsistent after sending signal')

        # Terminate test server
        capi_server.send_signal(signal.SIGTERM)
        capi_server.wait()
Example #27
    def test_migration_with_expire_command(self):
        util.print_frame()

        util.log("start load_generator")
        load_gen_thrd_list = {}
        for i in range(1):
            ip, port = util.get_rand_gateway(self.cluster)
            load_gen_thrd_list[i] = load_generator.LoadGenerator(i, ip, port)
            load_gen_thrd_list[i].start()

        time.sleep(5) # generate load for 5 sec
        tps = 20000
        src_pg_id = 0
        dst_pg_id = 1
        leader_cm = self.cluster['servers'][0]
        src_master = util.get_server_by_role_and_pg(self.cluster['servers'], 'master', src_pg_id)
        dst_master = util.get_server_by_role_and_pg(self.cluster['servers'], 'master', dst_pg_id)

        smr = smr_mgmt.SMR(src_master['id'])
        ret = smr.connect(src_master['ip'], src_master['smr_mgmt_port'])
        if ret != 0:
            util.log('failed to connect to smr(source master)')
            return False

        src_redis = redis_mgmt.Redis(src_master['id'])
        ret = src_redis.connect(src_master['ip'], src_master['redis_port'] )
        self.assertEquals( ret, 0, 'failed to connect to redis' )

        dst_redis = redis_mgmt.Redis(dst_master['id'])
        ret = dst_redis.connect(dst_master['ip'], dst_master['redis_port'] )
        self.assertEquals( ret, 0, 'failed to connect to redis' )

        ts = time.time()
        self.setExpireKey(src_redis, 'beforeCheckpoint~beforeCheckpoint:expired', 10)
        self.setExpireKey(src_redis, 'beforeCheckpoint~beforeCheckpoint:persist', 20)
        self.setExpireS3Key(src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:expired', 10)
        self.setExpireS3Key(src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:persist', 20)

        self.setExpireS3Key(src_redis, 'S3:PermanentKey', 0)

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(src_redis, 'beforeCheckpoint~beforeCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis, 'beforeCheckpoint~beforeCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")

        util.log(">>> migrate test with expire command start(%s), ts:%d" % (time.asctime(), ts))

        ts = time.time()
        self.setExpireKey(src_redis, 'beforeCheckpoint~afterCheckpoint:expired', 10)
        self.setExpireKey(src_redis, 'beforeCheckpoint~afterCheckpoint:persist', 20)
        self.setExpireS3Key(src_redis, 'S3:beforeCheckpoint~afterCheckpoint:expired', 10)
        self.setExpireS3Key(src_redis, 'S3:beforeCheckpoint~afterCheckpoint:persist', 20)

        # notify dst_redis of migration start
        util.log(">>> notify dst_redis of migration start (%s)" % time.asctime())

        cmd = 'migconf migstart %d-%d\r\n' % (0, 8191)
        dst_redis.write(cmd)
        res = dst_redis.read_until('\r\n')
        self.assertEquals( res, '+OK\r\n' )

        # remote partial checkpoint
        util.log(">>> start remote checkpoint and load (%s)" % time.asctime())
        cmd = "./cluster-util --getandplay %s %d %s %d %d-%d %d" % (
                    src_master['ip'], src_master['redis_port'],
                    dst_master['ip'], dst_master['redis_port'],
                    0, 8191, tps)
        p = util.exec_proc_async(util.cluster_util_dir(src_master['id']), cmd, True, None, subprocess.PIPE, None)

        ret = p.wait()
        for line in p.stdout:
            if line.find("Checkpoint Sequence Number:") != -1:
                util.log("seqnumber : " + line[line.rfind(":")+1:])
                seq = int(line[line.rfind(":")+1:])
            util.log(">>>" + str(line.rstrip()))

        self.assertEqual(0, ret)
        util.log(">>> end remote checkpoint and load (%s)" % time.asctime())

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(src_redis, 'beforeCheckpoint~afterCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis, 'beforeCheckpoint~afterCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(src_redis, 'S3:beforeCheckpoint~afterCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(src_redis, 'S3:beforeCheckpoint~afterCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")

        # bgsave for testing later about recovery during migration
        util.log(">>> bgsave for testing later about recovery during migration (%s)" % time.asctime())
        cmd = 'bgsave\r\n'
        dst_redis.write(cmd)
        res = dst_redis.read_until('\r\n')
        self.assertEquals( res, '+Background saving started\r\n' )

        ts = time.time()
        self.setExpireKey(src_redis, 'afterCheckpoint~afterCheckpoint:expired', 10)
        self.setExpireKey(src_redis, 'afterCheckpoint~afterCheckpoint:persist', 20)
        self.setExpireS3Key(src_redis, 'S3:afterCheckpoint~afterCheckpoint:expired', 10)
        self.setExpireS3Key(src_redis, 'S3:afterCheckpoint~afterCheckpoint:persist', 20)

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(src_redis, 'afterCheckpoint~afterCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis, 'afterCheckpoint~afterCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(src_redis, 'S3:afterCheckpoint~afterCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(src_redis, 'S3:afterCheckpoint~afterCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        self.setExpireKey(src_redis, 'afterCheckpoint~duringCatchup:expired', 10)
        self.setExpireKey(src_redis, 'afterCheckpoint~duringCatchup:persist', 100)
        self.setExpireS3Key(src_redis, 'S3:afterCheckpoint~duringCatchup:expired', 10)
        self.setExpireS3Key(src_redis, 'S3:afterCheckpoint~duringCatchup:persist', 100)

        # remote catchup (smr log migration)
        util.log(">>> start remote catchup (%s)" % time.asctime())

        dst_host = dst_master['ip']
        dst_smr_port = dst_master['smr_base_port']
        rle = '1 8192'
        num_part = 8192

        smr.write('migrate start %s %d %d %d %d %s\r\n' % (dst_host, dst_smr_port,
                                                           seq, tps, num_part, rle))
        response = smr.read_until('\r\n')
        if response[:3] != '+OK':
            util.log('failed to execute migrate start command, response:%s' % response)
            return False

        while True:
            smr.write('migrate info\r\n')
            response = smr.read_until('\r\n')
            seqs = response.split()
            logseq = int(seqs[1].split(':')[1])
            mig = int(seqs[2].split(':')[1])
            util.log('migrate info: %s' % response)
            if logseq - mig < 500000:
                util.log('Remote catchup almost done. try mig2pc')
                break
            time.sleep(1)

        util.log(">>> sleep until 90 sec pass")
        self.assertFalse(time.time() - ts >= 90)
        time.sleep(90 - (time.time() - ts))

        res = self.persistKey(src_redis, 'afterCheckpoint~duringCatchup:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis, 'afterCheckpoint~duringCatchup:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(src_redis, 'S3:afterCheckpoint~duringCatchup:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(src_redis, 'S3:afterCheckpoint~duringCatchup:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        self.setExpireKey(src_redis, 'duringCatchup~duringCatchup:expired', 10)
        self.setExpireKey(src_redis, 'duringCatchup~duringCatchup:persist', 20)
        self.setExpireS3Key(src_redis, 'S3:duringCatchup~duringCatchup:expired', 10)
        self.setExpireS3Key(src_redis, 'S3:duringCatchup~duringCatchup:persist', 20)

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(src_redis, 'duringCatchup~duringCatchup:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis, 'duringCatchup~duringCatchup:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(src_redis, 'S3:duringCatchup~duringCatchup:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(src_redis, 'S3:duringCatchup~duringCatchup:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        self.setExpireKey(src_redis, 'duringCatchup~afterMig2pc:expired', 10)
        self.setExpireKey(src_redis, 'duringCatchup~afterMig2pc:persist', 20)
        self.setExpireS3Key(src_redis, 'S3:duringCatchup~afterMig2pc:expired', 10)
        self.setExpireS3Key(src_redis, 'S3:duringCatchup~afterMig2pc:persist', 20)

        util.log(">>> remote catchup phase almost done (%s)" % time.asctime())

        # mig2pc
        util.log(">>> start mig2pc (%s)" % time.asctime())

        cmd = 'mig2pc %s %d %d %d %d' % (self.cluster['cluster_name'], src_pg_id, dst_pg_id,
                                         0, 8191)
        result = util.cm_command(leader_cm['ip'], leader_cm['cm_port'], cmd)
        util.log('mig2pc result : ' + result)
        if not result.startswith('{"state":"success","msg":"+OK"}\r\n'):
            util.log('failed to execute mig2pc command, result:%s' % result)
            return False

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(dst_redis, 'duringCatchup~afterMig2pc:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(dst_redis, 'duringCatchup~afterMig2pc:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(dst_redis, 'S3:duringCatchup~afterMig2pc:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(dst_redis, 'S3:duringCatchup~afterMig2pc:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        self.setExpireKey(dst_redis, 'afterMig2pc~migrateEnd:expired', 10)
        self.setExpireKey(dst_redis, 'afterMig2pc~migrateEnd:persist', 20)
        self.setExpireS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:expired', 10)
        self.setExpireS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:persist', 20)

        # finish migration
        smr.write('migrate interrupt\r\n')
        response = smr.read_until('\r\n')
        util.log('migrate interrupt: %s' % response)
        smr.disconnect()

        # notify dst_redis of migration end
        util.log(">>> notify dst_redis of migration end (%s)" % time.asctime())

        cmd = 'migconf migend\r\n'
        dst_redis.write(cmd)
        res = dst_redis.read_until('\r\n')
        self.assertEquals( res, '+OK\r\n' )

        cmd = 'migconf clearstart %d-%d\r\n' % (0, 8191)
        src_redis.write(cmd)
        res = src_redis.read_until('\r\n')
        self.assertEquals( res, '+OK\r\n' )

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(dst_redis, 'afterMig2pc~migrateEnd:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(dst_redis, 'afterMig2pc~migrateEnd:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        self.assertTrue(self.isExist(dst_redis, 'beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(self.isExist(dst_redis, 'beforeCheckpoint~beforeCheckpoint:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~beforeCheckpoint:expired'))

        self.assertTrue(self.isExist(dst_redis, 'beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isExist(dst_redis, 'beforeCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:afterCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:afterCheckpoint~duringCatchup:expired'))

        self.assertTrue(self.isExist(dst_redis, 'duringCatchup~duringCatchup:persist'))
        self.assertFalse(self.isExist(dst_redis, 'duringCatchup~duringCatchup:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:duringCatchup~duringCatchup:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:duringCatchup~duringCatchup:expired'))

        self.assertTrue(self.isExist(dst_redis, 'duringCatchup~afterMig2pc:persist'))
        self.assertFalse(self.isExist(dst_redis, 'duringCatchup~afterMig2pc:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:expired'))

        self.assertTrue(self.isExist(dst_redis, 'afterMig2pc~migrateEnd:persist'))
        self.assertFalse(self.isExist(dst_redis, 'afterMig2pc~migrateEnd:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:expired'))

        # remote partial checkpoint
        util.log(">>> start rangedel (%s)" % time.asctime())
        cmd = "./cluster-util --rangedel %s %d %d-%d %d" % (
                    src_master['ip'], src_master['redis_port'],
                    0, 8191, tps)
        p = util.exec_proc_async(util.cluster_util_dir(src_master['id']), cmd, True, None, subprocess.PIPE, None)
        ret = p.wait()

        for line in p.stdout:
            util.log(">>>" + str(line.rstrip()))
        self.assertEqual(0, ret)

        cmd = 'migconf clearend\r\n'
        src_redis.write(cmd)
        res = src_redis.read_until('\r\n')
        self.assertEqual(res, '+OK\r\n')

        time.sleep(5) # generate load for 5 sec
        # check consistency of load_generator
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].quit()
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].join()
            self.assertTrue(load_gen_thrd_list[i].isConsistent(), 'Inconsistent after migration')

        # kill dst_redis and recover from bgsave
        util.log(">>> kill dst_redis and recover from bgsave (%s)" % time.asctime())

        dst_redis.disconnect()
        ret = testbase.request_to_shutdown_redis(dst_master)
        self.assertEquals( ret, 0, 'failed to shutdown redis' )
        ret = testbase.request_to_shutdown_smr(dst_master)
        self.assertEquals(ret, 0, 'failed to shutdown smr')
        time.sleep(5)

        ret = testbase.request_to_start_smr(dst_master)
        self.assertEqual( ret, 0, 'failed to start smr, server:%d' % dst_master['id'] )

        ret = testbase.request_to_start_redis(dst_master)
        self.assertEqual( ret, 0, 'failed to start redis, server:%d' % dst_master['id']  )

        ret = testbase.wait_until_finished_to_set_up_role(dst_master)
        self.assertEquals( ret, 0, 'failed to role change. server:%d' % (dst_master['id']) )

        dst_redis = redis_mgmt.Redis(dst_master['id'])
        ret = dst_redis.connect(dst_master['ip'], dst_master['redis_port'] )
        self.assertEquals( ret, 0, 'failed to connect to redis' )

        self.assertTrue(self.isExist(dst_redis, 'beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(self.isExist(dst_redis, 'beforeCheckpoint~beforeCheckpoint:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~beforeCheckpoint:expired'))

        self.assertTrue(self.isExist(dst_redis, 'beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isExist(dst_redis, 'beforeCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:beforeCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:afterCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:afterCheckpoint~duringCatchup:expired'))

        self.assertTrue(self.isExist(dst_redis, 'duringCatchup~duringCatchup:persist'))
        self.assertFalse(self.isExist(dst_redis, 'duringCatchup~duringCatchup:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:duringCatchup~duringCatchup:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:duringCatchup~duringCatchup:expired'))

        self.assertTrue(self.isExist(dst_redis, 'duringCatchup~afterMig2pc:persist'))
        self.assertFalse(self.isExist(dst_redis, 'duringCatchup~afterMig2pc:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:expired'))

        self.assertTrue(self.isExist(dst_redis, 'afterMig2pc~migrateEnd:persist'))
        self.assertFalse(self.isExist(dst_redis, 'afterMig2pc~migrateEnd:expired'))
        self.assertTrue(self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:persist'))
        self.assertFalse(self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:expired'))

        self.getS3TTL(dst_redis, 'S3:PermanentKey')

        # kill dst_slave redis and recover without dump file
        util.log(">>> kill dst_redis and recover without dump file (%s)" % time.asctime())
        dst_slave = util.get_server_by_role_and_pg(self.cluster['servers'], 'slave', dst_pg_id)

        ret = testbase.request_to_shutdown_redis(dst_slave)
        self.assertEquals( ret, 0, 'failed to shutdown redis' )
        ret = testbase.request_to_shutdown_smr(dst_slave)
        self.assertEquals(ret, 0, 'failed to shutdown smr')
        time.sleep(5)

        ret = testbase.request_to_start_smr(dst_slave)
        self.assertEqual( ret, 0, 'failed to start smr, server:%d' % dst_slave['id'] )

        ret = testbase.request_to_start_redis(dst_slave)
        self.assertEqual( ret, 0, 'failed to start redis, server:%d' % dst_slave['id']  )

        ret = testbase.wait_until_finished_to_set_up_role(dst_slave)
        self.assertEquals( ret, 0, 'failed to role change. server:%d' % (dst_slave['id']) )

        dst_redis_slave = redis_mgmt.Redis(dst_slave['id'])
        ret = dst_redis_slave.connect(dst_slave['ip'], dst_slave['redis_port'] )
        self.assertEquals( ret, 0, 'failed to connect to redis' )

        self.assertTrue(self.isExist(dst_redis_slave, 'beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(self.isExist(dst_redis_slave, 'beforeCheckpoint~beforeCheckpoint:expired'))
        self.assertTrue(self.isS3Exist(dst_redis_slave, 'S3:beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(self.isS3Exist(dst_redis_slave, 'S3:beforeCheckpoint~beforeCheckpoint:expired'))

        self.assertTrue(self.isExist(dst_redis_slave, 'beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isExist(dst_redis_slave, 'beforeCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(self.isS3Exist(dst_redis_slave, 'S3:beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isS3Exist(dst_redis_slave, 'S3:beforeCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(self.isExist(dst_redis_slave, 'afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isExist(dst_redis_slave, 'afterCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(self.isS3Exist(dst_redis_slave, 'S3:afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(self.isS3Exist(dst_redis_slave, 'S3:afterCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(self.isExist(dst_redis_slave, 'afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(self.isExist(dst_redis_slave, 'afterCheckpoint~duringCatchup:expired'))
        self.assertTrue(self.isS3Exist(dst_redis_slave, 'S3:afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(self.isS3Exist(dst_redis_slave, 'S3:afterCheckpoint~duringCatchup:expired'))

        self.assertTrue(self.isExist(dst_redis_slave, 'duringCatchup~duringCatchup:persist'))
        self.assertFalse(self.isExist(dst_redis_slave, 'duringCatchup~duringCatchup:expired'))
        self.assertTrue(self.isS3Exist(dst_redis_slave, 'S3:duringCatchup~duringCatchup:persist'))
        self.assertFalse(self.isS3Exist(dst_redis_slave, 'S3:duringCatchup~duringCatchup:expired'))

        self.assertTrue(self.isExist(dst_redis_slave, 'duringCatchup~afterMig2pc:persist'))
        self.assertFalse(self.isExist(dst_redis_slave, 'duringCatchup~afterMig2pc:expired'))
        self.assertTrue(self.isS3Exist(dst_redis_slave, 'S3:duringCatchup~afterMig2pc:persist'))
        self.assertFalse(self.isS3Exist(dst_redis_slave, 'S3:duringCatchup~afterMig2pc:expired'))

        self.assertTrue(self.isExist(dst_redis_slave, 'afterMig2pc~migrateEnd:persist'))
        self.assertFalse(self.isExist(dst_redis_slave, 'afterMig2pc~migrateEnd:expired'))
        self.assertTrue(self.isS3Exist(dst_redis_slave, 'S3:afterMig2pc~migrateEnd:persist'))
        self.assertFalse(self.isS3Exist(dst_redis_slave, 'S3:afterMig2pc~migrateEnd:expired'))

        self.getS3TTL(dst_redis_slave, 'S3:PermanentKey')

        # Go back to initial configuration
        self.assertTrue(util.migration(self.cluster, dst_pg_id, src_pg_id, 0, 8191, 40000),
                'failed to rollback migration')
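For orientation, the management-command sequence that the test above drives, reconstructed from its calls:

    # 1. dst redis:  migconf migstart 0-8191
    # 2. src host:   cluster-util --getandplay ...   (remote partial checkpoint;
    #                parse "Checkpoint Sequence Number:" from its stdout)
    # 3. src smr:    migrate start <dst_ip> <dst_smr_port> <seq> <tps> <num_part> <rle>,
    #                then poll "migrate info" until logseq - mig drops below 500000
    # 4. leader cm:  mig2pc <cluster> <src_pg_id> <dst_pg_id> 0 8191
    # 5. src smr:    migrate interrupt
    # 6. dst redis:  migconf migend; src redis: migconf clearstart 0-8191
    # 7. src host:   cluster-util --rangedel ...; src redis: migconf clearend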
Example #28
    def test_local_proxy(self, arch=64):
        util.print_frame()

        # Clean server log file
        p = util.exec_proc_async(util.capi_dir(0), 'rm capi_server-*', True,
                                 None, subprocess.PIPE, None)

        p.wait()

        # run test server
        capi_server = self.run_test_server(arch)

        # ping check
        while True:
            try:
                t = telnetlib.Telnet('127.0.0.1', 6200)
                break
            except Exception:
                time.sleep(1)
                continue

        t.write("ping\r\n")
        t.read_until('+PONG\r\n')
        t.close()

        # Start load generator
        load_gen_thrd_list = {}
        for i in range(self.max_load_generator):
            load_gen_thrd_list[i] = load_generator.LoadGenerator(
                i, 'localhost', 6200)
            load_gen_thrd_list[i].start()

        time.sleep(5)

        # Check reconfiguration by SIGHUP
        p = util.exec_proc_async(
            util.capi_dir(0),
            'grep "Connected to the zookeeper" capi_server-* | wc -l', True,
            None, subprocess.PIPE, None)

        p.wait()
        wc = p.stdout.readline()
        print 'grep "Connected to the zookeeper" result : ' + wc
        self.assertEquals(wc.strip(), '1')

        capi_server.send_signal(signal.SIGHUP)
        time.sleep(5)

        p = util.exec_proc_async(
            util.capi_dir(0),
            'grep "Connected to the zookeeper" capi_server-* | wc -l', True,
            None, subprocess.PIPE, None)

        p.wait()
        wc = p.stdout.readline()
        print 'grep "Connected to the zookeeper" result : ' + wc
        self.assertEquals(wc.strip(), '2')

        p = util.exec_proc_async(
            util.capi_dir(0),
            'grep "Graceful shutdown caused by API" capi_server-* | wc -l',
            True, None, subprocess.PIPE, None)

        p.wait()
        wc = p.stdout.readline()
        print 'grep "Graceful shutdown caused by API" result : ' + wc
        self.assertEquals(wc.strip(), '1')

        # Check consistency after sending many SIGHUP signal
        for i in range(50):
            capi_server.send_signal(signal.SIGHUP)
            time.sleep(0.1)

        # check consistency of load_generator
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].quit()
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].join()
            self.assertTrue(load_gen_thrd_list[i].isConsistent(),
                            'Inconsistent after sending signal')

        # Terminate test server
        self.stop_test_server(capi_server)
Example #29
    def test_no_error_scenario_and_memory_leak(self):
        util.print_frame()

        # Start with valgrind
        p = util.exec_proc_async('%s/.obj%d' % (constant.ARCCI_DIR, self.arch),
                "valgrind ./dummy-perf -z localhost:2181 -c %s -n 5 -s 60" % self.cluster['cluster_name'],
                subprocess.PIPE, subprocess.PIPE, subprocess.PIPE)

        # Set up arguments
        server = self.cluster['servers'][0]
        gw_id = server['id']
        gw_port = server['gateway_port']

        # Check load
        for i in range(5):
            ok = True
            for s in self.cluster['servers']:
                tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
                util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
                if tps < 50:
                    ok = False

            if ok:
                break

            time.sleep(1)
        self.assertTrue(ok, 'failed to send requests')

        # Delete gateway
        self.assertTrue(
                util.gw_del(CLUSTER_NAME, gw_id, MGMT_IP, MGMT_PORT),
                'failed to delete gateway')

        # Check load
        for i in range(5):
            ok = True
            tps = util.get_tps(server['ip'], server['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (server['ip'], server['gateway_port'], tps))
            if tps > 10:
                ok = False

            if ok:
                break

            time.sleep(1)
        self.assertTrue(ok, 'load was not drained from the deleted gateway')

        # Stop gateway
        ret = util.shutdown_gateway(gw_id, gw_port, True)
        self.assertEqual(ret, 0, 'failed : shutdown gateway%d' % gw_id)
        time.sleep(5)

        # Start gateway
        ret = util.start_gateway(gw_id, server['ip'], MGMT_PORT, server['cluster_name'], gw_port)
        self.assertEqual(ret, 0, 'failed : start gateway%d' % gw_id)
        time.sleep(3)

        # Add gateway
        self.assertTrue(
                util.gw_add(CLUSTER_NAME, gw_id, HOST_NAME, HOST_IP, gw_port, MGMT_IP, MGMT_PORT),
                'failed to add gateway')
        time.sleep(10)

        # Check loadbalancing
        for i in range(5):
            ok = True
            for s in self.cluster['servers']:
                tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
                util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
                if tps < 50:
                    ok = False

            if ok:
                break

            time.sleep(1)

        if not ok:
            self.fail('FAIL, loadbalancing')
        else:
            util.log('SUCCESS, loadbalancing.')

        # Check no error
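        # Scan the combined output: any '[ERR]' line fails the test, and
        # valgrind's "All heap blocks were freed" summary line proves there is
        # no leak.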
        no_memory_leak = False
        (stdout, stderr) = p.communicate()
        for line in stdout.split("\n"):
            print line

            if line.find('[ERR]') != -1:
                self.fail('find error, msg:%s' % line)

            if line.find('All heap blocks were freed -- no leaks are possible') != -1:
                no_memory_leak = True

        self.assertTrue(no_memory_leak, 'memory leaks are possible')
        util.log('no leaks are possible')
Example #30
0
    def test_data_type(self):
        util.print_frame()

        dict = {}
        server0 = self.cluster['servers'][0]
        redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])

        # String
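        # Commands are written as raw RESP: '*N' opens an array of N bulk
        # strings, each encoded as '$<len>\r\n<data>\r\n'; replies are '+OK'
        # (status) or ':N' (integer).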
        dict['string'] = {}
        key = self.string_gen(random.randint(1, 5)) + '_type_string'
        val = self.string_gen(random.randint(1, 5))
        dict['string']['key'] = key
        dict['string']['val'] = val

        redis0.write('*3\r\n$3\r\nset\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$%d\r\n%s\r\n' % (len(val), val))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, '+OK\r\n')

        # List
        dict['list'] = {}
        key = self.string_gen(random.randint(1, 5)) + '_type_list'
        val1 = self.string_gen(random.randint(1, 5))
        val2 = self.string_gen(random.randint(1, 5))
        dict['list']['key'] = key
        dict['list']['val1'] = val1
        dict['list']['val2'] = val1  # Duplicate value
        dict['list']['val3'] = val2

        redis0.write('*5\r\n$5\r\nrpush\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$%d\r\n%s\r\n' % (len(val2), val2))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, ':3\r\n')

        # Set
        dict['set'] = {}
        key = self.string_gen(random.randint(1, 5)) + '_type_set'
        val1 = self.string_gen(random.randint(1, 5)) + '_v1'
        val2 = self.string_gen(random.randint(1, 5)) + '_v2'
        dict['set']['key'] = key
        dict['set']['val1'] = val1
        dict['set']['val2'] = val2

        redis0.write('*4\r\n$4\r\nsadd\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$%d\r\n%s\r\n' % (len(val2), val2))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, ':2\r\n')

        # Sorted Set
        dict['zset'] = {}
        key = self.string_gen(random.randint(1, 5)) + '_type_zset'
        val1 = self.string_gen(random.randint(1, 5)) + '_v1'
        val2 = self.string_gen(random.randint(1, 5)) + '_v2'
        dict['zset']['key'] = key
        dict['zset']['val1'] = val1
        dict['zset']['score1'] = 20
        dict['zset']['val2'] = val2
        dict['zset']['score2'] = 10

        redis0.write('*6\r\n$4\r\nzadd\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$2\r\n20\r\n$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$2\r\n10\r\n$%d\r\n%s\r\n' % (len(val2), val2))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, ':2\r\n')

        # Hash
        dict['hash'] = {}
        key = self.string_gen(random.randint(1, 5)) + '_type_hash'
        key1 = self.string_gen(random.randint(1, 5)) + '_k1'
        val1 = self.string_gen(random.randint(1, 5))
        key2 = self.string_gen(random.randint(1, 5)) + '_k2'
        val2 = self.string_gen(random.randint(1, 5))
        dict['hash']['key'] = key
        dict['hash'][key1] = val1
        dict['hash'][key2] = val2

        redis0.write('*6\r\n$5\r\nhmset\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$%d\r\n%s\r\n' % (len(key1), key1))
        redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$%d\r\n%s\r\n' % (len(key2), key2))
        redis0.write('$%d\r\n%s\r\n' % (len(val2), val2))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, '+OK\r\n')

        self.bgsave(redis0)

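        # Convert the saved rdb to json with the dump2json plugin and verify
        # every record against the values written above.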
        cmd = "./dump-util --dump-iterator dump.rdb ./dump2json_base32hex.so out.json"
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None,
                                 subprocess.PIPE, None)

        ret = p.wait()
        self.assertTrue(ret == 0)

        f = file("%s/out.json" % util.dump_util_dir(0), "r")
        skip_line = 2
        for line in f.readlines():
            # skip first 2 lines (smr_seqnum, smr_mstime)
            if skip_line > 0:
                skip_line -= 1
                continue

            data = json.loads(line.strip())
            key = self.b32hexdecode(data['key'])

            if data['type'] == 'string':
                self.assertEqual(
                    dict['string']['key'], key,
                    "key(%s) is not match with %s" %
                    (dict['string']['key'], key))

                val = self.b32hexdecode(data['value'])

                self.assertEqual(
                    dict['string']['val'], val,
                    "val(%s) is not match with %s" %
                    (dict['string']['val'], val))

            elif data['type'] == 'list':
                self.assertEqual(
                    dict['list']['key'], key, "key(%s) is not match with %s" %
                    (dict['list']['key'], key))

                val1 = self.b32hexdecode(data['value'][0])
                val2 = self.b32hexdecode(data['value'][1])
                val3 = self.b32hexdecode(data['value'][2])

                self.assertEqual(
                    dict['list']['val1'], val1,
                    "val(%s) is not match with %s" %
                    (dict['list']['val1'], val1))
                self.assertEqual(
                    dict['list']['val2'], val2,
                    "val(%s) is not match with %s" %
                    (dict['list']['val2'], val2))
                self.assertEqual(
                    dict['list']['val3'], val3,
                    "val(%s) is not match with %s" %
                    (dict['list']['val3'], val3))

            elif data['type'] == 'set':
                self.assertEqual(
                    dict['set']['key'], key,
                    "key(%s) is not match with %s" % (dict['set']['key'], key))

                val1 = self.b32hexdecode(data['value'][0])
                val2 = self.b32hexdecode(data['value'][1])
                if not ((val1 == dict['set']['val1'] and val2 == dict['set']['val2'])
                        or (val1 == dict['set']['val2'] and val2 == dict['set']['val1'])):

                    util.log(
                        "values(%s, %s) is not match with (%s, %s)" %
                        (dict['set']['val1'], dict['set']['val2'], val1, val2))
                    self.assertTrue(False)

            elif data['type'] == 'zset':
                self.assertEqual(
                    dict['zset']['key'], key, "key(%s) is not match with %s" %
                    (dict['zset']['key'], key))

                # The json output lists zset members in ascending score order,
                # so index 0 holds the lower-scored member (val2, score 10)
                val2 = self.b32hexdecode(data['value'][0]['data'])
                score2 = int(data['value'][0]['score'])
                val1 = self.b32hexdecode(data['value'][1]['data'])
                score1 = int(data['value'][1]['score'])

                self.assertEqual(
                    dict['zset']['val1'], val1,
                    "val(%s) is not match with %s" %
                    (dict['zset']['val1'], val1))
                self.assertEqual(
                    dict['zset']['score1'], score1,
                    "score(%d) is not match with %d" %
                    (dict['zset']['score1'], score1))
                self.assertEqual(
                    dict['zset']['val2'], val2,
                    "val(%s) is not match with %s" %
                    (dict['zset']['val2'], val2))
                self.assertEqual(
                    dict['zset']['score2'], score2,
                    "score(%d) is not match with %d" %
                    (dict['zset']['score2'], score2))

            elif data['type'] == 'hash':
                self.assertEqual(
                    dict['hash']['key'], key, "key(%s) is not match with %s" %
                    (dict['hash']['key'], key))

                key1 = self.b32hexdecode(data['value'][0]['hkey'])
                val1 = self.b32hexdecode(data['value'][0]['hval'])
                key2 = self.b32hexdecode(data['value'][1]['hkey'])
                val2 = self.b32hexdecode(data['value'][1]['hval'])

                self.assertTrue(key1 in dict['hash'].keys(),
                                'hkey(%s) is not in json output' % key1)
                self.assertTrue(key2 in dict['hash'].keys(),
                                'hkey(%s) is not in json output' % key2)
                self.assertEqual(
                    dict['hash'][key1], val1, "val(%s) is not match with %s" %
                    (dict['hash'][key1], val1))
                self.assertEqual(
                    dict['hash'][key2], val2, "val(%s) is not match with %s" %
                    (dict['hash'][key2], val2))

            else:
                self.assertTrue(False, "Unknown type")

        f.close()
Example #31
0
    def numOpenFds(self, pid):
        p = util.exec_proc_async(util.cluster_util_dir(0),
                                 'ls /proc/%s/fd | wc -l' % pid, True, None,
                                 subprocess.PIPE, None)
        ret = p.wait()
        return p.stdout.read()[:-1]
Example #32
0
    def __test_local_proxy(self, arch=64):
        util.print_frame()

        # Clean server log file
        p = util.exec_proc_async(util.capi_dir(0), 'rm capi_server-*', True,
                                 None, subprocess.PIPE, None)

        p.wait()

        # run test server
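        # The inline conf below points the server at a local zookeeper
        # (127.0.0.1:2181), serves on port 6200 and keeps it in the foreground
        # (daemonize no) so the test can signal it directly.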
        _capi_server_conf = """
zookeeper 127.0.0.1:2181
cluster_name %s
port 6200
daemonize no
num_conn_per_gw 2
init_timeout_millis 10000
log_level INFO
log_file_prefix "capi_server"
max_fd 4096
conn_reconnect_millis 1000
zk_reconnect_millis 1000
zk_session_timeout_millis 10000
        """ % self.cluster['cluster_name']
        old_cwd = os.path.abspath(os.getcwd())
        os.chdir(util.capi_dir(0))
        f = open('capi_server.conf', 'w')
        f.write(_capi_server_conf)
        f.close()
        os.chdir(old_cwd)

        if arch == 32:
            cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER
        else:
            cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER

        capi_server = util.exec_proc_async(util.capi_dir(0), cmd, True, None,
                                           subprocess.PIPE, None)

        # ping check
        while True:
            try:
                t = telnetlib.Telnet('127.0.0.1', 6200)
                break
            except:
                time.sleep(1)
                continue

        t.write("ping\r\n")
        t.read_until('+PONG\r\n')
        t.close()

        # Start load generator
        load_gen_thrd_list = {}
        for i in range(self.max_load_generator):
            load_gen_thrd_list[i] = load_generator.LoadGenerator(
                i, 'localhost', 6200)
            load_gen_thrd_list[i].start()

        time.sleep(5)

        # Check reconfiguration by SIGHUP
        p = util.exec_proc_async(
            util.capi_dir(0),
            'grep "Connected to the zookeeper" capi_server-* | wc -l', True,
            None, subprocess.PIPE, None)

        p.wait()
        wc = p.stdout.readline()
        print 'grep "Connected to the zookeeper" result : ' + wc
        self.assertEquals(wc.strip(), '1')

        capi_server.send_signal(signal.SIGHUP)
        time.sleep(5)

        p = util.exec_proc_async(
            util.capi_dir(0),
            'grep "Connected to the zookeeper" capi_server-* | wc -l', True,
            None, subprocess.PIPE, None)

        p.wait()
        wc = p.stdout.readline()
        print 'grep "Connected to the zookeeper" result : ' + wc
        self.assertEquals(wc.strip(), '2')

        p = util.exec_proc_async(
            util.capi_dir(0),
            'grep "Graceful shutdown caused by API" capi_server-* | wc -l',
            True, None, subprocess.PIPE, None)

        p.wait()
        wc = p.stdout.readline()
        print 'grep "Graceful shutdown caused by API" result : ' + wc
        self.assertEquals(wc.strip(), '1')

        # Check consistency after sending many SIGHUP signals
        for i in range(50):
            capi_server.send_signal(signal.SIGHUP)
            time.sleep(0.1)

        # check consistency of load_generator
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].quit()
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].join()
            self.assertTrue(load_gen_thrd_list[i].isConsistent(),
                            'Inconsistent after sending signal')

        # Terminate test server
        capi_server.send_signal(signal.SIGTERM)
        capi_server.wait()
Example #33
0
    def numOpenFds(self, pid):
        p = util.exec_proc_async(util.cluster_util_dir(0),
                                 'ls /proc/%s/fd | wc -l' % pid, True, None,
                                 subprocess.PIPE, None)
        ret = p.wait()
        return p.stdout.read()[:-1]
Example #34
0
    def test_migration_with_expire_command(self):
        util.print_frame()

        util.log("start load_generator")
        load_gen_thrd_list = {}
        for i in range(1):
            ip, port = util.get_rand_gateway(self.cluster)
            load_gen_thrd_list[i] = load_generator.LoadGenerator(i, ip, port)
            load_gen_thrd_list[i].start()

        time.sleep(5)  # generate load for 5 sec
        tps = 20000
        src_pg_id = 0
        dst_pg_id = 1
        leader_cm = self.cluster['servers'][0]
        src_master = util.get_server_by_role_and_pg(self.cluster['servers'],
                                                    'master', src_pg_id)
        dst_master = util.get_server_by_role_and_pg(self.cluster['servers'],
                                                    'master', dst_pg_id)

        smr = smr_mgmt.SMR(src_master['id'])
        ret = smr.connect(src_master['ip'], src_master['smr_mgmt_port'])
        if ret != 0:
            util.log('failed to connect to smr(source master)')
            return False

        src_redis = redis_mgmt.Redis(src_master['id'])
        ret = src_redis.connect(src_master['ip'], src_master['redis_port'])
        self.assertEquals(ret, 0, 'failed to connect to redis')

        dst_redis = redis_mgmt.Redis(dst_master['id'])
        ret = dst_redis.connect(dst_master['ip'], dst_master['redis_port'])
        self.assertEquals(ret, 0, 'failed to connect to redis')

        ts = time.time()
        self.setExpireKey(src_redis,
                          'beforeCheckpoint~beforeCheckpoint:expired', 10)
        self.setExpireKey(src_redis,
                          'beforeCheckpoint~beforeCheckpoint:persist', 20)
        self.setExpireS3Key(src_redis,
                            'S3:beforeCheckpoint~beforeCheckpoint:expired', 10)
        self.setExpireS3Key(src_redis,
                            'S3:beforeCheckpoint~beforeCheckpoint:persist', 20)

        self.setExpireS3Key(src_redis, 'S3:PermanentKey', 0)

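        # Keys with a 10s TTL must be expired and keys with a 20s TTL must
        # still be alive; fail fast if more than 15s already passed, otherwise
        # sleep up to exactly 15s after ts.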
        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(src_redis,
                              'beforeCheckpoint~beforeCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis,
                              'beforeCheckpoint~beforeCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(
            src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(
            src_redis, 'S3:beforeCheckpoint~beforeCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")

        util.log(">>> migrate test with expire command start(%s), ts:%d" %
                 (time.asctime(), ts))

        ts = time.time()
        self.setExpireKey(src_redis,
                          'beforeCheckpoint~afterCheckpoint:expired', 10)
        self.setExpireKey(src_redis,
                          'beforeCheckpoint~afterCheckpoint:persist', 20)
        self.setExpireS3Key(src_redis,
                            'S3:beforeCheckpoint~afterCheckpoint:expired', 10)
        self.setExpireS3Key(src_redis,
                            'S3:beforeCheckpoint~afterCheckpoint:persist', 20)

        # notify dst_redis of migration start
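        # 'migconf migstart <range>' (as used here) switches dst_redis into
        # migration-target mode for slots 0-8191.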
        util.log(">>> notify dst_redis of migration start (%s)" %
                 time.asctime())

        cmd = 'migconf migstart %d-%d\r\n' % (0, 8191)
        dst_redis.write(cmd)
        res = dst_redis.read_until('\r\n')
        self.assertEquals(res, '+OK\r\n')

        # remote partial checkpoint
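        # --getandplay streams a partial checkpoint of slots 0-8191 from the
        # source redis and replays it into the destination, throttled to tps.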
        util.log(">>> start remote checkpoint and load (%s)" % time.asctime())
        cmd = "./cluster-util --getandplay %s %d %s %d %d-%d %d" % (
            src_master['ip'], src_master['redis_port'], dst_master['ip'],
            dst_master['redis_port'], 0, 8191, tps)
        p = util.exec_proc_async(util.cluster_util_dir(src_master['id']), cmd,
                                 True, None, subprocess.PIPE, None)

        ret = p.wait()
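        # Capture the checkpoint sequence number from the output; it seeds the
        # 'migrate start' command below.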
        for line in p.stdout:
            if line.find("Checkpoint Sequence Number:") != -1:
                util.log("seqnumber : " + line[line.rfind(":") + 1:])
                seq = int(line[line.rfind(":") + 1:])
            util.log(">>>" + str(line.rstrip()))

        self.assertEqual(0, ret)
        util.log(">>> end remote checkpoint and load (%s)" % time.asctime())

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(src_redis,
                              'beforeCheckpoint~afterCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis,
                              'beforeCheckpoint~afterCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(src_redis,
                                'S3:beforeCheckpoint~afterCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(src_redis,
                                'S3:beforeCheckpoint~afterCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")

        # bgsave now so that recovery from this dump can be tested later,
        # during migration
        util.log(
            ">>> bgsave for the later recovery-during-migration test (%s)"
            % time.asctime())
        cmd = 'bgsave\r\n'
        dst_redis.write(cmd)
        res = dst_redis.read_until('\r\n')
        self.assertEquals(res, '+Background saving started\r\n')

        ts = time.time()
        self.setExpireKey(src_redis, 'afterCheckpoint~afterCheckpoint:expired',
                          10)
        self.setExpireKey(src_redis, 'afterCheckpoint~afterCheckpoint:persist',
                          20)
        self.setExpireS3Key(src_redis,
                            'S3:afterCheckpoint~afterCheckpoint:expired', 10)
        self.setExpireS3Key(src_redis,
                            'S3:afterCheckpoint~afterCheckpoint:persist', 20)

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(src_redis,
                              'afterCheckpoint~afterCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis,
                              'afterCheckpoint~afterCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(src_redis,
                                'S3:afterCheckpoint~afterCheckpoint:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(src_redis,
                                'S3:afterCheckpoint~afterCheckpoint:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        self.setExpireKey(src_redis, 'afterCheckpoint~duringCatchup:expired',
                          10)
        self.setExpireKey(src_redis, 'afterCheckpoint~duringCatchup:persist',
                          100)
        self.setExpireS3Key(src_redis,
                            'S3:afterCheckpoint~duringCatchup:expired', 10)
        self.setExpireS3Key(src_redis,
                            'S3:afterCheckpoint~duringCatchup:persist', 100)

        # remote catchup (smr log migration)
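        # 'migrate start <host> <port> <seq> <tps> <num_part> <rle>' ships smr
        # log entries from the checkpoint seqnum onward to the destination.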
        util.log(">>> start remote catchup (%s)" % time.asctime())

        dst_host = dst_master['ip']
        dst_smr_port = dst_master['smr_base_port']
        rle = '1 8192'
        num_part = 8192

        smr.write('migrate start %s %d %d %d %d %s\r\n' %
                  (dst_host, dst_smr_port, seq, tps, num_part, rle))
        response = smr.read_until('\r\n')
        if response[:3] != '+OK':
            util.log('failed to execute migrate start command, response:%s' %
                     response)
            return False

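        # Poll 'migrate info' until the gap between the local log sequence and
        # the migrated sequence drops below 500000, i.e. catchup is nearly done.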
        while True:
            smr.write('migrate info\r\n')
            response = smr.read_until('\r\n')
            seqs = response.split()
            logseq = int(seqs[1].split(':')[1])
            mig = int(seqs[2].split(':')[1])
            util.log('migrate info: %s' % response)
            if (logseq - mig < 500000):
                util.log('Remote catchup almost done. try mig2pc')
                break
            time.sleep(1)

        util.log(">>> sleep until 90 sec pass")
        self.assertFalse(time.time() - ts >= 90)
        time.sleep(90 - (time.time() - ts))

        res = self.persistKey(src_redis,
                              'afterCheckpoint~duringCatchup:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis,
                              'afterCheckpoint~duringCatchup:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(src_redis,
                                'S3:afterCheckpoint~duringCatchup:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(src_redis,
                                'S3:afterCheckpoint~duringCatchup:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        self.setExpireKey(src_redis, 'duringCatchup~duringCatchup:expired', 10)
        self.setExpireKey(src_redis, 'duringCatchup~duringCatchup:persist', 20)
        self.setExpireS3Key(src_redis,
                            'S3:duringCatchup~duringCatchup:expired', 10)
        self.setExpireS3Key(src_redis,
                            'S3:duringCatchup~duringCatchup:persist', 20)

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(src_redis, 'duringCatchup~duringCatchup:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(src_redis, 'duringCatchup~duringCatchup:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(src_redis,
                                'S3:duringCatchup~duringCatchup:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(src_redis,
                                'S3:duringCatchup~duringCatchup:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        self.setExpireKey(src_redis, 'duringCatchup~afterMig2pc:expired', 10)
        self.setExpireKey(src_redis, 'duringCatchup~afterMig2pc:persist', 20)
        self.setExpireS3Key(src_redis, 'S3:duringCatchup~afterMig2pc:expired',
                            10)
        self.setExpireS3Key(src_redis, 'S3:duringCatchup~afterMig2pc:persist',
                            20)

        util.log(">>> remote catchup phase almost done (%s)" % time.asctime())

        # mig2pc
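        # mig2pc asks the leader CM to hand ownership of slots 0-8191 over
        # from src_pg to dst_pg.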
        util.log(">>> start mig2pc (%s)" % time.asctime())

        cmd = 'mig2pc %s %d %d %d %d' % (self.cluster['cluster_name'],
                                         src_pg_id, dst_pg_id, 0, 8191)
        result = util.cm_command(leader_cm['ip'], leader_cm['cm_port'], cmd)
        util.log('mig2pc result : ' + result)
        if not result.startswith('{"state":"success","msg":"+OK"}\r\n'):
            util.log('failed to execute mig2pc command, result:%s' % result)
            return False

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(dst_redis, 'duringCatchup~afterMig2pc:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(dst_redis, 'duringCatchup~afterMig2pc:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(dst_redis,
                                'S3:duringCatchup~afterMig2pc:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(dst_redis,
                                'S3:duringCatchup~afterMig2pc:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        self.setExpireKey(dst_redis, 'afterMig2pc~migrateEnd:expired', 10)
        self.setExpireKey(dst_redis, 'afterMig2pc~migrateEnd:persist', 20)
        self.setExpireS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:expired', 10)
        self.setExpireS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:persist', 20)

        # finish migration
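        # 'migrate interrupt' stops the smr log shipping now that mig2pc has
        # moved slot ownership.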
        smr.write('migrate interrupt\r\n')
        response = smr.read_until('\r\n')
        util.log('migrate interrupt: %s' % response)
        smr.disconnect()

        # notify dst_redis of migration end
        util.log(">>> notify dst_redis of migration end (%s)" % time.asctime())

        cmd = 'migconf migend\r\n'
        dst_redis.write(cmd)
        res = dst_redis.read_until('\r\n')
        self.assertEquals(res, '+OK\r\n')

        cmd = 'migconf clearstart %d-%d\r\n' % (0, 8191)
        src_redis.write(cmd)
        res = src_redis.read_until('\r\n')
        self.assertEquals(res, '+OK\r\n')

        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        res = self.persistKey(dst_redis, 'afterMig2pc~migrateEnd:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistKey(dst_redis, 'afterMig2pc~migrateEnd:expired')
        self.assertEquals(res, ":0\r\n")
        res = self.persistS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:persist')
        self.assertEquals(res, ":1\r\n")
        res = self.persistS3Key(dst_redis, 'S3:afterMig2pc~migrateEnd:expired')
        self.assertEquals(res, ":0\r\n")

        ts = time.time()
        util.log(">>> sleep until 15 sec pass")
        self.assertFalse(time.time() - ts >= 15)
        time.sleep(15 - (time.time() - ts))

        self.assertTrue(
            self.isExist(dst_redis,
                         'beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(
            self.isExist(dst_redis,
                         'beforeCheckpoint~beforeCheckpoint:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:beforeCheckpoint~beforeCheckpoint:expired'))

        self.assertTrue(
            self.isExist(dst_redis,
                         'beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isExist(dst_redis,
                         'beforeCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:beforeCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:afterCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:afterCheckpoint~duringCatchup:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'duringCatchup~duringCatchup:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'duringCatchup~duringCatchup:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:duringCatchup~duringCatchup:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:duringCatchup~duringCatchup:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'duringCatchup~afterMig2pc:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'duringCatchup~afterMig2pc:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'afterMig2pc~migrateEnd:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'afterMig2pc~migrateEnd:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:expired'))

        # delete the migrated key range from the source
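        # --rangedel deletes the keys of slots 0-8191 from the source redis,
        # throttled to tps.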
        util.log(">>> start rangedel (%s)" % time.asctime())
        cmd = "./cluster-util --rangedel %s %d %d-%d %d" % (
            src_master['ip'], src_master['redis_port'], 0, 8191, tps)
        p = util.exec_proc_async(util.cluster_util_dir(src_master['id']), cmd,
                                 True, None, subprocess.PIPE, None)
        ret = p.wait()

        for line in p.stdout:
            util.log(">>>" + str(line.rstrip()))

        cmd = 'migconf clearend\r\n'
        src_redis.write(cmd)
        res = src_redis.read_until('\r\n')
        self.assertEqual(res, '+OK\r\n')

        time.sleep(5)  # generate load for 5 sec
        # check consistency of load_generator
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].quit()
        for i in range(len(load_gen_thrd_list)):
            load_gen_thrd_list[i].join()
            self.assertTrue(load_gen_thrd_list[i].isConsistent(),
                            'Inconsistent after migration')

        # kill dst_redis and recover from bgsave
        util.log(">>> kill dst_redis and recover from bgsave (%s)" %
                 time.asctime())

        dst_redis.disconnect()
        ret = testbase.request_to_shutdown_redis(dst_master)
        self.assertEquals(ret, 0, 'failed to shutdown redis')
        ret = testbase.request_to_shutdown_smr(dst_master)
        self.assertEquals(ret, 0, 'failed to shutdown smr')
        time.sleep(5)

        ret = testbase.request_to_start_smr(dst_master)
        self.assertEqual(ret, 0,
                         'failed to start smr, server:%d' % dst_master['id'])

        ret = testbase.request_to_start_redis(dst_master)
        self.assertEqual(ret, 0,
                         'failed to start redis, server:%d' % dst_master['id'])

        ret = testbase.wait_until_finished_to_set_up_role(dst_master)
        self.assertEquals(
            ret, 0, 'failed to role change. server:%d' % (dst_master['id']))

        dst_redis = redis_mgmt.Redis(dst_master['id'])
        ret = dst_redis.connect(dst_master['ip'], dst_master['redis_port'])
        self.assertEquals(ret, 0, 'failed to connect to redis')

        self.assertTrue(
            self.isExist(dst_redis,
                         'beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(
            self.isExist(dst_redis,
                         'beforeCheckpoint~beforeCheckpoint:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:beforeCheckpoint~beforeCheckpoint:expired'))

        self.assertTrue(
            self.isExist(dst_redis,
                         'beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isExist(dst_redis,
                         'beforeCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:beforeCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'afterCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:afterCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'afterCheckpoint~duringCatchup:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:afterCheckpoint~duringCatchup:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'duringCatchup~duringCatchup:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'duringCatchup~duringCatchup:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis,
                           'S3:duringCatchup~duringCatchup:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis,
                           'S3:duringCatchup~duringCatchup:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'duringCatchup~afterMig2pc:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'duringCatchup~afterMig2pc:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis, 'S3:duringCatchup~afterMig2pc:expired'))

        self.assertTrue(
            self.isExist(dst_redis, 'afterMig2pc~migrateEnd:persist'))
        self.assertFalse(
            self.isExist(dst_redis, 'afterMig2pc~migrateEnd:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis, 'S3:afterMig2pc~migrateEnd:expired'))

        self.getS3TTL(dst_redis, 'S3:PermanentKey')

        # kill dst_slave redis and recover without dump file
        util.log(">>> kill dst_redis and recover without dump file (%s)" %
                 time.asctime())
        dst_slave = util.get_server_by_role_and_pg(self.cluster['servers'],
                                                   'slave', dst_pg_id)

        ret = testbase.request_to_shutdown_redis(dst_slave)
        self.assertEquals(ret, 0, 'failed to shutdown redis')
        ret = testbase.request_to_shutdown_smr(dst_slave)
        self.assertEquals(ret, 0, 'failed to shutdown smr')
        time.sleep(5)

        ret = testbase.request_to_start_smr(dst_slave)
        self.assertEqual(ret, 0,
                         'failed to start smr, server:%d' % dst_slave['id'])

        ret = testbase.request_to_start_redis(dst_slave)
        self.assertEqual(ret, 0,
                         'failed to start redis, server:%d' % dst_slave['id'])

        ret = testbase.wait_until_finished_to_set_up_role(dst_slave)
        self.assertEquals(
            ret, 0, 'failed to role change. server:%d' % (dst_slave['id']))

        dst_redis_slave = redis_mgmt.Redis(dst_slave['id'])
        ret = dst_redis_slave.connect(dst_slave['ip'], dst_slave['redis_port'])
        self.assertEquals(ret, 0, 'failed to connect to redis')

        self.assertTrue(
            self.isExist(dst_redis_slave,
                         'beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(
            self.isExist(dst_redis_slave,
                         'beforeCheckpoint~beforeCheckpoint:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis_slave,
                           'S3:beforeCheckpoint~beforeCheckpoint:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis_slave,
                           'S3:beforeCheckpoint~beforeCheckpoint:expired'))

        self.assertTrue(
            self.isExist(dst_redis_slave,
                         'beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isExist(dst_redis_slave,
                         'beforeCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis_slave,
                           'S3:beforeCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis_slave,
                           'S3:beforeCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(
            self.isExist(dst_redis_slave,
                         'afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isExist(dst_redis_slave,
                         'afterCheckpoint~afterCheckpoint:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis_slave,
                           'S3:afterCheckpoint~afterCheckpoint:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis_slave,
                           'S3:afterCheckpoint~afterCheckpoint:expired'))

        self.assertTrue(
            self.isExist(dst_redis_slave,
                         'afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(
            self.isExist(dst_redis_slave,
                         'afterCheckpoint~duringCatchup:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis_slave,
                           'S3:afterCheckpoint~duringCatchup:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis_slave,
                           'S3:afterCheckpoint~duringCatchup:expired'))

        self.assertTrue(
            self.isExist(dst_redis_slave,
                         'duringCatchup~duringCatchup:persist'))
        self.assertFalse(
            self.isExist(dst_redis_slave,
                         'duringCatchup~duringCatchup:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis_slave,
                           'S3:duringCatchup~duringCatchup:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis_slave,
                           'S3:duringCatchup~duringCatchup:expired'))

        self.assertTrue(
            self.isExist(dst_redis_slave, 'duringCatchup~afterMig2pc:persist'))
        self.assertFalse(
            self.isExist(dst_redis_slave, 'duringCatchup~afterMig2pc:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis_slave,
                           'S3:duringCatchup~afterMig2pc:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis_slave,
                           'S3:duringCatchup~afterMig2pc:expired'))

        self.assertTrue(
            self.isExist(dst_redis_slave, 'afterMig2pc~migrateEnd:persist'))
        self.assertFalse(
            self.isExist(dst_redis_slave, 'afterMig2pc~migrateEnd:expired'))
        self.assertTrue(
            self.isS3Exist(dst_redis_slave,
                           'S3:afterMig2pc~migrateEnd:persist'))
        self.assertFalse(
            self.isS3Exist(dst_redis_slave,
                           'S3:afterMig2pc~migrateEnd:expired'))

        self.getS3TTL(dst_redis_slave, 'S3:PermanentKey')

        # Go back to initial configuration
        self.assertTrue(
            util.migration(self.cluster, dst_pg_id, src_pg_id, 0, 8191, 40000),
            'failed to rollback migration')
Example #35
0
    def test_data_type(self):
        util.print_frame()

        dict = {}
        server0 = self.cluster['servers'][0]
        redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])

        # String
        dict['string'] = {}
        key = self.string_gen(random.randint(1,5)) + '_type_string'
        val = self.string_gen(random.randint(1,5))
        dict['string']['key'] = key
        dict['string']['val'] = val

        redis0.write('*3\r\n$3\r\nset\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$%d\r\n%s\r\n' % (len(val), val))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, '+OK\r\n')

        # List
        dict['list'] = {}
        key = self.string_gen(random.randint(1,5)) + '_type_list'
        val1 = self.string_gen(random.randint(1,5))
        val2 = self.string_gen(random.randint(1,5))
        dict['list']['key'] = key
        dict['list']['val1'] = val1
        dict['list']['val2'] = val1 # Duplicate value
        dict['list']['val3'] = val2

        redis0.write('*5\r\n$5\r\nrpush\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$%d\r\n%s\r\n' % (len(val2), val2))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, ':3\r\n')

        # Set
        dict['set'] = {}
        key = self.string_gen(random.randint(1,5)) + '_type_set'
        val1 = self.string_gen(random.randint(1,5)) + '_v1'
        val2 = self.string_gen(random.randint(1,5)) + '_v2'
        dict['set']['key'] = key
        dict['set']['val1'] = val1
        dict['set']['val2'] = val2

        redis0.write('*4\r\n$4\r\nsadd\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$%d\r\n%s\r\n' % (len(val2), val2))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, ':2\r\n')

        # Sorted Set
        dict['zset'] = {}
        key = self.string_gen(random.randint(1,5)) + '_type_zset'
        val1 = self.string_gen(random.randint(1,5)) + '_v1'
        val2 = self.string_gen(random.randint(1,5)) + '_v2'
        dict['zset']['key'] = key
        dict['zset']['val1'] = val1
        dict['zset']['score1'] = 20
        dict['zset']['val2'] = val2
        dict['zset']['score2'] = 10

        redis0.write('*6\r\n$4\r\nzadd\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$2\r\n20\r\n$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$2\r\n10\r\n$%d\r\n%s\r\n' % (len(val2), val2))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, ':2\r\n')

        # Hash
        dict['hash'] = {}
        key = self.string_gen(random.randint(1,5)) + '_type_hash'
        key1 = self.string_gen(random.randint(1,5)) + '_k1'
        val1 = self.string_gen(random.randint(1,5))
        key2 = self.string_gen(random.randint(1,5)) + '_k2'
        val2 = self.string_gen(random.randint(1,5))
        dict['hash']['key'] = key
        dict['hash'][key1] = val1
        dict['hash'][key2] = val2

        redis0.write('*6\r\n$5\r\nhmset\r\n')
        redis0.write('$%d\r\n%s\r\n' % (len(key), key))
        redis0.write('$%d\r\n%s\r\n' % (len(key1), key1))
        redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
        redis0.write('$%d\r\n%s\r\n' % (len(key2), key2))
        redis0.write('$%d\r\n%s\r\n' % (len(val2), val2))
        ret = redis0.read_until('\r\n', 1)
        self.assertEqual(ret, '+OK\r\n')

        self.bgsave(redis0)

        cmd = "./dump-util --dump-iterator dump.rdb ./dump2json_base32hex.so out.json"
        p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)

        ret = p.wait()
        self.assertTrue(ret == 0)

        f = file("%s/out.json" % util.dump_util_dir(0), "r")
        skip_line = 2
        for line in f.readlines():
            # skip first 2 lines (smr_seqnum, smr_mstime)
            if skip_line > 0:
                skip_line -= 1
                continue

            data = json.loads(line.strip())
            key = self.b32hexdecode(data['key'])

            if data['type'] == 'string':
                self.assertEqual(dict['string']['key'], key,
                                 "key(%s) is not match with %s" % (dict['string']['key'], key))

                val = self.b32hexdecode(data['value'])

                self.assertEqual(dict['string']['val'], val,
                                 "val(%s) is not match with %s" % (dict['string']['val'], val))

            elif data['type'] == 'list':
                self.assertEqual(dict['list']['key'], key,
                                 "key(%s) is not match with %s" % (dict['list']['key'], key))

                val1 = self.b32hexdecode(data['value'][0])
                val2 = self.b32hexdecode(data['value'][1])
                val3 = self.b32hexdecode(data['value'][2])

                self.assertEqual(dict['list']['val1'], val1,
                                 "val(%s) is not match with %s" % (dict['list']['val1'], val1))
                self.assertEqual(dict['list']['val2'], val2,
                                 "val(%s) is not match with %s" % (dict['list']['val2'], val2))
                self.assertEqual(dict['list']['val3'], val3,
                                 "val(%s) is not match with %s" % (dict['list']['val3'], val3))

            elif data['type'] == 'set':
                self.assertEqual(dict['set']['key'], key,
                                 "key(%s) is not match with %s" % (dict['set']['key'], key))

                val1 = self.b32hexdecode(data['value'][0])
                val2 = self.b32hexdecode(data['value'][1])
                if not ((val1 == dict['set']['val1'] and val2 == dict['set']['val2'])
                        or (val1 == dict['set']['val2'] and val2 == dict['set']['val1'])):

                    util.log("values(%s, %s) is not match with (%s, %s)" % (dict['set']['val1'],
                                                                            dict['set']['val2'],
                                                                            val1,
                                                                            val2))
                    self.assertTrue(False)

            elif data['type'] == 'zset':
                self.assertEqual(dict['zset']['key'], key,
                                 "key(%s) is not match with %s" % (dict['zset']['key'], key))

                # The json output lists zset members in ascending score order,
                # so index 0 holds the lower-scored member (val2, score 10)
                val2 = self.b32hexdecode(data['value'][0]['data'])
                score2 = int(data['value'][0]['score'])
                val1 = self.b32hexdecode(data['value'][1]['data'])
                score1 = int(data['value'][1]['score'])

                self.assertEqual(dict['zset']['val1'], val1,
                                 "val(%s) is not match with %s" % (dict['zset']['val1'], val1))
                self.assertEqual(dict['zset']['score1'], score1,
                                 "score(%d) is not match with %d" % (dict['zset']['score1'], score1))
                self.assertEqual(dict['zset']['val2'], val2,
                                 "val(%s) is not match with %s" % (dict['zset']['val2'], val2))
                self.assertEqual(dict['zset']['score2'], score2,
                                 "score(%d) is not match with %d" % (dict['zset']['score2'], score2))

            elif data['type'] == 'hash':
                self.assertEqual(dict['hash']['key'], key,
                                 "key(%s) is not match with %s" % (dict['zset']['key'], key))

                key1 = self.b32hexdecode(data['value'][0]['hkey'])
                val1 = self.b32hexdecode(data['value'][0]['hval'])
                key2 = self.b32hexdecode(data['value'][1]['hkey'])
                val2 = self.b32hexdecode(data['value'][1]['hval'])

                self.assertTrue(key1 in dict['hash'].keys(), 'hkey(%s) is not in json output' % key1)
                self.assertTrue(key2 in dict['hash'].keys(), 'hkey(%s) is not in json output' % key2)
                self.assertEqual(dict['hash'][key1], val1,
                                 "val(%s) is not match with %s" % (dict['hash'][key1], val1))
                self.assertEqual(dict['hash'][key2], val2,
                                 "val(%s) is not match with %s" % (dict['hash'][key2], val2))

            else:
                self.assertTrue(False, "Unknown type")

        f.close()