def test_error_log(self):
    util.print_frame()
    cluster = self.cluster

    # Start test-fiall
    p = util.exec_proc_async('%s/.obj%d' % (constant.ARCCI_DIR, self.arch),
                             "./test-fiall -z localhost:2181 -c %s -s 10" % cluster['cluster_name'],
                             subprocess.PIPE, subprocess.PIPE, subprocess.PIPE)

    # Set up arguments
    server = cluster['servers'][0]
    gw_id = server['id']
    gw_port = server['gateway_port']

    # Check load
    for i in range(20):
        ok = True
        for s in cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
        time.sleep(1)

    # Check no error
    util.log(' ### BEGIN - ARCCI LOGS ### ')
    (stdout, stderr) = p.communicate()
    for line in stdout.split("\n"):
        util.log(line)
    util.log(' ### END - ARCCI LOGS ### ')

def test_a_pg_delay(self):
    util.print_frame()

    # Start load generation
    self.load_gen_list = {}
    for i in range(len(self.cluster['servers'])):
        arc_api = ARC_API(ZK_ADDR, CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)
        server = self.cluster['servers'][i]
        load_gen = LoadGenerator_ARCCI_FaultTolerance(server['id'], arc_api)
        load_gen.start()
        self.load_gen_list[i] = load_gen

    # Set up arguments
    server = self.cluster['servers'][0]
    gw_id = server['id']
    gw_port = server['gateway_port']

    # Check load
    for i in range(5):
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'failed to send requests')

    # HANG SMR
    smr = smr_mgmt.SMR(server['id'])
    ret = smr.connect(server['ip'], server['smr_mgmt_port'])
    self.assertEqual(ret, 0, 'failed to connect to master. %s:%d' % (server['ip'], server['smr_mgmt_port']))
    smr.write('fi delay sleep 1 5000\r\n')
    reply = smr.read_until('\r\n', 1)
    if reply != None and reply.find('-ERR not supported') != -1:
        self.assertEqual(0, 1, 'make sure that smr has been compiled with the gcov option.')
    smr.disconnect()

    # Check load
    i = 0
    while i < 10:
        i += 1
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)

    if not ok:
        self.fail('FAIL, loadbalancing, while HANG')
    else:
        util.log('SUCCESS, loadbalancing, while HANG')

    for id, load_gen in self.load_gen_list.items():
        util.log('err_cnt:%d' % load_gen.get_err_cnt())

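# NOTE: A minimal sketch, not part of the original suite. The "Check load"
# loops in these tests all poll each gateway's TPS via util.get_tps() until
# every gateway clears a threshold or the retry budget runs out. A helper
# such as the following could express that pattern in one place; the name
# _wait_for_tps_at_least is hypothetical, and it assumes util and time are
# imported at module level as in the tests above.
def _wait_for_tps_at_least(self, servers, min_tps, max_try=5):
    for i in range(max_try):
        ok = True
        for s in servers:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < min_tps:
                ok = False
        if ok:
            return True
        time.sleep(1)
    return False
    # Hypothetical usage:
    #   self.assertTrue(self._wait_for_tps_at_least(self.cluster['servers'], 50),
    #                   'failed to send requests')
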
def test_gateway_network_isolation(self):
    util.print_frame()
    cluster = self.cluster

    # Clear rules
    while True:
        out = util.sudo('iptables -t nat -D OUTPUT -d 127.0.0.100 -p tcp -j DNAT --to-destination 127.0.0.1')
        util.log(out)
        if out.succeeded == False:
            break

    while True:
        out = util.sudo('iptables -t nat -D PREROUTING -d 127.0.0.100 -p tcp -j DNAT --to-destination 127.0.0.1')
        util.log(out)
        if out.succeeded == False:
            break

    while True:
        out = util.sudo('iptables -D OUTPUT -d 127.0.0.100 -j DROP')
        util.log(out)
        if out.succeeded == False:
            break

    # Print rules
    out = util.sudo('iptables -L')
    util.log('====================================================================')
    util.log(out.succeeded)
    util.log('out : %s' % out)
    util.log('out.return_code : %d' % out.return_code)
    util.log('out.stderr : %s' % out.stderr)

    # Start load generators
    self.load_gen_list = {}
    for i in range(len(cluster['servers'])):
        arc_api = ARC_API(ZK_ADDR, CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)
        server = cluster['servers'][i]
        load_gen = LoadGenerator_ARCCI_FaultTolerance(server['id'], arc_api)
        load_gen.start()
        self.load_gen_list[i] = load_gen

    # Add forwarding rule (127.0.0.100 -> 127.0.0.1)
    out = util.sudo('iptables -t nat -A OUTPUT -d 127.0.0.100 -p tcp -j DNAT --to-destination 127.0.0.1')
    self.assertTrue(out.succeeded, 'failed to add a forwarding rule to iptables. output:%s' % out)
    out = util.sudo('iptables -t nat -A PREROUTING -d 127.0.0.100 -p tcp -j DNAT --to-destination 127.0.0.1')
    self.assertTrue(out.succeeded, 'failed to add a forwarding rule to iptables. output:%s' % out)

    # Add virtual host information to MGMT
    VIRTUAL_HOST_NAME = 'virtualhost'
    VIRTUAL_HOST_IP = '127.0.0.100'
    ret = util.pm_add(VIRTUAL_HOST_NAME, VIRTUAL_HOST_IP, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'pm_add fail.')

    # Modify gateway information of MGMT
    server = cluster['servers'][0]
    gw_id = server['id']
    gw_port = server['gateway_port']

    # Delete gateway
    ret = util.gw_del(CLUSTER_NAME, gw_id, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'gw_del fail')

    # Add gateway
    ret = util.gw_add(CLUSTER_NAME, gw_id, VIRTUAL_HOST_NAME, VIRTUAL_HOST_IP, gw_port, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'gw_add fail')

    # Check load balancing
    for i in range(5):
        ok = True
        for s in cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail')
    util.log('load balancing success')

    # Block
    out = util.sudo('iptables -A OUTPUT -d 127.0.0.100 -j DROP')
    self.assertTrue(out.succeeded, 'failed to add a blocking rule to iptables. output:%s' % out)

    # Check blocked gateway's ops
    for i in range(5):
        ok = True
        tps = util.get_tps(server['ip'], server['gateway_port'], 'gw')
        util.log('%s:%d TPS:%d' % (server['ip'], server['gateway_port'], tps))
        if tps > 10:
            ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail - blocked gateway')
    util.log('load balancing success - blocked gateway')

    # Check unblocked gateways' ops
    for i in range(10):
        ok = True
        for s in cluster['servers']:
            if s == server:
                continue
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail - nonblocked gateways')
    util.log('load balancing success - nonblocked gateways')

    # Unblock
    out = util.sudo('iptables -D OUTPUT -d 127.0.0.100 -j DROP')
    self.assertTrue(out.succeeded, 'failed to delete the blocking rule from iptables. output:%s' % out)

    # Check load balancing
    ok = False
    for i in xrange(5):
        condition = (lambda s: (s['ops'] <= 10 if s['id'] == gw_id else s['ops'] >= 50))
        if util.check_ops(cluster['servers'], 'gw', condition):
            ok = True
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail - all gateways after unblocking network')
    util.log('load balancing success - all gateways after unblocking network')

    server = cluster['servers'][0]

    # Wait until opinions for the gateway have been deleted.
    for i in xrange(5):
        util.log('Wait until opinions for the gateway have been deleted... %d' % i)
        time.sleep(1)

    # Delete gateway
    ret = util.gw_del(CLUSTER_NAME, gw_id, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'gw_del fail')

    # Add gateway
    ret = util.gw_add(CLUSTER_NAME, gw_id, server['pm_name'], server['ip'], gw_port, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'gw_add fail')

    # Check load balancing
    for i in range(10):
        ok = True
        for s in cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail - all gateways after re-adding the gateway')
    util.log('load balancing success - all gateways after re-adding the gateway')

def test_zookeeper_delete_root_of_gw_znodes(self):
    util.print_frame()

    # Start load generation
    self.load_gen_list = {}
    arc_api = ARC_API(ZK_ADDR, CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)
    server = self.cluster['servers'][0]
    load_gen = LoadGenerator_ARCCI(server['id'], arc_api, timeout_second=10, verbose=True)
    load_gen.start()
    self.load_gen_list[0] = load_gen

    # Set up arguments
    server = self.cluster['servers'][0]
    gw_id = server['id']
    gw_port = server['gateway_port']

    # Check load
    for i in range(5):
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'failed to send requests')

    # Delete root of GW znodes
    print 'try remove root of GW znodes'
    ret = util.zk_cmd('rmr /RC/NOTIFICATION/CLUSTER/%s/GW' % server['cluster_name'])
    ret = ret['err']
    self.assertEqual(ret, '', 'failed to remove root of GW znodes, ret:%s' % ret)

    # Check loadbalancing
    for i in range(10):
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps > 10:
                ok = False
        if ok:
            break
        time.sleep(1)
    if not ok:
        self.fail('FAIL, loadbalancing,')
    else:
        util.log('SUCCESS, loadbalancing.')

    # Recover root of GW znodes
    print 'try recover GW znodes'
    ret = util.zk_cmd('create /RC/NOTIFICATION/CLUSTER/%s/GW test' % server['cluster_name'])
    ret = ret['err']
    self.assertNotEqual(ret.find('Created /RC/NOTIFICATION/CLUSTER/testCluster0/GW'), -1,
                        'failed to create root of GW znodes, ret:%s' % ret)

    for s in self.cluster['servers']:
        path = '/RC/NOTIFICATION/CLUSTER/%s/GW/%d' % (s['cluster_name'], s['id'])
        cmd = 'create %s \'{"ip":"%s","port":%d}\'' % (path, s['ip'], s['gateway_port'])
        print cmd
        ret = util.zk_cmd(cmd)
        ret = ret['err']
        self.assertNotEqual(ret.find('Created %s' % path), -1, 'failed to recover GW znode, ret:%s' % ret)

    # Check loadbalancing
    for i in range(10):
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    if not ok:
        self.fail('FAIL, loadbalancing,')
    else:
        util.log('SUCCESS, loadbalancing.')

def test_zookeeper_ensemble_failback(self):
    util.print_frame()

    try:
        api = ARC_API(ZK_ADDR, CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)

        # Start load generation
        self.load_gen_list = {}
        for i in range(len(self.cluster['servers'])):
            arc_api = ARC_API(ZK_ADDR, CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)
            server = self.cluster['servers'][i]
            load_gen = LoadGenerator_ARCCI(server['id'], arc_api, timeout_second=10)
            load_gen.start()
            self.load_gen_list[i] = load_gen

        # Check load
        for i in range(5):
            ok = True
            for s in self.cluster['servers']:
                tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
                util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
                if tps < 50:
                    ok = False
            if ok:
                break
            time.sleep(1)
        self.assertTrue(ok, 'failed to send requests')

        # Stop zookeeper ensemble
        for zk in config.zookeeper_info:
            stdout, returncode = util.stop_zookeeper(zk['bin_dir'])
            util.log("zookeeper stop - stdout:%s" % stdout)
            self.assertEqual(returncode, 0, 'failed to stop zookeeper')
        time.sleep(1)

        # Check loadbalancing
        for i in range(5):
            ok = True
            for s in self.cluster['servers']:
                tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
                util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
                if tps < 50:
                    ok = False
            time.sleep(1)
        if not ok:
            self.fail('FAIL, loadbalancing,')
        else:
            util.log('SUCCESS, loadbalancing.')
    finally:
        # Start zookeeper ensemble
        for zk in config.zookeeper_info:
            stdout, returncode = util.start_zookeeper(zk['bin_dir'])
            util.log("zookeeper start - stdout:%s" % stdout)
            self.assertEqual(returncode, 0, 'failed to start zookeeper')
        time.sleep(1)

        # Check loadbalancing
        for i in range(5):
            ok = True
            for s in self.cluster['servers']:
                tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
                util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
                if tps < 50:
                    ok = False
            if ok:
                break
            time.sleep(1)
        if not ok:
            self.fail('FAIL, loadbalancing,')
        else:
            util.log('SUCCESS, loadbalancing.')

        api.destroy()

def test_gateway_upgrade(self):
    util.print_frame()

    api = ARC_API(ZK_ADDR, CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)

    # Start load generation
    self.load_gen_list = {}
    for i in range(len(self.cluster['servers'])):
        arc_api = ARC_API(ZK_ADDR, CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)
        server = self.cluster['servers'][i]
        load_gen = LoadGenerator_ARCCI(server['id'], arc_api, timeout_second=10)
        load_gen.start()
        self.load_gen_list[i] = load_gen

    # Set up arguments
    server = self.cluster['servers'][0]
    gw_id = server['id']
    gw_port = server['gateway_port']

    # Check load
    for i in range(5):
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'failed to send requests')

    # Delete gateway
    self.assertTrue(
        util.gw_del(CLUSTER_NAME, gw_id, MGMT_IP, MGMT_PORT),
        'failed to delete gateway')

    # Check load
    for i in range(5):
        ok = True
        tps = util.get_tps(server['ip'], server['gateway_port'], 'gw')
        util.log('%s:%d TPS:%d' % (server['ip'], server['gateway_port'], tps))
        if tps > 10:
            ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'failed to send requests')

    # Stop gateway
    ret = util.shutdown_gateway(gw_id, gw_port, True)
    self.assertEqual(ret, 0, 'failed : shutdown gateway%d' % gw_id)

    # Check if gateway is deleted
    deleted_gw = {"ip": HOST_IP, "port": gw_port}
    found = check_gateway_deleted(deleted_gw, api)
    if not found:
        self.fail('FAIL, delete gateway information, gw:%s' % util.json_to_str(deleted_gw))
    else:
        util.log('SUCCESS, delete gateway information.')

    # Start gateway
    ret = util.start_gateway(gw_id, server['ip'], MGMT_PORT, server['cluster_name'], gw_port)
    self.assertEqual(ret, 0, 'failed : start gateway%d' % gw_id)
    time.sleep(3)

    # Add gateway
    self.assertTrue(
        util.gw_add(CLUSTER_NAME, gw_id, HOST_NAME, HOST_IP, gw_port, MGMT_IP, MGMT_PORT),
        'failed to add gateway')

    # Check if gateway is added
    added_gw = {"ip": HOST_IP, "port": gw_port}
    log_reader = LogReader(api.conf.log_file_prefix)
    found = False
    while True:
        line = log_reader.readline()
        if line == None:
            break
        if line.find(MSG_GATEWAY_ADD_ZK) == -1:
            continue
        gw = line.split('data:')[1]
        gw = ast.literal_eval(gw)
        if gw['ip'] == added_gw['ip'] and gw['port'] == added_gw['port']:
            found = True
    if not found:
        self.fail('FAIL, load gateway information, gw:%s' % util.json_to_str(added_gw))
    else:
        util.log('SUCCESS, load gateway information.')

    # Check loadbalancing
    for i in range(5):
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    if not ok:
        self.fail('FAIL, loadbalancing,')
    else:
        util.log('SUCCESS, loadbalancing.')

    api.destroy()

def _gateway_fault_failback(self, api):
    # Set up arguments
    server = self.cluster['servers'][0]
    gw_id = server['id']
    gw_port = server['gateway_port']

    # Stop gateway
    ret = util.shutdown_gateway(gw_id, gw_port, True)
    self.assertEqual(ret, 0, 'failed : shutdown gateway%d' % gw_id)
    time.sleep(3)

    # Check error
    saved_err_cnt = {}
    for i in range(len(self.load_gen_list)):
        saved_err_cnt[i] = 0

    no_error_cnt = 0
    for i in range(10):
        util.log('check error count loop:%d' % i)
        no_err = True
        for j in range(len(self.load_gen_list)):
            err_cnt = self.load_gen_list[j].get_err_cnt()
            if err_cnt != saved_err_cnt[j]:
                no_err = False
                util.log('saved_err_cnt:%d, err_cnt:%d' % (saved_err_cnt[j], err_cnt))
                saved_err_cnt[j] = err_cnt
        if no_err:
            no_error_cnt += 1
        if no_error_cnt >= 3:
            break
        time.sleep(1)
    self.assertTrue(no_error_cnt >= 3, 'failed to get replies from well-working gateways')

    # Check if gateway is deleted
    deleted_gw = {"ip": HOST_IP, "port": gw_port}
    found = check_gateway_deleted(deleted_gw, api)
    if not found:
        self.fail('FAIL, delete gateway information, gw:%s' % util.json_to_str(deleted_gw))
    else:
        util.log('SUCCESS, delete gateway information.')

    # Start gateway
    ret = util.start_gateway(gw_id, server['ip'], MGMT_PORT, server['cluster_name'], gw_port)
    self.assertEqual(ret, 0, 'failed : start gateway%d' % gw_id)
    time.sleep(3)

    # Check if gateway is added
    found = check_gateway_added(deleted_gw, api)
    if not found:
        self.fail('FAIL, load gateway information, gw:%s' % util.json_to_str(deleted_gw))
    else:
        util.log('SUCCESS, load gateway information.')

    # Check loadbalancing
    for i in range(3):
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    if not ok:
        self.fail('FAIL, loadbalancing,')
    else:
        util.log('SUCCESS, loadbalancing.')

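# NOTE: A minimal sketch, not part of the original suite. check_gateway_added()
# and check_gateway_deleted() used above are module-level helpers defined
# elsewhere in this file; judging from the inline log scan in
# test_gateway_upgrade, the "added" check presumably walks the ARCCI log with
# LogReader and matches the MSG_GATEWAY_ADD_ZK line, roughly as follows.
# The name _check_gateway_added_sketch is hypothetical.
def _check_gateway_added_sketch(self, expected_gw, api):
    log_reader = LogReader(api.conf.log_file_prefix)
    while True:
        line = log_reader.readline()
        if line == None:
            return False
        if line.find(MSG_GATEWAY_ADD_ZK) == -1:
            continue
        gw = ast.literal_eval(line.split('data:')[1])
        if gw['ip'] == expected_gw['ip'] and gw['port'] == expected_gw['port']:
            return True
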
def test_argument_checking(self):
    util.print_frame()

    # ARC_API
    # Invalid zookeeper address
    api = ARC_API('invalidaddr:2181', CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)
    self.assertEqual(api.arc.value, None, 'fail, call create_zk_new with invalid zookeeper address')
    api.destroy()

    # Invalid cluster name
    api = ARC_API(ZK_ADDR, 'invalid_cluster_name', logFilePrefix = self.arcci_log, so_path = self.so_path)
    self.assertEqual(api.arc.value, None, 'fail, call create_zk_new with invalid cluster name')
    api.destroy()

    # Null arguments
    api = ARC_API(None, None, logFilePrefix = self.arcci_log, so_path = self.so_path)
    self.assertEqual(api.arc.value, None, 'fail, call create_zk_new with invalid zookeeper address and invalid cluster name')
    api.destroy()

    api = ARC_API(None, None, gwAddrs=None, logFilePrefix = self.arcci_log, so_path = self.so_path)
    self.assertEqual(api.arc.value, None, 'fail, call create_gw_new with invalid gateways')
    api.destroy()

    # [BEGIN] Mixed : invalid gateway address and valid gateway address
    s0 = self.cluster['servers'][0]
    s1 = self.cluster['servers'][1]
    servers = [s0, s1]
    s_no_load = self.cluster['servers'][2]
    gw_addrs = "%s:%d,%s:%d,123.123.123.123:8200" % (s0['ip'], s0['gateway_port'], s1['ip'], s1['gateway_port'])
    api = ARC_API(None, None, gwAddrs=gw_addrs, logFilePrefix = self.arcci_log, so_path = self.so_path)
    self.assertNotEqual(api.arc.value, None, 'fail, call create_gw_new with mixed gateway addresses')

    # Start load generation
    self.load_gen_list = {}
    load_gen = LoadGenerator_ARCCI_FaultTolerance(0, api)
    load_gen.start()
    self.load_gen_list[0] = load_gen

    # Check loadbalancing
    for i in range(5):
        ok = True
        for s in servers:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    if not ok:
        self.fail('FAIL, loadbalancing,')
    else:
        util.log('SUCCESS, loadbalancing.')

    tps = util.get_tps(s_no_load['ip'], s_no_load['gateway_port'], 'gw')
    util.log('%s:%d TPS:%d' % (s_no_load['ip'], s_no_load['gateway_port'], tps))
    self.assertTrue(tps < 10, 'FAIL, loadbalancing,')

    # Close
    max = len(self.load_gen_list)
    i = 0
    while i < max:
        self.load_gen_list[i].quit()
        self.load_gen_list[i].join()
        self.load_gen_list.pop(i, None)
        i += 1
    # [END] Mixed : invalid gateway address and valid gateway address

    # Valid arguments
    api = ARC_API(ZK_ADDR, CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)
    self.assertNotEqual(api.arc.value, None, 'fail, call create_zk_new with valid arguments')

    rqst = api.create_request()
    self.assertNotEqual(rqst, None, 'fail, call create_request')

    # append_command
    ret = api.append_command(rqst, None, None)
    self.assertEqual(ret, -1, 'fail, call append_command with Null format and Null args.')

    ret = api.append_command(rqst, None, 'hahahoho')
    self.assertEqual(ret, -1, 'fail, call append_command with Null format')

    # Valid set operations
    ret = api.append_command(rqst, 'set haha %s', 'hoho')
    self.assertEqual(ret, 0, 'fail, call append_command with valid arguments')

    ret = api.append_command(rqst, 'set %s %d', 'hoho', 1)
    self.assertEqual(ret, 0, 'fail, call append_command with valid arguments.')

    ret = api.append_command(rqst, 'set %d %s', 1, 'hoho')
    self.assertEqual(ret, 0, 'fail, call append_command with valid arguments.')

    ret = api.append_command(rqst, 'set %d %d', 2, 2)
    self.assertEqual(ret, 0, 'fail, call append_command with valid arguments.')

    ret = api.append_command(rqst, 'mset %d %d %s %s %f %f', 2, 2, c_char_p('mset0'), c_char_p('mset0'),
                             c_double(1.23), c_double(1.234))
    self.assertEqual(ret, 0, 'fail, call append_command with valid arguments.')

    ret = api.do_request(rqst, 3000)
    self.assertEqual(ret, 0, 'arguments checking fail.')

    i = 0
    while i < 5:
        i += 1
        be_errno, reply = api.get_reply(rqst)
        self.assertEqual(be_errno, 0, 'arguments checking fail.')
        self.assertEqual(reply[0], ARC_REPLY_STATUS)
    api.free_request(rqst)

    # Valid get operation
    rqst = api.create_request()
    ret = api.append_command(rqst, 'mget %d %s %f', 2, 'mset0', c_double(1.23))
    self.assertEqual(ret, 0, 'fail, call append_command with valid arguments.')

    ret = api.do_request(rqst, 3000)
    self.assertEqual(ret, 0, 'arguments checking fail.')

    be_errno, reply = api.get_reply(rqst)
    self.assertEqual(be_errno, 0, 'arguments checking fail.')
    self.assertEqual(reply[0], ARC_REPLY_ARRAY)
    self.assertEqual(reply[1], [(4, '2'), (4, 'mset0'), (4, '1.234000')])
    api.free_request(rqst)

    api.destroy()

def test_no_error_scenario_and_memory_leak(self):
    util.print_frame()

    # Start with valgrind
    p = util.exec_proc_async('%s/.obj%d' % (constant.ARCCI_DIR, self.arch),
                             "valgrind ./dummy-perf -z localhost:2181 -c %s -n 5 -s 60" % self.cluster['cluster_name'],
                             subprocess.PIPE, subprocess.PIPE, subprocess.PIPE)

    # Set up arguments
    server = self.cluster['servers'][0]
    gw_id = server['id']
    gw_port = server['gateway_port']

    # Check load
    for i in range(5):
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'failed to send requests')

    # Delete gateway
    self.assertTrue(
        util.gw_del(CLUSTER_NAME, gw_id, MGMT_IP, MGMT_PORT),
        'failed to delete gateway')

    # Check load
    for i in range(5):
        ok = True
        tps = util.get_tps(server['ip'], server['gateway_port'], 'gw')
        util.log('%s:%d TPS:%d' % (server['ip'], server['gateway_port'], tps))
        if tps > 10:
            ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'failed to send requests')

    # Stop gateway
    ret = util.shutdown_gateway(gw_id, gw_port, True)
    self.assertEqual(ret, 0, 'failed : shutdown gateway%d' % gw_id)
    time.sleep(5)

    # Start gateway
    ret = util.start_gateway(gw_id, server['ip'], MGMT_PORT, server['cluster_name'], gw_port)
    self.assertEqual(ret, 0, 'failed : start gateway%d' % gw_id)
    time.sleep(3)

    # Add gateway
    self.assertTrue(
        util.gw_add(CLUSTER_NAME, gw_id, HOST_NAME, HOST_IP, gw_port, MGMT_IP, MGMT_PORT),
        'failed to add gateway')
    time.sleep(10)

    # Check loadbalancing
    for i in range(5):
        ok = True
        for s in self.cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    if not ok:
        self.fail('FAIL, loadbalancing,')
    else:
        util.log('SUCCESS, loadbalancing.')

    # Check for errors and memory leaks in the valgrind output
    no_memory_leak = False
    (stdout, stderr) = p.communicate()
    for line in stdout.split("\n"):
        print line
        if line.find('[ERR]') != -1:
            self.fail('find error, msg:%s' % line)
        if line.find('All heap blocks were freed -- no leaks are possible') != -1:
            no_memory_leak = True

    self.assertTrue(no_memory_leak, 'memory leaks are possible')
    util.log('no leaks are possible')

def test_gateway_network_isolation(self):
    util.print_frame()
    cluster = self.cluster

    # Clear rules
    while True:
        if util.iptables_redirect('D', '127.0.0.100', '127.0.0.1') == False:
            break
    while True:
        if util.iptables_drop('D', '127.0.0.100') == False:
            break

    # Print rules
    util.iptables_print_list()

    # Start load generators
    self.load_gen_list = {}
    for i in range(len(cluster['servers'])):
        arc_api = ARC_API(ZK_ADDR, CLUSTER_NAME, logFilePrefix = self.arcci_log, so_path = self.so_path)
        server = cluster['servers'][i]
        load_gen = LoadGenerator_ARCCI_FaultTolerance(server['id'], arc_api)
        load_gen.start()
        self.load_gen_list[i] = load_gen

    # Add forwarding rule (127.0.0.100 -> 127.0.0.1)
    self.assertTrue(util.iptables_redirect('A', '127.0.0.100', '127.0.0.1'),
                    'failed to add a forwarding rule to iptables.')

    # Add virtual host information to MGMT
    VIRTUAL_HOST_NAME = 'virtualhost'
    VIRTUAL_HOST_IP = '127.0.0.100'
    ret = util.pm_add(VIRTUAL_HOST_NAME, VIRTUAL_HOST_IP, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'pm_add fail.')

    # Modify gateway information of MGMT
    server = cluster['servers'][0]
    gw_id = server['id']
    gw_port = server['gateway_port']

    # Delete gateway
    ret = util.gw_del(CLUSTER_NAME, gw_id, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'gw_del fail')

    # Add gateway
    ret = util.gw_add(CLUSTER_NAME, gw_id, VIRTUAL_HOST_NAME, VIRTUAL_HOST_IP, gw_port, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'gw_add fail')

    # Check load balancing
    for i in range(5):
        ok = True
        for s in cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail')
    util.log('load balancing success')

    # Block
    self.assertTrue(util.iptables_drop('A', '127.0.0.100'),
                    'failed to add a blocking rule to iptables.')

    # Check blocked gateway's ops
    for i in range(5):
        ok = True
        tps = util.get_tps(server['ip'], server['gateway_port'], 'gw')
        util.log('%s:%d TPS:%d' % (server['ip'], server['gateway_port'], tps))
        if tps > 10:
            ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail - blocked gateway')
    util.log('load balancing success - blocked gateway')

    # Check unblocked gateways' ops
    for i in range(10):
        ok = True
        for s in cluster['servers']:
            if s == server:
                continue
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail - nonblocked gateways')
    util.log('load balancing success - nonblocked gateways')

    # Unblock
    self.assertTrue(util.iptables_drop('D', '127.0.0.100'),
                    'failed to delete the blocking rule from iptables.')

    # Check load balancing
    ok = False
    for i in xrange(5):
        condition = (lambda s: (s['ops'] <= 10 if s['id'] == gw_id else s['ops'] >= 50))
        if util.check_ops(cluster['servers'], 'gw', condition):
            ok = True
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail - all gateways after unblocking network')
    util.log('load balancing success - all gateways after unblocking network')

    server = cluster['servers'][0]

    # Wait until opinions for the gateway have been deleted.
    for i in xrange(5):
        util.log('Wait until opinions for the gateway have been deleted... %d' % i)
        time.sleep(1)

    # Delete gateway
    ret = util.gw_del(CLUSTER_NAME, gw_id, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'gw_del fail')

    # Add gateway
    ret = util.gw_add(CLUSTER_NAME, gw_id, server['pm_name'], server['ip'], gw_port, MGMT_IP, MGMT_PORT)
    self.assertTrue(ret, 'gw_add fail')

    # Check load balancing
    for i in range(10):
        ok = True
        for s in cluster['servers']:
            tps = util.get_tps(s['ip'], s['gateway_port'], 'gw')
            util.log('%s:%d TPS:%d' % (s['ip'], s['gateway_port'], tps))
            if tps < 50:
                ok = False
        if ok:
            break
        time.sleep(1)
    self.assertTrue(ok, 'load balancing fail - all gateways after re-adding the gateway')
    util.log('load balancing success - all gateways after re-adding the gateway')

    # Go back to initial configuration
    self.assertTrue(util.pm_del(MGMT_IP, MGMT_PORT, VIRTUAL_HOST_NAME),
                    'failed to pm_del to go back to initial configuration.')

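# NOTE: A minimal sketch, not part of the original suite. util.iptables_redirect()
# and util.iptables_drop() used above are defined in the util module; judging
# from the raw commands issued by the earlier variant of
# test_gateway_network_isolation in this file, they presumably wrap util.sudo()
# calls like the following. The names _iptables_redirect_sketch and
# _iptables_drop_sketch are hypothetical.
def _iptables_redirect_sketch(self, op, src_ip, dst_ip):
    # op is 'A' (append) or 'D' (delete), as on the iptables command line.
    out = util.sudo('iptables -t nat -%s OUTPUT -d %s -p tcp -j DNAT --to-destination %s' % (op, src_ip, dst_ip))
    if out.succeeded == False:
        return False
    out = util.sudo('iptables -t nat -%s PREROUTING -d %s -p tcp -j DNAT --to-destination %s' % (op, src_ip, dst_ip))
    return out.succeeded

def _iptables_drop_sketch(self, op, dst_ip):
    out = util.sudo('iptables -%s OUTPUT -d %s -j DROP' % (op, dst_ip))
    return out.succeeded
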