def test_add_li_trigger(self, patch_dtcp_flowtap_client):
    """Verify dtcp_add_li_trigger returns the patched flowtap client for every supported trigger type."""
    expected = patch_dtcp_flowtap_client.return_value
    for trigger_type in ("interface_id", "username", "session_id", "ip_addr"):
        result = dtcp_suites.dtcp_add_li_trigger(trigger_list="trigger",
                                                 trigger_type=trigger_type)
        self.assertEqual(result, expected)
def mxvc_gres_test(**kwargs):
    """
    GRES test for mxvc.

    :param kwargs:
    device_id:              device name, e.g. 'r0'
    new_mesh:               create new traffic mesh (True/False)
    subs:                   subs involved in traffic (see add_susbcriber_mesh)
    duration:               traffic running time, by default is 60 seconds
    traffic_args:           dictionary for creating traffic which include name, rate,
                            frame_size, etc (check add_subscriber_mesh in cstutils.py)
    mininum_rx_percentage:  minimum percentage for received traffic
    maximum_rx_percentage:  maximum percentage for received traffic
    gres_type:              gres type, global/local/localbackup
    gres_method:            gres method (cli/reboot for global,
                            cli/kernel_crash/scb_failover for local, default is cli)
    global_gres_method:     'cli', 'kernel_crash', 'scb_failover', default is cli
    post_action_wait_time:  wait time in seconds, default is 600
    sleep_between_iteration: wait time in seconds between iterations, default is 600
    health_check:           collect health check info, default is False
    iteration:              iteration count, default is 1
    :return:
    """
    post_action_wait_time = int(kwargs.get('post_action_wait_time', 600))
    sleep_between_iteration = int(kwargs.get('sleep_between_iteration', 600))
    dtcp_test = kwargs.get('dtcp_test', False)
    device_id = kwargs.get('device_id',
                           bbe.get_devices(device_tags='dut', id_only=True)[0])
    router = t.get_handle(device_id)
    iteration = kwargs.get('iteration', 1)
    gres_type = kwargs.get('gres_type', 'global')
    gres_method = kwargs.get('gres_method', 'cli')
    timeout = kwargs.get('gres_check_timeout', 1800)
    initial_master_re = cst.get_master_re_name(device_id)
    router.cli(command='show virtual-chassis status')
    cst.prepare_subscriber_traffic(**kwargs)
    cst.start_traffic()
    cst.get_router_sub_summary(device_id)
    if dtcp_test:
        # start each run from a clean LI trigger state
        dtcp.dtcp_delete_li_trigger()
        dtcp.dtcp_add_li_trigger()
    cst.check_fpc(device_id=device_id)
    cst.get_vcp_ports(device_id=device_id)
    count = 1
    while count <= iteration:
        t.log("checking GRES ready state before test in iteration {}".format(count))
        cst.check_gres_ready(device_id, check_timeout=timeout)
        command_list = [
            'show database-replication summary', 'show system relay group'
        ]
        # log replication/relay state before GRES
        for command in command_list:
            router.cli(command=command)
        health_check = kwargs.get('health_check', False)
        if health_check:
            # resource snapshot before the switchover
            cst.get_re_fpc_memory()
            hk.healthcheck_pfe_resource_monitor()
            hk.healthcheck_get_task_memory()
            hk.healthcheck_run_pfe_command(
                command='show pfe manager session statistics')
        command_list = [
            'show chassis fpc', 'show virtual-chassis vc-port',
            'show virtual-chassis status', 'show route summary',
            'show route forwarding-table summary'
        ]
        # log chassis/route state before GRES
        for command in command_list:
            router.cli(command=command)
        t.log("starting MXVC GRES test with type {} method {}".format(
            gres_type, gres_method))
        if gres_type == 'global':
            if gres_method == 'cli':
                t.log("Executing a Global GRES on the master by CLI.")
                router.cli(
                    command="request virtual-chassis routing-engine master switch",
                    pattern='yes,no')
                router.cli(command='yes')
            elif gres_method == 'kernel_crash':
                # trigger GRES by kernel crash, old master will enter into db>
                # and then recovered
                host = router.current_node.current_controller.name
                cst.panic_re_recover(host=host)
            elif gres_method == 'scb_failover':
                # manual SCB pull/push — requires an operator at the chassis
                t.log("pull out scb card")
                input("PRESS ENTER TO CONTINUE.")
                time.sleep(600)
                t.log("push in scb card")
                input("PRESS ENTER TO CONTINUE.")
        if 'local' in gres_type:
            if 'localbackup' in gres_type:
                t.log("switch controller to the Vc-Bm before test")
                master_node = initial_master_re.split(sep='-')[0]
                backup_node = 'member1' if 'primary' in initial_master_re else 'primary'
                for rename in ['re0', 're1']:
                    router.set_current_controller(system_node=backup_node,
                                                  controller=rename)
                    # NOTE(review): "is_msater" looks like a typo for "is_master",
                    # but it may be the actual (misspelled) project API name —
                    # left unchanged; confirm against the controller class.
                    if router.current_node.current_controller.is_msater():
                        break
            if gres_method == 'reboot':
                t.log("perform GRES type {} using {}".format(gres_type, gres_method))
                router.cli(command="request system reboot local", pattern='yes,no')
                router.cli(command='yes')
            if gres_method == 'cli':
                t.log("perform GRES type {} using {}".format(gres_type, gres_method))
                resp = router.cli(
                    command="request chassis routing-engine master switch",
                    pattern='yes,no').resp
                if 'error' in resp or 'not honored' in resp:
                    raise Exception("failed to execute local switch")
                router.cli(command='yes')
        time.sleep(post_action_wait_time)
        # NOTE(review): `all` is the builtin passed positionally — presumably the
        # reconnect API treats any truthy first argument as "reconnect all nodes";
        # confirm against the router-handle API.
        router.reconnect(all, timeout=600)
        master_node = router.detect_master_node()
        new_master_re = cst.get_master_re_name(device_id)
        # BUGFIX: the placeholder was never filled — .format(new_master_re) was missing
        t.log("current master re after GRES test is {}".format(new_master_re))
        if new_master_re == initial_master_re:
            if 'localbackup' not in gres_type:
                t.log(
                    'ERROR',
                    'GRES failed since master RE did not switch, new master is {}'
                    .format(new_master_re))
            else:
                t.log("localbackup switched successfully")
        else:
            t.log("GRES switched successfully, new master is {}".format(
                new_master_re))
        master_re = new_master_re.split(sep='-')[1]
        router.set_current_controller(system_node=master_node,
                                      controller=master_re)
        BBEJunosUtil.cpu_settle(cpu_threshold=20,
                                idle_min=int(kwargs.get('cpu_settle', '75')),
                                dead_time=1200,
                                interval=20)
        cst.check_fpc(device_id=device_id)
        cst.get_vcp_ports(device_id=device_id)
        cst.get_router_sub_summary(device_id)
        cst.check_link_status(router=device_id)
        cst.stop_traffic()
        cst.verify_traffic(**kwargs)
        if dtcp_test:
            dtcp.dtcp_list_li_trigger(**kwargs)
        count += 1
        time.sleep(sleep_between_iteration)
    if health_check:
        # post-test resource snapshot for comparison with the baseline
        cst.get_re_fpc_memory()
        hk.healthcheck_pfe_resource_monitor()
        hk.healthcheck_get_task_memory()
        hk.healthcheck_run_pfe_command(
            command='show pfe manager session statistics')
    if dtcp_test:
        dtcp.dtcp_delete_li_trigger()
def mxvc_fpc_mic_reboot_test(**kwargs):
    """
    The method simulates the event of fpc, mic or pic reboot in a MXVC setup.

    :param kwargs:
    device_id:              device name, e.g. 'r0'
    new_mesh:               create new traffic mesh (True/False)
    subs:                   subs involved in traffic (see add_susbcriber_mesh)
    duration:               traffic running time, by default is 60 seconds
    traffic_args:           dictionary for creating traffic which include name, rate,
                            frame_size, etc (check add_subscriber_mesh in cstutils.py)
    mininum_rx_percentage:  minimum percentage for received traffic
    maximum_rx_percentage:  maximum percentage for received traffic
    method:                 offline/restart/panic, default is offline
    component:              fpc/pic/mic
    slot_info:              (optional), fpc slot in dictionary,
                            e.g. {'member':'member0', 'slot': '1'}
    iteration:              iteration count
    post_action_wait_time:  wait time in seconds, default is 600
    health_check:           collect health check info, default is False
    :return:
    """
    iteration = kwargs.get('iteration', 1)
    component = kwargs.get('component', 'fpc')
    # restart/panic only make sense for a whole FPC; mic/pic are always offlined
    method = 'offline' if component != 'fpc' else kwargs.get('method', 'offline')
    # 'offline' is exercised as an offline/online round trip
    action_list = ['offline', 'online'] if method == 'offline' else [method]
    post_action_wait_time = int(kwargs.get('post_action_wait_time', 600))
    dtcp_test = kwargs.get('dtcp_test', False)
    device_id = kwargs.get('device_id',
                           bbe.get_devices(device_tags='dut', id_only=True)[0])
    router = t.get_handle(device_id)
    if dtcp_test:
        dtcp.dtcp_delete_li_trigger()
        dtcp.dtcp_add_li_trigger()
    health_check = kwargs.get('health_check', False)
    if health_check:
        # baseline resource snapshot before any action
        cst.get_re_fpc_memory()
        hk.healthcheck_pfe_resource_monitor()
        hk.healthcheck_get_task_memory()
        hk.healthcheck_run_pfe_command(command='show heap 0 accounting pc')
        hk.healthcheck_run_pfe_command(
            command='show pfe manager session statistics')
    cst.prepare_subscriber_traffic(**kwargs)
    cst.start_traffic()
    cst.get_router_sub_summary(device_id)
    pic_info = cst.get_pic_info(device_id)
    # build `iteration` distinct (member, fpc, mic, pic) targets
    chosen_list = []
    while len(chosen_list) < iteration:
        if 'slot_info' in kwargs:
            member = kwargs['slot_info']['member']
            chosen_fpc = kwargs['slot_info']['slot']
        else:
            member = random.choice(list(pic_info.keys()))
            chosen_fpc = random.choice(list(pic_info[member].keys()))
        pic_list = pic_info[member][chosen_fpc]
        chosen_mic = '0'
        # NOTE: pop(-1) removes the card-type entry (and mutates pic_info in place)
        if 'MPC' in pic_list.pop(-1):
            # on MPC cards pics 0/1 sit on mic 0, pics 2/3 on mic 1
            mic_list = []
            for item in pic_list:
                if item in ['0', '1']:
                    mic_list.append('0')
                if item in ['2', '3']:
                    mic_list.append('1')
            chosen_mic = random.choice(mic_list)
        chosen_pic = random.choice(pic_list)
        chosen_set = (member, chosen_fpc, chosen_mic, chosen_pic)
        if chosen_set not in chosen_list:
            chosen_list.append(chosen_set)
    t.log("the chosen list for action {} is {}".format(method, chosen_list))
    count = 1
    for item in chosen_list:
        for action in action_list:
            t.log("action {} on chosen {} in iteration #{}".format(
                action, item, count))
            if action == 'panic':
                router.vty(command='set parser security 10',
                           destination='{}-fpc{}'.format(item[0], item[1]))
                router.vty(command='test panic',
                           destination='{}-fpc{}'.format(item[0], item[1]),
                           pattern='(.*)')
                t.log("waiting for coredump to be generated after panic test")
                #time.sleep(200)
            if component == 'fpc':
                command = "request chassis fpc slot {} member {} {}".format(
                    item[1], item[0].strip('member'), action)
                router.cli(command=command)
            if component == 'mic':
                command = "request chassis mic mic-slot {} " \
                          "fpc-slot {} member {} {}".format(item[2], item[1],
                                                            item[0].strip('member'),
                                                            action)
                router.cli(command=command)
            if component == 'pic':
                command = "request chassis pic pic-slot {} fpc-slot {} member" \
                          " {} {}".format(item[3], item[1],
                                          item[0].strip('member'), action)
                router.cli(command=command)
            base_time = time.time()
            shutdown = False
            while True:
                t.log("waiting for FPC/PIC state change")
                time.sleep(20)
                resp = router.cli(
                    command="show chassis fpc pic-status {} member {}".format(
                        item[1], item[0].strip('member'))).resp
                if action in ['restart', 'panic']:
                    # wait for the Offline phase first, then for recovery
                    # NOTE(review): if the first poll lands before the FPC goes
                    # Offline, this declares success early — confirm the 20s
                    # poll always catches the Offline window.
                    if re.search(r'Offline', resp) and not shutdown:
                        shutdown = True
                        continue
                    if not re.search(r'Offline', resp):
                        # BUGFIX: .format() was called on t.log's return value
                        # (AttributeError); moved inside the log call
                        t.log("action {} finished correctly on {} fpc {}".format(
                            action, item[0], item[1]))
                        break
                elif action == 'offline':
                    if re.search(r'Offline', resp):
                        t.log(
                            "action {} started correctly on {} fpc {}".format(
                                action, item[0], item[1]))
                        break
                elif action == 'online':
                    if not re.search(r'Offline', resp):
                        t.log(
                            "action {} started correctly on {} fpc {}".format(
                                action, item[0], item[1]))
                        break
                if time.time() - base_time > post_action_wait_time:
                    raise Exception(
                        '{} FPC {} failed to transit to the expected state'.
                        format(item[0], item[1]))
        cst.prepare_subscriber_traffic(**kwargs)
        if dtcp_test:
            dtcp.dtcp_list_li_trigger(**kwargs)
        if health_check:
            cst.get_re_fpc_memory()
            hk.healthcheck_pfe_resource_monitor()
            hk.healthcheck_get_task_memory()
            hk.healthcheck_run_pfe_command(command='show heap 0 accounting pc')
            hk.healthcheck_run_pfe_command(
                command='show pfe manager session statistics')
        count += 1
    if dtcp_test:
        dtcp.dtcp_delete_li_trigger()
def mxvc_blackout_period_test(**kwargs):
    """
    The MXVC blackout period test, must have dhcp clients, which used to test blackout.

    :param kwargs:
    device_id:              device name, e.g. 'r0'
    new_mesh:               create new traffic mesh (True/False), default is true
    subs:                   subs involved in traffic (see add_susbcriber_mesh)
    duration:               traffic running time, by default is 60 seconds
    remove_traffic_after_test: remove traffic items after test, by default is True
    traffic_args:           dictionary for creating traffic which include name, rate,
                            frame_size, etc (check add_subscriber_mesh in cstutils.py)
    mininum_rx_percentage:  minimum percentage for received traffic
    maximum_rx_percentage:  maximum percentage for received traffic
    :return:
    """
    device_id = kwargs.get('device_id',
                           bbe.get_devices(device_tags='dut', id_only=True)[0])
    router = t.get_handle(device_id)
    dtcp_test = kwargs.get('dtcp_test', False)
    if dtcp_test:
        # start from a clean LI trigger state
        dtcp.dtcp_delete_li_trigger()
        dtcp.dtcp_add_li_trigger()
    cst.prepare_subscriber_traffic(**kwargs)
    cst.start_traffic()
    initial_master_re = cst.get_master_re_name(device_id)
    clients_info = cst.get_router_sub_summary(device_id)
    t.log("subscriber info before blackout period test is {}".format(
        clients_info))
    cst.get_vcp_ports(device_id=device_id)
    cst.check_fpc(device_id=device_id)
    timeout = kwargs.get('gres_check_timeout', 1800)
    t.log("releasing the dhcp susbcribers for blackout test")
    subs = bbe.get_subscriber_handles(protocol='dhcp')
    t.log("checking GRES ready state before test")
    cst.check_gres_ready(device_id, check_timeout=timeout)
    # release the dhcp clients so they can be re-bound during the blackout window
    cst.cst_release_clients(subs=subs)
    health_check = kwargs.get('health_check', False)
    if health_check:
        # baseline resource snapshot before GRES
        cst.get_re_fpc_memory()
        hk.healthcheck_pfe_resource_monitor()
        hk.healthcheck_get_task_memory()
        hk.healthcheck_run_pfe_command(command='show heap 0 accounting pc')
        hk.healthcheck_run_pfe_command(
            command='show pfe manager session statistics')
    command_list = [
        'show chassis fpc', 'show virtual-chassis vc-port',
        'show virtual-chassis status', 'show route summary',
        'show route forwarding-table summary'
    ]
    # log chassis/route state before GRES
    for command in command_list:
        router.cli(command=command)
    # baseline tester-side session counts before GRES
    rt_output = cst.get_rt_subs_info()
    base_up_subs_in_rt = rt_output['rt_sessions_up']
    base_down_subs_in_rt = rt_output['rt_sessions_down']
    result = cst.get_configured_subs(subs=subs)
    expected_subs_in_rt = result['expected_total_session_in_testers']
    t.log("starting GRES, and then rebinding subscribers")
    router.cli(command="request virtual-chassis routing-engine master switch",
               pattern='yes,no')
    router.cli(command='yes')
    t.log("Start dhcp clients in blackout period")
    base_time = time.time()
    for subscriber in subs:
        subscriber.start()
    retry = 0
    # NOTE(review): `retry` is never incremented, so the loop condition is
    # effectively `while True` and `blackout_retry` is dead — the loop only
    # exits via the breaks below (first login seen, or 1200s elapsed).
    # Confirm whether retry was meant to advance per poll or per restart.
    while retry <= kwargs.get('blackout_retry', 1):
        time.sleep(20)
        rt_output = cst.get_rt_subs_info()
        delta_up = rt_output['rt_sessions_up'] - base_up_subs_in_rt
        delta_down = rt_output['rt_sessions_down'] - base_down_subs_in_rt
        delta_time = time.time() - base_time
        if delta_up > 0:
            # at least one client bound during/after the blackout window
            t.log("{} subscribers login in {} seconds after gres".format(
                delta_up, delta_time))
            break
        else:
            # no logins yet: if no sessions moved at all, kick the clients again
            if (expected_subs_in_rt - delta_up - delta_down) == 0 and delta_time < 1200:
                t.log("restart clients")
                for subscriber in subs:
                    subscriber.restart()
            elif delta_time > 1200:
                t.log(
                    'ERROR',
                    'Failed to login any client in blackout time {}'.format(
                        delta_time))
                break
    t.log("waiting for MXVC to finish GRES switch")
    # NOTE(review): `all` is the builtin passed positionally — presumably the
    # reconnect API treats any truthy first argument as "reconnect all"; confirm.
    router.reconnect(all, timeout=600)
    master_node = router.detect_master_node()
    new_master_re = cst.get_master_re_name(device_id)
    if new_master_re == initial_master_re:
        t.log(
            'ERROR',
            'GRES failed since master RE did not switch, new master is {}'.
            format(new_master_re))
    master_re = new_master_re.split(sep='-')[1]
    router.set_current_controller(system_node=master_node,
                                  controller=master_re)
    BBEJunosUtil.cpu_settle(cpu_threshold=20,
                            idle_min=int(kwargs.get('cpu_settle', '75')),
                            dead_time=1200,
                            interval=20)
    # bring back any clients that did not rebind on their own
    cst.cst_start_clients(restart_unbound_only=True)
    if health_check:
        # post-GRES resource snapshot for comparison with the baseline
        cst.get_re_fpc_memory()
        hk.healthcheck_pfe_resource_monitor()
        hk.healthcheck_get_task_memory()
        hk.healthcheck_run_pfe_command(command='show heap 0 accounting pc')
        hk.healthcheck_run_pfe_command(
            command='show pfe manager session statistics')
    cst.get_vcp_ports(device_id=device_id)
    cst.check_fpc(device_id=device_id)
    cst.get_router_sub_summary(device_id)
    cst.stop_traffic()
    t.log("verify traffic after blackout test")
    cst.prepare_subscriber_traffic(**kwargs)
    if dtcp_test:
        t.log("remove dtcp trigger after test")
        dtcp.dtcp_delete_li_trigger()
def mxvc_chassis_reboot(**kwargs):
    """
    The method simulates the event of backup/master chassis reboot in a MXVC setup
    which will be used by VCMasterReboot/vcStandbyREReboot/VCMasterPowerCycle/
    VCBackupReboot/VCBackupPowerCycle.

    :param kwargs:
    device_id:              device name, e.g. 'r0'
    new_mesh:               create new traffic mesh (True/False)
    subs:                   subs involved in traffic (see add_susbcriber_mesh)
    duration:               traffic running time, by default is 60 seconds
    traffic_args:           dictionary for creating traffic which include name, rate,
                            frame_size, etc (check add_subscriber_mesh in cstutils.py)
    mininum_rx_percentage:  minimum percentage for received traffic
    maximum_rx_percentage:  maximum percentage for received traffic
    chassis:                reboot chassis, e.g. 'VC-M' or 'VC-B' or 'VC-STDBY-RE-ALL'
    method:                 'cli' or 'powercycle', default is cli
    post_action_wait_time:  wait time in seconds, default is 600
    health_check:           collect health check info, default is False
    :return:
    """
    post_action_wait_time = int(kwargs.get('post_action_wait_time', 600))
    dtcp_test = kwargs.get('dtcp_test', False)
    device_id = kwargs.get('device_id',
                           bbe.get_devices(device_tags='dut', id_only=True)[0])
    router = t.get_handle(device_id)
    reboot_chassis = kwargs.get('chassis', 'VC-B').upper()
    method = kwargs.get('method', 'cli')
    health_check = kwargs.get('health_check', False)
    cst.prepare_subscriber_traffic(**kwargs)
    if dtcp_test:
        # start from a clean LI trigger state
        dtcp.dtcp_delete_li_trigger()
        dtcp.dtcp_add_li_trigger()
    cst.start_traffic(**kwargs)
    router.cli(command='show virtual-chassis vc-port')
    if health_check:
        # baseline resource snapshot before the reboot event
        cst.get_re_fpc_memory()
        hk.healthcheck_pfe_resource_monitor()
        hk.healthcheck_get_task_memory()
        hk.healthcheck_run_pfe_command(command='show heap 0 accounting pc')
        hk.healthcheck_run_pfe_command(
            command='show pfe manager session statistics')
    master_node = router.detect_master_node()
    cst.check_gres_ready(device_id)
    ###reboot chassis REs
    if method == 'cli':
        if reboot_chassis == 'VC-STDBY-RE-ALL':
            # reboot the standby RE of every member; no mastership change expected
            router.cli(
                command="request system reboot all-members other-routing-engine"
            )
        elif reboot_chassis == 'VC-M':
            # master chassis: member slot depends on which node holds mastership
            if master_node == 'primary':
                slot = '0'
            else:
                slot = '1'
            command = "request system reboot member {} both-routing-engines".format(
                slot)
            router.cli(command=command, pattern='yes,no')
            router.cli(command='yes')
        elif reboot_chassis == 'VC-B':
            # backup chassis: the opposite member slot
            if master_node == 'primary':
                slot = '1'
            else:
                slot = '0'
            command = "request system reboot member {} both-routing-engines".format(
                slot)
            router.cli(command=command, pattern='yes,no')
            router.cli(command='yes')
    if method == 'powercycle':
        hostname = router.current_node.current_controller.name
        cst.power_manager(chassis=hostname, action='cycle')
    t.log("waiting for {} seconds after reboot".format(post_action_wait_time))
    time.sleep(post_action_wait_time)
    # NOTE(review): `all` is the builtin passed positionally — presumably the
    # reconnect API treats any truthy first argument as "reconnect all"; confirm.
    router.reconnect(all)
    # re-attach to whichever RE is master after the reboot
    master_node = router.detect_master_node()
    master_re = cst.get_master_re_name(device_id).split(sep='-')[1]
    router.set_current_controller(system_node=master_node,
                                  controller=master_re)
    BBEJunosUtil.cpu_settle(cpu_threshold=20,
                            idle_min=int(kwargs.get('cpu_settle', '75')),
                            dead_time=1200,
                            interval=20)
    router.cli(command='show virtual-chassis status')
    if health_check:
        # post-reboot resource snapshot for comparison with the baseline
        cst.get_re_fpc_memory()
        hk.healthcheck_pfe_resource_monitor()
        hk.healthcheck_get_task_memory()
        hk.healthcheck_run_pfe_command(command='show heap 0 accounting pc')
        hk.healthcheck_run_pfe_command(
            command='show pfe manager session statistics')
    t.log('verify client count after action {}'.format(method))
    cst.verify_client_count(device_id=device_id)
    t.log('verify client traffic after action {}'.format(method))
    cst.stop_traffic(**kwargs)
    cst.verify_traffic(**kwargs)
    if dtcp_test:
        t.log("remove dtcp trigger after action {}".format(method))
        dtcp.dtcp_delete_li_trigger()
def lns_cluster_failover_test(**kwargs):
    """
    Bring down one LNS in LNS cluster and verify that affected subscribers can be
    bound on working LNS.

    :param kwargs:
    device_id:              device name, e.g. 'r0'
    new_mesh:               create new traffic mesh (True/False)
    subs:                   subs involved in traffic (see add_susbcriber_mesh)
    duration:               traffic running time, by default is 60 seconds
    traffic_args:           dictionary for creating traffic which include name, rate,
                            frame_size, etc (check add_subscriber_mesh in cstutils.py)
    mininum_rx_percentage:  minimum percentage for received traffic
    maximum_rx_percentage:  maximum percentage for received traffic
    post_action_wait_time:  wait time in seconds, default is 600
    sleep_between_iteration: wait time in seconds between iterations, default is 600
    dtcp_test:              True/False
    iteration:              iteration count, default is 1
    :return:
    """
    dut_list = kwargs.get('device_id',
                          bbe.get_devices(device_tags='dut', id_only=True))
    dtcp_test = kwargs.get('dtcp_test', False)
    lac_device = bbe.get_devices(device_tags='lac', id_only=True)
    lns_device = bbe.get_devices(device_tags='lns', id_only=True)
    device_list = lac_device + lns_device
    for iteration in range(1, int(kwargs.get('iteration', 1)) + 1):
        cst.prepare_subscriber_traffic(**kwargs)
        cst.start_traffic()
        # log l2tp/aaa state on every lac and lns before the failover
        for device in device_list:
            dev1 = t.get_handle(device)
            dev1.cli(command="show service l2tp summary")
            cst.get_aaa_accounting_stats(device)
        # pick one LNS at random and take it out of service
        lns_id = random.choice(lns_device)
        router = t.get_handle(lns_id)
        t.log("bring down interfaces on {} and reboot".format(lns_id))
        command_list = []
        for intf in bbe.get_interfaces(device=lns_id):
            command_list.append('set interfaces {} disable'.format(intf.pic))
        command_list.append('commit')
        router.config(command_list=command_list)
        router.reboot(all=True)
        t.log("waiting for cpu settle in LNS")
        base_time = time.time()
        for device in device_list:
            dev1 = t.get_handle(device)
            BBEJunosUtil.set_bbe_junos_util_device_handle(dev1)
            BBEJunosUtil.cpu_settle(cpu_threshold=10,
                                    idle_min=int(kwargs.get('cpu_settle', '85')),
                                    dead_time=1200,
                                    interval=20)
        # poll the aggregate client count across all duts until it stops changing
        initial_client = 0
        for dutid in dut_list:
            summary = cst.get_router_sub_summary(dutid)
            initial_client += summary['client']
        while True:
            current_client = 0
            for dutid in dut_list:
                summary = cst.get_router_sub_summary(dutid)
                current_client += summary['client']
            if current_client != initial_client:
                # still churning: remember the latest count and keep polling
                initial_client = current_client
            else:
                t.log("client count {} is in stable state".format(current_client))
                break
            if time.time() - base_time > 1200:
                cst.stop_traffic()
                raise Exception("client count is not stable after 1200s")
            t.log("waiting for 60s to check the subscriber count again")
            time.sleep(60)
        cst.stop_traffic()
        t.log("relogin unbounded clients and verify traffic")
        cst.cst_start_clients(restart_unbound_only=True)
        if dtcp_test:
            dtcp.dtcp_delete_li_trigger()
            dtcp.dtcp_add_li_trigger()
        suites.unicast_traffic_test(**kwargs)
        if dtcp_test:
            dtcp.dtcp_list_li_trigger(**kwargs)
    if dtcp_test:
        # final cleanup of LI triggers after all iterations
        dtcp.dtcp_delete_li_trigger()
def l2tp_disconnect_test(**kwargs):
    """
    Disconnect all L2TP sessions/tunnel on given box either by:
    clear services l2tp tunnel all
    clear services l2tp session all
    in yaml file, the lac/lns needs to be tagged with lac/lns, l2tp clients needs
    with tag l2tpx.

    :param kwargs:
    device_id:              device name, e.g. 'r0'
    new_mesh:               create new traffic mesh (True/False)
    subs:                   subs involved in traffic (see add_susbcriber_mesh)
    duration:               traffic running time, by default is 60 seconds
    traffic_args:           dictionary for creating traffic which include name, rate,
                            frame_size, etc (check add_subscriber_mesh in cstutils.py)
    mininum_rx_percentage:  minimum percentage for received traffic
    maximum_rx_percentage:  maximum percentage for received traffic
    clear_by:               session/tunnel, default is session
    clear_on:               lac/lns, default is lns
    post_action_wait_time:  wait time in seconds, default is 600
    sleep_between_iteration: wait time in seconds between iterations, default is 600
    dtcp_test:              True/False
    check_interval:         interval to check if the subs on lac/lns are synced,
                            default is 120s
    check_tunnel_close:     True/False, default is False
    iteration:              iteration count, default is 1
    :return:
    """
    interval = int(kwargs.get('check_interval', 120))
    check_tunnel_close = kwargs.get('check_tunnel_close', False)
    clear_by = kwargs.get('clear_by', 'session')
    clear_on = kwargs.get('clear_on', 'lns')
    iteration = int(kwargs.get('iteration', 1))
    dut_list = kwargs.get('device_id',
                          bbe.get_devices(device_tags='dut', id_only=True))
    dtcp_test = kwargs.get('dtcp_test', False)
    lac_device = bbe.get_devices(device_tags='lac', id_only=True)
    lns_device = bbe.get_devices(device_tags='lns', id_only=True)
    device_list = lac_device + lns_device
    count = 1
    while count <= iteration:
        cst.prepare_subscriber_traffic(**kwargs)
        cst.start_traffic()
        # log l2tp/aaa state on every lac and lns before the clear
        for device in device_list:
            dev1 = t.get_handle(device)
            dev1.cli(command="show service l2tp summary")
            cst.get_aaa_accounting_stats(device)
        t.log("clearing {} on {} in iteration {}".format(clear_by, clear_on,
                                                         count))
        clear_on_device = bbe.get_devices(device_tags=clear_on, id_only=True)
        for device in clear_on_device:
            dev2 = t.get_handle(device)
            dev2.cli(command="clear services l2tp {} all | no-more".format(
                clear_by))
        time.sleep(90)
        for device in device_list:
            dev1 = t.get_handle(device)
            BBEJunosUtil.set_bbe_junos_util_device_handle(dev1)
            BBEJunosUtil.cpu_settle(cpu_threshold=10,
                                    idle_min=int(kwargs.get('cpu_settle', '85')),
                                    dead_time=1200,
                                    interval=20)
        t.log("waiting for subscriber stable after teardown l2tp subs")
        # the non-l2tp subscribers should be untouched by the clear
        total_subs = bbe.get_subscriber_handles()
        l2tp_subs = bbe.get_subscriber_handles(tag='l2tp')
        for item in l2tp_subs:
            total_subs.remove(item)
        other_subs = total_subs
        result1 = cst.get_configured_subs(subs=other_subs)
        basetime = time.time()
        while True:
            try:
                cst.verify_client_count(subs=other_subs)
            # BUGFIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit; narrowed to Exception
            except Exception:
                t.log("waiting {}s to check the client counts".format(interval))
                time.sleep(interval)
                if time.time() - basetime > 1800:
                    raise Exception(
                        "clients failed to reach the specified count after 1800s")
            else:
                break
        result = cst.get_rt_subs_info()
        if result['rt_sessions_up'] != result1['expected_total_session_in_testers']:
            raise Exception("subscribers count {} in tester is not the same as expected"
                            " {}".format(result['rt_sessions_up'],
                                         result1['expected_total_session_in_testers']))
        # verify the clear actually removed the tunnels/sessions on every dut
        status = True
        for device in dut_list:
            dev1 = t.get_handle(device)
            resp = dev1.pyez('get_l2tp_summary_information').resp
            tunnel_count = int(resp.findtext('l2tp-summary-table/l2tp-tunnels'))
            session_count = int(resp.findtext('l2tp-summary-table/l2tp-sessions'))
            if clear_by == 'tunnel' and tunnel_count > 0:
                status = False
                t.log('WARN',
                      'failed to clear tunnel in iteration {} clear by {}'.format(
                          count, clear_by))
            # clearing sessions may optionally be required to close tunnels too
            if clear_by == 'session' and check_tunnel_close and tunnel_count > 0:
                status = False
                t.log('WARN',
                      'failed to clear tunnel in iteration {} clear by {}'.format(
                          count, clear_by))
            if session_count > 0:
                status = False
                t.log('WARN',
                      'failed to clear session in iteration {} clear by {}'.format(
                          count, clear_by))
            cst.get_aaa_accounting_stats(device)
        if not status:
            raise Exception(
                "failed to clear session or tunnel in iteration {}".format(count))
        cst.stop_traffic()
        if dtcp_test:
            dtcp.dtcp_delete_li_trigger()
            dtcp.dtcp_add_li_trigger()
        # re-bind the cleared l2tp subscribers and verify traffic end to end
        cst.cst_start_clients(subs=l2tp_subs, restart_unbound_only=True)
        suites.unicast_traffic_test(**kwargs)
        t.log("l2tp disconnect test finished in iteration #{}".format(count))
        count += 1