def test_ft_port_fn_verify_shut_noshut():
    """Flap a link repeatedly and verify L3 reachability and oper status
    are unaffected, including across a config save + reload on D1."""
    intf_map = [
        (vars.D1, vars.D1D2P1, intf_data.ip_address),
        (vars.D2, vars.D2D1P1, intf_data.ip_address1),
    ]
    # Assign an IPv4 address on each end of the D1<->D2 link.
    for dut, port, addr in intf_map:
        if not ipapi.config_ip_addr_interface(dut, interface_name=port, ip_address=addr, subnet=intf_data.mask, family="ipv4", config='add'):
            st.report_fail("operation_failed")
    # Baseline reachability in both directions.
    if not ipapi.ping(vars.D1, intf_data.ip_address1, family='ipv4', count=1):
        st.report_fail("ping_fail", intf_data.ip_address, intf_data.ip_address1)
    if not ipapi.ping(vars.D2, intf_data.ip_address, family='ipv4', count=1):
        st.report_fail("ping_fail", intf_data.ip_address1, intf_data.ip_address)
    # Flap the D1 side of the link three times.
    for _ in range(3):
        intfapi.interface_shutdown(vars.D1, [vars.D1D2P1], skip_verify=True)
        intfapi.interface_noshutdown(vars.D1, [vars.D1D2P1], skip_verify=True)
    # Reachability must still hold after the flaps.
    if not ipapi.ping(vars.D1, intf_data.ip_address1, family='ipv4', count=5):
        st.report_fail("ping_fail", intf_data.ip_address, intf_data.ip_address1)
    if not ipapi.ping(vars.D2, intf_data.ip_address, family='ipv4', count=1):
        st.report_fail("ping_fail", intf_data.ip_address1, intf_data.ip_address)
    rbapi.config_save_reload(vars.D1)
    # Remove the addresses, flap again, then expect oper-up on both ends.
    for dut, port, addr in intf_map:
        if not ipapi.config_ip_addr_interface(dut, interface_name=port, ip_address=addr, subnet=intf_data.mask, family="ipv4", config='remove'):
            st.report_fail("operation_failed")
    for _ in range(3):
        intfapi.interface_shutdown(vars.D1, [vars.D1D2P1], skip_verify=True)
        intfapi.interface_noshutdown(vars.D1, [vars.D1D2P1], skip_verify=True)
    for dut, port, _unused in intf_map:
        if not st.poll_wait(intfapi.verify_interface_status, 15, dut, port, "oper", "up"):
            st.report_fail("interface_is_down_on_dut", [port])
    st.report_pass("test_case_passed")
def test_ft_nat_config_reload():
    """Verify dynamic NAPT translations after config save and reload.

    Author: Kiran Vedula
    Flow: clear NAT state, save+reload, send SNAT traffic, read back the
    translated ip/port, send DNAT traffic toward it, and check both
    directions' statistics against the expected packet count.
    """
    nat_obj.clear_nat(vars.D1, translations=True)
    nat_obj.clear_nat(vars.D1, statistics=True)
    st.log("Config reload the DUT")
    reboot_obj.config_save_reload(vars.D1)
    st.log("Get some debug info after config reload is complete")
    ip_obj.show_ip_route(vars.D1)
    arp_obj.show_arp(vars.D1)
    nat_obj.show_nat_translations(vars.D1)
    st.wait(2)
    st.log("Traffic for snat case")
    snat_stream = tg_str_data[1]["tg1_dyn_nat_udp_data_str_id_1"]
    tg1.tg_traffic_control(action='run', handle=snat_stream)
    tg1.tg_traffic_control(action='stop', handle=snat_stream)
    st.wait(data.wait_nat_stats)
    # The translation table must have repopulated after the reload.
    all_entries = nat_obj.get_nat_translations(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[0])
    if not all_entries:
        nat_reboot_debug_fun()
        st.report_fail("nat_translation_table_entry_deleted_incorrectly")
    count = data.pkt_count
    snat_entry = nat_obj.get_nat_translations(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1], src_ip_port=data.local_src_port[0])
    if not snat_entry:
        nat_reboot_debug_fun()
        st.error("Received empty list,nat translation table not updated")
        st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0])
    translated_ip = snat_entry[0]["trn_src_ip"]
    translated_port = snat_entry[0]["trn_src_ip_port"]
    st.log("Traffic for dnat case")
    # Pick the stream whose destination matches the observed translation.
    dnat_stream = tg2_str_selector(translated_ip, translated_port)
    tg2.tg_traffic_control(action='run', handle=dnat_stream)
    tg2.tg_traffic_control(action='stop', handle=dnat_stream)
    st.wait(data.wait_nat_stats)
    snat_stats = nat_obj.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1], src_ip_port=data.local_src_port[0])
    if not snat_stats:
        nat_reboot_debug_fun()
        st.error("Received empty list,nat statistics are not updated")
        st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0])
    # Accept up to 20% loss against the nominal packet count.
    if int(snat_stats[0]['packets']) < 0.80 * int(count):
        nat_reboot_debug_fun()
        st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0])
    dnat_stats = nat_obj.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, dst_ip=translated_ip, dst_ip_port=translated_port)
    if not dnat_stats:
        nat_reboot_debug_fun()
        st.error("Received empty list, nat statistics are not updated")
        st.report_fail("dynamic_dnat_translation_entry_create_fail", data.out_ip_pool[0], data.out_ip_pool[0])
    if int(dnat_stats[0]['packets']) < 0.80 * int(count):
        nat_reboot_debug_fun()
        st.report_fail("dynamic_dnat_translation_entry_create_fail", data.out_ip_pool[0], data.out_ip_pool[0])
    st.report_pass("nat_translation_successful_after_config_reload")
def test_ft_security_config_mgmt_verifying_config_with_save_reboot():
    """FtOpSoScRaFn006: verify that TACACS/RADIUS configuration is retained
    after a config save and reload.

    Author: Sai Durga
    """
    st.log("performing Config save and reloading the device")
    reboot.config_save_reload(vars.D1)
    # Both AAA configs must survive the reload.
    tacacs_config_verify()
    checking_radius_config(security_data.radius_host_ip)
    st.report_pass("security_config_retained_after_save_reboot")
def test_ft_verify_static_portchannel_funtionality_after_save_and_reboot():
    """Scenario 3.3.1: the static LAG configuration must be retained (and
    keep forwarding/hashing traffic) after config save and reload.

    Author: Venkatesh Terli
    """
    st.log(
        'Scenario - 3.3.1 Verify that the Static LAG configuration should be retained after save and reboot.'
    )
    st.log("performing Config save and reboot")
    rbobj.config_save_reload([vars.D1, vars.D2])
    st.wait(10)
    # Port-channel must come back up on both DUTs.
    exceptions = exec_all(True, [
        [portchannelobj.verify_portchannel_state, vars.D1, static_data.portchannel_name, "up"],
        [portchannelobj.verify_portchannel_state, vars.D2, static_data.portchannel_name, "up"],
    ])[1]
    ensure_no_exception(exceptions)
    exceptions = exec_all(True, [
        [intfobj.clear_interface_counters, vars.D1],
        [intfobj.clear_interface_counters, vars.D2],
    ])[1]
    ensure_no_exception(exceptions)
    stream = static_data.streams['D1T1_SD_Mac_Hash1']
    static_data.tg.tg_traffic_control(action='run', stream_handle=[stream])
    st.wait(5)
    static_data.tg.tg_traffic_control(action='stop', stream_handle=[stream])
    # Traffic should hash across all four member links with no loss.
    verify_traffic_hashed_or_not(
        vars.D1,
        [vars.D1D2P1, vars.D1D2P2, vars.D1D2P3, vars.D1D2P4],
        20,
        traffic_loss_verify=True,
        rx_port=vars.D1T1P1,
        tx_port=vars.D2T1P1,
        dut2=vars.D2)
    st.report_pass('portchannel_functionality_after_save_and_reboot')
def test_ft_erspan_config_upload_save_reload_reboot():
    """
    Author: Prudviraj kristipati
    Verify that ERSPAN configuration is stored to config_db file, after
    config save & reload and save & reboot.
    :return:
    """
    mirror.delete_session(vars.D1, data.session_name)
    mirror_args = {"session_name": data.session_name, "src_ip": data.ip_D1T1P1, "dst_ip": data.ip_T1D2P1,
                   "gre_type": data.gre_type, "dscp": data.dscp, "ttl": data.ttl, "queue": data.queue}
    retval = mirror.create_session_table(vars.D1, **mirror_args)
    if not retval:
        st.log("Failed to create mirror session using json file.")
        st.report_fail("mirror_session_fail", data.session_name)
    # NOTE(review): session name "Mirror_Ses" and dst_ip "15.1.1.2" are
    # hard-coded while the session itself is built from data.session_name /
    # data.ip_T1D2P1 — confirm these always agree with the test data.
    if not sconf.verify_running_config(vars.D1, "MIRROR_SESSION", "Mirror_Ses", "dst_ip", "15.1.1.2"):
        st.log("Failed to show mirror session details in running config.")
        st.report_fail("test_case_failure_message", "Failed to display mirror session details in running-config.")
    # Stage 1: config save + reload must preserve the session.
    reboot_obj.config_save_reload(vars.D1)
    if not mirror.verify_session(vars.D1, **mirror_args):
        st.log("Failed to show mirror session details after reload.")
        # Bug fix: leading space added so the report no longer reads
        # "Mirror_Sesafter config save and reload".
        st.report_fail("mirror_session_fail", data.session_name + " after config save and reload")
    if not sconf.verify_running_config(vars.D1, "MIRROR_SESSION", "Mirror_Ses", "dst_ip", "15.1.1.2"):
        st.log("Failed to show mirror session details in running config after reload.")
        st.report_fail("test_case_failure_message", "Failed to display mirror session details in running-config after config save and reload")
    # Stage 2: full reboot must also preserve the session.
    st.reboot(vars.D1)
    if not mirror.verify_session(vars.D1, **mirror_args):
        st.log("failed to show mirror session details after reboot.")
        # Bug fix: leading space added (same run-together message issue).
        st.report_fail("mirror_session_fail", data.session_name + " after save and reboot")
    if not sconf.verify_running_config(vars.D1, "MIRROR_SESSION", "Mirror_Ses", "dst_ip", "15.1.1.2"):
        st.log("failed to show mirror session details in running config after reboot.")
        st.report_fail("test_case_failure_message", "Failed to display mirror session details in running-config after save and reboot")
    st.report_pass("test_case_passed")
def test_mgmt_vrf_reboot_cfgreload():
    """Verify management-VRF reachability after a config reload
    (ft_mgmtVrf_cfgreload) and after a reboot (ft_mgmtVrf_reboot)."""
    st.banner('mgmt_vrf_reboot, mgmt_vrf_cfgreload')
    st.log("Config reload the DUT")
    fail_stage = 0
    config_save_reload(vars.D1)
    eth0_inet = get_ifconfig_inet(vars.D1, 'eth0')
    if not eth0_inet:
        st.report_fail('ip_verification_fail')
    # External ping inside the mgmt VRF, plus a ping to our own eth0 address.
    if not ping(vars.D1, mgmt_vrf.extip, interface=mgmt_vrf.vrfname):
        fail_stage = 1
    if not ping(vars.D1, eth0_inet[0], external=True):
        fail_stage = 1
    if fail_stage == 1:
        st.report_tc_fail("ft_mgmtVrf_cfgreload", "mgmt_vrf_cfgreload_fail")
        st.generate_tech_support(vars.D1, "ft_mgmtVrf_cfgreload")
    else:
        st.report_tc_pass("ft_mgmtVrf_cfgreload", "mgmt_vrf_cfgreload_pass")
    # Same checks again across a full reboot.
    config_save(vars.D1, "sonic")
    config_save(vars.D1, "vtysh")
    st.reboot(vars.D1)
    eth0_inet = get_ifconfig_inet(vars.D1, 'eth0')
    if not eth0_inet:
        st.report_fail('ip_verification_fail')
    if not ping(vars.D1, mgmt_vrf.extip, interface=mgmt_vrf.vrfname):
        fail_stage = 2
    if not ping(vars.D1, eth0_inet[0], external=True):
        fail_stage = 2
    # NOTE(review): if stage 1 failed and stage 2 passes, fail_stage stays 1,
    # so the reboot sub-TC is reported pass but the test still fails overall —
    # this mirrors the original flag handling.
    if fail_stage == 2:
        st.report_tc_fail("ft_mgmtVrf_reboot", "mgmt_vrf_reboot_fail")
        st.generate_tech_support(vars.D1, "ft_mgmtVrf_reboot")
    else:
        st.report_tc_pass("ft_mgmtVrf_reboot", "mgmt_vrf_reboot_pass")
    if fail_stage:
        st.report_fail("test_case_failed")
    else:
        st.report_pass("test_case_passed")
def test_ft_sf_verify_buffer_pool_counters():
    """
    Snapshot-feature buffer-pool counter verification.

    Authors: prudviraj k and phani kumar ravula.

    Loads the device's buffer-pool json config, does a config save + reload,
    then for several sub-TCs verifies that buffer-pool watermark counters move
    off their initial value under traffic and clear back within tolerance.
    """
    result = 0
    per_result = 0
    # Build the path to the platform-specific j2 template and render it into
    # the config file via sonic-cfggen on the DUT.
    sf_data.platform_name_summary = get_platform_summary(vars.D1)
    sf_data.platform_name = sf_data.platform_name_summary["platform"]
    sf_data.platform_hwsku = sf_data.platform_name_summary["hwsku"]
    path = "/usr/share/sonic/device/{}/{}/{}".format(sf_data.platform_name, sf_data.platform_hwsku, sf_data.device_j2_file)
    # Adjacent string literals concatenate before .format() is applied, so
    # this formats "sonic-cfggen -d -t {} > {}" as a whole.
    convert_json = "sonic-cfggen -d -t " "{} > {}".format(
        path, sf_data.config_file)
    sfapi.load_json_config(vars.D1, convert_json, sf_data.config_file)
    reboot_api.config_save_reload(vars.D1)
    st.log("To make sure after reload DUT is fully operational")
    st.wait(sf_data.reload_interval)
    # Configure and verify the snapshot polling interval.
    if not sfapi.config_snapshot_interval(
            vars.D1, snap="interval", interval_val=sf_data.snapshot_interval):
        st.error("Failed to configure snapshot interval")
        result += 1
    match = [{'snapshotinterval': sf_data.snapshot_interval}]
    if not sfapi.verify(vars.D1, 'snapshot_interval', verify_list=match):
        st.error("Failed to verify the configured snapshot interval")
        result += 1
    # Start unicast traffic and let two snapshot intervals elapse so the
    # watermark counters are refreshed.
    sf_tg_traffic_start_stop(sf_data.unicast, True)
    st.log(
        "waiting for two snapshot interval times to get the counter values reflect correctly"
    )
    st.wait(2 * sf_data.snapshot_interval)
    st.banner('#### buffer_Pool_for_user_watermark####')
    st.banner('TC name :::: ft_sf_buffer_pool_using_uwm ::::')
    match = {'pool': 'ingress_lossless_pool'}
    value = {'bytes': sf_data.initial_counter_value}
    # NOTE(review): the condition is deliberately non-negated — if the counter
    # still MATCHES its initial value after traffic, it never moved, which is
    # the failure case.
    if sfapi.verify_buffer_pool(vars.D1,
                                'buffer_pool_watermark',
                                verify_list=match,
                                key=value):
        st.error(
            "Failed to verify the buffer pool counters for user watermark")
        result += 1
        st.report_tc_fail("ft_sf_buffer_pool_using_uwm", "snapshot_tc_verify",
                          "buffer_pool_for_user_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_using_uwm", "snapshot_tc_verify",
                          "buffer_pool_for_user_watermark", "successful")
    st.banner('TC name :::: ft_sf_buffer_pool_using_persistent_wm ::::')
    st.banner('#### buffer_pool_for_persistent_watermark using percentage####')
    # Persistent watermark checked two ways: as a percentage and via the CLI
    # byte counter; either failing marks the persistent-wm sub-TC failed.
    match = {'pool': 'egress_lossless_pool'}
    value = {'percent': sf_data.initial_counter_value}
    if sfapi.verify_buffer_pool(vars.D1,
                                'buffer_pool_persistent-watermark',
                                verify_list=match,
                                key=value,
                                percent=sf_data.percentage[0]):
        st.error(
            "Failed to verify the buffer pool counters for persistent watermark"
        )
        result += 1
        per_result += 1
    st.banner('#### buffer_pool_for_persistent_watermark using CLI####')
    match = {'pool': 'egress_lossless_pool'}
    value = {'bytes': sf_data.initial_counter_value}
    if sfapi.verify_buffer_pool(vars.D1,
                                'buffer_pool_persistent-watermark',
                                verify_list=match,
                                key=value):
        st.error(
            "Failed to verify the buffer pool counters for persistent watermark"
        )
        result += 1
        per_result += 1
    if per_result:
        st.report_tc_fail("ft_sf_buffer_pool_using_persistent_wm",
                          "snapshot_tc_verify",
                          "buffer_pool_for_persistent_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_using_persistent_wm",
                          "snapshot_tc_verify",
                          "buffer_pool_for_persistent_watermark", "successful")
    st.banner('#### buffer_pool_using_counter_DB ####')
    st.banner('TC name :::: ft_sf_buffer_pool_using_counter_DB ::::')
    # Cross-check the raw COUNTERS_DB entry for the ingress lossless pool.
    match = [{
        'SAI_BUFFER_POOL_STAT_WATERMARK_BYTES': sf_data.initial_counter_value
    }]
    if sfapi.verify(vars.D1,
                    'buffer_pool_counters_DB',
                    oid_type='ingress_lossless_pool',
                    verify_list=match):
        st.error(
            "Failed to verify the ingress lossless buffer pool counter using counter DB value"
        )
        result += 1
        st.report_tc_fail("ft_sf_buffer_pool_using_counter_DB",
                          "snapshot_tc_verify", "ingress lossless buffer pool",
                          "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_using_counter_DB",
                          "snapshot_tc_verify", "ingress lossless buffer pool",
                          "successful")
    # Stop traffic and wait again before exercising the clear commands.
    sf_tg_traffic_start_stop(sf_data.unicast, False)
    st.log(
        "waiting for two snapshot interval times to get the counter values reflect correctly"
    )
    st.wait(2 * sf_data.snapshot_interval)
    st.banner('#### clear_buffer_Pool_for_user_watermark####')
    st.banner('TC name :::: ft_sf_buffer_pool_clear_using_uwm ::::')
    if not sfapi.config_snapshot_interval(vars.D1,
                                          snap="clear_buffer-pool watermark"):
        st.error("Failed to clear buffer-pool watermark")
    st.log(
        "After clear buffer_pool checking the stats with 10 cells tolerance")
    counters = sfapi.get(vars.D1,
                         'buffer_pool_watermark',
                         get_value='bytes',
                         match={'pool': 'ingress_lossless_pool'})
    # After clear, residual bytes must be within the configured tolerance.
    if counters > sf_data.buffer_pool_tolerance:
        st.error("Failed to clear the buffer pool counters for user watermark")
        result += 1
        st.report_tc_fail("ft_sf_buffer_pool_clear_using_uwm",
                          "snapshot_tc_verify",
                          "buffer_pool_clear_for_user_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_clear_using_uwm",
                          "snapshot_tc_verify",
                          "buffer_pool_clear_for_user_watermark", "successful")
    st.banner('#### clear_buffer_pool_for_persistent_watermark ####')
    st.banner('TC name :::: ft_sf_buffer_pool_clear_using_persistent_wm ::::')
    if not sfapi.config_snapshot_interval(
            vars.D1, snap="clear_buffer-pool persistent-watermark"):
        st.error("Failed to clear_buffer-pool persistent-watermark")
    st.log(
        "After clear buffer_pool checking the stats with 10 cells tolerance")
    # NOTE(review): this reads 'buffer_pool_watermark' (not the persistent
    # view) for the egress pool — confirm that is the intended table.
    counters = sfapi.get(vars.D1,
                         'buffer_pool_watermark',
                         get_value='bytes',
                         match={'pool': 'egress_lossless_pool'})
    if counters > sf_data.buffer_pool_tolerance:
        st.error(
            "Failed to clear the buffer pool counters for persistent watermark"
        )
        result += 1
        st.report_tc_fail("ft_sf_buffer_pool_clear_using_persistent_wm",
                          "snapshot_tc_verify",
                          "buffer_pool_clear_for_persistent_watermark",
                          "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_clear_using_persistent_wm",
                          "snapshot_tc_verify",
                          "buffer_pool_clear_for_persistent_watermark",
                          "successful")
    # Overall verdict: any sub-check failure fails the test after collecting
    # debug logs.
    if not result:
        st.report_pass("snapshot_tc_verify", "buffer pool", "successful")
    else:
        sf_collecting_debug_logs_when_test_fails()
        st.report_fail("snapshot_tc_verify", "buffer pool", "failed")
def test_ft_bgp_unnumbered_nondefault_vrf():
    """
    FtOtSoRtBgpUnFn016, FtOtSoRtBgpUnFn017.

    Author: Sesha Reddy Koilkonda

    Verify a BGP unnumbered (IPv6 link-local) session inside a non-default
    VRF stays Established across (1) save + reboot and (2) config reload,
    then unconfigure everything. Failures are accumulated in `result` so the
    cleanup always runs before the final verdict.
    :return:
    """
    utils_obj.banner_log("FtOtSoRtBgpUnFn016,FtOtSoRtBgpUnFn017")
    result = 0
    bgp_obj.cleanup_router_bgp(st.get_dut_names())
    # Create the VRF on both DUTs.
    dict1 = {'vrf_name': data.vrf_name, 'skip_error': True}
    parallel.exec_parallel(True, [vars.D1, vars.D2], vrf_api.config_vrf,
                           [dict1, dict1])
    # Disable link-local before binding the interfaces into the VRF, then
    # re-enable it afterwards so the unnumbered session can form.
    utils.exec_all(True, [[
        ip_obj.config_interface_ip6_link_local, vars.D1, vars.D1D2P1, "disable"
    ], [
        ip_obj.config_interface_ip6_link_local, vars.D2, vars.D2D1P1, "disable"
    ]])
    dict1 = {
        'vrf_name': data.vrf_name,
        'intf_name': vars.D1D2P1,
        'skip_error': True
    }
    dict2 = {
        'vrf_name': data.vrf_name,
        'intf_name': vars.D2D1P1,
        'skip_error': True
    }
    parallel.exec_parallel(True, [vars.D1, vars.D2], vrf_api.bind_vrf_interface,
                           [dict1, dict2])
    utils.exec_all(
        True, [[ip_obj.config_interface_ip6_link_local, vars.D1, vars.D1D2P1],
               [ip_obj.config_interface_ip6_link_local, vars.D2, vars.D2D1P1]])
    # Debug aid: dump the IPv6 addresses on both DUTs.
    utils.exec_all(True,
                   [[ip_obj.get_interface_ip_address, vars.D1, None, "ipv6"],
                    [ip_obj.get_interface_ip_address, vars.D2, None, "ipv6"]])
    # Configure the unnumbered eBGP neighbors (interface-based, remote-as
    # external) under the VRF on both DUTs.
    dict1 = {
        'vrf_name': data.vrf_name,
        'router_id': data.d1_rid,
        'local_as': data.d1_local_as,
        'addr_family': 'ipv6',
        'neighbor': vars.D1D2P1,
        'remote_as': 'external',
        'config_type_list': ["remote-as", "activate"],
        'interface': vars.D1D2P1
    }
    dict2 = {
        'vrf_name': data.vrf_name,
        'router_id': data.d2_rid,
        'local_as': data.d2_local_as,
        'addr_family': 'ipv6',
        'neighbor': vars.D2D1P1,
        'remote_as': 'external',
        'config_type_list': ["remote-as", "activate"],
        'interface': vars.D2D1P1
    }
    parallel.exec_parallel(True, [vars.D1, vars.D2], bgp_obj.config_bgp,
                           [dict1, dict2])
    # Check 1: session reaches Established after initial configuration.
    if not utils.poll_wait(bgp_obj.verify_bgp_summary,
                           data.wait_timer,
                           vars.D1,
                           family='ipv6',
                           shell=bgp_cli_type,
                           neighbor=vars.D1D2P1,
                           vrf=data.vrf_name,
                           state='Established'):
        st.error(
            "BGP unnumbered neighbourship with the non-default vrf configuration failed."
        )
        result += 1
    st.log('######------Save and reboot------######')
    reboot_obj.config_save(vars.D1, "sonic")
    reboot_obj.config_save(vars.D1, "vtysh")
    st.reboot(vars.D1)
    # Check 2: session re-establishes after save and reboot.
    if not utils.poll_wait(bgp_obj.verify_bgp_summary,
                           data.wait_timer,
                           vars.D1,
                           family='ipv6',
                           shell=bgp_cli_type,
                           neighbor=vars.D1D2P1,
                           vrf=data.vrf_name,
                           state='Established'):
        st.error(
            "BGP unnumbered neighbourship with the non-default vrf configuration failed after save and reboot."
        )
        result += 1
    st.log('######------Config reload with BGP unnumbered------######')
    st.log("Config reload the DUT")
    reboot_obj.config_save_reload(vars.D1)
    # Check 3: session re-establishes after a config reload.
    if not utils.poll_wait(bgp_obj.verify_bgp_summary,
                           data.wait_timer,
                           vars.D1,
                           family='ipv6',
                           shell=bgp_cli_type,
                           neighbor=vars.D1D2P1,
                           vrf=data.vrf_name,
                           state='Established'):
        st.error(
            "BGP unnumbered neighbourship with the non-default vrf configuration failed after config reload."
        )
        result += 1
    # unconfig part: remove BGP, unbind interfaces, delete the VRF, and
    # restore link-local addressing on the test interfaces.
    dict1 = {
        'vrf_name': data.vrf_name,
        'local_as': data.d1_local_as,
        'config': 'no',
        'removeBGP': 'yes',
        'config_type_list': ['removeBGP']
    }
    dict2 = {
        'vrf_name': data.vrf_name,
        'local_as': data.d2_local_as,
        'config': 'no',
        'removeBGP': 'yes',
        'config_type_list': ['removeBGP']
    }
    parallel.exec_parallel(True, [vars.D1, vars.D2], bgp_obj.config_bgp,
                           [dict1, dict2])
    utils.exec_all(True, [[
        ip_obj.config_interface_ip6_link_local, vars.D1, vars.D1D2P1, "disable"
    ], [
        ip_obj.config_interface_ip6_link_local, vars.D2, vars.D2D1P1, "disable"
    ]])
    dict1 = {
        'vrf_name': data.vrf_name,
        'intf_name': vars.D1D2P1,
        'skip_error': True,
        'config': 'no'
    }
    dict2 = {
        'vrf_name': data.vrf_name,
        'intf_name': vars.D2D1P1,
        'skip_error': True,
        'config': 'no'
    }
    parallel.exec_parallel(True, [vars.D1, vars.D2], vrf_api.bind_vrf_interface,
                           [dict1, dict2])
    dict1 = {'vrf_name': data.vrf_name, 'skip_error': True, 'config': 'no'}
    parallel.exec_parallel(True, [vars.D1, vars.D2], vrf_api.config_vrf,
                           [dict1, dict1])
    bgp_obj.cleanup_router_bgp(st.get_dut_names())
    utils.exec_all(
        True,
        [[ip_obj.config_interface_ip6_link_local, vars.D1, d1_int_ipv6_list],
         [ip_obj.config_interface_ip6_link_local, vars.D2, d2_int_ipv6_list]])
    if result == 0:
        st.report_pass("test_case_passed")
    else:
        st.error(
            "BGP unnumbered neighborship failed with the non-default vrf configuration."
        )
        st.report_fail("test_case_failed")