def test_ecmp_vxlan_func003():
    """Verify IPv4 and IPv6 ECMP load-balancing and hash polarization in a
    VxLAN leaf-spine topology (TCs FtOpSoRoLBFunc004, 008, 010).

    Sequence:
      T1  - verify default ip load-share config on all five DUTs in parallel.
      T2-3  - run v4+v6 streams from leaf3 and verify traffic is hashed evenly
              across leaf TGEN ports, leaf3->spine uplinks and spine->leaf links.
      T4  - remove hash keys (index [3] of ecmpv4/ecmpv6 on spines/leaf1/leaf2,
            index [1] on leaf3) and verify the keys disappear from the show output.
      T5-8  - rerun v4 then v6 traffic and verify each hop now picks exactly ONE
              egress port (no ECMP spraying).
      T10 - reconfigure the removed hash keys and verify the show output.
      T11-12 - rerun both streams and verify even ECMP distribution is restored.

    Verdict is reported via st.report_* APIs; the function returns nothing.
    Fix in this revision: the pre-check summary log in the Step T6 section was
    mislabeled "Step T8".
    """
    tc_list = ['FtOpSoRoLBFunc004', 'FtOpSoRoLBFunc008', 'FtOpSoRoLBFunc010']
    st.banner(
        "Testcase: Verify IPv4 and IPv6 ECMP Loadbalance and Hash polarization in VxLAN topology.\n TCs:{}."
        .format(tc_list))
    retvar = True          # overall pass/fail for the whole function
    fail_msgs = ''         # accumulated failure text for the final report
    tc_res = {}            # per-TC verdicts for st.report_tc_pass
    for tc in tc_list:
        tc_res[tc] = True
    # Role aliases for the five DUTs in the leaf-spine topology.
    spine1 = data.dut1
    spine2 = data.dut2
    leaf1 = data.dut3
    leaf2 = data.dut4
    leaf3 = data.dut5
    vxlan_tolerance = 30   # percent tolerance for counter-ratio checks over VxLAN

    st.banner("Step T1: Verify default show cli.")

    # T1 helpers: each verifies the default load-share keys + seed on one DUT;
    # they run in parallel via utils.exec_all and return True/False.
    def f3_t1_1():
        res1 = ip.verify_ip_loadshare(spine1, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T1 Default show failed on spine1."
            st.log(fail_msg)
            return False
        return True

    def f3_t1_2():
        res1 = ip.verify_ip_loadshare(spine2, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T1 Default show failed on spine2."
            st.log(fail_msg)
            return False
        return True

    def f3_t1_3():
        res1 = ip.verify_ip_loadshare(leaf1, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T1 Default show failed on leaf1."
            st.log(fail_msg)
            return False
        return True

    def f3_t1_4():
        res1 = ip.verify_ip_loadshare(leaf2, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T1 Default show failed on leaf2."
            st.log(fail_msg)
            return False
        return True

    def f3_t1_5():
        res1 = ip.verify_ip_loadshare(leaf3, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T1 Default show failed on leaf3."
            st.log(fail_msg)
            return False
        return True

    [res, _] = utils.exec_all(
        True, [[f3_t1_1], [f3_t1_2], [f3_t1_3], [f3_t1_4], [f3_t1_5]])
    if False in set(res):
        fail_msg = "ERROR: Step T1 show cli for default values failed."
        fail_msgs += fail_msg
        st.log(fail_msg)
        tc_res[tc_list[0]] = False
        retvar = False

    # Disabled tuning step kept for reference (was a no-op triple-quoted block):
    # st.log("Step T1a: Fine tuning the config and verification.")
    # f3_f2_1 = lambda x: intf.interface_shutdown(spine1, [vars.D1D3P2, vars.D1D4P2, vars.D1D5P2])
    # f3_f2_2 = lambda x: intf.interface_shutdown(spine2, [vars.D2D3P2, vars.D2D4P2, vars.D2D5P2])
    # [res, _] = utils.exec_all(True, [[f3_f2_1, 1], [f3_f2_2, 1]])
    # st.wait(waitvar)

    st.banner("Step T2: Start all IPv4 and IPv6 Streams.")
    tg_v4s = [tg_l3l1_1['stream_id']]
    tg_v6s = [tg_l3l1_6_1['stream_id']]
    tg_strs = tg_v4s + tg_v6s
    # Counter-clear helpers; the unused lambda arg matches exec_all's [fn, arg] form.
    f3_f1_1 = lambda x: intf.clear_interface_counters(spine1)
    f3_f1_2 = lambda x: intf.clear_interface_counters(spine2)
    f3_f1_3 = lambda x: intf.clear_interface_counters(leaf1)
    f3_f1_4 = lambda x: intf.clear_interface_counters(leaf2)
    f3_f1_5 = lambda x: intf.clear_interface_counters(leaf3)
    [res, _] = utils.exec_all(
        True,
        [[f3_f1_1, 1], [f3_f1_2, 1], [f3_f1_3, 1], [f3_f1_4, 1], [f3_f1_5, 1]])
    tg.tg_traffic_control(action='clear_stats', port_handle=tg_all)
    res = tg.tg_traffic_control(action='run', handle=tg_strs)
    st.wait(waitvar)
    res = tg.tg_traffic_control(action='stop', handle=tg_strs)
    st.wait(waitvar / 2)

    st.banner("Step T3: Verify ECMP.")
    # Port groups used in every counter-ratio check below.
    leaf_tg_ports = [[leaf1, vars.D3T1P1], [leaf1, vars.D3T1P2],
                     [leaf2, vars.D4T1P1], [leaf2, vars.D4T1P2]]
    l3_spine_ports = [[leaf3, vars.D5D1P1], [leaf3, vars.D5D1P2],
                      [leaf3, vars.D5D2P1], [leaf3, vars.D5D2P2]]
    spine_leaf_ports = [[spine1, vars.D1D3P1], [spine1, vars.D1D3P2],
                        [spine1, vars.D1D4P1], [spine1, vars.D1D4P2],
                        [spine2, vars.D2D3P1], [spine2, vars.D2D3P2],
                        [spine2, vars.D2D4P1], [spine2, vars.D2D4P2]]
    # Expect even spray: 4-way at the leaf/TGEN and leaf3->spine hops, 8-way
    # across the spine->leaf links.
    res1 = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]],
                                tx=leaf_tg_ports,
                                ratio=[[1], [0.25, 0.25, 0.25, 0.25]],
                                clear_save=True,
                                tolerance=vxlan_tolerance)
    res2 = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]],
                                tx=l3_spine_ports,
                                ratio=[[1], [0.25, 0.25, 0.25, 0.25]],
                                saved_flag=True,
                                tolerance=vxlan_tolerance)
    res3 = verify_intf_counters(
        rx=[[leaf3, vars.D5T1P1]],
        tx=spine_leaf_ports,
        ratio=[[1], [0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125]],
        saved_flag=True,
        tolerance=vxlan_tolerance)
    st.log("Step T3: res1={}, res2={}, res3={} - all should be True".format(
        res1, res2, res3))
    if res1 is False or res2 is False or res3 is False:
        fail_msg = "ERROR: Step T3 Initial ECMP failed."
        fail_msgs += fail_msg
        st.log(fail_msg)
        tc_res[tc_list[0]] = tc_res[tc_list[1]] = False
        retvar = False
        gen_tech_supp(filename='f3_t3_')
        more_debugs(duts=dut_list)

    st.banner("Step T4: Remove dst-ip on leaf3, src-l4-port from other duts.")

    # T4 helpers: remove a hash key and verify it is GONE (verify must be False).
    def f3_t4_1():
        ip.config_ip_loadshare_hash(spine1, key='ip', val=ecmpv4[3], config='no')
        ip.config_ip_loadshare_hash(spine1, key='ipv6', val=ecmpv6[3], config='no')
        res1 = ip.verify_ip_loadshare(spine1, ip=ecmpv4[3], ipv6=ecmpv6[3])
        if res1 is True:
            fail_msg = "ERROR: Step T4 show failed on spine1."
            st.log(fail_msg)
            return False
        return True

    def f3_t4_2():
        ip.config_ip_loadshare_hash(spine2, key='ip', val=ecmpv4[3], config='no')
        ip.config_ip_loadshare_hash(spine2, key='ipv6', val=ecmpv6[3], config='no')
        res1 = ip.verify_ip_loadshare(spine2, ip=ecmpv4[3], ipv6=ecmpv6[3])
        if res1 is True:
            fail_msg = "ERROR: Step T4 show failed on spine2."
            st.log(fail_msg)
            return False
        return True

    def f3_t4_3():
        ip.config_ip_loadshare_hash(leaf1, key='ip', val=ecmpv4[3], config='no')
        ip.config_ip_loadshare_hash(leaf1, key='ipv6', val=ecmpv6[3], config='no')
        res1 = ip.verify_ip_loadshare(leaf1, ip=ecmpv4[3], ipv6=ecmpv6[3])
        if res1 is True:
            fail_msg = "ERROR: Step T4 show failed on leaf1."
            st.log(fail_msg)
            return False
        return True

    def f3_t4_4():
        ip.config_ip_loadshare_hash(leaf2, key='ip', val=ecmpv4[3], config='no')
        ip.config_ip_loadshare_hash(leaf2, key='ipv6', val=ecmpv6[3], config='no')
        res1 = ip.verify_ip_loadshare(leaf2, ip=ecmpv4[3], ipv6=ecmpv6[3])
        if res1 is True:
            fail_msg = "ERROR: Step T4 show failed on leaf2."
            st.log(fail_msg)
            return False
        return True

    def f3_t4_5():
        # leaf3 removes a different key (index [1]) than the other DUTs.
        ip.config_ip_loadshare_hash(leaf3, key='ip', val=ecmpv4[1], config='no')
        ip.config_ip_loadshare_hash(leaf3, key='ipv6', val=ecmpv6[1], config='no')
        res1 = ip.verify_ip_loadshare(leaf3, ip=ecmpv4[1], ipv6=ecmpv6[1])
        if res1 is True:
            fail_msg = "ERROR: Step T4 show failed on leaf3."
            st.log(fail_msg)
            return False
        return True

    [res, _] = utils.exec_all(
        True, [[f3_t4_1], [f3_t4_2], [f3_t4_3], [f3_t4_4], [f3_t4_5]])
    if False in set(res):
        fail_msg = "ERROR: Step T4 show cli failed."
        fail_msgs += fail_msg
        st.log(fail_msg)
        tc_res[tc_list[0]] = tc_res[tc_list[1]] = False
        retvar = False
    st.wait(waitvar, "Waiting for hardware programming to complete.")

    st.banner("Step T5: Start the Streams - IPv4.")
    [res, _] = utils.exec_all(
        True,
        [[f3_f1_1, 1], [f3_f1_2, 1], [f3_f1_3, 1], [f3_f1_4, 1], [f3_f1_5, 1]])
    tg.tg_traffic_control(action='clear_stats', port_handle=tg_all)
    res = tg.tg_traffic_control(action='run', handle=tg_v4s)
    st.wait(waitvar)
    res = tg.tg_traffic_control(action='stop', handle=tg_v4s)
    st.wait(waitvar / 2)

    st.banner("Step T6: Verify no ECMP at leaf1 and leaf2 - IPv4.")
    # With hash keys removed, exactly ONE of the one-hot ratio checks should
    # pass per port group (all traffic takes a single link).
    res1a = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]], tx=leaf_tg_ports,
                                 ratio=[[1], [1, 0, 0, 0]], clear_save=True)
    res1b = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]], tx=leaf_tg_ports,
                                 ratio=[[1], [0, 1, 0, 0]], saved_flag=True)
    res1c = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]], tx=leaf_tg_ports,
                                 ratio=[[1], [0, 0, 1, 0]], saved_flag=True)
    res1d = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]], tx=leaf_tg_ports,
                                 ratio=[[1], [0, 0, 0, 1]], saved_flag=True)
    st.log(
        "Step T6: tx11={}, tx12={}, tx21={}, tx22={} - only one of these should be True"
        .format(res1a, res1b, res1c, res1d))
    res1 = len([r for r in [res1a, res1b, res1c, res1d] if r is True])
    ratio_list = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
    res2_list = []
    for r_l in ratio_list:
        res = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]],
                                   tx=l3_spine_ports,
                                   ratio=[[1], r_l],
                                   saved_flag=True)
        res2_list.append(res)
    st.log(
        "Step T6: L3S1P1,L3S1P2,L3S2P1,L3S2P2={} - only one of these should be True"
        .format(res2_list))
    res2 = len([r for r in res2_list if r is True])
    ratio_list3 = [[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0],
                   [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0],
                   [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0],
                   [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 1]]
    res3_list = []
    for r_l in ratio_list3:
        res = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]],
                                   tx=spine_leaf_ports,
                                   ratio=[[1], r_l],
                                   saved_flag=True)
        res3_list.append(res)
    st.log(
        "Step T6: S1L1P1,S1L1P2,S1L2P1,S1L2P2,S2L1P1,S2L1P2,S2L2P1,S2L2P2={} - only one of these should be True"
        .format(res3_list))
    res3 = len([r for r in res3_list if r is True])
    # FIX: this summary belongs to Step T6 but was previously logged as "Step T8".
    st.log("Step T6: res1={}, res2={}, res3={} - all should be 1".format(
        res1, res2, res3))
    if res1 != 1 or res2 != 1 or res3 != 1:
        st.log("Step T6: res1={}, res2={}, res3={} - all should be 1".format(
            res1, res2, res3))
        st.log(
            "Step T6: tx11={}, tx12={}, tx21={}, tx22={} - only one of these should be True"
            .format(res1a, res1b, res1c, res1d))
        st.log(
            "Step T6: L3S1P1,L3S1P2,L3S2P1,L3S2P2={} - only one of these should be True"
            .format(res2_list))
        st.log(
            "Step T6: S1L1P1,S1L1P2,S1L2P1,S1L2P2,S2L1P1,S2L1P2,S2L2P1,S2L2P2={} - only one of these should be True"
            .format(res3_list))
        fail_msg = "ERROR: Step T6 IPv4 - ECMP still working even after disabling."
        fail_msgs += fail_msg
        st.log(fail_msg)
        tc_res[tc_list[0]] = False
        retvar = False

    st.banner("Step T7: Start the Streams - IPv6.")
    [res, _] = utils.exec_all(
        True,
        [[f3_f1_1, 1], [f3_f1_2, 1], [f3_f1_3, 1], [f3_f1_4, 1], [f3_f1_5, 1]])
    tg.tg_traffic_control(action='clear_stats', port_handle=tg_all)
    res = tg.tg_traffic_control(action='run', handle=tg_v6s)
    st.wait(waitvar)
    res = tg.tg_traffic_control(action='stop', handle=tg_v6s)
    st.wait(waitvar / 2)

    st.banner("Step T8: Verify no ECMP at leaf1 and leaf2 - IPv6.")
    # Same one-hot single-path checks as Step T6, now for the IPv6 stream.
    res1a = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]], tx=leaf_tg_ports,
                                 ratio=[[1], [1, 0, 0, 0]], clear_save=True)
    res1b = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]], tx=leaf_tg_ports,
                                 ratio=[[1], [0, 1, 0, 0]], saved_flag=True)
    res1c = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]], tx=leaf_tg_ports,
                                 ratio=[[1], [0, 0, 1, 0]], saved_flag=True)
    res1d = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]], tx=leaf_tg_ports,
                                 ratio=[[1], [0, 0, 0, 1]], saved_flag=True)
    st.log(
        "Step T8: tx11={}, tx12={}, tx21={}, tx22={} - only one of these should be True"
        .format(res1a, res1b, res1c, res1d))
    res1 = len([r for r in [res1a, res1b, res1c, res1d] if r is True])
    res2_list = []
    for r_l in ratio_list:
        res = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]],
                                   tx=l3_spine_ports,
                                   ratio=[[1], r_l],
                                   saved_flag=True)
        res2_list.append(res)
    st.log(
        "Step T8: L3S1P1,L3S1P2,L3S2P1,L3S2P2={} - only one of these should be True"
        .format(res2_list))
    res2 = len([r for r in res2_list if r is True])
    res3_list = []
    for r_l in ratio_list3:
        res = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]],
                                   tx=spine_leaf_ports,
                                   ratio=[[1], r_l],
                                   saved_flag=True)
        res3_list.append(res)
    st.log(
        "Step T8: S1L1P1,S1L1P2,S1L2P1,S1L2P2,S2L1P1,S2L1P2,S2L2P1,S2L2P2={} - only one of these should be True"
        .format(res3_list))
    res3 = len([r for r in res3_list if r is True])
    st.log("Step T8: res1={}, res2={}, res3={} - all should be 1".format(
        res1, res2, res3))
    if res1 != 1 or res2 != 1 or res3 != 1:
        st.log("Step T8: res1={}, res2={}, res3={} - all should be 1".format(
            res1, res2, res3))
        st.log(
            "Step T8: tx11={}, tx12={}, tx21={}, tx22={} - only one of these should be True"
            .format(res1a, res1b, res1c, res1d))
        st.log(
            "Step T8: L3S1P1,L3S1P2,L3S2P1,L3S2P2={} - only one of these should be True"
            .format(res2_list))
        st.log(
            "Step T8: S1L1P1,S1L1P2,S1L2P1,S1L2P2,S2L1P1,S2L1P2,S2L2P1,S2L2P2={} - only one of these should be True"
            .format(res3_list))
        fail_msg = "ERROR: Step T8 IPv6 - ECMP still working even after disabling."
        fail_msgs += fail_msg
        st.log(fail_msg)
        tc_res[tc_list[1]] = False
        retvar = False
    if retvar is False:
        gen_tech_supp(filename='f3_t8_')
        more_debugs(duts=dut_list)

    st.banner("Step T10: Reconfigure the ECMP params.")

    # T10 helpers: add the removed hash keys back and verify the FULL key set.
    def f3_t10_1():
        ip.config_ip_loadshare_hash(spine1, key='ip', val=ecmpv4[3])
        ip.config_ip_loadshare_hash(spine1, key='ipv6', val=ecmpv6[3])
        res1 = ip.verify_ip_loadshare(spine1, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T10 show failed on spine1."
            st.log(fail_msg)
            return False
        return True

    def f3_t10_2():
        ip.config_ip_loadshare_hash(spine2, key='ip', val=ecmpv4[3])
        ip.config_ip_loadshare_hash(spine2, key='ipv6', val=ecmpv6[3])
        res1 = ip.verify_ip_loadshare(spine2, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T10 show failed on spine2."
            st.log(fail_msg)
            return False
        return True

    def f3_t10_3():
        ip.config_ip_loadshare_hash(leaf1, key='ip', val=ecmpv4[3])
        ip.config_ip_loadshare_hash(leaf1, key='ipv6', val=ecmpv6[3])
        res1 = ip.verify_ip_loadshare(leaf1, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T10 show failed on leaf1."
            st.log(fail_msg)
            return False
        return True

    def f3_t10_4():
        ip.config_ip_loadshare_hash(leaf2, key='ip', val=ecmpv4[3])
        ip.config_ip_loadshare_hash(leaf2, key='ipv6', val=ecmpv6[3])
        res1 = ip.verify_ip_loadshare(leaf2, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T10 show failed on leaf2."
            st.log(fail_msg)
            return False
        return True

    def f3_t10_5():
        ip.config_ip_loadshare_hash(leaf3, key='ip', val=ecmpv4[1])
        ip.config_ip_loadshare_hash(leaf3, key='ipv6', val=ecmpv6[1])
        res1 = ip.verify_ip_loadshare(leaf3, ip=ecmpv4, ipv6=ecmpv6,
                                      seed=data.seed_def)
        if res1 is False:
            fail_msg = "ERROR: Step T10 show failed on leaf3."
            st.log(fail_msg)
            return False
        return True

    [res, _] = utils.exec_all(
        True, [[f3_t10_1], [f3_t10_2], [f3_t10_3], [f3_t10_4], [f3_t10_5]])
    if False in set(res):
        fail_msg = "ERROR: Step T10 show cli failed."
        fail_msgs += fail_msg
        st.log(fail_msg)
        tc_res[tc_list[0]] = tc_res[tc_list[1]] = tc_res[tc_list[2]] = False
        retvar = False
    st.wait(waitvar, "Waiting for hardware programming to complete.")

    st.banner("Step T11: Start the Streams.")
    [res, _] = utils.exec_all(
        True,
        [[f3_f1_1, 1], [f3_f1_2, 1], [f3_f1_3, 1], [f3_f1_4, 1], [f3_f1_5, 1]])
    tg.tg_traffic_control(action='clear_stats', port_handle=tg_all)
    res = tg.tg_traffic_control(action='run', handle=tg_strs)
    st.wait(waitvar)
    res = tg.tg_traffic_control(action='stop', handle=tg_strs)
    st.wait(waitvar / 2)

    st.banner("Step T12: Verify ECMP is restored.")
    res1 = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]],
                                tx=leaf_tg_ports,
                                ratio=[[1], [0.25, 0.25, 0.25, 0.25]],
                                clear_save=True,
                                tolerance=vxlan_tolerance)
    res2 = verify_intf_counters(rx=[[leaf3, vars.D5T1P1]],
                                tx=l3_spine_ports,
                                ratio=[[1], [0.25, 0.25, 0.25, 0.25]],
                                saved_flag=True,
                                tolerance=vxlan_tolerance)
    res3 = verify_intf_counters(
        rx=[[leaf3, vars.D5T1P1]],
        tx=spine_leaf_ports,
        ratio=[[1], [0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125]],
        saved_flag=True,
        tolerance=vxlan_tolerance)
    st.log("Step T12: res1={}, res2={}, res3={} - all should be True".format(
        res1, res2, res3))
    if res1 is False or res2 is False or res3 is False:
        fail_msg = "ERROR: Step T12 ECMP restoration failed."
        fail_msgs += fail_msg
        st.log(fail_msg)
        tc_res[tc_list[0]] = tc_res[tc_list[1]] = tc_res[tc_list[2]] = False
        retvar = False
        gen_tech_supp(filename='f3_t12_')
        more_debugs(duts=dut_list)

    # Final verdicts: report each still-passing TC, then the overall result.
    for tc in tc_list:
        if tc_res[tc]:
            st.report_tc_pass(tc, "tc_passed")
    if retvar is False:
        st.report_fail("test_case_failure_message", fail_msgs)
    st.report_pass("test_case_passed")
def evpn_underlay_hooks(request):
    """Suite-level setup generator for the EVPN underlay tests.

    Setup phase (before ``yield``): builds the 5549 EVPN base config on all
    nodes in parallel, creates TGEN streams, verifies MC-LAG domain/interface
    state on both LVTEP leaf nodes, verifies BGP EVPN neighborships and VxLAN
    tunnel status on every node (aborting the suite via ``st.report_fail`` on
    failure), then saves the running config (both SONiC and vtysh) on the
    MC-LAG active/standby pair and the first spine.

    No teardown statements follow the ``yield`` within this view.
    """
    global vars
    create_glob_vars()
    vars = st.get_testbed_vars()
    # This suite does not support the click UI; skip everything else.
    if st.get_ui_type() == 'click':
        st.report_unsupported("test_execution_skipped",
                              "Not supported for ui_type - click")
    api_list = [[create_evpn_5549_config]]
    parallel.exec_all(True, api_list, True)
    create_stream()
    st.log("verify MC LAG status in LVTEP nodes")
    # NOTE(review): the return values of mclag.verify_domain / verify_interfaces
    # are discarded here, so an MC-LAG verification failure would not abort the
    # suite — confirm whether that is intentional.
    mclag.verify_domain(
        evpn_dict["leaf_node_list"][0],
        domain_id=evpn_dict["l3_vni_sag"]['mlag_domain_id'],
        session_status='OK',
        local_ip=evpn_dict['leaf1']['iccpd_ip_list'][0],
        peer_ip=evpn_dict['leaf1']['iccpd_ip_list'][1],
        mclag_intfs=1,
        peer_link_inf=evpn_dict["leaf1"]["iccpd_pch_intf_list"][0],
        node_role='Active')
    mclag.verify_domain(
        evpn_dict["leaf_node_list"][1],
        domain_id=evpn_dict["l3_vni_sag"]['mlag_domain_id'],
        session_status='OK',
        local_ip=evpn_dict['leaf2']['iccpd_ip_list'][0],
        peer_ip=evpn_dict['leaf2']['iccpd_ip_list'][1],
        mclag_intfs=1,
        peer_link_inf=evpn_dict["leaf2"]["iccpd_pch_intf_list"][0],
        node_role='Standby')
    st.log("verify MC LAG interface status in LVTEP nodes")
    mclag.verify_interfaces(
        evpn_dict["leaf_node_list"][0],
        domain_id=evpn_dict["l3_vni_sag"]['mlag_domain_id'],
        mclag_intf=evpn_dict["leaf1"]["mlag_pch_intf_list"][0],
        mclag_intf_local_state="Up",
        mclag_intf_peer_state="Up",
        mclag_intf_l3_status='No',
        isolate_peer_link='Yes',
        traffic_disable='No')
    mclag.verify_interfaces(
        evpn_dict["leaf_node_list"][1],
        domain_id=evpn_dict["l3_vni_sag"]['mlag_domain_id'],
        mclag_intf=evpn_dict["leaf2"]["mlag_pch_intf_list"][0],
        mclag_intf_local_state="Up",
        mclag_intf_peer_state="Up",
        mclag_intf_l3_status='No',
        isolate_peer_link='Yes',
        traffic_disable='No')
    st.log("verify BGP EVPN neighborship for all nodes ")
    # exec_all returns [results, ...]; result[0] is the per-node True/False list.
    result = st.exec_all([[spine1_verify_evpn], [spine2_verify_evpn],
                          [leaf1_verify_evpn], [leaf2_verify_evpn],
                          [leaf3_verify_evpn]])
    if result[0].count(False) > 0:
        st.error(
            "########## BGP EVPN neighborship is NOT UP on all spine and leaf nodes; Abort the suite ##########"
        )
        st.report_fail("base_config_verification_failed")
    st.log("verify vxlan tunnel status on leaf nodes")
    result = st.exec_all([[leaf1_verify_vxlan], [leaf2_verify_vxlan],
                          [leaf3_verify_vxlan]])
    if result[0].count(False) > 0:
        st.error(
            "########## VxLAN tunnel status is NOT up on all leaf nodes; Abort the suite ##########"
        )
        st.report_fail("base_config_verification_failed")
    # Resolve which LVTEP node is currently Active/Standby (stored in data).
    get_mlag_active_stdby('Active')
    # Persist config on the MC-LAG pair and the first spine; second call saves
    # the FRR ('vtysh') configuration.
    st.exec_all([[reboot_api.config_save, data['active']],
                 [reboot_api.config_save, data['stdby']],
                 [reboot_api.config_save, evpn_dict['spine_node_list'][0]]])
    st.exec_all(
        [[reboot_api.config_save, data['active'], 'vtysh'],
         [reboot_api.config_save, data['stdby'], 'vtysh'],
         [reboot_api.config_save, evpn_dict['spine_node_list'][0], 'vtysh']])
    data.config_tgen_bgp = False
    yield
def test_convergence_ecmp(ecmp_fixture):
    """Measure ECMP traffic convergence under repeated port flaps.

    First confirms that the 'scale' streams are hashed across the three ECMP
    paths from Leaf3 toward Spine2 (hard-fails otherwise), then runs
    ``convergence_ecmp`` for ``data.iteration_count`` iterations twice — once
    flapping ports on Spine2, once on Leaf3 — collecting per-iteration rows
    that are rendered with ``tabulate`` into one grid table per DUT.

    Fix in this revision: the iteration loop variable ``iter`` shadowed the
    Python builtin; renamed to ``itr`` (no behavior change).
    """
    func_result = True
    table = {}   # dut -> rendered convergence table
    # ECMP egress interfaces on leaf3 toward spine2 (one port-channel member
    # plus two physical uplinks, per the header labels below).
    ecmp_intf_list = [
        evpn_dict["leaf3"]["pch_intf_list"][1],
        evpn_dict["leaf3"]["intf_list_spine"][4],
        evpn_dict["leaf3"]["intf_list_spine"][7]
    ]
    # Ports to flap: spine2-side links to leaf3, and leaf3-side links to spine2.
    port_flap_list = evpn_dict["spine2"]["intf_list_leaf"][8:]
    port_flap_list_2 = evpn_dict["leaf3"]["intf_list_spine"][4:]
    # One shut/noshut column per flapped link; the last link is excluded
    # (both slices drop one element).
    header1 = [
        'link_{}_shut'.format(i + 1) for i in range(len(port_flap_list[:-1]))
    ]
    header2 = [
        'link_{}_noshut'.format(i + 1)
        for i in range(len(port_flap_list[::-1][1:]))
    ]
    # The second link is a port-channel member; label it explicitly.
    header1[1] = 'link_2_shut(Po member port)'
    header2[1] = 'link_2_noshut(Po member port)'
    data['table_header'] = ['ECMP'] + header1 + header2
    ################################################
    st.banner(
        "Verify Traffic is getting hashed along all ECMP paths between Leaf3-SPine2"
    )
    ################################################
    start_traffic(stream_han_list=stream_dict['scale'])
    st.wait(10, 'Wait for 10 sec before checking ECMP hashing')
    if not retry_api(verify_ecmp_hashing, evpn_dict['leaf_node_list'][2],
                     ecmp_intf_list):
        err = 'Traffic hashing did not happen across ecmp paths from Leaf3 to SPine2'
        st.report_fail('test_case_failure_message', err)
    start_traffic(action='stop', stream_han_list=stream_dict['scale'])
    # Run the convergence measurement with flaps on spine2, then on leaf3.
    for dut, port_list in zip(
        [evpn_dict['spine_node_list'][1], evpn_dict['leaf_node_list'][2]],
        [port_flap_list, port_flap_list_2]):
        st.banner(">>> ECMP test with port flap on DUT {} <<<<".format(dut))
        data['table_data'] = []
        for itr in range(int(data.iteration_count)):
            st.banner("\n\n >>>>> Iteration : {} <<<<<\n\n".format(itr + 1))
            result = convergence_ecmp(dut,
                                      port_list,
                                      streams=stream_dict['scale'],
                                      iteration=(itr + 1))
            # convergence_ecmp stores its row under a per-iteration key.
            data['table_data'].append(data['table_data_{}'.format(itr + 1)])
            if not result:
                func_result = False
        table[dut] = tabulate(data['table_data'],
                              headers=data['table_header'],
                              tablefmt="grid")
    for dut in [
            evpn_dict['spine_node_list'][1], evpn_dict['leaf_node_list'][2]
    ]:
        st.log(
            "\n\n>>>> ECMP Convergence Table with port flap done on {} <<<<<\n\n"
            .format(dut))
        st.log("\n\n" + table[dut])
    if not func_result:
        st.report_fail('test_case_failure_message',
                       'ECMP convergence test failed')
    st.report_pass('test_case_passed')
def test_vrrp2vrf_func_003(prologue_epilogue):
    """VRRP-over-VRF functional test (TCs FtOpSoRoVrrpvrfFn009-012).

    Exercises advertisement-interval changes on Backup then Master, reversion
    to defaults, and preempt disable/enable with a higher-priority Backup.
    Non-fatal failures are accumulated in ``err_list``/``tc_result``; only the
    initial election check (Step T1) aborts immediately.
    """
    tc_list = [
        "FtOpSoRoVrrpvrfFn009", "FtOpSoRoVrrpvrfFn010",
        "FtOpSoRoVrrpvrfFn011", "FtOpSoRoVrrpvrfFn012"
    ]
    tc_result = True
    err_list = []
    ###########################################################
    hdrMsg(
        "Step T1 : Verify VRRP Master/Backup election for {} configured sessions"
        .format(vrrp_sessions))
    ############################################################
    result = verify_vrrp(summary='no')
    if result is False:
        # Hard failure: without correct elections the rest is meaningless.
        err = "VRRP Master/Backup election is incorrect for one or more VRRP sessions"
        debug_vrrp()
        st.report_fail('test_case_failure_message', err)
    ###########################################################
    hdrMsg(
        "Step T2 : Change the advertisement interval on dut2 (Backup) - {} for VRRP session {})"
        .format(data.dut2, vrid_list[0]))
    ############################################################
    vrrp.configure_vrrp(data.dut2,
                        vrid=vrid_list[0],
                        interface=dut1_vlan_intf[0],
                        adv_interval=2)
    ###########################################################
    hdrMsg(
        "Step T3 : Verify the advertisement interval is set to 2 seconds on dut2 (Backup) - {} for VRRP session {})"
        .format(data.dut2, vrid_list[0]))
    ############################################################
    # NOTE(review): the return value of this verification is discarded, so a
    # mismatch here is never flagged — confirm whether it should feed tc_result.
    vrrp.verify_vrrp(data.dut2,
                     state='Backup',
                     vrid=vrid_list[0],
                     interface=dut1_vlan_intf[0],
                     adv_interval=2)
    run_traffic()
    st.wait(2)
    ###########################################################
    hdrMsg(
        "Step T4:Verify for VRRP sessions {} ,Master DUT {} forwards data traffic"
        .format(vrid_list[0:int(vrrp_sessions / 2)], data.dut1))
    ###########################################################
    result = verify_tg_traffic_rate(src_tg_obj=data.tg1,
                                    dest_tg_obj=data.tg2,
                                    src_port=data.tgd3_ports,
                                    dest_port=data.tgd4_ports)
    if result is False:
        err = "Testcase {} Master DUT {} not forwarding data traffic for VRIDs {}".format(
            tc_list[0], data.dut1, vrid_list[0:int(vrrp_sessions / 2)])
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg(
        "Step T5 : Change the advertisement interval on dut1 (Master) - {} for VRRP session {} to 2 as well)"
        .format(data.dut1, vrid_list[0]))
    ############################################################
    vrrp.configure_vrrp(data.dut1,
                        vrid=vrid_list[0],
                        interface=dut1_vlan_intf[0],
                        adv_interval=2)
    ###########################################################
    hdrMsg(
        "Step T6 : Verify VRRP Master/Backup election for all {} configured sessions"
        .format(vrrp_sessions))
    ############################################################
    result = verify_vrrp(summary='no')
    if result is False:
        err = "VRRP Master/Backup election is incorrect for one or more VRRP sessions"
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg(
        "Step T7 : Revert back the advertisement interval on both the nodes to 1 seconds"
    )
    ############################################################
    dict1 = {
        'vrid': vrid_list[0],
        'interface': dut1_vlan_intf[0],
        'adv_interval': 1
    }
    # Same kwargs applied to both DUTs in parallel.
    parallel.exec_parallel(True, [data.dut1, data.dut2], vrrp.configure_vrrp,
                           [dict1, dict1])
    ###########################################################
    hdrMsg(
        "Step T8 : Verify VRRP Master/Backup election for all {} configured sessions"
        .format(vrrp_sessions))
    ############################################################
    result = verify_vrrp(summary='no')
    if result is False:
        err = "VRRP Master/Backup election is incorrect for one or more VRRP sessions"
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg(
        "Step T9 : Set the priority on the backup to higher with preemption mode set to false on {} session"
        .format(vrid_list[0]))
    ############################################################
    # 'config="no"' with preempt="disable" removes the preempt setting
    # (i.e. turns preemption off) on the backup before raising its priority.
    vrrp.configure_vrrp(data.dut2,
                        vrid=vrid_list[0],
                        interface=dut1_vlan_intf[0],
                        preempt="disable",
                        config="no")
    vrrp.configure_vrrp(data.dut2,
                        vrid=vrid_list[0],
                        interface=dut1_vlan_intf[0],
                        priority=230)
    ###########################################################
    hdrMsg(
        "Step T10 : Verify VRRP Backup remains in backup state even though it has high priority for the {} session"
        .format(vrid_list[0]))
    ############################################################
    result = vrrp.verify_vrrp(data.dut2,
                              state='Backup',
                              vrid=vrid_list[0],
                              interface=dut1_vlan_intf[0],
                              current_prio=230,
                              preempt="disabled")
    if result is False:
        err = "Testcase {} , after disabling preempt on {} the VRRP Master/Backup election is incorrect for the vrrp session {}".format(
            tc_list[1], data.dut2, vrid_list[0])
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    result = verify_tg_traffic_rate(src_tg_obj=data.tg1,
                                    dest_tg_obj=data.tg2,
                                    src_port=data.tgd3_ports,
                                    dest_port=data.tgd4_ports)
    if result is False:
        err = "Testcase {} Master DUT {} not forwarding data traffic for VRIDs {}".format(
            tc_list[0], data.dut1, vrid_list[0:int(vrrp_sessions / 2)])
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg(
        "Step T11 : Enable the preempt and verify the backup takes over the master on {} session"
        .format(vrid_list[0]))
    ############################################################
    vrrp.configure_vrrp(data.dut2,
                        vrid=vrid_list[0],
                        interface=dut1_vlan_intf[0],
                        preempt="enable")
    st.wait(3)
    ###########################################################
    hdrMsg(
        "Step T12:Verify for VRRP sessions {} ,Master DUT {} forwards data traffic"
        .format(vrid_list[int(vrrp_sessions / 2):], data.dut2))
    ###########################################################
    result = verify_tg_traffic_rate(src_tg_obj=data.tg1,
                                    dest_tg_obj=data.tg2,
                                    src_port=data.tgd3_ports,
                                    dest_port=data.tgd4_ports)
    if result is False:
        # NOTE(review): this error cites vrid_list[0:half] while the banner
        # above cites vrid_list[half:] — confirm which VRID range is intended.
        err = "Testcase {} Master DUT {} not forwarding data traffic for VRIDs {}".format(
            tc_list[0], data.dut2, vrid_list[0:int(vrrp_sessions / 2)])
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    run_traffic(action='stop')
    revert_vrrp()
    if tc_result is False:
        st.report_fail('test_case_failure_message', err_list[0])
    else:
        st.report_pass('test_case_passed')
def test_vrrp2vrf_func_002(prologue_epilogue):
    """VRRP-over-VRF functional test (TCs FtOpSoRoVrrpvrfFn006-008).

    Covers: rejection of a duplicate virtual IP across sessions, using a
    secondary interface IP as the virtual IP (including traffic and packet
    capture of advertisement source = primary IP), restoring the original
    VIP, and CLI range validation for advertisement interval and priority.

    Fix in this revision: corrected typos in a log message
    ("vritual-ip" -> "virtual-ip", "secondar" -> "secondary").
    """
    tc_list = [
        "FtOpSoRoVrrpvrfFn006", "FtOpSoRoVrrpvrfFn007", "FtOpSoRoVrrpvrfFn008"
    ]
    tc_result = True
    err_list = []
    ###########################################################
    hdrMsg(
        "Step T1 : Configure Virtual IP {} for vrid {} same as that of vrid {} and verify Cli gets "
        "rejected".format(vip_list[0], '100', vrid_list[0]))
    ############################################################
    # skip_error=True returns the CLI output instead of raising; we expect an
    # error string because the VIP is already owned by another session.
    result = vrrp.configure_vrrp(data.dut1,
                                 vrid='100',
                                 interface=dut1_vlan_intf[1],
                                 vip=vip_list[0],
                                 enable='',
                                 skip_error=True)
    expected_err = "Error"
    if expected_err not in str(result):
        err = "Testcase {} Same VIP {} accepted for two different VRRP sessions ".format(
            tc_list[1], vip_list[0])
        failMsg(err)
        debug_vrrp()
        tc_result = False
        err_list.append(err)
    st.log("Start Traffic for VRRP instance {}".format(vrid_list[0]))
    run_traffic(
        stream_handle=data.stream_handles['vrrp_{}'.format(vrid_list[0])])
    ###########################################################
    hdrMsg("Step T2 : Configure secondary ip {} to VRRP {} on dut1".format(
        vrrp_sec_ip_list[0], vrrp_vlan_intf[0]))
    ############################################################
    ip_api.config_ip_addr_interface(data.dut1,
                                    vrrp_vlan_intf[0],
                                    vrrp_sec_ip_list[0],
                                    30,
                                    is_secondary_ip='yes')
    ###########################################################
    hdrMsg("Step T3 : Configure secondary ip {} as Virtual ip for vrrp "
           "session {} on vlan {}".format(vrrp_sec_ip_list[0], vrid_list[0],
                                          vrrp_vlan_intf[0]))
    ############################################################
    st.log(
        "Remove old virtual-ip {} first before configuring secondary ip as virtual-ip"
        .format(vip_list[0]))
    dict1 = {
        'vrid': vrid_list[0],
        'vip': vip_list[0],
        'interface': vrrp_vlan_intf[0],
        'config': 'no'
    }
    parallel.exec_parallel(True, [data.dut1, data.dut2], vrrp.configure_vrrp,
                           [dict1, dict1])
    st.log(
        "Configure secondary ip address {} as virtual-ip on both DUTs".format(
            vrrp_sec_ip_list[0]))
    vrrp.configure_vrrp(data.dut1,
                        vrid=vrid_list[0],
                        interface=vrrp_vlan_intf[0],
                        priority='100')
    dict1 = {
        'vrid': vrid_list[0],
        'vip': vrrp_sec_ip_list[0],
        'interface': vrrp_vlan_intf[0]
    }
    parallel.exec_parallel(True, [data.dut1, data.dut2], vrrp.configure_vrrp,
                           [dict1, dict1])
    ###########################################################
    hdrMsg(
        "Step T4 : Verify secondary ip elected as VIP and vrrp roles on dut1 and dut2"
    )
    ############################################################
    result = verify_master_backup(vrid=vrid_list[0],
                                  interface=vrrp_vlan_intf[0],
                                  vmac=vmac_list_1[0],
                                  vip=vrrp_sec_ip_list[0],
                                  master_dut=data.dut1,
                                  backup_dut=data.dut2)
    if result is False:
        err = "Testcase {} VRRP elections incorrect with secondary ip {} configured as Virtualip ".format(
            tc_list[0], vrrp_sec_ip_list[0])
        failMsg(err)
        debug_vrrp()
        tc_result = False
        err_list.append(err)
    result = verify_tg_traffic_rate(data.tg1, data.tg2, data.tgd3_ports,
                                    data.tgd4_ports)
    if result is False:
        err = "Testcase {} Traffic check failed with secondary ip {} configured as Virtualip ".format(
            tc_list[0], vrrp_sec_ip_list[0])
        failMsg(err)
        debug_vrrp()
        tc_result = False
        err_list.append(err)
    ###########################################################
    hdrMsg(
        "Step T5 : Verify primary ip {} used as source ip for vrrp advertisements sent out from dut1 master"
        .format(vrrp_ip_list[0][0]))
    ############################################################
    # Capture for 3 seconds and check the VRRP advertisement: destination MAC
    # 01:00:5E:00:00:12 (offset 0) and source IP = the primary address
    # (offset 26 in the frame).
    data.tg1.tg_packet_control(port_handle=data.tg_handles[0], action='start')
    st.wait(3)
    data.tg1.tg_packet_control(port_handle=data.tg_handles[0], action='stop')
    pkts_captured = data.tg1.tg_packet_stats(port_handle=data.tg_handles[0],
                                             format='var',
                                             output_type='hex')
    capture_result = tgapi.validate_packet_capture(
        tg_type=data.tg1.tg_type,
        pkt_dict=pkts_captured,
        offset_list=[0, 26],
        value_list=['01:00:5E:00:00:12', vrrp_ip_list[0][0]])
    if not capture_result:
        err = "Testcase {} VRRP advertisement not using primary IP {} as source".format(
            tc_list[0], vrrp_ip_list[0][0])
        failMsg(err)
        debug_vrrp()
        tc_result = False
        err_list.append(err)
    ###########################################################
    hdrMsg(
        "Step T7 : Delete secondary ip and verify VRRP elections happens on dut1 and dut2 "
    )
    ############################################################
    st.log("\n#### Remove secondary ip as Virtual-IP config first ###\n")
    dict1 = {
        'vrid': vrid_list[0],
        'vip': vrrp_sec_ip_list[0],
        'interface': vrrp_vlan_intf[0],
        'config': 'no'
    }
    parallel.exec_parallel(True, [data.dut1, data.dut2], vrrp.configure_vrrp,
                           [dict1, dict1])
    #st.log("\n##### Delete secondary IP {} from {} #####\n".format(vrrp_sec_ip_list[0],vrrp_vlan_intf[0]))
    #ip_api.delete_ip_interface(data.dut1,vrrp_vlan_intf[0],vrrp_sec_ip_list[0],30,is_secondary_ip='yes')
    st.log("\n#### Configure Virtual IP {} for {} ###\n".format(
        vip_list[0], vrrp_vlan_intf[0]))
    dict1 = {
        'vrid': vrid_list[0],
        'vip': vip_list[0],
        'interface': vrrp_vlan_intf[0]
    }
    parallel.exec_parallel(True, [data.dut1, data.dut2], vrrp.configure_vrrp,
                           [dict1, dict1])
    result = verify_master_backup(vrid=vrid_list[0],
                                  interface=vrrp_vlan_intf[0],
                                  vmac=vmac_list_1[0],
                                  vip=vip_list[0],
                                  master_dut=data.dut1,
                                  backup_dut=data.dut2)
    if result is False:
        err = "Testcase {} VRRP elections incorrect after deleting secondary ip {} which is also configured as Virtualip ".format(
            tc_list[0], vrrp_sec_ip_list[0])
        failMsg(err)
        debug_vrrp()
        tc_result = False
        err_list.append(err)
    ###########################################################
    hdrMsg("Step T8 : Verify Traffic after delete/add primary IP for VRID {} ".
           format(vrid_list[0]))
    ############################################################
    result = verify_tg_traffic_rate()
    if result is False:
        err = "Testcase {} Traffic check failed with after delete/add primary ip {} ".format(
            tc_list[0], vrrp_ip_list[0][0])
        failMsg(err)
        debug_vrrp()
        tc_result = False
        err_list.append(err)
    # Restore dut1's configured priority for this session.
    vrrp.configure_vrrp(data.dut1,
                        vrid=vrid_list[0],
                        interface=vrrp_vlan_intf[0],
                        priority=vrrp_priority_list_dut1[0])
    st.log("Stop Traffic for VRRP instance {}".format(vrid_list[0]))
    run_traffic(stream_handle=data.stream_handles['vrrp_{}'.format(
        vrid_list[0])],
                action='stop')
    ###########################################################
    hdrMsg(
        "Step T9 : Verify out of range values gets rejected for advertisement interval "
    )
    ############################################################
    # Valid VRRP advert interval is 1-255; 0 and 256 must be rejected.
    for interval in [0, 256]:
        result = vrrp.configure_vrrp(data.dut1,
                                     vrid=vrid_list[0],
                                     interface=vrrp_vlan_intf[0],
                                     adv_interval=interval,
                                     skip_error=True)
        expected_err = "Error"
        if expected_err not in str(result):
            err = "Testcase {} {} sec should not be accepted for advertisement interval".format(
                tc_list[2], interval)
            failMsg(err)
            debug_vrrp()
            tc_result = False
            err_list.append(err)
    ###########################################################
    hdrMsg(
        "Step T10 : Verify out of range values gets rejected for vrrp priority "
    )
    ############################################################
    # Configurable priority is 1-254 (255 is reserved for the owner); 0 and
    # 255 must be rejected.
    for prio in [0, 255]:
        result = vrrp.configure_vrrp(data.dut1,
                                     vrid=vrid_list[0],
                                     interface=vrrp_vlan_intf[0],
                                     priority=prio,
                                     skip_error=True)
        expected_err = "Error"
        if expected_err not in str(result):
            err = "Testcase {}: {} should not be accepted for vrrp priority".format(
                tc_list[2], prio)
            failMsg(err)
            debug_vrrp()
            tc_result = False
            err_list.append(err)
    if tc_result is False:
        st.report_fail('test_case_failure_message', err_list[0])
    else:
        st.report_pass('test_case_passed')
def test_ft_control_plane_acl_icmp():
    """Verify control-plane ACLs drop/permit ICMP to the DUT management plane.

    Flow: apply a deny-ICMP control-plane ACL (v4 and v6 rules), confirm pings
    to the DUT fail; rewrite the rules to match the real source addresses,
    re-apply, confirm pings succeed; then save config, fast-reboot, and confirm
    the permit behavior survives the reboot.
    Reports per-TC pass/fail via st.report_tc_* and an overall verdict at the end.
    """
    result = True
    # Fetch both DUTs' management IPs in parallel.
    [output, exceptions] = parallel.exec_all(
        True, [[st.get_mgmt_ip, vars.D1], [st.get_mgmt_ip, vars.D2]])
    parallel.ensure_no_exception(exceptions)
    d1_ipaddress, d2_ipaddress = output
    d1_ipv6address = ip_obj.get_link_local_addresses(
        vars.D1, "eth0", cli_type='click'
    )  ##Passing parameter cli_type as click until the SONIC-32291 fixed
    if not d1_ipv6address:
        st.report_fail('dut_not_getting_ip_address')
    # Use the first link-local address of eth0 for the IPv6 checks.
    d1_ipv6address = d1_ipv6address[0]
    st.log("Creating acl rules to drop icmp packets")
    acl_config = acl_data.acl_json_config_control_plane_v2
    st.log("ACL_DATA: {}".format(acl_config))
    acl_obj.apply_acl_config(vars.D1, acl_config)
    st.wait(3, "Waiting to apply acl rules")
    acl_obj.show_acl_table(vars.D1)
    acl_obj.show_acl_rule(vars.D1)
    # With the drop rules in place, pings toward D1 must FAIL.
    if ip_obj.ping(dut=vars.D2, addresses=d1_ipaddress):
        st.error(
            "ICMP ipv4 packets are not dropped with applied control plane acl rules."
        )
        result = False
        st.report_tc_fail("ft_controlplane_acl_ipv4_icmp", "test_case_failed")
    if ip_obj.ping(dut=vars.D1,
                   addresses=d1_ipv6address,
                   interface="eth0",
                   family="ipv6"):
        st.error(
            "ICMP ipv6 packets are not dropped with applied control plane acl rules."
        )
        result = False
        st.report_tc_fail("ft_controlplane_acl_ipv6_icmp", "test_case_failed")
    # Rewrite rule1 of each table to match the actual source host so the
    # re-applied ACL now permits the pings.
    change_acl_rules(acl_config, "L3_IPV4_ICMP|rule1", "SRC_IP",
                     "{}/32".format(d2_ipaddress))
    change_acl_rules(acl_config, "L3_IPV6_ICMP|rule1", "SRC_IPV6",
                     "{}/128".format(d1_ipv6address))
    acl_obj.acl_delete(vars.D1)
    acl_obj.apply_acl_config(vars.D1, acl_config)
    st.wait(3, "Waiting to apply acl rules")
    # With matching rules, pings must now SUCCEED.
    if not ip_obj.ping(dut=vars.D2, addresses=d1_ipaddress):
        st.error(
            "ICMP ipv4 packets are dropped with applied control plane acl rules."
        )
        result = False
        st.report_tc_fail("ft_controlplane_acl_seq_priority",
                          "test_case_failed")
        st.report_tc_fail("ft_controlplane_acl_ipv4_icmp_permit",
                          "test_case_failed")
    if not ip_obj.ping(
            dut=vars.D1, addresses=d1_ipv6address, interface="eth0",
            family="ipv6"):
        st.error(
            "ICMP ipv6 packets are dropped with applied control plane acl rules."
        )
        result = False
        st.report_tc_fail("ft_controlplane_acl_ipv6_icmp_permit",
                          "test_case_failed")
    # Persist config and verify behavior after a fast reboot.
    config_save(vars.D1)
    st.log('rebooting the device.')
    st.reboot(vars.D1, 'fast')
    if not ip_obj.ping(dut=vars.D2, addresses=d1_ipaddress):
        st.error(
            "control plane ipv4 acl functionality is failed after reboot.")
        st.report_tc_fail("ft_controlplane_acl_reboot", "test_case_failed")
        result = False
    if not ip_obj.ping(
            dut=vars.D1, addresses=d1_ipv6address, interface="eth0",
            family="ipv6"):
        st.error(
            "control plane ipv6 acl functionality is failed after reboot.")
        st.report_tc_fail("ft_controlplane_acl_reboot", "test_case_failed")
        result = False
    if not result:
        st.report_fail("test_case_failed")
    else:
        st.report_pass("test_case_passed")
def report_fail(msgid, *argv): st.report_fail(msgid, pddf_data.feature, *argv)
def test_ft_ip6_static_route_traffic_forward_blackhole():
    # Objective - Verify the Ipv6 traffic forwarding over static route.
    # Phase 1: traffic forwarded over the normal static route must arrive;
    # Phase 2: the same route re-pointed at 'blackhole' must drop the traffic.
    tg_handler = tgapi.get_handles_byname("T1D1P2", "T1D2P2")
    tg = tg_handler["tg"]
    tg.tg_traffic_control(action="reset", port_handle=tg_handler["tg_ph_list"])
    tg.tg_traffic_control(action="clear_stats",
                          port_handle=tg_handler["tg_ph_list"])
    # DUT interface MAC is used as the stream's destination MAC (routed traffic).
    dut_rt_int_mac1 = basic_obj.get_ifconfig_ether(vars.D1, vars.D1T1P2)
    h1 = tg.tg_interface_config(port_handle=tg_handler["tg_ph_1"], mode='config', ipv6_intf_addr=data.ip6_addr[0], \
                                ipv6_prefix_length='64', ipv6_gateway=data.ip6_addr[1], src_mac_addr=data.tg_mac1, arp_send_req='1')
    st.log("INTFCONF: " + str(h1))
    h2 = tg.tg_interface_config(port_handle=tg_handler["tg_ph_2"], mode='config', ipv6_intf_addr=data.ip6_addr[9], \
                                ipv6_prefix_length='64', ipv6_gateway=data.ip6_addr[8], src_mac_addr=data.tg_mac2, arp_send_req='1')
    st.log("INTFCONF: " + str(h2))
    # Ping from tgen to DUT.
    res = tgapi.verify_ping(src_obj=tg, port_handle=tg_handler["tg_ph_1"], dev_handle=h1['handle'], dst_ip=data.ip6_addr[1], \
                            ping_count='1', exp_count='1')
    if res:
        st.log("Ping succeeded.")
    else:
        st.warn("Ping failed.")
    tr1 = tg.tg_traffic_config(port_handle=tg_handler["tg_ph_1"], mode='create', transmit_mode='single_burst', pkts_per_burst=2000, \
                               length_mode='fixed', rate_pps=2000, l3_protocol='ipv6', mac_src=data.tg_mac1, \
                               mac_dst=dut_rt_int_mac1, ipv6_src_addr=data.ip6_addr[0], ipv6_dst_addr=data.ip6_addr[9])
    st.log("TRAFCONF: " + str(tr1))
    res = tg.tg_traffic_control(action='run', stream_handle=tr1['stream_id'])
    st.log("TR_CTRL: " + str(res))
    tg.tg_traffic_control(action='stop', stream_handle=tr1['stream_id'])
    st.log("Checking the stats and verifying the traffic flow")
    traffic_details = {
        '1': {
            'tx_ports': [vars.T1D1P2],
            'tx_obj': [tg_handler["tg"]],
            'exp_ratio': [1],
            'rx_ports': [vars.T1D2P2],
            'rx_obj': [tg_handler["tg"]],
        }
    }
    # verify statistics: all transmitted packets must be received (route works).
    aggrResult = tgapi.validate_tgen_traffic(traffic_details=traffic_details,
                                             mode='aggregate',
                                             comp_type='packet_count')
    if not aggrResult:
        st.report_fail("traffic_verification_failed")
    # Replace the next-hop route with a blackhole route for the same prefix.
    ipfeature.delete_static_route(vars.D1,
                                  data.ip6_addr[7],
                                  data.static_ip6_rt,
                                  shell=data.shell_vtysh,
                                  family=data.af_ipv6)
    st.log("Create a static route with nexthop as blackhole")
    ipfeature.create_static_route(vars.D1,
                                  data.static_ip6_rt_drop,
                                  data.static_ip6_rt,
                                  shell=data.shell_vtysh,
                                  family=data.af_ipv6)
    tg.tg_traffic_control(action="clear_stats",
                          port_handle=tg_handler["tg_ph_list"])
    res = tg.tg_traffic_control(action='run', stream_handle=tr1['stream_id'])
    st.log("TR_CTRL: " + str(res))
    tg.tg_traffic_control(action='stop', stream_handle=tr1['stream_id'])
    st.log("Checking the stats and verifying the traffic flow")
    traffic_details = {
        '1': {
            'tx_ports': [vars.T1D1P2],
            'tx_obj': [tg_handler["tg"]],
            'exp_ratio': [1],
            'rx_ports': [vars.T1D2P2],
            'rx_obj': [tg_handler["tg"]],
        }
    }
    # verify statistics: this time traffic must NOT get through (blackholed),
    # so a successful validation is a test failure.
    aggrResult = tgapi.validate_tgen_traffic(traffic_details=traffic_details,
                                             mode='aggregate',
                                             comp_type='packet_count')
    if aggrResult:
        st.report_fail("traffic_verification_failed")
    st.report_pass("test_case_passed")
def test_ft_ip_v4_v6_L2_L3_translation():
    # Objective - Verify that L2 port to IPv4 L3 port transition and vice-versa is successful.
    # Sequence: confirm L3 reachability -> strip IP config and turn the links
    # into tagged VLAN (L2) members -> verify L2 forwarding via TGEN -> restore
    # the L3 config and confirm reachability again.
    st.log("Checking IPv4 ping from {} to {} over routing interface".format(
        vars.D1, vars.D2))
    if not ipfeature.ping(
            vars.D1, data.ip4_addr[7], family=data.af_ipv4, count=1):
        st.report_fail("ping_fail", data.ip4_addr[6], data.ip4_addr[7])
    st.log(
        "Checking IPv6 ping from {} to {} over vlan routing interface".format(
            vars.D1, vars.D2))
    if not ipfeature.ping(
            vars.D2, data.ip6_addr[6], family=data.af_ipv6, count=1):
        st.report_fail("ping_fail", data.ip6_addr[7], data.ip6_addr[6])
    st.log("L3 to L2 port transition")
    st.log("Removing ipv4,ipv6 address from interface")
    ipfeature.delete_ip_interface(vars.D1, vars.D1D2P4, data.ip4_addr[6], 24,
                                  family=data.af_ipv4)
    ipfeature.delete_ip_interface(vars.D2, vars.D2D1P4, data.ip4_addr[7], 24,
                                  family=data.af_ipv4)
    ipfeature.delete_ip_interface(vars.D1, vars.D1D2P4, data.ip6_addr[6], 96,
                                  family=data.af_ipv6)
    ipfeature.delete_ip_interface(vars.D2, vars.D2D1P4, data.ip6_addr[7], 96,
                                  family=data.af_ipv6)
    ipfeature.delete_ip_interface(vars.D1, vars.D1T1P1, data.ip4_addr[1], 24,
                                  family=data.af_ipv4)
    ipfeature.delete_ip_interface(vars.D2, vars.D2T1P1, data.ip4_addr[8], 24,
                                  family=data.af_ipv4)
    st.log("Removing the static routes")
    ipfeature.delete_static_route(vars.D1,
                                  data.ip4_addr[7],
                                  data.static_ip_rt,
                                  shell=data.shell_vtysh,
                                  family=data.af_ipv4)
    ipfeature.delete_static_route(vars.D1,
                                  data.static_ip6_rt_drop,
                                  data.static_ip6_rt,
                                  shell=data.shell_vtysh,
                                  family=data.af_ipv6)
    st.log("Vlan creation and port association configuration")
    vlan_obj.create_vlan(vars.D1, data.vlan_2)
    st.log("Adding back to back connecting ports to vlan {}".format(
        data.vlan_2))
    vlan_obj.add_vlan_member(vars.D1, data.vlan_2, [vars.D1D2P4],
                             tagging_mode=True)
    vlan_obj.create_vlan(vars.D2, data.vlan_2)
    vlan_obj.add_vlan_member(vars.D2, data.vlan_2, [vars.D2D1P4],
                             tagging_mode=True)
    # NOTE(review): the log below mentions data.vlan_1 but the members are
    # actually added to data.vlan_2 — looks like a stale log message; confirm.
    st.log("Adding TG connecting ports to vlan {}".format(data.vlan_1))
    vlan_obj.add_vlan_member(vars.D1, data.vlan_2, vars.D1T1P1,
                             tagging_mode=True)
    vlan_obj.add_vlan_member(vars.D2, data.vlan_2, vars.D2T1P1,
                             tagging_mode=True)
    tg_handler = tgapi.get_handles_byname("T1D1P1", "T1D2P1")
    tg.tg_traffic_control(action="reset", port_handle=tg_handler["tg_ph_list"])
    tg.tg_traffic_control(action="clear_stats",
                          port_handle=tg_handler["tg_ph_list"])
    tr2 = tg.tg_traffic_config(port_handle=tg_handler["tg_ph_2"],
                               mode='create',
                               rate_pps="2000",
                               mac_src_mode="fixed",
                               transmit_mode="single_burst",
                               pkts_per_burst=2000,
                               length_mode='fixed',
                               l2_encap='ethernet_ii_vlan',
                               vlan_id=data.vlan_2,
                               mac_dst_mode="fixed",
                               vlan="enable",
                               mac_src="00:a1:bb:cc:dd:01",
                               mac_dst="00:b1:bb:cc:dd:01")
    st.log("TRAFCONF: " + str(tr2))
    res = tg.tg_traffic_control(action='run', stream_handle=tr2['stream_id'])
    tg.tg_traffic_control(action='stop', stream_handle=tr2['stream_id'])
    st.wait(data.wait_tgstats)
    st.log("TR_CTRL: " + str(res))
    st.log("Fetching TGen statistics")
    stats_tg1 = tgapi.get_traffic_stats(tg_handler["tg"],
                                        mode="aggregate",
                                        port_handle=tg_handler["tg_ph_2"])
    total_tx_tg1 = stats_tg1.tx.total_packets
    stats_tg2 = tgapi.get_traffic_stats(tg_handler["tg"],
                                        mode="aggregate",
                                        port_handle=tg_handler["tg_ph_1"])
    total_rx_tg2 = stats_tg2.rx.total_packets
    st.log("total_tx_tg1 = {}".format(total_tx_tg1))
    # Accept up to 5% loss across the L2 path.
    total_tx_tg1_95_percentage = int(total_tx_tg1) * 0.95
    st.log("total_tx_tg1_95_percentage= {}".format(total_tx_tg1_95_percentage))
    st.log("total_rx_tg2 = {}".format(total_rx_tg2))
    if int(total_tx_tg1_95_percentage) > int(total_rx_tg2):
        st.report_fail("traffic_verification_failed")
    st.log("Removing vlan configuration")
    vlan_obj.delete_vlan_member(vars.D1, data.vlan_2,
                                [vars.D1D2P4, vars.D1T1P1], True)
    vlan_obj.delete_vlan_member(vars.D2, data.vlan_2,
                                [vars.D2D1P4, vars.D2T1P1], True)
    st.log("L2 to L3 port transition")
    # Restore the IPv4 addressing and static route, then re-verify.
    ipfeature.config_ip_addr_interface(vars.D1, vars.D1D2P4, data.ip4_addr[6],
                                       24, family=data.af_ipv4)
    ipfeature.config_ip_addr_interface(vars.D2, vars.D2D1P4, data.ip4_addr[7],
                                       24, family=data.af_ipv4)
    ipfeature.create_static_route(vars.D1,
                                  data.ip4_addr[7],
                                  data.static_ip_rt,
                                  shell=data.shell_vtysh,
                                  family=data.af_ipv4)
    st.log("Checking IPv4 ping from {} to {} over routing interface".format(
        vars.D1, vars.D2))
    if not ipfeature.ping(
            vars.D1, data.ip4_addr[7], family=data.af_ipv4, count=1):
        st.report_fail("ping_fail", data.ip4_addr[6], data.ip4_addr[7])
    # Restore the IPv6 addressing and blackhole route, then re-verify.
    ipfeature.config_ip_addr_interface(vars.D1, vars.D1D2P4, data.ip6_addr[6],
                                       96, family=data.af_ipv6)
    ipfeature.config_ip_addr_interface(vars.D2, vars.D2D1P4, data.ip6_addr[7],
                                       96, family=data.af_ipv6)
    ipfeature.create_static_route(vars.D1,
                                  data.static_ip6_rt_drop,
                                  data.static_ip6_rt,
                                  shell=data.shell_vtysh,
                                  family=data.af_ipv6)
    st.log(
        "Checking IPv6 ping from {} to {} over vlan routing interface".format(
            vars.D1, vars.D2))
    if not ipfeature.ping(
            vars.D2, data.ip6_addr[6], family=data.af_ipv6, count=1):
        st.report_fail("ping_fail", data.ip6_addr[7], data.ip6_addr[6])
    st.report_pass("test_case_passed")
def test_Ceta_31902(ceta_31902_fixture):
    """Verify MAC and ARP tables stay in sync after churning a static MAC/ARP entry.

    Programs a static MAC and static ARP for the same host IP, moves the entry
    between VLAN interfaces and ports, then compares the port reported by the
    ASIC "l2 show" output against the port in "l3 egress show".
    """
    success = True
    # Create host1 MAC + ARP binding, then churn a host2 binding across VLANs.
    mac_obj.config_mac(dut=vars.D1,
                       mac=data.host1_mac,
                       vlan=data.host1_vlan,
                       intf=vars.D1T1P1)
    arp_obj.add_static_arp(dut=vars.D1,
                           ipaddress=data_tg_ip,
                           macaddress=data.host1_mac,
                           interface="Vlan" + data.host1_vlan)
    arp_obj.add_static_arp(dut=vars.D1,
                           ipaddress=data_tg_ip,
                           macaddress=data.host2_mac,
                           interface="Vlan" + data.host2_vlan)
    arp_obj.delete_static_arp(dut=vars.D1,
                              ipaddress=data_tg_ip,
                              interface="Vlan" + data.host2_vlan,
                              mac=data.host2_mac)
    arp_obj.add_static_arp(dut=vars.D1,
                           ipaddress=data_tg_ip,
                           macaddress=data.host2_mac,
                           interface="Vlan" + data.host1_vlan)
    arp_obj.delete_static_arp(dut=vars.D1,
                              ipaddress=data_tg_ip,
                              interface="Vlan" + data.host1_vlan,
                              mac=data.host2_mac)
    # Re-point host2's MAC to a different port and re-add its ARP entry.
    mac_obj.config_mac(dut=vars.D1,
                       mac=data.host2_mac,
                       vlan=data.host1_vlan,
                       intf=vars.D1T1P2)
    arp_obj.add_static_arp(dut=vars.D1,
                           ipaddress=data_tg_ip,
                           macaddress=data.host2_mac,
                           interface="Vlan" + data.host1_vlan)
    mac_obj.delete_mac(dut=vars.D1, mac=data.host1_mac, vlan=data.host1_vlan)
    mac_obj.config_mac(dut=vars.D1,
                       mac=data.host1_mac,
                       vlan=data.host1_vlan,
                       intf=vars.D1T1P1)
    # Cross-check the ASIC L2 and L3-egress views of host2's MAC.
    l2_out = asicapi.get_l2_out(vars.D1, data.host2_mac)
    l3_out = asicapi.get_l3_out(vars.D1, data.host2_mac)
    if l2_out and l3_out:
        # NOTE(review): "[9]" extracts a single character/element at index 9 of
        # the gport field — presumably the port digit; confirm against the
        # actual "l2 show" gport format before relying on this comparison.
        l2_gport = l2_out[0]["gport"][9]
        l3_port = l3_out[0]["port"]
        if l2_gport == l3_port:
            st.log("MAC {} points port {} correctly in both ARP and MAC table".
                   format(data.host2_mac, l2_gport))
        else:
            success = False
            st.error("MAC and ARP table are NOT in SYNC; "
                     "MAC {} points to gport {} in \"l2 show\""
                     "but in \"l3 egress show\" it is port {}".format(
                         data.host2_mac, l2_gport, l3_port))
    else:
        success = False
        st.error("MAC NOT present in \"l2 show\" or \"l3 egress show\" output")
    # Cleanup: remove the static ARP and both MAC entries.
    arp_obj.delete_static_arp(dut=vars.D1,
                              ipaddress=data_tg_ip,
                              interface="Vlan" + data.host1_vlan,
                              mac=data.host2_mac)
    mac_obj.delete_mac(dut=vars.D1, mac=data.host1_mac, vlan=data.host1_vlan)
    mac_obj.delete_mac(dut=vars.D1, mac=data.host2_mac, vlan=data.host1_vlan)
    if success:
        st.report_pass("test_case_id_passed", "test_Ceta_31902")
    else:
        st.report_fail("test_case_id_failed", "test_Ceta_31902")
def test_ft_ping__v4_v6_after_ip_change_pc():
    # Objective - Verify that ping is successful between L3 interfaces when Ip address is removed and new ip
    # is assigned
    # All addressing below is on the port-channel interface of both DUTs.
    st.log("In {} check portchannel is UP or not".format(vars.D2))
    if not pc_obj.verify_portchannel_state(
            vars.D2, data.port_channel, state="up"):
        st.report_fail("portchannel_state_fail", data.port_channel, vars.D2,
                       "Up")
    st.log(
        "Checking IPv4 ping from {} to {} over portchannel routing interface".
        format(vars.D1, vars.D2))
    if not ipfeature.ping_poll(
            vars.D1, data.ip4_addr[5], family=data.af_ipv4, iter=5, count=1):
        st.report_fail("ping_fail", data.ip4_addr[4], data.ip4_addr[5])
    st.log(
        "Checking IPv6 ping from {} to {} over portchannel routing interface".
        format(vars.D1, vars.D2))
    if not ipfeature.ping_poll(
            vars.D2, data.ip6_addr[4], family=data.af_ipv6, iter=5, count=1):
        st.report_fail("ping_fail", data.ip6_addr[5], data.ip6_addr[4])
    # Swap the v4 (/24) and v6 (/96) addresses for fresh ones on both ends.
    st.log("Removing the Ipv4 address on portchannel")
    ipfeature.delete_ip_interface(vars.D1, data.port_channel, data.ip4_addr[4],
                                  24, family=data.af_ipv4)
    ipfeature.delete_ip_interface(vars.D2, data.port_channel, data.ip4_addr[5],
                                  24, family=data.af_ipv4)
    st.log("Removing the Ipv6 address on portchannel")
    ipfeature.delete_ip_interface(vars.D1, data.port_channel, data.ip6_addr[4],
                                  96, family=data.af_ipv6)
    ipfeature.delete_ip_interface(vars.D2, data.port_channel, data.ip6_addr[5],
                                  96, family=data.af_ipv6)
    st.log("configuring new Ipv4 address on portchannel")
    ipfeature.config_ip_addr_interface(vars.D1, data.port_channel,
                                       data.ip4_addr[10], 24,
                                       family=data.af_ipv4)
    ipfeature.config_ip_addr_interface(vars.D2, data.port_channel,
                                       data.ip4_addr[11], 24,
                                       family=data.af_ipv4)
    st.log("configuring new Ipv6 address on portchannel")
    ipfeature.config_ip_addr_interface(vars.D1, data.port_channel,
                                       data.ip6_addr[10], 96,
                                       family=data.af_ipv6)
    ipfeature.config_ip_addr_interface(vars.D2, data.port_channel,
                                       data.ip6_addr[11], 96,
                                       family=data.af_ipv6)
    # Reachability must recover with the new addressing.
    st.log(
        "After Ipv4 address change, checking IPv4 ping from {} to {} over portchannel "
        "routing interface".format(vars.D1, vars.D2))
    if not ipfeature.ping_poll(
            vars.D1, data.ip4_addr[11], family=data.af_ipv4, iter=5, count=1):
        st.report_fail("ping_fail", data.ip4_addr[10], data.ip4_addr[11])
    st.log(
        "After Ipv6 address change, checking IPv6 ping from {} to {} over portchannel "
        "routing interface".format(vars.D1, vars.D2))
    if not ipfeature.ping_poll(
            vars.D1, data.ip6_addr[11], family=data.af_ipv6, iter=5, count=1):
        st.report_fail("ping_fail", data.ip6_addr[10], data.ip6_addr[11])
    st.report_pass("test_case_passed")
def verify_security_default_config(dut): if not security.verify_aaa(dut, 'local (default)', 'False (default)'): st.report_fail("authentication_default_configs_fail")
def test_l3_ecmp_4paths_on_bo_tc():
    """Verify L3 ECMP load-balancing across 4 next-hop paths on breakout ports.

    Builds VLANs 100-104 over four TG-facing ports, programs one static route
    with four next-hops, pushes a continuous stream whose destination IP
    increments over 200 addresses, and checks per-port counters for an even
    spread via check_intf_traffic_bo_counters(). Tears all config back down.
    """
    (dut) = (data.dut)
    #count = 0
    #intf_ip_addr = data.start_ip_addr
    #intf_ip_addr2 = data.start_ip_addr2
    #nexthop = data.nexthop_start_ip_addr
    nexthop = "10.2.101.10/32"
    member1 = vars.D1T1P1
    member2 = vars.D1T1P2
    member3 = vars.D1T1P3
    member4 = vars.D1T1P4
    # apply_file is hardcoded False: configuration is done via CLI commands
    # below rather than by loading a config file.
    apply_file = False
    ipfeature.clear_ip_configuration([dut])
    max_range = data.base_val+4
    base_range = data.base_val-1
    if apply_file is False:
        command = "config vlan range add 100 105"
        st.config(dut, command)
        command = "config vlan member add 100 {}".format(member4)
        st.config(dut, command)
        command = "config vlan member add 101 {}".format(member1)
        st.config(dut, command)
        command = "config vlan member add 102 {}".format(member2)
        st.config(dut, command)
        command = "config vlan member add 103 {}".format(member3)
        st.config(dut, command)
        command = "config vlan member add 104 {}".format(member4)
        st.config(dut, command)
        #ip_addr = data.start_ip_addr
        ip_addr = "10.2.100.1/24"
        # One /24 per VLAN interface, incrementing the network each pass.
        for index in range(base_range, max_range):
            command = "config interface ip add "+ "Vlan" + str(index) + " " + ip_addr
            st.config(dut, command)
            (_, ip_addr) = ipfeature.increment_ip_addr(ip_addr, "network")
        base_range = data.base_val
        max_range = data.base_val+3
        # One static route entry per next-hop (the /32 suffix is stripped).
        for index in range(base_range, max_range):
            (_, nexthop) = ipfeature.increment_ip_addr(nexthop, "network")
            nexthop1 = nexthop
            formatted_next_hop = nexthop1.replace("/32","")
            ipfeature.create_static_route(dut, formatted_next_hop,
                                          data.static_route)
    data.my_dut_list = st.get_dut_names()
    #dut1 = data.my_dut_list[0]
    (tg1, tg_ph_1, tg2, tg_ph_2, tg3, tg_ph_3, tg4, tg_ph_4) = get_handles_1()
    tg1.tg_traffic_control(action='reset',port_handle=tg_ph_1)
    tg2.tg_traffic_control(action='reset',port_handle=tg_ph_2)
    tg2.tg_traffic_control(action='reset',port_handle=tg_ph_3)
    tg2.tg_traffic_control(action='reset',port_handle=tg_ph_4)
    # One emulated host per VLAN gateway.
    h0=tg4.tg_interface_config(port_handle=tg_ph_4, mode='config',
                               intf_ip_addr='10.2.100.10',
                               gateway='10.2.100.1',
                               src_mac_addr='00:0d:01:00:00:01',
                               vlan='1', vlan_id='100', arp_send_req='1',
                               gateway_step='0.0.1.0',
                               intf_ip_addr_step='0.0.1.0')
    h1=tg1.tg_interface_config(port_handle=tg_ph_1, mode='config',
                               intf_ip_addr='10.2.101.10',
                               gateway='10.2.101.1',
                               src_mac_addr='00:0d:02:00:00:01',
                               vlan='1', vlan_id='101', arp_send_req='1',
                               gateway_step='0.0.1.0',
                               intf_ip_addr_step='0.0.1.0')
    h2=tg2.tg_interface_config(port_handle=tg_ph_2, mode='config',
                               intf_ip_addr='10.2.102.10',
                               gateway='10.2.102.1',
                               src_mac_addr='00:0c:01:00:00:01',
                               vlan='1', vlan_id='102', arp_send_req='1',
                               gateway_step='0.0.1.0',
                               intf_ip_addr_step='0.0.1.0')
    h3=tg3.tg_interface_config(port_handle=tg_ph_3, mode='config',
                               intf_ip_addr='10.2.103.10',
                               gateway='10.2.103.1',
                               src_mac_addr='00:0c:02:00:00:01',
                               vlan='1', vlan_id='103', arp_send_req='1',
                               gateway_step='0.0.1.0',
                               intf_ip_addr_step='0.0.1.0')
    h4=tg4.tg_interface_config(port_handle=tg_ph_4, mode='config',
                               intf_ip_addr='10.2.104.10',
                               gateway='10.2.104.1',
                               src_mac_addr='00:0a:02:00:00:01',
                               vlan='1', vlan_id='104', arp_send_req='1',
                               gateway_step='0.0.1.0',
                               intf_ip_addr_step='0.0.1.0')
    # Stream is addressed to the DUT's MAC so it gets routed, with 200
    # incrementing destination IPs to exercise the ECMP hash.
    mac_eth = macapi.get_sbin_intf_mac(vars.D1,'eth0')
    tr1=tg4.tg_traffic_config(port_handle=tg_ph_4,
                              mac_src='00:11:01:00:00:01', mac_dst=mac_eth,
                              ip_dst_mode='increment', ip_dst_count=200,
                              ip_dst_step='0.0.0.1',
                              ip_src_addr='10.2.100.10',
                              ip_dst_addr='200.1.0.1', l3_protocol='ipv4',
                              l2_encap='ethernet_ii_vlan', vlan_id='100',
                              vlan='enable', mode='create',
                              transmit_mode='continuous',
                              length_mode='fixed', rate_pps=512000,
                              enable_stream_only_gen='1')
    tg2.tg_traffic_control(action='run', handle=tr1['stream_id'])
    #count = 0
    #Port Counters
    st.wait(20)
    st.show(dut, "show arp")
    #Port Counters
    ret = check_intf_traffic_bo_counters()
    if ret is True:
        st.log("Test Case PASSED")
    tg2.tg_traffic_control(action='stop', handle=tr1['stream_id'])
    # NOTE(review): h0 was created on tg_ph_4 but is destroyed via tg1/tg_ph_1
    # here — looks like a copy-paste slip; confirm against the TG API contract.
    tg1.tg_interface_config(port_handle=tg_ph_1, handle=h0['handle'],
                            mode='destroy')
    tg1.tg_interface_config(port_handle=tg_ph_1, handle=h1['handle'],
                            mode='destroy')
    tg2.tg_interface_config(port_handle=tg_ph_2, handle=h2['handle'],
                            mode='destroy')
    tg3.tg_interface_config(port_handle=tg_ph_3, handle=h3['handle'],
                            mode='destroy')
    tg4.tg_interface_config(port_handle=tg_ph_4, handle=h4['handle'],
                            mode='destroy')
    if apply_file is False:
        # Unconfigure: remove the VLAN IPs, memberships, and the VLAN range.
        base_range = data.base_val-1
        ip_addr = data.start_ip_addr
        max_range = data.base_val+4
        for index in range(base_range, max_range):
            command = "config interface ip remove "+ "Vlan" + str(index) + " " + ip_addr
            st.config(dut, command)
            (_, ip_addr) = ipfeature.increment_ip_addr(ip_addr, "network")
        command = "config vlan member del 100 {}".format(member4)
        st.config(dut, command)
        command = "config vlan member del 101 {}".format(member1)
        st.config(dut, command)
        command = "config vlan member del 102 {}".format(member2)
        st.config(dut, command)
        command = "config vlan member del 103 {}".format(member3)
        st.config(dut, command)
        command = "config vlan member del 104 {}".format(member4)
        st.config(dut, command)
        command = "config vlan range del 100 105"
        st.config(dut, command)
    if ret is True:
        st.log("Test Case PASSED")
        st.report_pass("operation_successful")
    else:
        st.log("Test Case FAILED")
        st.report_fail("operation_failed")
def test_ft_stp_loopguard_config_reload(): st.log("Verify the stp convergence") if not stp_wrap.poll_stp_convergence( vars, sc_data.vlan, iteration=20, delay=1): st.report_fail("stp_convergence_fail") st.log("Enable loop guard on device1") if not stp.config_loopguard_global(vars.D1, mode='enable'): st.report_fail("STP_loop_guard_config_fail") st.log("disable stp on an interface") stp.config_stp_enable_interface(vars.D2, vars.D2D1P2, mode="disable") st.wait(5) st.log("verify port moved to inconsistent state") if not stp.check_rg_current_state(vars.D1, sc_data.vlan, vars.D1D2P2): st.report_fail("port_inconsistent_state_fail") st.log("enable stp on an interface") stp.config_stp_enable_interface(vars.D2, vars.D2D1P2, mode="enable") reboot_obj.config_save(vars.D1) st.reboot(vars.D1) st.log("disable stp on an interface") stp.config_stp_enable_interface(vars.D2, vars.D2D1P2, mode="disable") st.wait(5) st.log("verify port moved to inconsistent state") if not stp.check_rg_current_state(vars.D1, sc_data.vlan, vars.D1D2P2): st.report_fail("port_inconsistent_state_fail") st.log("enable stp on an interface") stp.config_stp_enable_interface(vars.D2, vars.D2D1P2, mode="enable") st.wait(5) if not stp.verify_stp_intf_status(vars.D1, interface=vars.D1D2P2, status="DISCARDING", vlanid=sc_data.vlan): st.report_fail("port_discarding_fail") st.log("Unconfiguring the loop guard global mode") if not stp.config_loopguard_global(vars.D1, mode='disable'): st.report_fail("STP_loop_guard_config_fail") st.report_pass("test_case_passed")
def test_ft_sf_all_buffer_stats_using_multicast_traffic():
    """
    Author : prudviraj k ([email protected]) and phani kumar ravula([email protected])

    Verify the snapshot-feature multicast queue watermark counters.

    Configures the snapshot interval, runs multicast traffic, and checks the
    multicast queue user watermark and persistent watermark counters; then
    clears both and verifies the counters reset. Per-TC verdicts are reported
    via st.report_tc_*; `result` accumulates failures for the final verdict.

    Fix: the persistent-watermark error message previously said
    "queue_persistent_watermark_unicast" — a copy-paste from the unicast test —
    while the check is on the multicast counter. Corrected to "multicast".
    """
    result = 0
    if not sfapi.config_snapshot_interval(
            vars.D1, snap="interval", interval_val=sf_data.snapshot_interval):
        st.error("Failed to configure snapshot interval")
        result += 1
    match = [{'snapshotinterval': sf_data.snapshot_interval}]
    if not sfapi.verify(vars.D1, 'snapshot_interval', verify_list=match):
        st.error("Failed to verify the configured snapshot interval")
        result += 1
    # The multicast queue index differs per platform: mc8 or mc10.
    if sfapi.multicast_queue_start_value(vars.D1,
                                         'queue_user_watermark_multicast',
                                         port_alias=vars.D1T1P4):
        match = [{'mc8': sf_data.initial_counter_value}]
    else:
        match = [{'mc10': sf_data.initial_counter_value}]
    sf_tg_traffic_start_stop(sf_data.multicast, True)
    # Wait two intervals so the snapshot counters reflect the traffic.
    st.wait(2 * sf_data.snapshot_interval)
    st.banner('#### queue_multicast_for_user_watermark ####')
    st.banner('TC name:::: ft_sf_queue_multicast_using_uwm ::::')
    # sfapi.verify returning True here means the counter still shows the
    # initial value, i.e. it did NOT advance with traffic -> failure.
    if sfapi.verify(vars.D1,
                    'queue_user_watermark_multicast',
                    verify_list=match,
                    port_alias=vars.D1T1P4):
        st.error(
            "Failed to verify the queue_user_watermark_multicast counter value"
        )
        result += 1
        st.report_tc_fail("ft_sf_queue_multicast_using_uwm",
                          "snapshot_tc_verify",
                          "queue_multicast_for_user_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_queue_multicast_using_uwm",
                          "snapshot_tc_verify",
                          "queue_multicast_for_user_watermark", "successful")
    st.banner('#### queue_multicast_for_persistent_watermark ####')
    st.banner('TC name :::: ft_sf_queue_multicast_using_persistent_wm ::::')
    if sfapi.verify(vars.D1,
                    'queue_persistent_watermark_multicast',
                    verify_list=match,
                    port_alias=vars.D1T1P4):
        # Fixed message: this check is on the multicast counter.
        st.error(
            "Failed to verify the queue_persistent_watermark_multicast counter value"
        )
        result += 1
        st.report_tc_fail("ft_sf_queue_multicast_using_persistent_wm",
                          "snapshot_tc_verify",
                          "queue_multicast_for_persistent_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_queue_multicast_using_persistent_wm",
                          "snapshot_tc_verify",
                          "queue_multicast_for_persistent_watermark",
                          "successful")
    sf_tg_traffic_start_stop(sf_data.multicast, False)
    st.wait(2 * sf_data.snapshot_interval)
    st.banner('#### clear_queue_multicast_for_user_watermark ####')
    st.banner('TC name :::: ft_sf_queue_multicast_clear_using_uwm ::::')
    if not sfapi.config_snapshot_interval(vars.D1,
                                          snap="clear_snapshot_counters",
                                          group=sf_data.group[1],
                                          table=sf_data.table[0],
                                          counter_type=sf_data.PG[3]):
        st.error(
            "Failed to execute the command clear {} snapshot counters".format(
                sf_data.PG[3]))
        result += 1
    # After clearing, the counter should be back at the initial value, so
    # a failed verify means the clear did not work.
    if not sfapi.verify(vars.D1,
                        'queue_user_watermark_multicast',
                        verify_list=match,
                        port_alias=vars.D1T1P4):
        st.error("Failed to clear the snapshot counters")
        result += 1
        st.report_tc_fail(
            "ft_sf_queue_multicast_clear_using_uwm", "snapshot_clear_verify",
            "clearing the multicast queue counters for user watermark",
            "failed")
    else:
        st.report_tc_pass(
            "ft_sf_queue_multicast_clear_using_uwm", "snapshot_clear_verify",
            "clearing the multicast queue counters for user watermark",
            "successful")
    st.banner('#### clear_queue_multicast_for_persistent_watermark ####')
    st.banner(
        'TC name :::: ft_sf_queue_multicast_clear_using_persistent_wm ::::')
    if not sfapi.config_snapshot_interval(vars.D1,
                                          snap="clear_snapshot_counters",
                                          group=sf_data.group[1],
                                          table=sf_data.table[1],
                                          counter_type=sf_data.PG[3]):
        st.error(
            "Failed to execute the command clear {} snapshot counters".format(
                sf_data.PG[3]))
        result += 1
    if not sfapi.verify(vars.D1,
                        'queue_persistent_watermark_multicast',
                        verify_list=match,
                        port_alias=vars.D1T1P4):
        st.error("Failed to clear the snapshot counters")
        result += 1
        st.report_tc_fail(
            "ft_sf_queue_multicast_clear_using_persistent_wm",
            "snapshot_clear_verify",
            "clearing the multicast queue counters for persistent watermark",
            "failed")
    else:
        st.report_tc_pass(
            "ft_sf_queue_multicast_clear_using_persistent_wm",
            "snapshot_clear_verify",
            "clearing the multicast queue counters for persistent watermark",
            "successful")
    if not result:
        st.report_pass("snapshot_all_buffer_counters", "multicast",
                       "successful")
    else:
        sf_collecting_debug_logs_when_test_fails()
        st.report_fail("snapshot_all_buffer_counters", "multicast", "failed")
def test_ft_verify_interfaces_order():
    '''
    @author: Ramprakash Reddy ([email protected])
    ipv4_intf_order : Verify order of interfaces in "show ip interfaces"
    ipv6_intf_order : Verify order of interfaces in "show ipv6 interfaces'
    Verify order of interfaces in "show ip/ipv6 interfaces" in sorted order or not
    :return:

    Fixes: corrected the "soretd" typo in both error messages, and replaced
    the duplicated lambda pair used for natural sorting with one private
    helper function (PEP 8 discourages assigning lambdas to names).
    '''

    def _natural_key(name):
        # Natural-sort key: split on digit runs so "Ethernet10" > "Ethernet2".
        return [
            int(part) if part.isdigit() else part
            for part in re.split('([0-9]+)', name)
        ]

    flag = 1
    st.log(
        "This test is to ensure that interfaces are listed in sorted order by 'interface name' in 'show ip/ipv6 "
        "interfaces'")
    free_ports = st.get_free_ports(vars.D1)
    if len(free_ports) < data.no_of_ports:
        data.no_of_ports = len(free_ports)
    req_ports = random.sample(free_ports, data.no_of_ports)
    ipv4_addr = data.ip4_addr[11] + '/' + data.ipv4_mask
    ipv6_addr = data.ip6_addr[0] + '/' + data.ipv6_mask
    intf_list = []
    # First half of the sampled ports gets IPv4 addresses, second half IPv6.
    for i in range(int(math.ceil(float(data.no_of_ports) / 2))):
        _, ipv4_addr = ipfeature.increment_ip_addr(ipv4_addr, "network")
        ipfeature.config_ip_addr_interface(vars.D1,
                                           interface_name=req_ports[i],
                                           ip_address=ipv4_addr.split('/')[0],
                                           subnet=data.ipv4_mask,
                                           family="ipv4")
    for i in range(int(math.floor(float(data.no_of_ports) / 2))):
        _, ipv6_addr = ipfeature.increment_ip_addr(ipv6_addr,
                                                   "network",
                                                   family="ipv6")
        ipfeature.config_ip_addr_interface(
            vars.D1,
            interface_name=req_ports[
                i + int(math.ceil(float(data.no_of_ports) / 2))],
            ip_address=ipv6_addr.split('/')[0],
            subnet=data.ipv6_mask,
            family="ipv6")
    # The displayed order must already be the natural-sorted order.
    output = ipfeature.get_interface_ip_address(vars.D1)
    for each in output:
        intf_list.append(each['interface'])
    intf_list_sorted = sorted(intf_list, key=_natural_key)
    if intf_list == intf_list_sorted:
        st.log("Ipv4 interfaces are in sorted order")
    else:
        st.error("Ipv4 interfaces are not in sorted order")
        flag = 0
    del intf_list[:]
    del intf_list_sorted[:]
    output = ipfeature.get_interface_ip_address(vars.D1, family="ipv6")
    for each in output:
        intf_list.append(each['interface'])
    intf_list_sorted = sorted(intf_list, key=_natural_key)
    if intf_list == intf_list_sorted:
        st.log("Ipv6 interfaces are in sorted order")
    else:
        st.error("Ipv6 interfaces are not in sorted order")
        flag = 0
    # Unconfig: walk the same address sequences and remove them.
    ipv4_addr = data.ip4_addr[11] + '/' + data.ipv4_mask
    ipv6_addr = data.ip6_addr[0] + '/' + data.ipv6_mask
    for i in range(int(math.ceil(float(data.no_of_ports) / 2))):
        _, ipv4_addr = ipfeature.increment_ip_addr(ipv4_addr, "network")
        ipfeature.delete_ip_interface(vars.D1,
                                      interface_name=req_ports[i],
                                      ip_address=ipv4_addr.split('/')[0],
                                      subnet=data.ipv4_mask,
                                      family="ipv4")
    for i in range(int(math.floor(float(data.no_of_ports) / 2))):
        _, ipv6_addr = ipfeature.increment_ip_addr(ipv6_addr,
                                                   "network",
                                                   family="ipv6")
        ipfeature.delete_ip_interface(
            vars.D1,
            interface_name=req_ports[
                i + int(math.ceil(float(data.no_of_ports) / 2))],
            ip_address=ipv6_addr.split('/')[0],
            subnet=data.ipv6_mask,
            family="ipv6")
    if flag == 0:
        st.report_fail("test_case_failed")
    st.report_pass("test_case_passed")
def test_ft_sf_verify_buffer_pool_counters():
    """
    Author : prudviraj k ([email protected]) and phani kumar ravula([email protected])

    Verify the snapshot-feature buffer-pool watermark counters.

    Regenerates the device buffer config from the platform j2 template,
    reloads the DUT, runs unicast traffic, and checks the ingress/egress
    lossless buffer-pool user and persistent watermarks (CLI, percentage and
    COUNTERS_DB views), then clears both watermarks and verifies the values
    drop back within tolerance. Per-TC verdicts via st.report_tc_*.
    """
    result = 0
    per_result = 0
    # Locate the platform-specific buffers template and render it into a
    # config file on the DUT via sonic-cfggen, then save+reload to apply it.
    sf_data.platform_name_summary = get_platform_summary(vars.D1)
    sf_data.platform_name = sf_data.platform_name_summary["platform"]
    sf_data.platform_hwsku = sf_data.platform_name_summary["hwsku"]
    path = "/usr/share/sonic/device/{}/{}/{}".format(sf_data.platform_name,
                                                     sf_data.platform_hwsku,
                                                     sf_data.device_j2_file)
    convert_json = "sonic-cfggen -d -t " "{} > {}".format(
        path, sf_data.config_file)
    sfapi.load_json_config(vars.D1, convert_json, sf_data.config_file)
    reboot_api.config_save_reload(vars.D1)
    st.log("To make sure after reload DUT is fully operational")
    st.wait(sf_data.reload_interval)
    if not sfapi.config_snapshot_interval(
            vars.D1, snap="interval", interval_val=sf_data.snapshot_interval):
        st.error("Failed to configure snapshot interval")
        result += 1
    match = [{'snapshotinterval': sf_data.snapshot_interval}]
    if not sfapi.verify(vars.D1, 'snapshot_interval', verify_list=match):
        st.error("Failed to verify the configured snapshot interval")
        result += 1
    sf_tg_traffic_start_stop(sf_data.unicast, True)
    st.log(
        "waiting for two snapshot interval times to get the counter values reflect correctly"
    )
    st.wait(2 * sf_data.snapshot_interval)
    st.banner('#### buffer_Pool_for_user_watermark####')
    st.banner('TC name :::: ft_sf_buffer_pool_using_uwm ::::')
    # In these checks, verify_buffer_pool returning True means the counter
    # still equals the initial value (did not advance) -> failure.
    match = {'pool': 'ingress_lossless_pool'}
    value = {'bytes': sf_data.initial_counter_value}
    if sfapi.verify_buffer_pool(vars.D1,
                                'buffer_pool_watermark',
                                verify_list=match,
                                key=value):
        st.error(
            "Failed to verify the buffer pool counters for user watermark")
        result += 1
        st.report_tc_fail("ft_sf_buffer_pool_using_uwm", "snapshot_tc_verify",
                          "buffer_pool_for_user_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_using_uwm", "snapshot_tc_verify",
                          "buffer_pool_for_user_watermark", "successful")
    st.banner('TC name :::: ft_sf_buffer_pool_using_persistent_wm ::::')
    st.banner('#### buffer_pool_for_persistent_watermark using percentage####')
    match = {'pool': 'egress_lossless_pool'}
    value = {'percent': sf_data.initial_counter_value}
    if sfapi.verify_buffer_pool(vars.D1,
                                'buffer_pool_persistent-watermark',
                                verify_list=match,
                                key=value,
                                percent=sf_data.percentage[0]):
        st.error(
            "Failed to verify the buffer pool counters for persistent watermark"
        )
        result += 1
        per_result += 1
    st.banner('#### buffer_pool_for_persistent_watermark using CLI####')
    match = {'pool': 'egress_lossless_pool'}
    value = {'bytes': sf_data.initial_counter_value}
    if sfapi.verify_buffer_pool(vars.D1,
                                'buffer_pool_persistent-watermark',
                                verify_list=match,
                                key=value):
        st.error(
            "Failed to verify the buffer pool counters for persistent watermark"
        )
        result += 1
        per_result += 1
    # Single TC verdict covering both persistent-watermark views above.
    if per_result:
        st.report_tc_fail("ft_sf_buffer_pool_using_persistent_wm",
                          "snapshot_tc_verify",
                          "buffer_pool_for_persistent_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_using_persistent_wm",
                          "snapshot_tc_verify",
                          "buffer_pool_for_persistent_watermark", "successful")
    st.banner('#### buffer_pool_using_counter_DB ####')
    st.banner('TC name :::: ft_sf_buffer_pool_using_counter_DB ::::')
    match = [{
        'SAI_BUFFER_POOL_STAT_WATERMARK_BYTES': sf_data.initial_counter_value
    }]
    if sfapi.verify(vars.D1,
                    'buffer_pool_counters_DB',
                    oid_type='ingress_lossless_pool',
                    verify_list=match):
        st.error(
            "Failed to verify the ingress lossless buffer pool counter using counter DB value"
        )
        result += 1
        st.report_tc_fail("ft_sf_buffer_pool_using_counter_DB",
                          "snapshot_tc_verify", "ingress lossless buffer pool",
                          "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_using_counter_DB",
                          "snapshot_tc_verify", "ingress lossless buffer pool",
                          "successful")
    sf_tg_traffic_start_stop(sf_data.unicast, False)
    st.log(
        "waiting for two snapshot interval times to get the counter values reflect correctly"
    )
    st.wait(2 * sf_data.snapshot_interval)
    st.banner('#### clear_buffer_Pool_for_user_watermark####')
    st.banner('TC name :::: ft_sf_buffer_pool_clear_using_uwm ::::')
    if not sfapi.config_snapshot_interval(vars.D1,
                                          snap="clear_buffer-pool watermark"):
        st.error("Failed to clear buffer-pool watermark")
    st.log(
        "After clear buffer_pool checking the stats with 10 cells tolerance")
    counters = sfapi.get(vars.D1,
                         'buffer_pool_watermark',
                         get_value='bytes',
                         match={'pool': 'ingress_lossless_pool'})
    if counters > sf_data.buffer_pool_tolerance:
        st.error("Failed to clear the buffer pool counters for user watermark")
        result += 1
        st.report_tc_fail("ft_sf_buffer_pool_clear_using_uwm",
                          "snapshot_tc_verify",
                          "buffer_pool_clear_for_user_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_clear_using_uwm",
                          "snapshot_tc_verify",
                          "buffer_pool_clear_for_user_watermark", "successful")
    st.banner('#### clear_buffer_pool_for_persistent_watermark ####')
    st.banner('TC name :::: ft_sf_buffer_pool_clear_using_persistent_wm ::::')
    if not sfapi.config_snapshot_interval(
            vars.D1, snap="clear_buffer-pool persistent-watermark"):
        st.error("Failed to clear_buffer-pool persistent-watermark")
    st.log(
        "After clear buffer_pool checking the stats with 10 cells tolerance")
    counters = sfapi.get(vars.D1,
                         'buffer_pool_watermark',
                         get_value='bytes',
                         match={'pool': 'egress_lossless_pool'})
    if counters > sf_data.buffer_pool_tolerance:
        st.error(
            "Failed to clear the buffer pool counters for persistent watermark"
        )
        result += 1
        st.report_tc_fail("ft_sf_buffer_pool_clear_using_persistent_wm",
                          "snapshot_tc_verify",
                          "buffer_pool_clear_for_persistent_watermark",
                          "failed")
    else:
        st.report_tc_pass("ft_sf_buffer_pool_clear_using_persistent_wm",
                          "snapshot_tc_verify",
                          "buffer_pool_clear_for_persistent_watermark",
                          "successful")
    if not result:
        st.report_pass("snapshot_tc_verify", "buffer pool", "successful")
    else:
        sf_collecting_debug_logs_when_test_fails()
        st.report_fail("snapshot_tc_verify", "buffer pool", "failed")
def test_ft_bgp6_rr_traffic_check(self):
    """Verify IPv6 iBGP route-reflector behavior end to end with traffic.

    Flow:
      1. Advertise 100 IPv6 routes from a leaf's TGEN BGP emulation.
      2. Confirm the other leaf does NOT learn them while no
         route-reflector-client is configured on the spine.
      3. Configure route-reflector-client (+ next-hop-self) on the spine and
         confirm the routes are now reflected to the leaf.
      4. Send a burst of IPv6 traffic toward the advertised routes and pass
         only if loss is <= 0.5%.

    Reports pass/fail via spytest (st.report_*); no return value.
    """
    TG_D1 = topo.tg_dut_list_name[0]
    TG_D2 = topo.tg_dut_list_name[1]
    tg_ob = topo['T1{}P1_tg_obj'.format(TG_D1)]
    bgp_handle = topo['T1{}P1_ipv6_tg_bh'.format(TG_D1)]
    tc_fail_flag = 0
    spine_as = int(bgplib.data['spine_as'])
    st.log("Advertising Routes from one of the Leaf Router")
    bgp_route = tg_ob.tg_emulation_bgp_route_config(
        handle=bgp_handle['handle'],
        mode='add',
        ip_version='6',
        num_routes='100',
        prefix='1001::1',
        as_path='as_seq:1')
    tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='start')
    # Allow the emulated BGP session time to converge before checking RIB.
    st.wait(10)
    bgp_summary = bgpapi.show_bgp_ipv6_summary(topo.dut_list[1])
    rib_entries = bgp_summary[0]['ribentries']
    st.log('RIB Entries : {}'.format(rib_entries))
    # When route-reflector is not configured at the server (spine), the
    # route-reflector-client (leaf node) should learn nothing; ideally the
    # route count is 0 (a small slack of 10 is tolerated here).
    if int(rib_entries) > 10:
        st.error(
            'iBGP Routes are advertised to iBGP peer DUT, even when route-reflector-client is not configured'
        )
        tc_fail_flag = 1
    # Now configure route-reflector-client at the spine node.
    result = bgpapi.create_bgp_route_reflector_client(
        topo.dut_list[0], spine_as, 'ipv6', 'spine_leaf6', 'yes')
    if not result:
        st.log(
            "Configuring client reflection on {} {} bgp {} Failed".format(
                topo.dut_list[0], 'ipv6', spine_as))
        tc_fail_flag = 1
    bgpapi.create_bgp_next_hop_self(topo.dut_list[0],
                                    spine_as,
                                    'ipv6',
                                    'spine_leaf6',
                                    'yes',
                                    'yes',
                                    cli_type=bgp_cli_type)
    # Give the reflected routes time to propagate before re-checking.
    st.wait(15)
    bgp_summary = bgpapi.show_bgp_ipv6_summary(topo.dut_list[1])
    rib_entries = bgp_summary[0]['ribentries']
    st.log('RIB Entries : {}'.format(rib_entries))
    if int(rib_entries) < 100:
        st.error(
            'iBGP Routes are not advertised to route-reflector-client')
        tc_fail_flag = 1
    st.log(
        "Initiating the Ipv6 traffic for those Routes from another Leaf Router"
    )
    # Single burst of 2000 packets at 1000 pps toward the emulated routes.
    tr1 = tg_ob.tg_traffic_config(
        port_handle=topo['T1{}P1_ipv6_tg_ph'.format(TG_D2)],
        emulation_src_handle=topo['T1{}P1_ipv6_tg_ih'.format(
            TG_D2)]['handle'],
        emulation_dst_handle=bgp_route['handle'],
        circuit_endpoint_type='ipv6',
        mode='create',
        transmit_mode='single_burst',
        pkts_per_burst='2000',
        length_mode='fixed',
        rate_pps=1000)
    stream_id1 = tr1['stream_id']
    tg_ob.tg_traffic_control(action='run', handle=stream_id1)
    st.wait(20)
    tg1_stats = tgapi.get_traffic_stats(
        tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D1)])
    tg2_stats = tgapi.get_traffic_stats(
        tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D2)])
    if not (int(tg2_stats.tx.total_packets)
            and int(tg1_stats.rx.total_packets)):
        st.error('Received ZERO stats.')
        tc_fail_flag = 1
    else:
        # Percentage of transmitted packets that were NOT received.
        percent_rx = float(
            int(tg2_stats.tx.total_packets) -
            int(tg1_stats.rx.total_packets)) / int(
                tg2_stats.tx.total_packets) * 100
        st.log('tg1_stats.rx.total_packets : {}'.format(
            tg1_stats.rx.total_packets))
        st.log('tg2_stats.tx.total_packets : {}'.format(
            tg2_stats.tx.total_packets))
        st.log('percent_rx : {}'.format(percent_rx))
        # Fail on more than 0.5% loss.
        if percent_rx > 0.5:
            tc_fail_flag = 1
    tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='stop')
    if tc_fail_flag:
        st.report_fail("traffic_verification_failed")
    st.report_pass('test_case_passed')
def test_ft_ssh_add_user_verify():
    """Verify SSH user access and control-plane ACLs for SSH/SNMP, across a reboot.

    Author : Prudvi Mangadu ([email protected])

    Flow:
      1. SSH to DUT1 with default and non-default credentials (no ACLs yet).
      2. Apply control-plane ACLs permitting SSH/SNMP only from selected sources.
      3. Verify SSH is blocked from the non-permitted source IPs and allowed
         from the permitted ones; verify SNMP still answers.
      4. Save config, fast-reboot, and repeat the ACL verification.
      5. Report per-sub-testcase results and clean up.

    Counters (acl_sshv4 / acl_sshv6 / acl_snmp / user_ssh) accumulate failures;
    zero means the corresponding sub-case passed.
    """
    user_ssh = 0
    acl_sshv4 = 0
    acl_sshv6 = 0
    acl_snmp = 0
    if not snmp_config(config='add'):
        acl_snmp += 1  # was "acl_snmp = +1" (assignment of +1, not increment)
    ipaddress = st.get_mgmt_ip(vars.D1)
    if not ipaddress:
        st.report_env_fail("ip_verification_fail")
    snmp_cmd = "snmpget -Oqv -v 2c -c {} {} {}".format(
        ssh_data.ro_community, ipaddress, ssh_data.oid_sysName)
    out = config_nondefault_user()
    if not out:
        user_ssh += 1
    st.log("connecting to device with username={},password={}".format(
        ssh_data.usr_default, ssh_data.pwd_final))
    if not st.exec_ssh(vars.D1, ssh_data.usr_default, ssh_data.pwd_final,
                       ssh_data.commands_to_verify):
        st.error('Cannot SSH into Device with default credentials')
        user_ssh += 1
    st.log('connecting to device with username={},password={}'.format(
        ssh_data.usr_non_default, ssh_data.pwd_non_default))
    if not st.exec_ssh(vars.D1, ssh_data.usr_non_default,
                       ssh_data.pwd_non_default,
                       ssh_data.commands_to_verify):
        st.error('Cannot SSH into Device with non-default credentials')
        user_ssh += 1
    output = verify_ssh_connection(vars.D2, ssh_data.ipv4_address_D1D2P2,
                                   ssh_data.usr_default, ssh_data.pwd_final)
    if not output:
        user_ssh += 1
    # Restrict SNMP/SSH ACL rules to the snmptrap server IP and test networks.
    IPAddr = ensure_service_params(vars.D1, "snmptrap", "ip") + "/32"
    change_acl_rules(acl_data.acl_json_config_control_plane,
                     "SNMP_ACL|RULE_1", "SRC_IP", IPAddr)
    change_acl_rules(acl_data.acl_json_config_control_plane,
                     "SSH_ONLY|RULE_1", "SRC_IP", IPAddr)
    change_acl_rules(acl_data.acl_json_config_control_plane,
                     "SSH_ONLY|RULE_2", "SRC_IP", ssh_data.ipv4_network)
    change_acl_rules(acl_data.acl_json_config_control_plane,
                     "V6_SSH_ONLY|RULE_1", "SRC_IPV6",
                     ssh_data.ipv6_network_D1)
    acl_config = acl_data.acl_json_config_control_plane
    st.log("ACL_DATA: {}".format(acl_config))
    apply_acl_config(vars.D1, acl_config)
    acl_obj.show_acl_table(vars.D1)
    acl_obj.show_acl_rule(vars.D1)
    if not poll_wait(acl_obj.verify_acl_table_rule, 5, vars.D1, "SNMP_ACL",
                     "RULE_1"):
        st.error("Failed to create ACL rule '{}' ".format("SNMP_ACL"))
        acl_snmp += 1
    if not poll_wait(acl_obj.verify_acl_table_rule, 5, vars.D1, "SSH_ONLY",
                     "RULE_1"):
        st.error("Failed to create ACL rule '{}' ".format("SSH_ONLY"))
        acl_sshv4 += 1
    if not poll_wait(acl_obj.verify_acl_table_rule, 5, vars.D1,
                     "V6_SSH_ONLY", "RULE_1"):
        st.error("Failed to create ACL rule '{}' ".format("V6_SSH_ONLY"))
        acl_sshv6 += 1
    hostname = basic_obj.get_hostname(vars.D1)
    st.log("HOSTNAME: {}".format(hostname))
    snmp_out = execute_command(ssh_conn_obj, snmp_cmd)
    if hostname not in snmp_out:
        acl_snmp += 1
    # With ACLs applied: D1D2P2 sources must be BLOCKED (expect no output),
    # D1D2P1 sources must be ALLOWED (expect output).
    st.log("connecting to device with default username={},password={}".format(
        ssh_data.usr_default, ssh_data.pwd_final))
    output = verify_ssh_connection(vars.D2, ssh_data.ipv4_address_D1D2P2,
                                   ssh_data.usr_default, ssh_data.pwd_final)
    if output:
        acl_sshv4 += 1
    output = verify_ssh_connection(vars.D2, ssh_data.ipv6_address_D1D2P2,
                                   ssh_data.usr_default, ssh_data.pwd_final)
    if output:
        acl_sshv6 += 1
    output = verify_ssh_connection(vars.D2, ssh_data.ipv4_address_D1D2P1,
                                   ssh_data.usr_default, ssh_data.pwd_final)
    if not output:
        acl_sshv4 += 1
    output = verify_ssh_connection(vars.D2, ssh_data.ipv6_address_D1D2P1,
                                   ssh_data.usr_default, ssh_data.pwd_final)
    if not output:
        acl_sshv6 += 1
    st.log(
        "connecting to device with non default username={},password={}".format(
            ssh_data.usr_non_default, ssh_data.pwd_non_default))
    output = verify_ssh_connection(vars.D2, ssh_data.ipv4_address_D1D2P1,
                                   ssh_data.usr_non_default,
                                   ssh_data.pwd_non_default)
    if not output:
        acl_sshv4 += 1
    output = verify_ssh_connection(vars.D2, ssh_data.ipv6_address_D1D2P1,
                                   ssh_data.usr_non_default,
                                   ssh_data.pwd_non_default)
    if not output:
        acl_sshv6 += 1
    # Persist config and verify the ACLs survive a fast reboot.
    config_save(vars.D1)
    st.log('rebooting the device.')
    st.reboot(vars.D1, 'fast')
    acl_obj.show_acl_table(vars.D1)
    acl_obj.show_acl_rule(vars.D1)
    if not poll_wait(acl_obj.verify_acl_table_rule, 5, vars.D1, "SSH_ONLY",
                     "RULE_1"):
        # Consistency fix: failures are reported via st.error (was st.log).
        st.error("Failed to create ACL rule '{}' ".format("SSH_ONLY"))
        acl_sshv4 += 1
    if not poll_wait(acl_obj.verify_acl_table_rule, 5, vars.D1,
                     "V6_SSH_ONLY", "RULE_1"):
        st.error("Failed to create ACL rule '{}' ".format("V6_SSH_ONLY"))
        # BUG FIX: this V6 rule failure previously incremented acl_sshv4,
        # masking IPv6 ACL persistence failures (pre-reboot block uses acl_sshv6).
        acl_sshv6 += 1
    if not poll_wait(acl_obj.verify_acl_table_rule, 5, vars.D1, "SNMP_ACL",
                     "RULE_1"):
        st.error("Failed to create ACL rule '{}' ".format("SNMP_ACL"))
        acl_snmp += 1
    hostname = basic_obj.get_hostname(vars.D1)
    snmp_out = execute_command(ssh_conn_obj, snmp_cmd)
    if hostname not in snmp_out:
        acl_snmp += 1
    '''
    change_acl_rules(acl_data.acl_json_config_control_plane, "SNMP_ACL|RULE_1",
                     "SRC_IP", "2.2.2.2/24")
    acl_config = acl_data.acl_json_config_control_plane
    apply_acl_config(vars.D1, acl_config)
    acl_obj.show_acl_rule(vars.D1)
    snmp_out = execute_command(ssh_conn_obj, snmp_cmd)
    if "Timeout" not in snmp_out:
        acl_snmp = + 1
    '''
    # Repeat the allowed/blocked SSH source checks after reboot.
    st.log("connecting to device with default username={},password={}".format(
        ssh_data.usr_default, ssh_data.pwd_final))
    output = verify_ssh_connection(vars.D2, ssh_data.ipv4_address_D1D2P2,
                                   ssh_data.usr_default, ssh_data.pwd_final)
    if output:
        acl_sshv4 += 1
    output = verify_ssh_connection(vars.D2, ssh_data.ipv6_address_D1D2P2,
                                   ssh_data.usr_default, ssh_data.pwd_final)
    if output:
        acl_sshv6 += 1
    output = verify_ssh_connection(vars.D2, ssh_data.ipv4_address_D1D2P1,
                                   ssh_data.usr_default, ssh_data.pwd_final)
    if not output:
        acl_sshv4 += 1
    output = verify_ssh_connection(vars.D2, ssh_data.ipv6_address_D1D2P1,
                                   ssh_data.usr_default, ssh_data.pwd_final)
    if not output:
        acl_sshv6 += 1
    if acl_sshv4:
        st.report_tc_fail("test_ft_controlplane_acl_service_sshv4",
                          "ssh_failed",
                          "with control plane ACL service SSHv4 after reboot")
    else:
        st.report_tc_pass("test_ft_controlplane_acl_service_sshv4",
                          "ssh_failed",
                          "with control plane ACL service SSHv4 after reboot")
    if acl_sshv6:
        st.report_tc_fail("test_ft_controlplane_acl_service_sshv6",
                          "ssh_failed",
                          "with control plane ACL service SSHv6 after reboot")
    else:
        st.report_tc_pass("test_ft_controlplane_acl_service_sshv6",
                          "ssh_failed",
                          "with control plane ACL service SSHv6 after reboot")
    if acl_snmp:
        st.report_tc_fail("test_ft_controlplane_acl_service_snmp",
                          "snmp_output_failed",
                          "with control plane ACL service SNMP after reboot")
    else:
        st.report_tc_pass("test_ft_controlplane_acl_service_snmp",
                          "snmp_output_failed",
                          "with control plane ACL service SNMP after reboot")
    acl_obj.delete_acl_table(vars.D1)
    if acl_sshv4 or acl_sshv6 or acl_snmp:
        st.generate_tech_support(vars.D1,
                                 "controlplane_acl_services_after_reboot")
    st.log("connecting to device with username={},password={}".format(
        ssh_data.usr_default, ssh_data.pwd_final))
    if not st.exec_ssh(vars.D1, ssh_data.usr_default, ssh_data.pwd_final,
                       ssh_data.commands_to_verify):
        st.error('Cannot SSH into Device with default credentials after reboot')
        user_ssh += 1
    st.log('connecting to device with username={},password={}'.format(
        ssh_data.usr_non_default, ssh_data.pwd_non_default))
    if not st.exec_ssh(vars.D1, ssh_data.usr_non_default,
                       ssh_data.pwd_non_default,
                       ssh_data.commands_to_verify):
        st.error('Cannot SSH into Device with non-default credentials after reboot')
        user_ssh += 1
    config_nondefault_user(config='remove')
    if user_ssh:
        st.report_fail("ssh_failed")
    st.report_pass("test_case_passed")
def test_ft_copp_sflow():
    """
    scenario : Verify CoPP functionality for sflow
    Author : [email protected]

    Tightens the sflow trap-group CIR/CBS to 600 pps, enables sflow with a
    collector, floods sampled traffic from the TGEN, and verifies the CPU
    queue counter is rate-limited to the configured CIR (within tolerance).
    Reports pass/fail via spytest; cleans up sflow/CoPP config either way.
    :return:
    """
    success = True
    # CPU queue assigned to the sflow trap group; counters are checked there.
    copp_queue = retrun_group_dict(copp_data, 'sflow')['queue']
    string_copp = 'copp-scheduler-policy@' + copp_queue
    # Key name differs between CoPP schema versions; handle both.
    if "COPP_TABLE:trap.group.sflow" in copp_data.keys():
        copp_obj.set_copp_config(vars.D1,
                                 ["COPP_TABLE:trap.group.sflow", "cbs", "600"],
                                 ["COPP_TABLE:trap.group.sflow", "cir", "600"])
    else:
        copp_obj.set_copp_config(
            vars.D1, ["COPP_TABLE:copp-system-sflow", "cbs", "600"],
            ["COPP_TABLE:copp-system-sflow", "cir", "600"])
    if string_copp in copp_data_pir['SCHEDULER'].keys():
        if copp_data_pir['SCHEDULER'][string_copp]['pir'] != '600':
            copp_obj.set_copp_pir_config(vars.D1, 'apply',
                                         [string_copp, "pir", "600"])
            st.log("performing reboot")
            st.reboot(vars.D1)
    try:
        enable_disable_config(vars.D1,
                              interface=False,
                              interface_name=None,
                              action="enable",
                              cli_type="klish")
        add_del_collector(vars.D1,
                          collector_name="collector_1",
                          ip_address="1.1.1.1",
                          port_number=None,
                          action="add",
                          cli_type="klish")
    except Exception as e:
        st.log(e)
        st.report_fail("exception_observed", e)
    copp_obj.get_copp_config(dut=vars.D1, table_name='all')
    copp_cir_sflow = hw_constants['COPP_CIR_SFLOW']
    sent_rate_pps = "921828"
    deviation = copp_cir_sflow * deviation_percentage
    st.log("testcase to verify COPP for sflow")
    tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])
    st.log(
        'sending packets for {}pps and expecting rate limit to {}pps '.format(
            sent_rate_pps, copp_cir_sflow))
    tg_stream_handle = tg.tg_traffic_config(mac_src='00.00.00.00.00.01',
                                            mac_dst='00.00.00.00.00.02',
                                            rate_pps=sent_rate_pps,
                                            mode='create',
                                            port_handle=tg_ph_1,
                                            transmit_mode='continuous',
                                            l2_encap='ethernet_ii_vlan',
                                            vlan_id='10')['stream_id']
    tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])
    st.wait(5)
    if not verify_counter_cpu_asic_bcm(
            dut=vars.D1, queue=copp_queue, value=copp_cir_sflow,
            tol=deviation):
        # BUG FIX: message previously said "igmp" (copy-paste from the IGMP
        # CoPP test); this test rate-limits sflow.
        st.error('CPU counter check for rate limiting sflow to {}pps is failed'.
                 format(copp_cir_sflow))
        success = False
    tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])
    # Cleanup is identical on both outcomes; hoisted out of the branches.
    sflow_copp_config_undo()
    sflow_unconfig()
    if success:
        st.report_pass("sflow_rate_limit_status", copp_cir_sflow, "passed")
    else:
        st.report_fail("sflow_rate_limit_status", copp_cir_sflow, "failed")
def verify_pddf_running():
    """Fail the test run if the 'pddf' service is not active on DUT1.

    Emits a spytest failure report ("pddf_service_is_not_running") when the
    service check fails; otherwise returns None.
    """
    # BUG FIX: the original called bare log(...), which does not appear to be
    # defined here; st.log() is the logging entry point used by every sibling
    # function in this file. (If a module-level `log` alias exists, this is a
    # no-op change — verify against the file's imports.)
    st.log("Checking PDDF is running on DUT")
    if not bsapi.is_service_active(vars.D1, service="pddf"):
        st.report_fail("pddf_service_is_not_running")
def test_ft_copp_udp():
    """
    scenario : Verify CoPP functionality for UDP
    # IP helper case: RtIpHeAdFn011
    Author :

    Enables the IP helper (UDP broadcast forwarding) with a 5000 pps rate
    limit, sends UDP broadcast (DNS-shaped) traffic at twice that rate from
    the TGEN, and verifies the CPU queue counter is limited to the configured
    rate within tolerance. Restores IP helper and CoPP PIR config afterwards.
    :return:
    """
    copp_queue = '2'
    string_copp = 'copp-scheduler-policy@' + copp_queue
    # Raise the scheduler PIR to 5000 if needed; PIR changes require a reboot
    # to take effect.
    if string_copp in copp_data_pir['SCHEDULER'].keys():
        if copp_data_pir['SCHEDULER'][string_copp]['pir'] != '5000':
            copp_obj.set_copp_pir_config(vars.D1, 'apply',
                                         [string_copp, "pir", "5000"])
            st.log("performing reboot")
            st.reboot(vars.D1)
    success = True
    config_rate_limit_value = 5000
    st.log("test case to verify COPP for UDP broadcast packets")
    st.log("On DUT enable IP helper globally")
    ip_helper_obj.config(vars.D1, helper_status='enable')
    st.log("Configuring rate limit value {} for UDP broadcast packets.".format(
        config_rate_limit_value))
    ip_helper_obj.config(vars.D1, rate_limit_val=config_rate_limit_value)
    if not ip_helper_obj.verify(vars.D1,
                                forward_protocol='',
                                verify_list=[{
                                    'forwarding': 'Enabled',
                                    'enable_ports': [
                                        'TFTP',
                                        'NTP',
                                        'DNS',
                                        'TACACS',
                                        'NetBios-Name-Server',
                                        'NetBios-Datagram-Server',
                                    ],
                                    'rate_limit': str(config_rate_limit_value)
                                }]):
        st.report_fail("UDP_forwarding_status_verification_failed")
    st.log("Configure IP helper address {} on interface {}".format(
        "2.2.2.2", vars.D1T1P1))
    # noinspection PyInterpreter
    ip_helper_obj.config(vars.D1,
                         action_str='add',
                         intf_name=vars.D1T1P1,
                         ip_address="2.2.2.2")
    copp_cir_udp = config_rate_limit_value
    # Offer twice the permitted rate so the limiter is actually exercised.
    sent_rate_pps = copp_cir_udp * 2
    deviation = copp_cir_udp * deviation_percentage
    tg.tg_traffic_control(action='reset', port_handle=[tg_ph_1])
    st.log('sending UDP packets for {}pps and expecting rate limit to {}pps '.
           format(sent_rate_pps, copp_cir_udp))
    # Broadcast UDP stream to port 53 with a DNS-query payload pattern.
    tg_stream_handle = tg.tg_traffic_config(
        port_handle=[tg_ph_1],
        mac_src="00:00:00:11:22:33",
        mac_dst="FF:FF:FF:FF:FF:FF",
        mode='create',
        transmit_mode='continuous',
        data_pattern=
        '00 03 01 00 00 01 00 00 00 00 00 00 06 67 6f 6f 67 6c 65 03 63 6f 6d 00 00 01 00 01',
        rate_pps=sent_rate_pps,
        l3_protocol='ipv4',
        ip_protocol=17,
        ip_src_addr='1.1.1.1',
        l4_protocol='udp',
        ip_dst_addr='255.255.255.255',
        udp_dst_port='53',
        udp_src_port=54821)['stream_id']
    st.log("send UDP broadcast packets and verify CPU counter")
    tg.tg_traffic_control(action='run', stream_handle=[tg_stream_handle])
    st.wait(5)
    if not verify_counter_cpu_asic_bcm(
            dut=vars.D1, queue=copp_queue, value=copp_cir_udp, tol=deviation):
        st.error('CPU counter check for rate limiting udp to {} pps is failed'.
                 format(copp_cir_udp))
        success = False
    tg.tg_traffic_control(action='stop', stream_handle=[tg_stream_handle])
    # Restore IP helper defaults and remove the helper address.
    ip_helper_obj.config(vars.D1, rate_limit_val=600)
    ip_helper_obj.config(vars.D1,
                         action_str='remove',
                         intf_name=vars.D1T1P1,
                         ip_address="2.2.2.2")
    st.log("On DUT Disable IP helper globally")
    ip_helper_obj.config(vars.D1, cli_type='click', helper_status='disable')
    # Restore the original PIR value (reboot again if it was changed).
    if string_copp in copp_data_pir['SCHEDULER'].keys():
        if copp_data_pir['SCHEDULER'][string_copp]['pir'] != '5000':
            copp_obj.set_copp_pir_config(vars.D1, 'apply', [
                string_copp, "pir",
                copp_data_pir['SCHEDULER'][string_copp]['pir']
            ])
            st.log("performing reboot")
            st.reboot(vars.D1)
    msg_str = "UDP broadcast rate limit"
    if success:
        st.report_pass("IP_helper_test_case_msg_status", msg_str, "passed")
    else:
        st.report_fail("IP_helper_test_case_msg_status", msg_str, "failed")
def test_vrrp2vrf_func_004(prologue_epilogue):
    """Verify VRRP failover/failback in a VRF when LAG member ports flap.

    TCs: FtOpSoRoVrrpvrfFn013, FtOpSoRoVrrpvrfFn014.
    Shuts all dut1->dut3 LAG member ports so dut2 takes over as Master for
    every session, verifies traffic continues, then restores the ports and
    verifies dut1 regains Mastership. Errors are accumulated in err_list and
    the first one is reported on failure.
    """
    tc_list = ["FtOpSoRoVrrpvrfFn013", "FtOpSoRoVrrpvrfFn014"]
    tc_result = True
    err_list = []
    ###########################################################
    hdrMsg(
        "Step T1 : Verify VRRP Master/Backup election for {} configured sessions"
        .format(vrrp_sessions))
    ############################################################
    result = verify_vrrp(summary='no')
    if result is False:
        err = "VRRP Master/Backup election is incorrect for one or more VRRP sessions"
        st.report_fail('test_case_failure_message', err)
    ###########################################################
    hdrMsg(
        "Step T2 : Start Traffic for all configured VRRP sessions and verify Trafficgets forwarded by VRRP Masters"
    )
    ############################################################
    run_traffic()
    result = verify_tg_traffic_rate()
    if result is False:
        err = "Testcase {} Traffic not forwarded for all VRRP Masters".format(
            tc_list[0])
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg(
        "Step T3 : Shutdown all member ports of LAG {} on dut1 connected to dut3(switch) and "
        "verify dut2 becomes Master for all sessions".format(lag_id_list[0]))
    ############################################################
    port_api.shutdown(data.dut1, data.d1d3_ports)
    for vrid, vlan, vmac, vip in zip(vrid_list, vrrp_vlan_intf, vmac_list_1,
                                     vip_list):
        result = verify_master_backup(vrid,
                                      vlan,
                                      vmac,
                                      vip,
                                      master_dut=data.dut2,
                                      backup_dut=data.dut1,
                                      skip_backup_check='yes')
        if result is False:
            # BUG FIX: the original string had a "{}" placeholder but no
            # .format() call, so the raw "{}" leaked into the report.
            err = "Testcase {} After port shutdown on dut1, dut2 didnot become Master for all VRRP sessions".format(
                tc_list[0])
            failMsg(err)
            debug_vrrp()
            err_list.append(err)
            tc_result = False
    ###########################################################
    hdrMsg(
        "Step T4 : Verify Traffic gets load balanced via new Master for all vrrp sessions"
    )
    ############################################################
    result = verify_tg_traffic_rate()
    if result is False:
        err = "Testcase {} Traffic not forwarded by all VRRP Masters".format(
            tc_list[0])
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg(
        "Step T5 : No-Shutdown all member ports of LAG {} on dut1 connected to dut3(switch) and verify dut1"
        " becomes Master again for sessions {}".format(lag_id_list[0],
                                                       vrid_list[0]))
    ############################################################
    port_api.noshutdown(data.dut1, data.d1d3_ports)
    for vrid, vlan, vmac, vip in zip(vrid_list, vrrp_vlan_intf, vmac_list_1,
                                     vip_list):
        result = verify_master_backup(vrid,
                                      vlan,
                                      vmac,
                                      vip,
                                      master_dut=data.dut1,
                                      backup_dut=data.dut2,
                                      skip_backup_check='yes')
        if result is False:
            err = "Testcase {} After port no-shutdown on dut1, dut1 didnot become Master again for VRRP sessions".format(
                vrid_list[0])
            failMsg(err)
            debug_vrrp()
            err_list.append(err)
            tc_result = False
    run_traffic(action='stop')
    if tc_result is False:
        st.report_fail('test_case_failure_message', err_list[0])
    else:
        st.report_pass('test_case_passed')
def cos_run_config():
    """Check that every TC-to-queue mapping (0..7) exists in running config.

    Reports a spytest failure ("queue_map_not_found") for the first queue
    whose TC_TO_QUEUE_MAP entry is missing; returns None when all are present.
    """
    for tc_value in range(8):
        mapped = sconf_obj.verify_running_config(vars.D1, "TC_TO_QUEUE_MAP",
                                                 data.cos_name, tc_value,
                                                 tc_value)
        if mapped:
            continue
        st.log("Queue {} mapping not found".format(tc_value))
        st.report_fail("queue_map_not_found", tc_value)
def test_vrrp2vrf_func_001(prologue_epilogue):
    """Verify core VRRP-over-VRF behavior: election, GARP, ARP reply, VIP route, traffic.

    TCs: FtOpSoRoVrrpvrfFn001..Fn005. Each failed step appends to err_list and
    clears tc_result; the first error is reported at the end.
    """
    tc_list = [
        "FtOpSoRoVrrpvrfFn001", "FtOpSoRoVrrpvrfFn002",
        "FtOpSoRoVrrpvrfFn003", "FtOpSoRoVrrpvrfFn004", "FtOpSoRoVrrpvrfFn005"
    ]
    tc_result = True
    err_list = []
    ###########################################################
    hdrMsg(
        "Step T1 : Verify VRRP Master/Backup election for all {} configured sessions"
        .format(vrrp_sessions))
    ############################################################
    result = verify_vrrp()
    if result is False:
        err = "VRRP Master/Backup election is incorrect for one or more VRRP sessions"
        st.report_fail('test_case_failure_message', err)
    ###########################################################
    hdrMsg(
        "Step T2 : For VRIDs {} Verify Master dut {} sent out Gratuitous ARP by "
        "checking vmac {} in backup mac table".format(vrid_list[0], data.dut1,
                                                      vmac_list[0]))
    ############################################################
    # check_mac here returns (result, err) — a 2-tuple.
    result, err = check_mac(data.dut2, vrrp_vlans, vmac_list,
                            [lag_intf_list[1]] * len(vrrp_vlans))
    if result is False:
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg("Step T4 : Disable/Enable VRRP sessions {} on dut1(Master)".format(
        vrid_list[0]))
    ############################################################
    for vrid, vlan, vip, prio, vmac in zip(vrid_list, vrrp_vlan_intf,
                                           vip_list, vrrp_priority_list_dut1,
                                           vmac_list_1):
        st.log(">>>> Disable/Enable VRRP session {} <<<<<".format(vrid))
        vrrp.configure_vrrp(data.dut1,
                            vrid=vrid,
                            interface=vlan,
                            config="no",
                            disable='')
        vrrp.configure_vrrp(data.dut1,
                            vrid=vrid,
                            vip=vip,
                            interface=vlan,
                            priority=prio,
                            config="yes",
                            enable='')
        st.log(
            "\nVerify dut1 elected as VRRP Master for VRID {} \n".format(vrid))
        result = verify_master_backup(vrid,
                                      vlan,
                                      vmac,
                                      vip,
                                      master_dut=data.dut1,
                                      backup_dut=data.dut2)
        if result is False:
            err = "Testcase {} dut1 not elected as VRRP Master for VRID {}".format(
                tc_list[0], vrid)
            failMsg(err)
            debug_vrrp()
            err_list.append(err)
            tc_result = False
    ###########################################################
    hdrMsg(
        "Step T5 : For VRIDs {} Verify Master dut {} sent out Gratuitous ARP after enabling vrrp by "
        "checking vmac {} in backup mac table".format(vrid_list[0], data.dut1,
                                                      vmac_list[0]))
    ############################################################
    result, err = check_mac(data.dut2, vrrp_vlans, vmac_list,
                            [lag_intf_list[1]] * len(vrrp_vlans))
    if result is False:
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg("Step T6 : Trigger ARP request for VIP {} and {} from TG".format(
        vip_list[0], vip_list[int(vrrp_sessions / 2)]))
    ############################################################
    result1 = data.tg1.tg_arp_control(
        handle=data.host_handles['vrrp_host_{}'.format(vrid_list[0])],
        arp_target='all')
    result2 = data.tg1.tg_arp_control(
        handle=data.host_handles['vrrp_host_{}'.format(vrid_list[int(
            vrrp_sessions / 2)])],
        arp_target='all')
    if result1['status'] == '0' or result2['status'] == '0':
        err = "Testcase: {} ARP resolve failed in TGEN".format(tc_list[1])
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg(
        "Step T7 : Verify only Master replied for ARP request by checking VMAC on dut3 pointing to Master dut"
    )
    ############################################################
    st.log(
        "Verify Vmac {} learnt on Vlan {} pointing to Master(dut1) interface {}"
        .format(vmac_list[0], vlan_list[0], lag_intf_list[0]))
    # NOTE(review): unlike Steps T2/T5, check_mac's return is used here as a
    # single value and compared with `is False` / `is True`. If check_mac
    # always returns a (result, err) tuple, these comparisons can never match
    # — confirm check_mac's return convention.
    result = check_mac(data.dut3, vlan_list, vmac_list, lag_intf_list)
    if result is False:
        err = "Testcase: {} On DUT3 Vmac {} not learnt on Vlan {} Interface {}".format(
            tc_list[1], vmac_list[0], vlan_list[0], lag_intf_list[0])
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    st.log(
        "Verify Vmac {} not learnt on Vlan {} pointing to Backup(dut2) interface {}"
        .format(vmac_list[0], vlan_list[0], lag_intf_list[1]))
    result = check_mac(data.dut3, vlan_list, vmac_list, lag_intf_list[1])
    if result is True:
        err = "Testcase: {} On DUT3 Vmac {} learnt on Vlan {} Interface {} pointing to backup".format(
            tc_list[3], vmac_list[0], vlan_list[0], lag_intf_list[0])
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    ###########################################################
    hdrMsg(
        "Step T8: Ping to all Virtual IPs {} from backup dut2 and verify VIP is installed in "
        "routing table with /32 subnet mask only on master".format(
            vip_list[0]))
    ############################################################
    for vip, vrid in zip(vip_list, vrid_list):
        result = ip_api.ping(data.dut2, vip, interface=vrrp_vrf, count=2)
        if result is False:
            err = "Testcase {} Ping to VIP {} failed from backup dut dut2".format(
                tc_list[2], vip)
            failMsg(err)
            debug_vrrp()
            err_list.append(err)
            tc_result = False
        # VIP must be installed as a /32 host route only on the Master (dut1).
        result = ip_api.verify_ip_route(data.dut1,
                                        ip_address="{}/32".format(vip),
                                        interface='vrrp.{}'.format(vrid),
                                        family='ipv4',
                                        vrf_name=vrrp_vrf)
        if result is False:
            err = "Testcase {} VIP {}/32 not installed in dut1(Master)routing table".format(
                tc_list[2], vip)
            failMsg(err)
            debug_vrrp()
            err_list.append(err)
            tc_result = False
        result = ip_api.verify_ip_route(data.dut2,
                                        ip_address="{}/32".format(vip),
                                        interface='vrrp.{}'.format(vrid),
                                        family='ipv4',
                                        vrf_name=vrrp_vrf)
        if result is True:
            err = "Testcase {} VIP {}/32 should not be installed in dut2(Backup)routing table".format(
                tc_list[4], vip)
            failMsg(err)
            debug_vrrp()
            err_list.append(err)
            tc_result = False
    ###########################################################
    hdrMsg(
        "Step T9:Verify Traffic gets forwarded for all VRRP sessions configured"
    )
    ###########################################################
    run_traffic()
    result = verify_tg_traffic_rate()
    if result is False:
        err = "Testcase {} data traffic not forwarded for VRIDs {}".format(
            tc_list[0], vrid_list)
        failMsg(err)
        debug_vrrp()
        err_list.append(err)
        tc_result = False
    run_traffic(action='stop')
    if tc_result is False:
        st.report_fail('test_case_failure_message', err_list[0])
    else:
        st.report_pass('test_case_passed')
def crm_config_verify():
    """Verify all CRM threshold configurations survive config save and reload.

    Iterates over every CRM resource family configured by the test setup and
    checks its threshold type / high / low values on DUT1. Reports
    "threshold_config_fail" on the first family that does not match.

    Refactor: ten copy-pasted verify stanzas collapsed into one data-driven
    loop; log messages and check order are preserved exactly.
    """
    # (log message, family, threshold type, high threshold, low threshold)
    checks = [
        ("Verifying CRM ACL table config after save and reload",
         data.acl_table_family, data.threshold_free_type,
         data.mode_high_free, data.mode_low_free),
        ("Verifying CRM IPv4 route family config after save and reload",
         data.ipv4_route_family, data.threshold_percentage_type,
         data.mode_high_percentage, data.mode_low_percentage),
        ("Verifying CRM IPv6 route family config after save and reload",
         data.ipv6_route_family, data.threshold_percentage_type,
         data.mode_high_percentage, data.mode_low_percentage),
        ("Verifying CRM FDB config after save and reload",
         data.fdb_family, data.threshold_used_type,
         data.mode_high_used, data.mode_low_used),
        ("Verifying CRM IPv4 neighbor route family config after save and reload",
         data.ipv4_neighbor_family, data.threshold_percentage_type,
         data.mode_high_percentage, data.mode_low_percentage),
        ("Verifying CRM IPv6 neighbor route family config after save and reload",
         data.ipv6_neighbor_family, data.threshold_percentage_type,
         data.mode_high_percentage, data.mode_low_percentage),
        ("Verifying CRM ACL group entry family config after save and reload",
         data.acl_group_entry_family, data.threshold_percentage_type,
         data.mode_high_percentage, data.mode_low_percentage),
        ("Verifying CRM IPv6 nexthop family config after save and reload",
         data.ipv6_nexthop_family, data.threshold_percentage_type,
         data.mode_high_percentage, data.mode_low_percentage),
        ("Verifying CRM IPv4 nexthop family config after save and reload",
         data.ipv4_nexthop_family, data.threshold_percentage_type,
         data.mode_high_percentage, data.mode_low_percentage),
        ("Verifying CRM ACL group counter family config after save and reload",
         data.acl_group_counter_family, data.threshold_percentage_type,
         data.mode_high_percentage, data.mode_low_percentage),
    ]
    for message, family, threshold_type, high, low in checks:
        st.log(message)
        if not crm_obj.verify_crm_thresholds(vars.D1,
                                             family=family,
                                             thresholdtype=threshold_type,
                                             highthreshold=high,
                                             lowthreshold=low):
            st.report_fail("threshold_config_fail")
def test_convergence_l3_scale(evpn_underlay_hooks):
    """Measure L3 traffic convergence at scale across a list of triggers.

    For each trigger in trigger_list, runs data.iteration_count convergence
    measurements, fails the testcase when a measurement is missing (None) or
    exceeds data.threshold, and tabulates all results at the end. For
    'uplink' triggers, all but one uplink per leaf is shut before the
    measurements and restored afterwards.
    """
    func_result = True
    err_list = []
    data['table'] = list()
    tgen_emulate_bgp()
    data.config_tgen_bgp = True
    data.tc_list = ['scale']
    # Uplink member ports toggled around 'uplink' triggers (leave one uplink
    # per leaf/spine pair up).
    leaf1_uplinks = [
        evpn_dict["leaf1"]["intf_list_spine"][0],
        evpn_dict["leaf1"]["intf_list_spine"][3],
        evpn_dict["leaf1"]["intf_list_spine"][4],
        evpn_dict["leaf1"]["intf_list_spine"][7]
    ]
    leaf2_uplinks = [
        evpn_dict["leaf2"]["intf_list_spine"][0],
        evpn_dict["leaf2"]["intf_list_spine"][3],
        evpn_dict["leaf2"]["intf_list_spine"][4],
        evpn_dict["leaf2"]["intf_list_spine"][7]
    ]
    for tc in data.tc_list:
        tc_result = True
        ############################################
        st.banner('Testcase - {}'.format(tc))
        ############################################
        data['table_{}'.format(tc)] = list()
        for trigger in trigger_list:
            # Collect a tech-support bundle at most once per trigger.
            tech_support = True
            ##########################################################
            st.banner('Testcase -{} : Trigger - {}'.format(tc, trigger))
            ###########################################################
            data[trigger] = {}
            data['table_{}'.format(trigger)] = [tc, trigger]
            if 'uplink' in trigger:
                st.log(
                    "\n\n>>> Keep only one uplink port between Leaf and Spine nodes <<<<\n\n"
                )
                st.exec_all([[
                    port_api.shutdown, evpn_dict['leaf_node_list'][0],
                    leaf1_uplinks
                ],
                             [
                                 port_api.shutdown,
                                 evpn_dict['leaf_node_list'][1], leaf2_uplinks
                             ]])
            # Renamed from 'iter' to avoid shadowing the builtin.
            for iteration in range(data.iteration_count):
                ##################################################
                st.banner('Testcase -{} : Trigger - {},Iteration -{}'.format(
                    tc, trigger, (iteration + 1)))
                ###################################################
                convergence_time = convergence_measure(
                    tc,
                    trigger=trigger,
                    streams=stream_dict[tc],
                    iteration=(iteration + 1))
                key = 'convergence_{}'.format(iteration)
                # convergence_measure returns False on measurement failure;
                # record None so the failure path below triggers.
                if convergence_time is False:
                    data[trigger][key] = None
                else:
                    data[trigger][key] = float(convergence_time)
                # BUG FIX: the None check must come BEFORE the threshold
                # comparison — the original evaluated `None > data.threshold`
                # first, which raises TypeError on Python 3.
                if data[trigger][key] is None or data[trigger][
                        key] > data.threshold:
                    err = "Average Traffic convergence after {} : {} sec".format(
                        trigger, data[trigger][key])
                    st.error(err)
                    st.report_tc_fail(tc, 'test_case_failure_message', err)
                    if tech_support:
                        st.generate_tech_support(dut=None,
                                                 name='{}_{}_{}'.format(
                                                     tc, trigger, iteration))
                        tech_support = False
                    tc_result = False
                    err_list.append(err)
                    func_result = False
                revert_trigger_change(trigger, iteration=(iteration + 1))
                data['table_{}'.format(trigger)].append(data[trigger][key])
            get_average_convergence(data[trigger], trigger)
            if 'uplink' in trigger:
                st.log(
                    ">>> \n\nBring back all uplink ports between Leaf and SPine nodes <<<<\n\n"
                )
                st.exec_all([[
                    port_api.noshutdown, evpn_dict['leaf_node_list'][0],
                    leaf1_uplinks
                ],
                             [
                                 port_api.noshutdown,
                                 evpn_dict['leaf_node_list'][1], leaf2_uplinks
                             ]])
            if 'link_down_uplink' not in trigger:
                st.log("verify BGP EVPN neighborship for all nodes ")
                st.exec_all([[leaf1_verify_evpn], [leaf2_verify_evpn]])
            if tc_result:
                st.report_tc_pass(tc, 'test_case_passed')
            data['table_{}'.format(tc)].append(
                data['table_{}'.format(trigger)])
        # Append each testcase along with all trigger result to data.table
        data['table'].append(data['table_{}'.format(tc)])
    # Tabulate results
    tabulate_results(data['table'])
    if not func_result:
        st.report_fail('test_case_failure_message', err_list[0])
    st.report_pass('test_case_passed')
def test_ft_sf_all_buffer_stats_using_unicast_traffic():
    """
    Author : prudviraj k ([email protected]) and phani kumar ravula([email protected])

    Validate all snapshot (watermark) buffer counters with unicast traffic:
    PG-shared and unicast-queue counters for user watermark, persistent
    watermark and COUNTERS DB, followed by the corresponding clear operations.

    NOTE(review): after traffic is started, ``sfapi.verify`` succeeding against
    ``sf_data.initial_counter_value`` means the counter did NOT increment, so a
    True return there is treated as a failure (the checks are intentionally
    inverted).  In the clear sections ``if not sfapi.verify`` checks that the
    counters really went back to the initial value.
    """
    result = 0        # overall failure count for the final verdict
    per_result = 0    # failures within the uwm unicast-queue sub-testcase
    clr_result = 0    # failures within the uwm queue-clear sub-testcase
    if not sfapi.config_snapshot_interval(
            vars.D1, snap="interval", interval_val=sf_data.snapshot_interval):
        st.error("Failed to configure snapshot interval")
        result += 1
    match = [{'snapshotinterval': sf_data.snapshot_interval}]
    if not sfapi.verify(vars.D1, 'snapshot_interval', verify_list=match):
        st.error("Failed to verify the configured snapshot interval")
        result += 1
    st.log("configuring the QOS maps")
    if not cos_api.config_dot1p_to_tc_map(vars.D1, sf_data.obj_name[0],
                                          sf_data.dot1p_to_tc_map_dict):
        st.error("Failed to configure qos map of type dot1p to tc")
    if not cos_api.config_tc_to_pg_map(vars.D1, sf_data.obj_name[1],
                                       sf_data.tc_to_pg_map_dict):
        st.error("Failed to configure qos map of type tc to pg")
    if not cos_api.verify_qos_map_table(
            vars.D1, 'dot1p_to_tc_map', sf_data.obj_name[0], {
                '0': '0',
                '1': '1',
                '2': '2',
                '3': '3',
                '4': '4',
                '5': '5',
                '6': '6',
                '7': '7'
            }):
        st.error("Failed to verify configured dot1p to tc map values")
        result += 1
    if not cos_api.verify_qos_map_table(
            vars.D1, 'tc_to_pg_map', sf_data.obj_name[1], {
                '0': '7',
                '1': '7',
                '2': '7',
                '3': '7',
                '4': '7',
                '5': '7',
                '6': '7',
                '7': '7'
            }):
        st.error("Failed to verify configured tc to pg map values")
        result += 1
    if not cos_api.config_port_qos_map_all(vars.D1, sf_data.dot1p_tc_bind_map):
        st.error(
            "Failed to bind the configured qos map of type dot1p to tc on interface"
        )
    if not cos_api.config_port_qos_map_all(vars.D1, sf_data.tc_pg_bind_map):
        st.error(
            "Failed to bind the configured qos map of type tc to pg on interface"
        )
    sf_tg_traffic_start_stop(sf_data.unicast, True)
    # Let at least two snapshot intervals elapse so watermarks are refreshed.
    st.wait(2 * sf_data.snapshot_interval)
    st.banner('#### PG_shared_for_user_watermark####')
    st.banner('TC name :::: ft_sf_pg_shared_using_uwm ::::')
    match = [{'pg7': sf_data.initial_counter_value}]
    if sfapi.verify(vars.D1,
                    'user_watermark_PG_shared',
                    verify_list=match,
                    port_alias=vars.D1T1P1):
        st.error("Failed to verify the user_watermark_PG_shared counter value")
        result += 1
        st.report_tc_fail("ft_sf_pg_shared_using_uwm", "snapshot_tc_verify",
                          "PG_shared_for_user_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_pg_shared_using_uwm", "snapshot_tc_verify",
                          "PG_shared_for_user_watermark", "successful")
    st.banner('####verification_of_PG_shared_using_counter_DB####')
    st.banner('TC name :::: ft_sf_pg_shared_using_Counter_DB ::::')
    match = [{
        'SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES':
        sf_data.initial_counter_value
    }]
    if sfapi.verify(vars.D1,
                    column_name="COUNTERS_PG_NAME_MAP",
                    interface_name=vars.D1T1P1,
                    queue_value=7,
                    table_name="COUNTERS",
                    verify_list=match):
        st.error(
            "Failed to verify the user_watermark_PG_shared counter DB value")
        result += 1
        st.report_tc_fail("ft_sf_pg_shared_using_Counter_DB",
                          "snapshot_tc_counter_DB_verify", "PG_shared",
                          "failed")
    else:
        st.report_tc_pass("ft_sf_pg_shared_using_Counter_DB",
                          "snapshot_tc_counter_DB_verify", "PG_shared",
                          "successful")
    st.banner('TC name:::: ft_sf_queue_unicast_using_uwm ::::')
    match = [{'uc0': sf_data.initial_counter_value}]
    st.banner(
        '#### queue_unicast_for_user_watermark using percentage values####')
    if sfapi.verify(vars.D1,
                    'queue_user_watermark_unicast',
                    verify_list=match,
                    port_alias=vars.D1T1P4,
                    percentage=sf_data.percentage[0]):
        st.error(
            "Failed to verify the queue_user_watermark_unicast counter value using percentage"
        )
        result += 1
        per_result += 1
    st.banner('#### queue_unicast_for_user_watermark using CLI####')
    if sfapi.verify(vars.D1,
                    'queue_user_watermark_unicast',
                    verify_list=match,
                    port_alias=vars.D1T1P4):
        st.error(
            "Failed to verify the queue_user_watermark_unicast counter value")
        result += 1
        per_result += 1
    if per_result:
        st.report_tc_fail("ft_sf_queue_unicast_using_uwm",
                          "snapshot_tc_verify",
                          "queue_unicast_for_user_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_queue_unicast_using_uwm",
                          "snapshot_tc_verify",
                          "queue_unicast_for_user_watermark", "successful")
    st.banner(
        '####verification_of_queue_unicast_for_user_watermark_using_counter_DB####'
    )
    st.banner('TC name:::: ft_sf_queue_unicast_using_Counter_DB ::::')
    match = [{
        'SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES': sf_data.initial_counter_value
    }]
    if sfapi.verify(vars.D1,
                    column_name="COUNTERS_QUEUE_NAME_MAP",
                    interface_name=vars.D1T1P4,
                    queue_value=0,
                    table_name="COUNTERS",
                    verify_list=match):
        st.error(
            "Failed to verify the queue_user_watermark_unicast counter DB value"
        )
        result += 1
        st.report_tc_fail("ft_sf_queue_unicast_using_Counter_DB",
                          "snapshot_tc_counter_DB_verify", "queue_unicast",
                          "failed")
    else:
        st.report_tc_pass("ft_sf_queue_unicast_using_Counter_DB",
                          "snapshot_tc_counter_DB_verify", "queue_unicast",
                          "successful")
    st.banner('#### PG_shared_for_persistent_watermark####')
    st.banner('TC name :::: ft_sf_pg_shared_using_persistent_wm ::::')
    match = [{'pg7': sf_data.initial_counter_value}]
    if sfapi.verify(vars.D1,
                    'persistent_PG_shared',
                    verify_list=match,
                    port_alias=vars.D1T1P2):
        st.error(
            "Failed to verify the persistent_watermark_PG_shared counter value"
        )
        result += 1
        st.report_tc_fail("ft_sf_pg_shared_using_persistent_wm",
                          "snapshot_tc_verify",
                          "PG_shared_for_persistent_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_pg_shared_using_persistent_wm",
                          "snapshot_tc_verify",
                          "PG_shared_for_persistent_watermark", "successful")
    st.banner('#### queue_unicast_for_persistent_watermark ####')
    st.banner('TC name :::: ft_sf_queue_unicast_using_persistent_wm ::::')
    match = [{'uc0': sf_data.initial_counter_value}]
    if sfapi.verify(vars.D1,
                    'queue_persistent_watermark_unicast',
                    verify_list=match,
                    port_alias=vars.D1T1P4):
        st.error(
            "Failed to verify the queue_persistent_watermark_unicast counter value"
        )
        result += 1
        st.report_tc_fail("ft_sf_queue_unicast_using_persistent_wm",
                          "snapshot_tc_verify",
                          "queue_unicast_for_persistent_watermark", "failed")
    else:
        st.report_tc_pass("ft_sf_queue_unicast_using_persistent_wm",
                          "snapshot_tc_verify",
                          "queue_unicast_for_persistent_watermark",
                          "successful")
    sf_tg_traffic_start_stop(sf_data.unicast, False)
    st.wait(2 * sf_data.snapshot_interval)
    st.banner('#### clear_PG_shared_for_user_watermark####')
    st.banner('TC name :::: ft_sf_pg_shared_clear_using_uwm ::::')
    if not sfapi.config_snapshot_interval(vars.D1,
                                          snap="clear_snapshot_counters",
                                          group=sf_data.group[0],
                                          table=sf_data.table[0],
                                          counter_type=sf_data.PG[0]):
        st.error(
            "Failed to execute the command clear {} snapshot counters".format(
                sf_data.group[0]))
        result += 1
    match = [{'pg0': sf_data.initial_counter_value}]
    if not sfapi.verify(vars.D1,
                        'user_watermark_PG_shared',
                        verify_list=match,
                        port_alias=vars.D1T1P1):
        st.error("Failed to clear the snapshot counters")
        result += 1
        st.report_tc_fail(
            "ft_sf_pg_shared_clear_using_uwm", "snapshot_clear_verify",
            "clearing the PG shared counters for user_watermark", "failed")
    else:
        st.report_tc_pass(
            "ft_sf_pg_shared_clear_using_uwm", "snapshot_clear_verify",
            "clearing the PG shared counters for user watermark", "successful")
    st.banner('TC name :::: ft_sf_queue_unicast_clear_using_uwm ::::')
    if not sfapi.config_snapshot_interval(vars.D1,
                                          snap="clear_snapshot_counters",
                                          group=sf_data.group[1],
                                          table=sf_data.table[0],
                                          counter_type=sf_data.PG[2]):
        # BUGFIX: report the group that was actually cleared (group[1]),
        # not group[0] as before (copy-paste error in the log message).
        st.error(
            "Failed to execute the command clear {} snapshot counters".format(
                sf_data.group[1]))
        result += 1
    match = [{'uc0': sf_data.initial_counter_value}]
    st.banner(
        '#### clear_queue_unicast_percentage_Values_for_user_watermark ####')
    if not sfapi.verify(vars.D1,
                        'queue_user_watermark_unicast',
                        verify_list=match,
                        port_alias=vars.D1T1P4,
                        percentage=sf_data.percentage[1]):
        st.error("Failed to clear percentage snapshot counters")
        result += 1
        clr_result += 1
    st.banner('#### clear_queue_unicast_for_user_watermark using CLI####')
    if not sfapi.verify(vars.D1,
                        'queue_user_watermark_unicast',
                        verify_list=match,
                        port_alias=vars.D1T1P4):
        st.error("Failed to clear the snapshot counters")
        result += 1
        clr_result += 1
    if clr_result:
        st.report_tc_fail(
            "ft_sf_queue_unicast_clear_using_uwm", "snapshot_clear_verify",
            "clearing the unicast queue counters for user watermark", "failed")
    else:
        st.report_tc_pass(
            "ft_sf_queue_unicast_clear_using_uwm", "snapshot_clear_verify",
            "clearing the unicast queue counters for user watermark",
            "successful")
    st.banner('#### clear_PG_shared_for_persistent_watermark ####')
    st.banner('TC name :::: ft_sf_pg_shared_clear_using_persistent_wm ::::')
    if not sfapi.config_snapshot_interval(vars.D1,
                                          snap="clear_snapshot_counters",
                                          group=sf_data.group[0],
                                          table=sf_data.table[1],
                                          counter_type=sf_data.PG[0]):
        st.error(
            "Failed to execute the command clear {} snapshot counters".format(
                sf_data.group[0]))
        result += 1
    match = [{'pg0': sf_data.initial_counter_value}]
    if not sfapi.verify(vars.D1,
                        'persistent_PG_shared',
                        verify_list=match,
                        port_alias=vars.D1T1P2):
        st.error("Failed to clear the snapshot counters")
        result += 1
        st.report_tc_fail(
            "ft_sf_pg_shared_clear_using_persistent_wm",
            "snapshot_clear_verify",
            "clearing the PG shared counters for persistent watermark",
            "failed")
    else:
        st.report_tc_pass(
            "ft_sf_pg_shared_clear_using_persistent_wm",
            "snapshot_clear_verify",
            "clearing the PG shared counters for persistent watermark",
            "successful")
    st.banner('#### clear_queue_unicast_for_persistent_watermark ####')
    st.banner(
        'TC name :::: ft_sf_queue_unicast_clear_using_persistent_wm ::::')
    if not sfapi.config_snapshot_interval(vars.D1,
                                          snap="clear_snapshot_counters",
                                          group=sf_data.group[1],
                                          table=sf_data.table[1],
                                          counter_type=sf_data.PG[2]):
        # BUGFIX: log the cleared group (group[1]) instead of group[0].
        st.error(
            "Failed to execute the command clear {} snapshot counters".format(
                sf_data.group[1]))
        result += 1
    match = [{'uc0': sf_data.initial_counter_value}]
    if not sfapi.verify(vars.D1,
                        'queue_persistent_watermark_unicast',
                        verify_list=match,
                        port_alias=vars.D1T1P4):
        st.error("Failed to clear the snapshot counters")
        result += 1
        st.report_tc_fail(
            "ft_sf_queue_unicast_clear_using_persistent_wm",
            "snapshot_clear_verify",
            "clearing the unicast queue counters for persistent watermark",
            "failed")
    else:
        st.report_tc_pass(
            "ft_sf_queue_unicast_clear_using_persistent_wm",
            "snapshot_clear_verify",
            "clearing the unicast queue counters for persistent watermark",
            "successful")
    clear_qos_map_config()
    if not result:
        st.report_pass("snapshot_all_buffer_counters", "unicast", "successful")
    else:
        sf_collecting_debug_logs_when_test_fails()
        st.report_fail("snapshot_all_buffer_counters", "unicast", "failed")
def no_test_convergence_orphan_traffic(evpn_underlay_hooks):
    """Measure convergence for orphan traffic while all uplinks on the active
    MLAG node are shut, over ``data.iteration_count`` iterations; tabulate the
    results and fail if any measurement exceeds ``data.threshold`` or cannot
    be taken at all.  (Prefixed ``no_`` so the runner skips it by default.)
    """
    # st.log("create static ARP in DUT4 for DUT3's orphan traffic")
    # Arp.add_static_arp(evpn_dict["mlag_node_list"][1], evpn_dict["leaf3"]["v4_prefix"][0], evpn_dict["orphan_mac"],
    #                    interface=evpn_dict["leaf1"]["iccpd_pch_intf_list"][0])
    if data.config_tgen_bgp is False:
        tgen_emulate_bgp()
        data.config_tgen_bgp = True
    tech_support = True
    func_result = True
    err_list = []
    data['table'] = list()
    trigger_list = ['shut_all_uplinks_active']
    data.tc_list = ['orphan_traffic']
    for tc in data.tc_list:
        tc_result = True
        ############################################
        st.banner('Testcase - {}'.format(tc))
        ############################################
        data['table_{}'.format(tc)] = list()
        for trigger in trigger_list:
            ##########################################################
            st.banner('Testcase -{} : Trigger - {}'.format(tc, trigger))
            ###########################################################
            data[trigger] = {}
            data['table_{}'.format(trigger)] = [tc, trigger]
            for itr in range(data.iteration_count):
                ##################################################
                st.banner('Testcase -{} : Trigger - {},Iteration -{}'.format(
                    tc, trigger, (itr + 1)))
                ###################################################
                convergence_time = convergence_measure(tc,
                                                       trigger=trigger,
                                                       streams=stream_dict[tc],
                                                       iteration=(itr + 1))
                key = 'convergence_{}'.format(itr)
                # A boolean False means the measurement itself failed; store
                # None so the check below reports it as a failure.
                if type(convergence_time) is bool and convergence_time is False:
                    data[trigger][key] = None
                else:
                    data[trigger][key] = float(convergence_time)
                # BUGFIX: check "is None" BEFORE the numeric comparison --
                # "None > data.threshold" raises TypeError on Python 3, which
                # previously crashed the test instead of reporting the failure.
                if data[trigger][key] is None or data[trigger][key] > data.threshold:
                    err = "Average Traffic convergence after {} : {} sec".format(
                        trigger, data[trigger][key])
                    st.error(err)
                    st.report_tc_fail(tc, 'test_case_failure_message', err)
                    if tech_support:
                        st.generate_tech_support(
                            dut=None, name='test_convergence_on_fail')
                        tech_support = False
                    tc_result = False
                    err_list.append(err)
                    func_result = False
                table_append = data[trigger][key]
                data['table_{}'.format(trigger)].append(table_append)
            get_average_convergence(data[trigger], trigger)
            if tc_result:
                st.report_tc_pass(tc, 'test_case_passed')
            data['table_{}'.format(tc)].append(
                data['table_{}'.format(trigger)])
        # Append each testcase along with all trigger results to data.table
        data['table'].append(data['table_{}'.format(tc)])
    # Tabulate results
    tabulate_results(data['table'])
    if not func_result:
        st.report_fail('test_case_failure_message', err_list[0])
    st.report_pass('test_case_passed')
def ensure_device_ipaddress():
    """Cache DUT1's eth0 management addresses in ``data``.

    Stores the full inet address list in ``data.ip_address_list`` and the
    first entry in ``data.ip_address``; aborts the test run via
    ``st.report_fail`` when the device has no IP address.
    """
    data.ip_address_list = basic_obj.get_ifconfig_inet(vars.D1, 'eth0')
    # An empty list means eth0 carries no inet address at all.
    if not data.ip_address_list:
        st.report_fail("DUT_does_not_have_IP_address")
    data.ip_address = data.ip_address_list[0]