def setUpClass(cls):
    super(PerfBase, cls).setUpClass()
    cls.nova_h = cls.connections.nova_h
    cls.orch = cls.connections.orch
    cls.vnc_lib_fixture = cls.connections.vnc_lib_fixture
    cls.results = 'logs/results_%s' % random.randint(0, 100000)
    # 'aw' is not a valid file mode; 'a' opens the results file for appending.
    cls.results_file = open(cls.results, "a")
    cls.encap_type = ["MPLSoUDP", "MPLSoGRE"]
    cls.spirent_linux_user = cls.inputs.spirent_linux_username
    cls.spirent_linux_passwd = cls.inputs.spirent_linux_password
    cls.ixia_linux_user = cls.inputs.ixia_linux_username
    cls.ixia_linux_passwd = cls.inputs.ixia_linux_password
    cls.ixia_linux_host = cls.inputs.ixia_linux_host_ip
    cls.ixia_host = cls.inputs.ixia_host_ip
    cls.spirent_linux_host = cls.inputs.spirent_linux_host_ip
    cls.mx1_ip = cls.inputs.ixia_mx_ip
    cls.mx2_ip = cls.inputs.spirent_mx_ip
    cls.mx_user = cls.inputs.ixia_mx_username
    cls.mx_password = cls.inputs.ixia_mx_password
    cls.rt = {'ixia': ['2000', '3000'], 'spirent': ['1', '2']}
    cls.family = ''
    cls.dpdk_svc_scaling = False
    cls.nova_flavor = {'2': 'contrail_perf_2cpu', '3': 'contrail_perf_3cpu',
                       '4': 'contrail_perf_4cpu', '8': 'contrail_perf_8cpu'}
    cls.vrouter_mask_list = ['0xff', '0x3f', '0xff', '0xf000f']
    cls.mx1_handle = NetconfConnection(host=cls.mx1_ip,
                                       username=cls.mx_user,
                                       password=cls.mx_password)
    cls.mx1_handle.connect()
    cls.mx2_handle = NetconfConnection(host=cls.mx2_ip,
                                       username=cls.mx_user,
                                       password=cls.mx_password)
    cls.mx2_handle.connect()
    cls.host_cpu = {}
    cls.host = None
    cls.cpu_intr_mask = '80'
    cls.perf_si_fixtures = []
    cls.update_hosts()
    cls.create_availability_zone()
    cls.nova_flavor_key_delete()
def remove_mx_group_config(cls):
    if cls.inputs.ext_routers:
        # list() wrap so the dict_values view is indexable on Python 3.
        router_params = list(cls.inputs.physical_routers_data.values())[0]
        cmd = ['delete groups md5_tests',
               'delete apply-groups md5_tests']
        mx_handle = NetconfConnection(host=router_params['mgmt_ip'])
        mx_handle.connect()
        cli_output = mx_handle.config(stmts=cmd, timeout=120)
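# A minimal sketch of the connect/config/disconnect pattern repeated by the
# Netconf helpers throughout this section. The helper name
# push_netconf_config is hypothetical; it assumes only the NetconfConnection
# class already used in this file, and passes any credentials through
# unchanged via kwargs (as the calls above and below do).
def push_netconf_config(host, stmts, timeout=120, **kwargs):
    handle = NetconfConnection(host=host, **kwargs)
    handle.connect()
    try:
        # config() pushes the given set/delete statements as one commit.
        return handle.config(stmts=stmts, timeout=timeout)
    finally:
        # Always release the NETCONF session, even if the commit fails.
        handle.disconnect()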
def setUpClass(cls):
    '''
    Set up a topology where each agent is connected to only one control
    node, so that routes are learned from the other agent via BGP.
    '''
    super(TestLlgrBase, cls).setUpClass()
    cls.cn_introspect = ControlNodeInspect(cls.inputs.bgp_ips[0])
    cls.host_list = cls.connections.orch.get_hosts()
    if len(cls.host_list) > 1 and len(cls.inputs.bgp_ips) > 1:
        cls.set_xmpp_peering(
            compute_ip=cls.inputs.host_data[cls.host_list[0]]['host_control_ip'],
            ctrl_node=cls.inputs.bgp_ips[0], mode='disable')
        cls.set_xmpp_peering(
            compute_ip=cls.inputs.host_data[cls.host_list[1]]['host_control_ip'],
            ctrl_node=cls.inputs.bgp_ips[1], mode='disable')
    if cls.inputs.ext_routers:
        cls.mx1_ip = cls.inputs.ext_routers[0][1]
        # TODO: remove the hard coding once these parameters are
        # populated from the testbed file.
        cls.mx_user = '******'
        cls.mx_password = '******'
        cls.mx1_handle = NetconfConnection(host=cls.mx1_ip,
                                           username=cls.mx_user,
                                           password=cls.mx_password)
        cls.mx1_handle.connect()
    time.sleep(20)
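# A possible tearDownClass counterpart for the setup above. This is a sketch,
# not part of the original suite; it assumes set_xmpp_peering() accepts
# mode='enable' to undo the 'disable' calls made in setUpClass.
def tearDownClass(cls):
    if len(cls.host_list) > 1 and len(cls.inputs.bgp_ips) > 1:
        for idx in (0, 1):
            # Re-enable the XMPP peerings that setUpClass disabled.
            cls.set_xmpp_peering(
                compute_ip=cls.inputs.host_data[cls.host_list[idx]]['host_control_ip'],
                ctrl_node=cls.inputs.bgp_ips[idx], mode='enable')
    if cls.inputs.ext_routers:
        # Close the NETCONF session opened in setUpClass.
        cls.mx1_handle.disconnect()
    super(TestLlgrBase, cls).tearDownClass()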
def setUpClass(cls):
    super(PerfBaseIxia, cls).setUpClass()
    cls.nova_h = cls.connections.nova_h
    cls.orch = cls.connections.orch
    cls.vnc_lib_fixture = cls.connections.vnc_lib_fixture
    cls.encap_type = ["MPLSoUDP", "MPLSoGRE"]
    cls.spirent_linux_user = '******'
    cls.spirent_linux_passwd = 'n1keenA'
    cls.family = ''
    cls.ixia_linux_host = '10.87.132.179'
    cls.ixia_host = '10.87.132.18'
    cls.spirent_linux_host = '10.87.132.185'
    cls.set_cpu_cores = 4
    cls.set_si = 1
    cls.dpdk_svc_scaling = False
    cls.nova_flavor = {'2': 'perf_flavor_2', '3': 'perf_flavor_3',
                       '4': 'perf_flavor_4', '8': 'perf_flavor_8'}
    #cls.image_flavor = {'1': 'perf-ubuntu-1404', '2': 'ubuntu-perf-multique-2', '3': 'dpdk-l2-no-delay', '4': 'ubuntu-perf-multique-4', '5': 'perf-ubuntu-netronome'}
    cls.image_flavor = {'1': 'perf-ubuntu-1404',
                        '2': 'ubuntu-perf-multique-2',
                        '3': 'dpdk_l2fwd_sleep3',
                        '4': 'ubuntu-perf-multique-4',
                        '5': 'dpdk-l2-no-delay-new',
                        '6': 'tiny_in-net',
                        '7': 'dpdk-l3fwd-mq-2',
                        # NOTE: duplicate key; this entry is shadowed by
                        # the netronome entry on the next line.
                        '8': 'perf-ubuntu-1404-v6-2',
                        '8': 'perf-ubuntu-netronome',
                        '9': 'DPDK-l2fwd-virtio-new'}
    cls.vrouter_mask_list = ['0xf', '0x3f', '0xff', '0xf000f']
    cls.mx1_ip = '10.87.64.246'
    cls.mx2_ip = '10.87.140.181'
    cls.mx_user = '******'
    cls.mx_password = '******'
    cls.mx1_handle = NetconfConnection(host=cls.mx1_ip, username=cls.mx_user,
                                       password=cls.mx_password)
    cls.mx1_handle.connect()
    cls.mx2_handle = NetconfConnection(host=cls.mx2_ip, username=cls.mx_user,
                                       password=cls.mx_password)
    cls.mx2_handle.connect()
def cleanup_mx(self, mx_ip, cmds):
    mx_handle = NetconfConnection(host=mx_ip)
    mx_handle.connect()
    cli_output = mx_handle.config(stmts=cmds, timeout=120)
    mx_handle.disconnect()
    assert 'failed' not in cli_output, "Not able to push config to mx"
def remove_routes_mx_side(self, logicalsystem):
    '''
    Remove route entries from the routing table by deactivating the
    logical system and then activating it back.
    '''
    deactivate_cmd = 'deactivate logical-systems ' + logicalsystem
    activate_cmd = 'activate logical-systems ' + logicalsystem
    cmds = [[deactivate_cmd], [activate_cmd]]
    mx_params = list(self.inputs.physical_routers_data.values())[0]
    nhLS_netconf = NetconfConnection(mx_params['mgmt_ip'])
    nhLS_netconf.connect()
    for stmts in cmds:
        nhLS_netconf.config(stmts=stmts, timeout=120)
    nhLS_netconf.disconnect()
def enable_snooping(self):
    tors_info_list = self.get_available_devices('tor')
    tor_params = tors_info_list[0]
    mgmt_ip = tor_params['mgmt_ip']
    cmd = ['activate protocols igmp-snooping',
           'activate groups __contrail_overlay_evpn_ucast_gateway__ protocols igmp-snooping']
    mx_handle = NetconfConnection(host=mgmt_ip)
    mx_handle.connect()
    time.sleep(30)
    cli_output = mx_handle.config(stmts=cmd, timeout=120)
    time.sleep(30)
    mx_handle.disconnect()
def provision_mx(self, device_ip, cli_cmds, cleanup_cmds, ignore_errors=False):
    mx_handle = NetconfConnection(host=device_ip)
    mx_handle.connect()
    cli_output = mx_handle.config(stmts=cli_cmds,
                                  ignore_errors=ignore_errors, timeout=120)
    mx_handle.disconnect()
    assert cli_output[0], "Not able to push config to mx"
    self.addCleanup(self.cleanup_mx, device_ip, cleanup_cmds)
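# Usage sketch for provision_mx(): paired apply/cleanup statement lists, with
# the cleanup registered via addCleanup() so it runs even if the test fails.
# The group name 'perf_tests' and the device IP are illustrative values only,
# not taken from any testbed file.
#
#   self.provision_mx(
#       '10.0.0.1',
#       cli_cmds=['set groups perf_tests routing-options autonomous-system 64512',
#                 'set apply-groups perf_tests'],
#       cleanup_cmds=['delete apply-groups perf_tests',
#                     'delete groups perf_tests'])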
def cleanup_data_sw(self, ip):
    '''
    Cleanup configs done on data s/w.
    '''
    handle = NetconfConnection(host=ip, username='******', password='******')
    handle.connect()
    # NOTE: within a single commit the 'set ... disable' statements below are
    # undone by the 'delete ... disable' statements that follow, so the net
    # effect of this cleanup is to re-enable the bond interfaces.
    cmd = []
    for bond_interface in self.inputs.data_sw_compute_bond_interface:
        cmd.append('set interfaces ' + bond_interface + ' disable')
    for bond_interface in self.inputs.data_sw_compute_bond_interface:
        cmd.append('delete interfaces ' + bond_interface + ' disable')
    cli_output = handle.config(stmts=cmd, timeout=120)
    assert 'failed' not in cli_output, "Not able to push config."
    handle.disconnect()
def config_basic(self, check_dm):
    # MX config using device manager. Both the dm_mx and use_device_manager
    # knobs are required for DM; that check is done in is_test_applicable.
    if check_dm:
        if self.inputs.use_devicemanager_for_md5:
            for router_params in self.inputs.dm_mx.values():
                if router_params['model'] == 'mx':
                    self.phy_router_fixture = self.useFixture(
                        PhysicalRouterFixture(
                            router_params['name'],
                            router_params['control_ip'],
                            model=router_params['model'],
                            vendor=router_params['vendor'],
                            asn=router_params['asn'],
                            ssh_username=router_params['ssh_username'],
                            ssh_password=router_params['ssh_password'],
                            mgmt_ip=router_params['control_ip'],
                            connections=self.connections,
                            dm_managed=True))
                    physical_dev = self.vnc_lib.physical_router_read(
                        id=self.phy_router_fixture.phy_device.uuid)
                    physical_dev.set_physical_router_management_ip(
                        router_params['mgmt_ip'])
                    # NOTE: this bare attribute access has no effect and
                    # looks like leftover debugging code.
                    physical_dev._pending_field_updates
                    self.vnc_lib.physical_router_update(physical_dev)
    else:
        if self.inputs.ext_routers:
            for router_params in self.inputs.physical_routers_data.values():
                if router_params['model'] != 'mx':
                    continue
                cmd = [
                    'set groups md5_tests routing-options router-id %s' % router_params['mgmt_ip'],
                    'set groups md5_tests routing-options route-distinguisher-id %s' % router_params['mgmt_ip'],
                    'set groups md5_tests routing-options autonomous-system %s' % router_params['asn'],
                    'set groups md5_tests protocols bgp group md5_tests type internal',
                    'set groups md5_tests protocols bgp group md5_tests multihop',
                    'set groups md5_tests protocols bgp group md5_tests local-address %s' % router_params['mgmt_ip'],
                    'set groups md5_tests protocols bgp group md5_tests hold-time 90',
                    'set groups md5_tests protocols bgp group md5_tests keep all',
                    'set groups md5_tests protocols bgp group md5_tests family inet-vpn unicast',
                    'set groups md5_tests protocols bgp group md5_tests family inet6-vpn unicast',
                    'set groups md5_tests protocols bgp group md5_tests family evpn signaling',
                    'set groups md5_tests protocols bgp group md5_tests family route-target',
                    'set groups md5_tests protocols bgp group md5_tests local-as %s' % router_params['asn'],
                ]
                for node in self.inputs.bgp_control_ips:
                    cmd.append('set groups md5_tests protocols bgp group '
                               'md5_tests neighbor %s peer-as %s'
                               % (node, router_params['asn']))
                cmd.append('set apply-groups md5_tests')
                mx_handle = NetconfConnection(host=router_params['mgmt_ip'])
                mx_handle.connect()
                cli_output = mx_handle.config(stmts=cmd, timeout=120)
    # IPv6 is not supported for vcenter, so skip this config there.
    if self.inputs.orchestrator != 'vcenter':
        vn61_name = "test_vnv6sr"
        vn61_net = ['2001::101:0/120']
        #vn1_fixture = self.config_vn(vn1_name, vn1_net)
        vn61_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections, vn_name=vn61_name,
                      inputs=self.inputs, subnets=vn61_net))
        vn62_name = "test_vnv6dn"
        vn62_net = ['2001::201:0/120']
        #vn2_fixture = self.config_vn(vn2_name, vn2_net)
        vn62_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections, vn_name=vn62_name,
                      inputs=self.inputs, subnets=vn62_net))
        vm61_name = 'source_vm'
        vm62_name = 'dest_vm'
        #vm1_fixture = self.config_vm(vn1_fixture, vm1_name)
        #vm2_fixture = self.config_vm(vn2_fixture, vm2_name)
        vm61_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn61_fixture.obj, vm_name=vm61_name,
                      node_name=None, image_name='cirros',
                      flavor='m1.tiny'))
        vm62_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn62_fixture.obj, vm_name=vm62_name,
                      node_name=None, image_name='cirros',
                      flavor='m1.tiny'))
        vm61_fixture.wait_till_vm_is_up()
        vm62_fixture.wait_till_vm_is_up()
        rule = [{
            'direction': '<>',
            'protocol': 'any',
            'source_network': vn61_name,
            'src_ports': [0, -1],
            'dest_network': vn62_name,
            'dst_ports': [0, -1],
            'simple_action': 'pass',
        }]
        policy_name = 'allow_all'
        policy_fixture = self.config_policy(policy_name, rule)
        vn61_policy_fix = self.attach_policy_to_vn(policy_fixture,
                                                   vn61_fixture)
        vn62_policy_fix = self.attach_policy_to_vn(policy_fixture,
                                                   vn62_fixture)
    vn1 = "vn1"
    vn2 = "vn2"
    vn_s = {'vn1': '10.1.1.0/24', 'vn2': ['20.1.1.0/24']}
    rules = [{
        'direction': '<>',
        'protocol': 'any',
        'source_network': vn1,
        'src_ports': [0, -1],
        'dest_network': vn2,
        'dst_ports': [0, -1],
        'simple_action': 'pass',
    }]
    self.logger.info("Configure the policy with allow any")
    self.multi_vn_fixture = self.useFixture(
        MultipleVNFixture(connections=self.connections, inputs=self.inputs,
                          subnet_count=2, vn_name_net=vn_s,
                          project_name=self.inputs.project_name))
    vns = self.multi_vn_fixture.get_all_fixture_obj()
    (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0]
    (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1]
    self.config_policy_and_attach_to_vn(rules)
    self.multi_vm_fixture = self.useFixture(
        MultipleVMFixture(project_name=self.inputs.project_name,
                          connections=self.connections, vm_count_per_vn=1,
                          vn_objs=vns, image_name='cirros',
                          flavor='m1.tiny'))
    vms = self.multi_vm_fixture.get_all_fixture()
    (self.vm1_name, self.vm1_fix) = vms[0]
    (self.vm2_name, self.vm2_fix) = vms[1]
def config_basic(self, is_mx_present):
    # MX config using device manager.
    if is_mx_present:
        if self.inputs.ext_routers:
            if self.inputs.use_devicemanager_for_md5:
                for router_params in self.inputs.physical_routers_data.values():
                    if router_params['model'] == 'mx':
                        self.phy_router_fixture = self.useFixture(
                            PhysicalRouterFixture(
                                router_params['name'],
                                router_params['mgmt_ip'],
                                model=router_params['model'],
                                vendor=router_params['vendor'],
                                asn=router_params['asn'],
                                ssh_username=router_params['ssh_username'],
                                ssh_password=router_params['ssh_password'],
                                mgmt_ip=router_params['mgmt_ip'],
                                connections=self.connections))
            else:
                if self.inputs.ext_routers:
                    for router_params in self.inputs.physical_routers_data.values():
                        if router_params['model'] != 'mx':
                            continue
                        cmd = [
                            'set groups md5_tests routing-options router-id %s' % router_params['mgmt_ip'],
                            'set groups md5_tests routing-options route-distinguisher-id %s' % router_params['mgmt_ip'],
                            'set groups md5_tests routing-options autonomous-system %s' % router_params['asn'],
                            'set groups md5_tests protocols bgp group md5_tests type internal',
                            'set groups md5_tests protocols bgp group md5_tests multihop',
                            'set groups md5_tests protocols bgp group md5_tests local-address %s' % router_params['mgmt_ip'],
                            'set groups md5_tests protocols bgp group md5_tests hold-time 90',
                            'set groups md5_tests protocols bgp group md5_tests keep all',
                            'set groups md5_tests protocols bgp group md5_tests family inet-vpn unicast',
                            'set groups md5_tests protocols bgp group md5_tests family inet6-vpn unicast',
                            'set groups md5_tests protocols bgp group md5_tests family evpn signaling',
                            'set groups md5_tests protocols bgp group md5_tests family route-target',
                            'set groups md5_tests protocols bgp group md5_tests local-as %s' % router_params['asn'],
                        ]
                        for node in self.inputs.bgp_control_ips:
                            cmd.append('set groups md5_tests protocols bgp group '
                                       'md5_tests neighbor %s peer-as %s'
                                       % (node, router_params['asn']))
                        cmd.append('set apply-groups md5_tests')
                        mx_handle = NetconfConnection(host=router_params['mgmt_ip'])
                        mx_handle.connect()
                        cli_output = mx_handle.config(stmts=cmd, timeout=120)
    vn61_name = "test_vnv6sr"
    vn61_net = ['2001::101:0/120']
    #vn1_fixture = self.config_vn(vn1_name, vn1_net)
    vn61_fixture = self.useFixture(VNFixture(
        project_name=self.inputs.project_name,
        connections=self.connections, vn_name=vn61_name,
        inputs=self.inputs, subnets=vn61_net))
    vn62_name = "test_vnv6dn"
    vn62_net = ['2001::201:0/120']
    #vn2_fixture = self.config_vn(vn2_name, vn2_net)
    vn62_fixture = self.useFixture(VNFixture(
        project_name=self.inputs.project_name,
        connections=self.connections, vn_name=vn62_name,
        inputs=self.inputs, subnets=vn62_net))
    vm61_name = 'source_vm'
    vm62_name = 'dest_vm'
    #vm1_fixture = self.config_vm(vn1_fixture, vm1_name)
    #vm2_fixture = self.config_vm(vn2_fixture, vm2_name)
    vm61_fixture = self.useFixture(VMFixture(
        project_name=self.inputs.project_name,
        connections=self.connections, vn_obj=vn61_fixture.obj,
        vm_name=vm61_name, node_name=None, image_name='cirros',
        flavor='m1.tiny'))
    vm62_fixture = self.useFixture(VMFixture(
        project_name=self.inputs.project_name,
        connections=self.connections, vn_obj=vn62_fixture.obj,
        vm_name=vm62_name, node_name=None, image_name='cirros',
        flavor='m1.tiny'))
    vm61_fixture.wait_till_vm_is_up()
    vm62_fixture.wait_till_vm_is_up()
    rule = [{
        'direction': '<>',
        'protocol': 'any',
        'source_network': vn61_name,
        'src_ports': [0, -1],
        'dest_network': vn62_name,
        'dst_ports': [0, -1],
        'simple_action': 'pass',
    }]
    policy_name = 'allow_all'
    policy_fixture = self.config_policy(policy_name, rule)
    vn61_policy_fix = self.attach_policy_to_vn(policy_fixture, vn61_fixture)
    vn62_policy_fix = self.attach_policy_to_vn(policy_fixture, vn62_fixture)
    vn1 = "vn1"
    vn2 = "vn2"
    vn_s = {'vn1': '10.1.1.0/24', 'vn2': ['20.1.1.0/24']}
    rules = [{
        'direction': '<>',
        'protocol': 'any',
        'source_network': vn1,
        'src_ports': [0, -1],
        'dest_network': vn2,
        'dst_ports': [0, -1],
        'simple_action': 'pass',
    }]
    self.logger.info("Configure the policy with allow any")
    self.multi_vn_fixture = self.useFixture(MultipleVNFixture(
        connections=self.connections, inputs=self.inputs, subnet_count=2,
        vn_name_net=vn_s, project_name=self.inputs.project_name))
    vns = self.multi_vn_fixture.get_all_fixture_obj()
    (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0]
    (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1]
    self.config_policy_and_attach_to_vn(rules)
    self.multi_vm_fixture = self.useFixture(MultipleVMFixture(
        project_name=self.inputs.project_name,
        connections=self.connections, vm_count_per_vn=1, vn_objs=vns,
        image_name='cirros', flavor='m1.tiny'))
    vms = self.multi_vm_fixture.get_all_fixture()
    (self.vm1_name, self.vm1_fix) = vms[0]
    (self.vm2_name, self.vm2_fix) = vms[1]
        'set protocols bgp group %s family inet unicast' % device_dict['group_name'],
        'set protocols bgp group %s family inet6 unicast' % device_dict['group_name'],
        'set protocols bgp group %s family inet-vpn unicast' % device_dict['group_name'],
        'set protocols bgp group %s local-as %s' % (device_dict['group_name'],
                                                    device_dict['asn']),
        'set protocols bgp group %s neighbor %s' % (device_dict['group_name'],
                                                    cfgm_ips[0]),
        'set protocols bgp group %s neighbor %s' % (device_dict['group_name'],
                                                    cfgm_ips[1]),
        'set protocols bgp group %s neighbor %s' % (device_dict['group_name'],
                                                    cfgm_ips[2])
    ]
    router_netconf = NetconfConnection(device_dict['mgmt_ip'])
    router_netconf.connect()
    router_netconf.config(stmts)
    router_netconf.disconnect()
    if device_dict.get('role') in ['leaf', 'spine', 'pnf']:
        continue
    phy_router_obj = PhysicalRouterFixture(
        device_dict['name'], device_dict['mgmt_ip'],
        asn=device_dict['asn'],
        model=device_dict.get('model', 'mx'),
        vendor=device_dict.get('vendor', 'juniper'),
        ssh_username=device_dict.get('ssh_username'),
        ssh_password=device_dict.get('ssh_password'),
        tunnel_ip=device_dict.get('tunnel_ip'),
def test_dpdkbond_status_flap(self):
    '''
    Ensure that bond member status is displayed properly for a dpdk
    compute node after bringing down the bond interface.
    1. Bring down a bond member by disabling the ae link in the data switch.
    2. Validate that agent introspect / vif outputs show the proper status.
    3. Ensure that alarms are raised on bringing down the interface.
    '''
    mgmt_ip = self.inputs.data_sw_ip
    bond_interface_list = self.inputs.data_sw_compute_bond_interface
    if (mgmt_ip is None) or (bond_interface_list is None):
        raise self.skipTest("Skipping Test. Need management switch IP "
                            "and bond interface details.")
    handle = NetconfConnection(host=mgmt_ip, username='******',
                               password='******')
    handle.connect()
    time.sleep(10)
    self.logger.info('Bring down data switch bond interface connected '
                     'to compute.')
    self.addCleanup(self.cleanup_data_sw, mgmt_ip)
    cmd = []
    for bond_interface in self.inputs.data_sw_compute_bond_interface:
        cmd.append('set interfaces ' + bond_interface + ' disable')
    cli_output = handle.config(stmts=cmd, timeout=120)
    time.sleep(20)
    dpdk_compute = self.inputs.dpdk_ips[0]
    self.logger.info('Validate bond/slave interface status.')
    self.logger.info('Ensure slave members are present in vif --list output.')
    assert self.agent_inspect[dpdk_compute].validate_bondVifListStatus(
        bondStatus="DOWN", slaveStatus="DOWN")
    self.logger.info('Ensure slave members status is present in agent '
                     'introspect.')
    assert self.agent_inspect[dpdk_compute].validate_bondStatus(
        bondStatus="Inactive", slaveStatus="DOWN")
    self.logger.info('Ensure alarms are present since interface is down.')
    multi_instances = len(self.inputs.collector_ips) > 1
    verify_alarm_cleared = False
    assert self.analytics_obj._verify_contrail_alarms(
        None, 'vrouter', 'vrouter_interface',
        multi_instances=multi_instances,
        verify_alarm_cleared=verify_alarm_cleared)
    self.logger.info('Bring up data switch bond interface connected '
                     'to compute.')
    cmd = []
    for bond_interface in self.inputs.data_sw_compute_bond_interface:
        cmd.append('delete interfaces ' + bond_interface + ' disable')
    cli_output = handle.config(stmts=cmd, timeout=120)
    time.sleep(30)
    self.logger.info('Ensure slave members are present in vif --list output.')
    assert self.agent_inspect[dpdk_compute].validate_bondVifListStatus(
        bondStatus="UP", slaveStatus="UP")
    self.logger.info('Ensure slave members status is present in agent '
                     'introspect.')
    assert self.agent_inspect[dpdk_compute].validate_bondStatus(
        bondStatus="Active", slaveStatus="UP")
    self.logger.info('Ensure no alarms are present since all bond members '
                     'are up.')
    verify_alarm_cleared = True
    assert self.analytics_obj._verify_contrail_alarms(
        None, 'vrouter', 'vrouter_interface',
        multi_instances=multi_instances,
        verify_alarm_cleared=verify_alarm_cleared)
    return True
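# The flap test above relies on fixed sleeps before each status check. A
# retry loop like this sketch (the helper name and the tries/delay values
# are assumptions) is less timing-sensitive; it uses only the
# validate_bondVifListStatus() call already exercised by the test.
def wait_for_bond_vif_status(agent_inspect_h, bond_status, slave_status,
                             tries=12, delay=10):
    for _ in range(tries):
        # Poll the agent until vif --list reports the expected bond state.
        if agent_inspect_h.validate_bondVifListStatus(
                bondStatus=bond_status, slaveStatus=slave_status):
            return True
        time.sleep(delay)
    return False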
def test_logical_router_static_route_update_in_lr_type5_vrf_on_qfx(self):
    '''
    1) Configure encapsulation order as VxLAN, MPLSoverGRE, MPLSoverUDP
    2) Enable VxLAN routing under the project settings
    3) Add vn1 and vn2
    4) Launch a vm and a bms in each of vn1 and vn2
    5) Create a logical router, attach it to vn1 and vn2, and extend it
       to the crb-gateway
    6) Ping from vm to vm and bms to bms from vn1 to vn2
    7) Add a static route in the lr vrf on the crb-gateway; verify it gets
       copied to the vn1, vn2 and lr route tables
    8) Verify vm-to-vm and bms-to-bms traffic
    9) Verify l2 traffic in vn1
    10) Verify the default rt is not added in vn1 and vn2, and the subnet
        rt is added
    '''
    if len(self.get_bms_nodes()) < 2:
        raise self.skipTest(
            "Skipping Test. At least 2 bms is required to run the test")
    vn1 = self.create_vn()
    vn2 = self.create_vn()
    lr1 = self.create_logical_router([vn1, vn2], vni=70001)
    vm11 = self.create_vm(vn_fixture=vn1)
    vm21 = self.create_vm(vn_fixture=vn2)
    bms1 = self.create_bms(bms_name=self.get_bms_nodes()[0],
                           tor_port_vlan_tag=10, vn_fixture=vn1)
    bms2 = self.create_bms(bms_name=self.get_bms_nodes()[1],
                           tor_port_vlan_tag=20, vn_fixture=vn2)
    lr1.verify_on_setup()
    self.check_vms_booted([vm11, vm21])
    self.logger.info(
        "Verify Traffic between VN-1 and VN-2 on Logical Router: lr1")
    self.verify_traffic(vm11, vm21, 'udp', sport=10000, dport=20000)
    self.do_ping_mesh([vm11, vm21, bms1, bms2])
    vn_l2_vm1_name = 'VM1'
    vn_l2_vm2_name = 'VM2'
    vn_l2_vm1_fixture = self.create_vm(vn_fixture=vn1, image_name='ubuntu')
    vn_l2_vm2_fixture = self.create_vm(vn_fixture=vn1, image_name='ubuntu')
    self.check_vms_booted([vn_l2_vm1_fixture, vn_l2_vm2_fixture])
    # L2 traffic verification in vn1
    self.mac1 = vn_l2_vm1_fixture.mac_addr[vn1.vn_fq_name]
    self.mac2 = vn_l2_vm2_fixture.mac_addr[vn1.vn_fq_name]
    filters = 'ether src %s' % (self.mac1)
    tap_intf = vn_l2_vm2_fixture.tap_intf[vn1.vn_fq_name]['name']
    session, pcap = vn_l2_vm2_fixture.start_tcpdump(filters=filters,
                                                    interface=tap_intf)
    self.sleep(20)
    self.send_l2_traffic(vn_l2_vm1_fixture, iface='eth0')
    result = verify_tcpdump_count(self, session, pcap, exp_count=10,
                                  mac=self.mac2)
    assert result, "L2 traffic verification failed"
    # Verify the default rt is not present in vn1
    vm1_node_ip = vm11.vm_node_ip
    vm1_vrf_id = vm11.get_vrf_id(vn1.vn_fq_name, vn1.vrf_name)
    inspect_h = self.agent_inspect[vm1_node_ip]
    route = inspect_h.get_vna_route(vrf_id=vm1_vrf_id, ip="0.0.0.0",
                                    prefix="0")
    assert not route, "Route is present in bridge vn vn1 inet table."
    # Verify the subnet rt for vn2 is in the vn1 rt table
    route = inspect_h.get_vna_route(
        vrf_id=vm1_vrf_id,
        ip=vn2.get_cidrs()[0].split('/')[0],
        prefix=vn2.get_cidrs()[0].split('/')[1])
    assert route, "Subnet route for vn2 is not present in vn1 inet table."
    # Verify the default rt is not present in vn2
    vm2_node_ip = vm21.vm_node_ip
    # vm21 lives in vn2; the original passed vn1 here, which would look up
    # the wrong VRF for the vn2 checks below.
    vm2_vrf_id = vm21.get_vrf_id(vn2.vn_fq_name, vn2.vrf_name)
    inspect_h = self.agent_inspect[vm2_node_ip]
    route = inspect_h.get_vna_route(vrf_id=vm2_vrf_id, ip="0.0.0.0",
                                    prefix="0")
    assert not route, "Route is present in bridge vn vn2 inet table."
    # Verify the subnet rt for vn1 is in the vn2 rt table
    route = inspect_h.get_vna_route(
        vrf_id=vm2_vrf_id,
        ip=vn1.get_cidrs()[0].split('/')[0],
        prefix=vn1.get_cidrs()[0].split('/')[1])
    assert route, "Subnet route for vn1 is not present in vn2 inet table."
    self.logger.info("Add static route on spine with mgmt ip %s "
                     % self.spines[0].mgmt_ip)
    rt_cmd = ("set groups __contrail_overlay_evpn_type5__ routing-instances "
              "__contrail_%s_%s routing-options static route 8.8.8.0/24 "
              "next-table inet.0" % (lr1.name, lr1.uuid))
    # Add the static route on the spine device in the type5 lr vrf and
    # verify the rt in the lr, vn1 and vn2 rt tables.
    cmd = [rt_cmd]
    device_handle = NetconfConnection(host=self.spines[0].mgmt_ip,
                                      username=self.spines[0].ssh_username,
                                      password=self.spines[0].ssh_password)
    device_handle.connect()
    cli_output = device_handle.config(stmts=cmd, timeout=120)
    device_handle.disconnect()
    # Verify the route gets updated in bridge network vn1 connected to lr1
    inspect_h = self.agent_inspect[vm1_node_ip]
    route = inspect_h.get_vna_route(vrf_id=vm1_vrf_id, ip="8.8.8.0",
                                    prefix="24")
    assert route, "Route is not present in bridge vn inet table."
    # Verify the route gets updated in the internal vn for lr1 on the
    # compute node of vm1
    vm1_lr_vrf_id = inspect_h.get_vna_vrf_id(
        ":".join(lr1.get_internal_vn().fq_name))
    route = inspect_h.get_vna_route(vrf_id=vm1_lr_vrf_id, ip="8.8.8.0",
                                    prefix="24")
    assert route, "Route is not present in agent inet table."
    # Verify the route gets updated in bridge network vn2 connected to lr1
    inspect_h = self.agent_inspect[vm2_node_ip]
    route = inspect_h.get_vna_route(vrf_id=vm2_vrf_id, ip="8.8.8.0",
                                    prefix="24")
    assert route, "Route is not present in bridge vn inet table."
    # Verify the route gets updated in the internal vn for lr1 on the
    # compute node of vm2
    vm2_lr_vrf_id = inspect_h.get_vna_vrf_id(
        ":".join(lr1.get_internal_vn().fq_name))
    route = inspect_h.get_vna_route(vrf_id=vm2_lr_vrf_id, ip="8.8.8.0",
                                    prefix="24")
    assert route, "Route is not present in agent inet table."
    # Verify traffic after the static route add
    self.logger.info(
        "Verify Traffic between VN-1 and VN-2 on Logical Router: lr1")
    self.verify_traffic(vm11, vm21, 'udp', sport=10000, dport=20000)
    self.do_ping_mesh([vm11, vm21, bms1, bms2])
    # Clean up the static route on the spine device in the type5 lr vrf.
    # The original appended rt_cmd and pushed the cmd list again here, which
    # would re-apply the route under test instead of cleaning it up.
    rt_del_cmd = ("set groups __contrail_overlay_evpn_type5__ "
                  "routing-instances __contrail_%s_%s routing-options "
                  "static route 8.8.8.0/24 discard" % (lr1.name, lr1.uuid))
    cleanup_cmd = [rt_del_cmd]
    device_handle = NetconfConnection(host=self.spines[0].mgmt_ip,
                                      username=self.spines[0].ssh_username,
                                      password=self.spines[0].ssh_password)
    device_handle.connect()
    cli_output = device_handle.config(stmts=cleanup_cmd, timeout=120)
    device_handle.disconnect()
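    # A sketch of the same add/cleanup flow built on the provision_mx()
    # helper defined earlier in this section, which registers the cleanup
    # via addCleanup() so it runs even on assertion failure. It assumes the
    # spine accepts the default NETCONF credentials provision_mx() uses, and
    # reuses the rt_cmd / rt_del_cmd statements from the test above:
    #
    #   self.provision_mx(self.spines[0].mgmt_ip,
    #                     cli_cmds=[rt_cmd],
    #                     cleanup_cmds=[rt_del_cmd])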
def config_basic(self, is_mx_present):
    # MX config using device manager.
    if is_mx_present:
        if self.inputs.ext_routers:
            if self.inputs.use_devicemanager_for_md5:
                router_params = list(
                    self.inputs.physical_routers_data.values())[0]
                self.phy_router_fixture = self.useFixture(
                    PhysicalRouterFixture(
                        router_params['name'],
                        router_params['mgmt_ip'],
                        model=router_params['model'],
                        vendor=router_params['vendor'],
                        asn=router_params['asn'],
                        ssh_username=router_params['ssh_username'],
                        ssh_password=router_params['ssh_password'],
                        mgmt_ip=router_params['mgmt_ip'],
                        connections=self.connections))
            else:
                if self.inputs.ext_routers:
                    router_params = list(
                        self.inputs.physical_routers_data.values())[0]
                    cmd = [
                        'set groups md5_tests routing-options router-id %s' % router_params['mgmt_ip'],
                        'set groups md5_tests routing-options route-distinguisher-id %s' % router_params['mgmt_ip'],
                        'set groups md5_tests routing-options autonomous-system %s' % router_params['asn'],
                        'set groups md5_tests protocols bgp group md5_tests type internal',
                        'set groups md5_tests protocols bgp group md5_tests multihop',
                        'set groups md5_tests protocols bgp group md5_tests local-address %s' % router_params['mgmt_ip'],
                        'set groups md5_tests protocols bgp group md5_tests hold-time 90',
                        'set groups md5_tests protocols bgp group md5_tests keep all',
                        'set groups md5_tests protocols bgp group md5_tests family inet-vpn unicast',
                        'set groups md5_tests protocols bgp group md5_tests family inet6-vpn unicast',
                        'set groups md5_tests protocols bgp group md5_tests family evpn signaling',
                        'set groups md5_tests protocols bgp group md5_tests family route-target',
                        'set groups md5_tests protocols bgp group md5_tests local-as %s' % router_params['asn'],
                    ]
                    for node in self.inputs.bgp_control_ips:
                        cmd.append('set groups md5_tests protocols bgp group '
                                   'md5_tests neighbor %s peer-as %s'
                                   % (node, router_params['asn']))
                    cmd.append('set apply-groups md5_tests')
                    mx_handle = NetconfConnection(
                        host=router_params['mgmt_ip'])
                    mx_handle.connect()
                    cli_output = mx_handle.config(stmts=cmd, timeout=120)
    vn61_name = "test_vnv6sr"
    vn61_net = ['2001::101:0/120']
    #vn1_fixture = self.config_vn(vn1_name, vn1_net)
    vn61_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections, vn_name=vn61_name,
                  inputs=self.inputs, subnets=vn61_net))
    vn62_name = "test_vnv6dn"
    vn62_net = ['2001::201:0/120']
    #vn2_fixture = self.config_vn(vn2_name, vn2_net)
    vn62_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections, vn_name=vn62_name,
                  inputs=self.inputs, subnets=vn62_net))
    vm61_name = 'source_vm'
    vm62_name = 'dest_vm'
    #vm1_fixture = self.config_vm(vn1_fixture, vm1_name)
    #vm2_fixture = self.config_vm(vn2_fixture, vm2_name)
    vm61_fixture = self.useFixture(
        VMFixture(project_name=self.inputs.project_name,
                  connections=self.connections, vn_obj=vn61_fixture.obj,
                  vm_name=vm61_name, node_name=None,
                  image_name='cirros-0.3.0-x86_64-uec', flavor='m1.tiny'))
    vm62_fixture = self.useFixture(
        VMFixture(project_name=self.inputs.project_name,
                  connections=self.connections, vn_obj=vn62_fixture.obj,
                  vm_name=vm62_name, node_name=None,
                  image_name='cirros-0.3.0-x86_64-uec', flavor='m1.tiny'))
    vm61_fixture.wait_till_vm_is_up()
    vm62_fixture.wait_till_vm_is_up()
    rule = [{
        'direction': '<>',
        'protocol': 'any',
        'source_network': vn61_name,
        'src_ports': [0, -1],
        'dest_network': vn62_name,
        'dst_ports': [0, -1],
        'simple_action': 'pass',
    }]
    policy_name = 'allow_all'
    policy_fixture = self.config_policy(policy_name, rule)
    vn61_policy_fix = self.attach_policy_to_vn(policy_fixture, vn61_fixture)
    vn62_policy_fix = self.attach_policy_to_vn(policy_fixture, vn62_fixture)
    vn1 = "vn1"
    vn2 = "vn2"
    vn_s = {'vn1': '10.1.1.0/24', 'vn2': ['20.1.1.0/24']}
    rules = [{
        'direction': '<>',
        'protocol': 'any',
        'source_network': vn1,
        'src_ports': [0, -1],
        'dest_network': vn2,
        'dst_ports': [0, -1],
        'simple_action': 'pass',
    }]
    self.logger.info("Configure the policy with allow any")
    self.multi_vn_fixture = self.useFixture(
        MultipleVNFixture(connections=self.connections, inputs=self.inputs,
                          subnet_count=2, vn_name_net=vn_s,
                          project_name=self.inputs.project_name))
    vns = self.multi_vn_fixture.get_all_fixture_obj()
    (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0]
    (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1]
    self.config_policy_and_attach_to_vn(rules)
    self.multi_vm_fixture = self.useFixture(
        MultipleVMFixture(project_name=self.inputs.project_name,
                          connections=self.connections, vm_count_per_vn=1,
                          vn_objs=vns,
                          image_name='cirros-0.3.0-x86_64-uec',
                          flavor='m1.tiny'))
    vms = self.multi_vm_fixture.get_all_fixture()
    (self.vm1_name, self.vm1_fix) = vms[0]
    (self.vm2_name, self.vm2_fix) = vms[1]