def create_2_legs(self):
    vn1_name = get_random_name('bgpaas_vn')
    vn1_subnets = [get_random_cidr()]
    vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
    test_vm = self.create_vm(vn1_fixture, 'test_vm',
                             image_name='ubuntu-traffic')
    assert test_vm.wait_till_vm_is_up()
    vn2_name = get_random_name('bgpaas_vn')
    vn2_subnets = [get_random_cidr()]
    vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
    bgpaas_vm1 = self.useFixture(
        VMFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_objs=[vn1_fixture.obj, vn2_fixture.obj],
                  vm_name='bgpaas_vm1',
                  node_name=None,
                  image_name='vsrx'))
    assert bgpaas_vm1.wait_till_vm_is_up()
    ret_dict = {
        'vn1_fixture': vn1_fixture,
        'vn2_fixture': vn2_fixture,
        'test_vm': test_vm,
        'bgpaas_vm1': bgpaas_vm1,
    }
    return ret_dict
def setup_evpn_service_chain(self, left_vn, right_vn, **kwargs):
    left_lr_fixture = self.create_lr([left_vn])
    right_lr_fixture = self.create_lr([right_vn])
    left_internal_vn = left_lr_fixture.get_internal_vn()
    right_internal_vn = right_lr_fixture.get_internal_vn()
    left_lr_intvn_fixture = self.create_vn(
        left_lr_fixture.get_internal_vn_name(),
        uuid=left_internal_vn.uuid, clean_up=False)
    left_intvn_subnet_list = [get_random_cidr(), get_random_cidr(af='v6')]
    left_intvn_v4_subnets = {'cidr': left_intvn_subnet_list[0]}
    left_lr_intvn_fixture.create_subnet(left_intvn_v4_subnets)
    left_intvn_v6_subnets = {'cidr': left_intvn_subnet_list[1]}
    left_lr_intvn_fixture.create_subnet(left_intvn_v6_subnets)
    right_lr_intvn_fixture = self.create_vn(
        right_lr_fixture.get_internal_vn_name(),
        uuid=right_internal_vn.uuid, clean_up=False)
    right_intvn_subnet_list = [get_random_cidr(), get_random_cidr(af='v6')]
    right_intvn_v4_subnets = {'cidr': right_intvn_subnet_list[0]}
    right_intvn_v6_subnets = {'cidr': right_intvn_subnet_list[1]}
    right_lr_intvn_fixture.create_subnet(right_intvn_v4_subnets)
    right_lr_intvn_fixture.create_subnet(right_intvn_v6_subnets)
    return (left_lr_intvn_fixture, right_lr_intvn_fixture)
def verify_svc_transparent_datapath(
        self, si_count=1, svc_scaling=False, max_inst=1,
        flavor='contrail_flavor_2cpu', proto='any',
        src_ports=[0, -1], dst_ports=[0, -1],
        svc_img_name='vsrx-bridge'):
    """Validate the service chaining datapath"""
    self.vn1_name = get_random_name('bridge_vn1')
    self.vn1_subnets = [get_random_cidr()]
    self.vm1_name = get_random_name('bridge_vm1')
    self.vn2_name = get_random_name('bridge_vn2')
    self.vn2_subnets = [get_random_cidr()]
    self.vm2_name = get_random_name('bridge_vm2')
    self.action_list = []
    self.if_list = []
    self.st_name = get_random_name('service_template_1')
    si_prefix = get_random_name('bridge_si') + '_'
    self.policy_name = get_random_name('policy_transparent')
    self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
    self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
    self.st_fixture, self.si_fixtures = self.config_st_si(
        self.st_name, si_prefix, si_count, svc_scaling, max_inst,
        flavor=flavor, project=self.inputs.project_name,
        svc_img_name=svc_img_name)
    self.action_list = self.chain_si(
        si_count, si_prefix, self.inputs.project_name)
    self.rules = [
        {
            'direction': '<>',
            'protocol': proto,
            'source_network': self.vn1_name,
            'src_ports': src_ports,
            'dest_network': self.vn2_name,
            'dst_ports': dst_ports,
            'simple_action': None,
            'action_list': {'apply_service': self.action_list}
        },
    ]
    self.policy_fixture = self.config_policy(self.policy_name, self.rules)
    self.vn1_policy_fix = self.attach_policy_to_vn(
        self.policy_fixture, self.vn1_fixture)
    self.vn2_policy_fix = self.attach_policy_to_vn(
        self.policy_fixture, self.vn2_fixture)
    self.vm1_fixture = self.config_and_verify_vm(
        self.vn1_fixture, self.vm1_name)
    self.vm2_fixture = self.config_and_verify_vm(
        self.vn2_fixture, self.vm2_name)
    self.verify_si(self.si_fixtures)
    result, msg = self.validate_vn(
        self.vn1_name, project_name=self.inputs.project_name)
    assert result, msg
    result, msg = self.validate_vn(
        self.vn2_name, project_name=self.inputs.project_name)
    assert result, msg
    if proto not in ['any', 'icmp']:
        self.logger.info('Will skip Ping test')
    else:
        # Ping from left VM to right VM
        errmsg = "Ping to Right VM %s from Left VM failed" % \
            self.vm2_fixture.vm_ip
        assert self.vm1_fixture.ping_with_certainty(
            self.vm2_fixture.vm_ip, count='3'), errmsg
    return True
def test_with_multiple_subnets(self):
    ''' Create a VN with two /29 subnets
        Create 5 VMIs on the VN so that 1st subnet IPs are exhausted
        Add lifs with 6th and 7th VMIs
        Validate that the BMSs get IP from 2nd subnet and ping passes
    '''
    bms_fixtures = list()
    vn_subnets = [get_random_cidr('29'), get_random_cidr('29')]
    vn_fixture = self.create_vn(vn_subnets=vn_subnets, disable_dns=True)
    bms_data = list(self.inputs.bms_data.keys())
    bms1_fixture = self.create_bms(
        bms_name=bms_data[0],
        vn_fixture=vn_fixture,
        security_groups=[self.default_sg.uuid])
    bms_fixtures.append(bms1_fixture)
    port_fixtures = []
    vm1 = self.create_vm(vn_fixture=vn_fixture, image_name='cirros')
    for i in range(0, 5):
        port_fixtures.append(self.setup_vmi(vn_fixture.uuid))
    bms2_fixture = self.create_bms(
        bms_name=bms_data[1],
        vn_fixture=vn_fixture,
        security_groups=[self.default_sg.uuid])
    bms_fixtures.append(bms2_fixture)
    vm2 = self.create_vm(vn_fixture=vn_fixture, image_name='cirros')
    bms_ip = IPAddress(bms2_fixture.bms_ip)
    subnet_cidr = IPNetwork(vn_subnets[1])
    assert bms_ip in subnet_cidr, (
        'BMS does not seem to have got IP from second subnet; '
        'BMS IP %s not in %s subnet' % (bms_ip, subnet_cidr))
    self.do_ping_mesh(bms_fixtures + [vm1, vm2])
    self.do_ping_test(bms1_fixture, bms2_fixture.bms_ip)
def test_with_multiple_subnets(self):
    ''' Create a VN with two /28 subnets
        Create 8 VMIs on the VN so that 1st subnet IPs are exhausted
        Add lifs with 6th and 7th VMIs
        Validate that the BMSs get IP from 2nd subnet and ping passes
    '''
    vn_subnets = [get_random_cidr('28'), get_random_cidr('28')]
    vn = self.create_vn(vn_subnets=vn_subnets)
    self.create_logical_router([vn])
    bms_data = self.get_bms_nodes()
    bms1_fixture = self.create_bms(bms_name=bms_data[0],
                                   tor_port_vlan_tag=10,
                                   vn_fixture=vn)
    vm1 = self.create_vm(vn_fixture=vn, image_name='cirros')
    for i in range(0, 4):
        port_fixture = self.setup_vmi(vn.uuid)
        if port_fixture.get_ip_addresses()[0] in IPNetwork(vn_subnets[1]):
            self.perform_cleanup(port_fixture)
            break
    bms2_fixture = self.create_bms(bms_name=bms_data[1],
                                   tor_port_vlan_tag=10,
                                   vn_fixture=vn)
    vm2 = self.create_vm(vn_fixture=vn, image_name='cirros')
    vm2.wait_till_vm_is_up()
    self.do_ping_mesh([bms1_fixture, bms2_fixture, vm1, vm2])
def config_svc_mirroring(self, service_mode='transparent', *args, **kwargs):
    """Validate the service chaining datapath
       Test steps:
        1. Create the SI/ST in the svc_mode specified.
        2. Create vn11/vm1, vn21/vm2
        3. Create the policy rule for ICMP/UDP and attach to the VNs
        4. Send traffic from vm1 to vm2 and verify that the packets get
           mirrored to the analyzer
        5. If it is a single analyzer, only ICMP (5 pkts) will be sent,
           else ICMP and UDP traffic will be sent.
       Pass criteria:
        count = sent
        single node : Pkts mirrored to the analyzer should be equal to 'count'
        multinode   : Pkts mirrored to the analyzer should be equal to '2xcount'
    """
    ci = self.inputs.is_ci_setup()
    create_svms = kwargs.get('create_svms', True)
    vn1_subnets = [get_random_cidr(af=self.inputs.get_af())]
    vn2_subnets = [get_random_cidr(af=self.inputs.get_af())]
    vn1_name = get_random_name('left')
    vn2_name = get_random_name('right')
    st_name = get_random_name("st1")
    action_list = []
    service_type = 'analyzer'
    si_prefix = get_random_name("mirror_si")
    policy_name = get_random_name("mirror_policy")
    vn1_fixture = self.config_vn(vn1_name, vn1_subnets)
    vn2_fixture = self.config_vn(vn2_name, vn2_subnets)
    ret_dict = self.verify_svc_chain(service_mode=service_mode,
                                     service_type=service_type,
                                     left_vn_fixture=vn1_fixture,
                                     right_vn_fixture=vn2_fixture,
                                     create_svms=create_svms,
                                     **kwargs)
    si_fixture = ret_dict['si_fixture']
    policy_fixture = ret_dict['policy_fixture']
    si_fq_name = si_fixture.fq_name_str
    rules = [{'direction': '<>',
              'protocol': 'icmp',
              'source_network': vn1_fixture.vn_fq_name,
              'src_ports': [0, 65535],
              'dest_network': vn2_fixture.vn_fq_name,
              'dst_ports': [0, 65535],
              'action_list': {'simple_action': 'pass',
                              'mirror_to': {'analyzer_name': si_fq_name}}
              },
             {'direction': '<>',
              'protocol': 'icmp6',
              'source_network': vn1_fixture.vn_fq_name,
              'src_ports': [0, 65535],
              'dest_network': vn2_fixture.vn_fq_name,
              'dst_ports': [0, 65535],
              'action_list': {'simple_action': 'pass',
                              'mirror_to': {'analyzer_name': si_fq_name}}
              }]
    policy_fixture.update_policy_api(rules)
    ret_dict['policy_fixture'] = policy_fixture
    return ret_dict
def create_vm_in_all_nodes(self):
    vn_pop1_name = get_random_name('pop1_vn')
    vn_pop2_name = get_random_name('pop2_vn')
    vn_main_name = get_random_name('main_vn')
    vn_pop1_subnet = [get_random_cidr()]
    vn_pop2_subnet = [get_random_cidr()]
    vn_main_subnet = [get_random_cidr()]
    rt_value = randint(50000, 60000)
    vn_pop1_fixture = self.create_vn(vn_pop1_name, vn_pop1_subnet)
    vn_pop1_fixture.add_route_target(vn_pop1_fixture.ri_name,
                                     self.inputs.router_asn, rt_value)
    vn_pop2_fixture = self.create_vn(vn_pop2_name, vn_pop2_subnet)
    vn_pop2_fixture.add_route_target(vn_pop2_fixture.ri_name,
                                     self.inputs.router_asn, rt_value)
    vn_main_fixture = self.create_vn(vn_main_name, vn_main_subnet)
    vn_main_fixture.add_route_target(vn_main_fixture.ri_name,
                                     self.inputs.router_asn, rt_value)
    compute_nodes = self.get_compute_nodes()
    test_vm_pop1 = self.create_vm(vn_pop1_fixture, 'test_vm_pop1',
                                  image_name='cirros',
                                  node_name=compute_nodes['pop1'][0])
    test_vm_pop2 = self.create_vm(vn_pop2_fixture, 'test_vm_pop2',
                                  image_name='cirros',
                                  node_name=compute_nodes['pop2'][0])
    test_vm_main = self.create_vm(vn_main_fixture, 'test_vm_main',
                                  image_name='cirros',
                                  node_name=compute_nodes['main'][0])
    assert test_vm_pop1.wait_till_vm_is_up()
    assert test_vm_pop2.wait_till_vm_is_up()
    assert test_vm_main.wait_till_vm_is_up()
    ret_dict = {
        'vn_pop1_fixture': vn_pop1_fixture,
        'vn_pop2_fixture': vn_pop2_fixture,
        'vn_main_fixture': vn_main_fixture,
        'test_vm_pop1': test_vm_pop1,
        'test_vm_pop2': test_vm_pop2,
        'test_vm_main': test_vm_main,
    }
    return ret_dict
def create_bgpaas_routes(self):
    ret_dict = {}
    vn_name = get_random_name('bgpaas_vn')
    vn_subnets = [get_random_cidr()]
    ret_dict['vn_fixture'] = self.create_vn(vn_name, vn_subnets)
    ret_dict['test_vm'] = self.create_vm(ret_dict['vn_fixture'], 'test_vm',
                                         image_name='ubuntu-traffic')
    assert ret_dict['test_vm'].wait_till_vm_is_up()
    bgpaas_vm1 = self.create_vm(ret_dict['vn_fixture'], 'bgpaas_vm1',
                                image_name='ubuntu-bird')
    assert bgpaas_vm1.wait_till_vm_is_up()
    bgpaas_fixture = self.create_bgpaas(bgpaas_shared=True,
                                        autonomous_system=64500,
                                        bgpaas_ip_address=bgpaas_vm1.vm_ip)
    bgpaas_vm1.wait_for_ssh_on_vm()
    port1 = {}
    port1['id'] = bgpaas_vm1.vmi_ids[bgpaas_vm1.vn_fq_name]
    address_families = ['inet', 'inet6']
    autonomous_system = 64500
    gw_ip = ret_dict['vn_fixture'].get_subnets()[0]['gateway_ip']
    dns_ip = ret_dict['vn_fixture'].get_subnets()[0]['dns_server_address']
    neighbors = [gw_ip, dns_ip]
    self.logger.info('Configuring BGP on the bird vm')
    static_routes = []
    static_routes.append(
        {"network": ret_dict['vn_fixture'].get_subnets()[0]['cidr'],
         "nexthop": "blackhole"})
    self.config_bgp_on_bird(
        bgpaas_vm=bgpaas_vm1,
        local_ip=bgpaas_vm1.vm_ip,
        neighbors=neighbors,
        peer_as=self.inputs.bgp_asn,
        local_as=autonomous_system,
        static_routes=static_routes)
    self.attach_vmi_to_bgpaas(port1['id'], bgpaas_fixture)
    self.addCleanup(self.detach_vmi_from_bgpaas,
                    port1['id'], bgpaas_fixture)
    return ret_dict
def test_disable_enable_policy_inter_node(self):
    """
    Description: Verify disabling/enabling policy for ECMP routes with
        static routes on VM
    Steps:
        1. launch 1 VN and launch 3 VMs in it.
        2. create a static route for a new subnet prefix and add this
           on 2 VMIs. this will create 2 ECMP routes.
        3. Disable-enable the policy on all VMIs.
        4. Now from 3rd VM send traffic to an IP from static route
           prefix, verify flow created
        5. Now disable the policy again and verify no flows
    Pass criteria:
        1. traffic should go through fine
        2. flows should not be created
        3. load should be distributed among ecmp routes.
    """
    compute_hosts = self.orch.get_hosts()
    if len(compute_hosts) < 2:
        raise self.skipTest("Skipping test case, "
                            "this test needs at least 2 compute nodes")
    vn_fixtures = self.create_vns(count=1)
    self.verify_vns(vn_fixtures)
    vn1_fixture = vn_fixtures[0]
    prefix = get_random_cidr(af=self.inputs.get_af())
    assert prefix, "Unable to get a random CIDR"

    # Launch sender on first node and ECMP dest VMs on another node
    image = 'ubuntu-traffic'
    vm1_fixture = self.create_vms(vn_fixture=vn1_fixture, count=1,
                                  node_name=compute_hosts[0],
                                  image_name=image)
    vm_fixtures = self.create_vms(vn_fixture=vn1_fixture, count=2,
                                  node_name=compute_hosts[1],
                                  image_name=image)
    self.verify_vms(vm_fixtures)
    self.verify_vms(vm1_fixture)
    vm1_fixture = vm1_fixture[0]
    vm2_fixture = vm_fixtures[0]
    vm3_fixture = vm_fixtures[1]

    # Add static routes, which will create ECMP routes
    static_ip = self.add_static_routes_on_vms(prefix,
                                              [vm2_fixture, vm3_fixture])
    # Disable the policy on all the VMIs
    self.disable_policy_for_vms([vm1_fixture])
    self.disable_policy_for_vms(vm_fixtures)
    # Enable the policy
    self.disable_policy_for_vms([vm1_fixture], disable=False)
    self.disable_policy_for_vms(vm_fixtures, disable=False)
    assert self.verify_ecmp_routes([vm2_fixture, vm3_fixture], prefix)
    assert self.verify_traffic_for_ecmp(vm1_fixture,
                                        [vm2_fixture, vm3_fixture],
                                        static_ip, flow_count=1)
    # Disable the policy on all the VMIs
    self.disable_policy_for_vms([vm1_fixture])
    self.disable_policy_for_vms(vm_fixtures)
    assert self.verify_ecmp_routes([vm2_fixture, vm3_fixture], prefix)
    assert self.verify_traffic_for_ecmp(vm1_fixture,
                                        [vm2_fixture, vm3_fixture],
                                        static_ip)
def test_disable_policy_remove_sg(self):
    """
    Description: Verify disabling policy with SG detach from vmi
    Steps:
        1. launch 1 VN and launch 3 VMs on the same node
        2. create a static route for a new subnet prefix and add this
           on 2 VMIs. this will create 2 ECMP routes.
        3. Disable the policy on all VMIs and start ping
        4. remove the SG from all VMIs
        5. Now from 3rd VM send traffic to an IP from static route prefix
    Pass criteria:
        1. traffic should go through fine and no ping loss
        2. flows should not be created
        3. load should be distributed among ecmp routes.
    """
    vn_fixtures = self.create_vns(count=1)
    self.verify_vns(vn_fixtures)
    vn1_fixture = vn_fixtures[0]
    prefix = get_random_cidr(af=self.inputs.get_af())
    assert prefix, "Unable to get a random CIDR"
    compute_hosts = self.orch.get_hosts()

    # Launch all VMs on the same node, to test intra node traffic
    image = 'ubuntu-traffic'
    vm_fixtures = self.create_vms(vn_fixture=vn1_fixture, count=3,
                                  node_name=compute_hosts[0],
                                  image_name=image)
    self.verify_vms(vm_fixtures)
    vm1_fixture = vm_fixtures[0]
    vm2_fixture = vm_fixtures[1]
    vm3_fixture = vm_fixtures[2]
    static_ip = self.add_static_routes_on_vms(prefix,
                                              [vm2_fixture, vm3_fixture])
    # Start ping
    ping_h = self.start_ping(vm1_fixture, dst_ip=static_ip)
    self.disable_policy_for_vms(vm_fixtures)
    # Remove the SG from all VMs
    self.remove_sg_from_vms(vm_fixtures)
    assert self.verify_ecmp_routes([vm2_fixture, vm3_fixture], prefix)
    assert self.verify_traffic_for_ecmp(vm1_fixture,
                                        [vm2_fixture, vm3_fixture],
                                        static_ip)
    # Get ping stats
    stats = ping_h.get_stats()
    assert stats['loss'] == '0', (
        'Ping loss seen after disabling policy with active flow')
    # Attach the SGs back
    self.add_sg_to_vms(vm_fixtures)
    # Get ping stats
    stats = ping_h.get_stats()
    assert stats['loss'] == '0', (
        'Ping loss seen after disabling policy with active flow')
    # Remove the SG from all VMs
    self.remove_sg_from_vms(vm_fixtures)
    # Enable the policy now, some ping loss should be seen now
    self.disable_policy_for_vms(vm_fixtures, disable=False)
    (stats, ping_log) = self.stop_ping(ping_h)
    assert stats['loss'] != '0', (
        'Ping loss not seen after enabling policy with active flow')
def test_rp_interface_static_matrix(self):
    '''
    1. Create a routing policy with interface-static match and different
       "to" conditions: med, as-path, local-pref, community.
    2. Launch VMs.
    3. Attach policy to VN and confirm if policy takes hold.
    '''
    ret_dict = self.config_basic()
    vn_fixture = ret_dict['vn_fixture']
    test_vm = ret_dict['test_vm']
    test2_vm = ret_dict['test2_vm']
    self.static_table_handle = ContrailVncApi(self.vnc_lib, self.logger)
    random_cidr = get_random_cidr()
    self.intf_table_to_right_obj = self.static_table_handle.create_route_table(
        prefixes=[random_cidr],
        name=get_random_name('int_table_right'),
        parent_obj=self.project.project_obj,
    )
    id_entry = self.inputs.project_fq_name[0] + ':' + \
        self.inputs.project_fq_name[1] + ':' + vn_fixture.vn_name
    self.static_table_handle.bind_vmi_to_interface_route_table(
        str(test_vm.get_vmi_ids()[id_entry]),
        self.intf_table_to_right_obj)
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'interface-static', 'to_term': 'med',
                    'sub_to': '444'}
    rp = self.configure_term_routing_policy(config_dicts)
    assert self.verify_policy_in_control(vn_fixture, test_vm,
                                         search_ip=random_cidr,
                                         search_value='444')
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'interface-static',
                    'to_term': 'local-preference', 'sub_to': '555'}
    rp = self.configure_term_routing_policy(config_dicts)
    assert self.verify_policy_in_control(vn_fixture, test_vm,
                                         search_ip=random_cidr,
                                         search_value='555')
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'interface-static', 'to_term': 'as-path',
                    'sub_to': '666'}
    rp = self.configure_term_routing_policy(config_dicts)
    assert self.verify_policy_in_control(vn_fixture, test_vm,
                                         search_ip=random_cidr,
                                         search_value='666')
    assert test_vm.ping_with_certainty(test2_vm.vm_ip)
def create_bgpaas_routes(self):
    ret_dict = {}
    vn_name = get_random_name('bgpaas_vn')
    vn_subnets = [get_random_cidr()]
    ret_dict['vn_fixture'] = self.create_vn(vn_name, vn_subnets)
    ret_dict['test_vm'] = self.create_vm(ret_dict['vn_fixture'], 'test_vm',
                                         image_name='ubuntu-traffic')
    assert ret_dict['test_vm'].wait_till_vm_is_up()
    bgpaas_vm1 = self.create_vm(ret_dict['vn_fixture'], 'bgpaas_vm1',
                                image_name='vsrx')
    assert bgpaas_vm1.wait_till_vm_is_up()
    bgpaas_fixture = self.create_bgpaas(bgpaas_shared=True,
                                        autonomous_system=64500,
                                        bgpaas_ip_address=bgpaas_vm1.vm_ip)
    bgpaas_vm1.wait_for_ssh_on_vm()
    port1 = {}
    port1['id'] = bgpaas_vm1.vmi_ids[bgpaas_vm1.vn_fq_name]
    address_families = ['inet', 'inet6']
    autonomous_system = 64500
    gw_ip = ret_dict['vn_fixture'].get_subnets()[0]['gateway_ip']
    dns_ip = ret_dict['vn_fixture'].get_subnets()[0]['dns_server_address']
    neighbors = [gw_ip, dns_ip]
    self.logger.info('Configuring BGP on the vSRX')
    self.config_bgp_on_vsrx(src_vm=ret_dict['test_vm'],
                            dst_vm=bgpaas_vm1,
                            bgp_ip=bgpaas_vm1.vm_ip,
                            lo_ip=bgpaas_vm1.vm_ip,
                            address_families=address_families,
                            autonomous_system=autonomous_system,
                            neighbors=neighbors,
                            bfd_enabled=False)
    bgpaas_vm1.wait_for_ssh_on_vm()
    self.attach_vmi_to_bgpaas(port1['id'], bgpaas_fixture)
    self.addCleanup(self.detach_vmi_from_bgpaas,
                    port1['id'], bgpaas_fixture)
    return ret_dict
def create_basic_config(self, forwarding_mode='l2_l3'):
    vn_name = get_random_name('bgpvpn_vn')
    vm_name = get_random_name('bgpvpn_vm')
    vn_subnets = [get_random_cidr()]
    self.vn_fixture = self.create_vn(vn_name, vn_subnets,
                                     forwarding_mode=forwarding_mode)
    self.vm_fixture = self.create_vm(self.vn_fixture, vm_name,
                                     image_name='cirros')
    assert self.vm_fixture.wait_till_vm_is_up()
def get_random_ip_list(max_list_length=4):
    list_length = random.randint(1, max_list_length)
    final_list = []
    for i in range(0, list_length):
        cidr = get_random_cidr()
        random_ip = get_random_ip(cidr)
        final_list.append(random_ip)
    return final_list
def get_route_dict_list(cidr, max_length=4):
    list_length = random.randint(1, max_length)
    final_list = []
    for i in range(0, list_length):
        route_dict = {'destination': get_random_cidr(),
                      'nexthop': str(get_random_ip(cidr))}
        final_list.append(route_dict)
    return final_list
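# --- Illustrative usage sketch (not part of the original suite) ---
# The two random-data helpers above are easiest to understand from the shape
# of what they return. This sketch assumes get_random_cidr()/get_random_ip()
# come from the same utility module as in the original code; the function
# name and the example CIDR below are hypothetical.
def _example_random_route_helpers():
    ip_list = get_random_ip_list(max_list_length=3)
    assert 1 <= len(ip_list) <= 3
    routes = get_route_dict_list('10.10.10.0/24', max_length=2)
    for route in routes:
        # Each entry pairs a random destination prefix with a nexthop
        # picked from the CIDR that was passed in.
        assert set(route.keys()) == {'destination', 'nexthop'}
    return ip_list, routes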
def create_lr_config(self, forwarding_mode='l2_l3'):
    self.create_basic_config(forwarding_mode)
    vn2_name = get_random_name('bgpvpn_vn2')
    vm2_name = get_random_name('bgpvpn_vm2')
    vn2_subnets = [get_random_cidr()]
    self.vn2_fixture = self.create_vn(vn2_name, vn2_subnets,
                                      forwarding_mode=forwarding_mode)
    self.vm2_fixture = self.create_vm(self.vn2_fixture, vm2_name,
                                      image_name='cirros')
    assert self.vm2_fixture.wait_till_vm_is_up()
    self.lr = self.create_lr([self.vn_fixture, self.vn2_fixture])
    self.lr_obj = self.vnc_api_h.logical_router_read(
        fq_name=self.lr.lr_fq_name)
    self.addCleanup(self.lr.delete)
def create_sub_intf(self, vn_fix_uuid, intf_type, vlan=101,
                    mac_address=None):
    parent_port_vn_subnets = [get_random_cidr(af=self.inputs.get_af())]
    parent_port_vn_name = get_random_name(intf_type + "_parent_port_vn")
    parent_port_vn_fixture = self.config_vn(parent_port_vn_name,
                                            parent_port_vn_subnets)
    parent_port = self.setup_vmi(parent_port_vn_fixture.uuid)
    port = self.setup_vmi(vn_fix_uuid,
                          parent_vmi=parent_port.vmi_obj,
                          vlan_id=vlan,
                          api_type='contrail',
                          mac_address=mac_address)
    return port
def _get_vn_for_config(self, vn_name, vn_subnets, vn_fixture,
                       vn_name_prefix):
    if vn_fixture:
        vn_name = vn_fixture.vn_name
        vn_subnets = [x['cidr'] for x in vn_fixture.vn_subnets]
    else:
        vn_name = vn_name or get_random_name(vn_name_prefix)
        vn_subnets = vn_subnets or \
            [get_random_cidr(af=self.inputs.get_af())]
    vn_fixture = vn_fixture or self.config_vn(vn_name, vn_subnets)
    vn_fq_name = vn_fixture.vn_fq_name
    return (vn_name, vn_subnets, vn_fixture, vn_fq_name)
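# Hypothetical caller sketch (the method name is invented for illustration):
# helpers such as _get_vn_for_config let a test accept either a ready-made
# VN fixture or just a name/subnet pair; the caller normalises its inputs
# once and works with the returned fixture afterwards.
def _example_setup_left_vn(self, vn_fixture=None, vn_name=None,
                           vn_subnets=None):
    # Reuse the supplied fixture if there is one, otherwise create a VN
    # with a random name/CIDR under the 'left_vn' prefix.
    vn_name, vn_subnets, vn_fixture, vn_fq_name = self._get_vn_for_config(
        vn_name, vn_subnets, vn_fixture, 'left_vn')
    return vn_fixture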
def __init__(self, domain='default-domain', project='admin',
             username=None, password=None):
    #
    # Domain and project defaults: Do not change until support for
    # non-default is tested!
    self.domain = domain
    self.project = project
    self.username = username
    self.password = password
    #
    # Define VN's in the project:
    self.vnet_list = [get_random_name('vnet0')]
    #
    # Define network info for each VN:
    if self.project == 'vCenter':
        # For vcenter, only one subnet per VN is supported
        self.vn_nets = {self.vnet_list[0]: [get_random_cidr(af='v4')]}
    else:
        self.vn_nets = {self.vnet_list[0]: ['10.1.1.0/24', '11.1.1.0/24']}
    #
    # Define network policies
    self.policy_list = list()
    for i in range(10):
        self.policy_list.append(get_random_name('policy%d' % i))
    self.vn_policy = {self.vnet_list[0]: self.policy_list}
    #
    # Define VM's
    # VM distribution on available compute nodes is handled by nova
    # scheduler or contrail vm naming scheme
    self.vn_of_vm = {get_random_name('vmc0'): self.vnet_list[0]}
    #
    # Define network policy rules.
    # Each policy carries four deny rules for a single protocol,
    # differing only in the source port ([0, 0] through [3, 3]).
    self.rules = {}
    policy_protocols = ['udp', 'icmp', 'udp', 'udp', 'udp',
                        'udp', 'tcp', 'tcp', 'udp', 'tcp']
    for policy, proto in zip(self.policy_list, policy_protocols):
        self.rules[policy] = [{'direction': '>',
                               'protocol': proto,
                               'dest_network': self.vnet_list[0],
                               'source_network': self.vnet_list[0],
                               'dst_ports': 'any',
                               'simple_action': 'deny',
                               'src_ports': [port, port]}
                              for port in range(4)]
def config_basic(self):
    vn_name = get_random_name('bgpaas_vn')
    vn2_name = get_random_name('bgpaas_vn')
    vn_subnets = [get_random_cidr()]
    vn2_subnets = [get_random_cidr()]
    vn_fixture = self.create_vn(vn_name, vn_subnets)
    rt_value = randint(50000, 60000)
    vn_fixture.add_route_target(vn_fixture.ri_name,
                                self.inputs.router_asn, rt_value)
    vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
    vn2_fixture.add_route_target(vn2_fixture.ri_name,
                                 self.inputs.router_asn, rt_value)
    test_vm = self.create_vm(vn_fixture, 'test_vm', image_name='cirros')
    test2_vm = self.create_vm(vn2_fixture, 'test2_vm', image_name='cirros')
    assert test_vm.wait_till_vm_is_up()
    assert test2_vm.wait_till_vm_is_up()
    ret_dict = {
        'vn_fixture': vn_fixture,
        'test_vm': test_vm,
        'test2_vm': test2_vm,
    }
    return ret_dict
def create_vn(self, vn_name=None, vn_subnets=None, vxlan_id=None,
              enable_dhcp=True):
    if not vn_name:
        vn_name = get_random_name('vn')
    if not vn_subnets:
        vn_subnets = [get_random_cidr()]
    return self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  inputs=self.inputs,
                  vn_name=vn_name,
                  subnets=vn_subnets,
                  vxlan_id=vxlan_id,
                  enable_dhcp=enable_dhcp))
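# Hedged usage sketch for the create_vn helper above. The method name and the
# explicit subnet/vxlan values are illustrative, not taken from the original
# tests; with no arguments the helper falls back to a random name and CIDR.
def _example_create_vns(self):
    vn1 = self.create_vn()                         # random name and CIDR
    vn2 = self.create_vn('my_vn', ['192.168.10.0/24'],
                         vxlan_id=5001, enable_dhcp=False)
    return vn1, vn2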
def create_sub_intf(self, vn_fix_uuid, intf_type, mac_address=None):
    vlan = self.vlan
    parent_port_vn_subnets = [get_random_cidr(af=self.inputs.get_af())]
    parent_port_vn_name = get_random_name(intf_type + "_parent_port_vn")
    parent_port_vn_fixture = self.config_vn(parent_port_vn_name,
                                            parent_port_vn_subnets)
    parent_port = self.setup_vmi(parent_port_vn_fixture.uuid)
    mac_address = parent_port.mac_address
    port = self.setup_vmi(vn_fix_uuid,
                          parent_vmi=parent_port.vmi_obj,
                          vlan_id=vlan,
                          api_type='contrail',
                          mac_address=mac_address)
    return port, parent_port, parent_port_vn_fixture
def test_remove_policy_with_ref(self):
    ''' This tests the following scenarios.
        1. Validate that policy removal fails while the policy is still
           referenced by a VN.
        2. Validate vn_policy data in api-s against quantum-vn data, when
           created and the policy is unbound from the VN thru quantum APIs.
        3. Validate policy data in api-s against quantum-policy data, when
           created and deleted thru quantum APIs.
    '''
    vn1_name = get_random_name('vn4')
    vn1_subnets = [get_random_cidr(af='v4')]
    policy_name = get_random_name('policy1')
    rules = [
        {
            'direction': '<>', 'simple_action': 'pass',
            'protocol': 'icmp',
            'source_network': vn1_name,
            'dest_network': vn1_name,
        },
    ]
    policy_fixture = self.useFixture(
        PolicyFixture(policy_name=policy_name,
                      rules_list=rules,
                      inputs=self.inputs,
                      connections=self.connections))
    vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
    vn1_fixture.bind_policies([policy_fixture.policy_fq_name],
                              vn1_fixture.vn_id)
    assert vn1_fixture.verify_on_setup()
    ret = policy_fixture.verify_on_setup()
    if ret['result'] == False:
        self.logger.error("Policy %s verification failed after setup" %
                          policy_name)
        assert ret['result'], ret['msg']
    self.logger.info("Done with setup and verification, moving on to test ..")

    # Try to remove the policy, which is still referenced by the VN.
    policy_removal = True
    pol_id = None
    try:
        self.vnc_lib.network_policy_delete(id=policy_fixture.get_id())
    except Exception as e:
        policy_removal = False
    self.assertFalse(
        policy_removal,
        'Policy removal succeeded, which is not expected since the policy '
        'is still referenced by the VN')
    # assert vn1_fixture.verify_on_setup()
    # policy_fixture.verify_policy_in_api_server()
    return True
def create_interface_static_routes(self):
    ret_dict = self.config_basic()
    self.static_table_handle = ContrailVncApi(self.vnc_lib, self.logger)
    random_cidr = get_random_cidr()
    self.intf_table_to_right_obj = self.static_table_handle.create_route_table(
        prefixes=[random_cidr],
        name=get_random_name('int_table_right'),
        parent_obj=self.project.project_obj,
    )
    id_entry = self.inputs.project_fq_name[0] + ':' + \
        self.inputs.project_fq_name[1] + ':' + \
        ret_dict['vn_fixture'].vn_name
    self.static_table_handle.bind_vmi_to_interface_route_table(
        str(ret_dict['test_vm'].get_vmi_ids()[id_entry]),
        self.intf_table_to_right_obj)
    return ret_dict, random_cidr
def attach_to_vmi_common(self, hc_type='link-local'):
    # Only link-local type for non svmi
    vn_name = get_random_name('vn')
    vn_subnets = [get_random_cidr()]
    vn_fixture = self.create_vn(vn_name, vn_subnets)
    vm1 = self.create_vm(vn_fixture, 'vm1', image_name='cirros')
    assert vm1.wait_till_vm_is_up()
    vm_port = vm1.vmi_ids[vm1.vn_fq_name]
    local_ip = vm1.vm_ip
    shc_fixture = self.create_hc(hc_type=hc_type)
    self.attach_shc_to_vmi(shc_fixture, vm1)
    self.addCleanup(self.detach_shc_from_vmi, shc_fixture, vm1)
    assert vm1.verify_hc_in_agent()
    assert vm1.verify_hc_is_active()
    return True
def test_rp_secondary_routes(self):
    '''
    Maintainer: [email protected]
    Description: CEM-6735 - Enhanced import policy extended to MP-BGP
        route type
    To verify: routing-policy to change routing-parameters for secondary
        routes (routes from external devices)
    1. Create VN and add the MX's route-target to the VN, to import
       routes from the MX into the VN.
    2. Retrieve the local-preference advertised by the MX.
    3. Create a routing-policy to change local-preference and attach it
       to the VN.
    4. Verify the updated routing-policy is applied to secondary routes
       from the MX and the local-preference value is set to the new value
       specified through the routing-policy.
    '''
    vm1_name = get_random_name('vm_private')
    vn1_name = get_random_name('vn_private')
    vn1_subnets = [get_random_cidr()]
    self.allow_default_sg_to_allow_all_on_project(self.inputs.project_name)
    vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
    mx_rt = self.inputs.mx_rt
    if self.inputs.config['test_configuration'].get('router_asn', False):
        router_asn = self.inputs.config['test_configuration'].get('router_asn')
    else:
        router_asn = self.inputs.bgp_asn
    vn1_fixture.add_route_target(
        routing_instance_name=vn1_fixture.ri_name,
        router_asn=router_asn,
        route_target_number=mx_rt)
    vn1_fixture.verify_on_setup()
    vm1_fixture = self.create_vm(vn1_fixture, vm1_name, image_name='ubuntu')
    vm1_fixture.wait_till_vm_is_up()
    initial_local_pref = -1
    new_local_pref = -1
    for cn in self.inputs.bgp_control_ips:
        cn_entries = self.cn_inspect[cn].get_cn_route_table_entry(
            prefix="0.0.0.0/0", table="inet.0",
            ri_name=vn1_fixture.ri_name)
        if cn_entries:
            initial_local_pref = int(cn_entries[0]['local_preference'])
    if initial_local_pref == -1:
        assert False, ("Default route 0.0.0.0/0 is not advertised by the MX. "
                       "Check the MX routing-instance configuration.")
    config_dicts = {'vn_fixture': vn1_fixture, 'from_term': 'protocol',
                    'sub_from': 'bgp', 'to_term': 'local-preference',
                    'sub_to': initial_local_pref + 10}
    rp = self.configure_term_routing_policy(config_dicts)
    time.sleep(10)
    for cn in self.inputs.bgp_control_ips:
        cn_entries = self.cn_inspect[cn].get_cn_route_table_entry(
            prefix="0.0.0.0/0", table="inet.0",
            ri_name=vn1_fixture.ri_name)
        if cn_entries:
            new_local_pref = int(cn_entries[0]['local_preference'])
    self.logger.info("Old local-preference: %d, new local-preference: %d" %
                     (initial_local_pref, new_local_pref))
    if new_local_pref != initial_local_pref + 10:
        assert False, ("Error: routing-policy not applied on secondary "
                       "routes from the MX; local-preference is not updated")
    self.logger.info("PASS: routing-policy is applied correctly for "
                     "secondary routes from the MX")
def config_basic(self):
    vn_name = get_random_name('bgpaas_vn')
    vn_subnets = [get_random_cidr()]
    vn_fixture = self.create_vn(vn_name, vn_subnets)
    test_vm = self.create_vm(vn_fixture, 'test_vm',
                             image_name='ubuntu-traffic')
    assert test_vm.wait_till_vm_is_up()
    bgpaas_vm1 = self.create_vm(vn_fixture, 'bgpaas_vm1',
                                image_name='vsrx')
    assert bgpaas_vm1.wait_till_vm_is_up()
    ret_dict = {
        'vn_fixture': vn_fixture,
        'test_vm': test_vm,
        'bgpaas_vm1': bgpaas_vm1,
    }
    return ret_dict
def _get_vn_for_config(self, vn_name, vn_subnets, vn_fixture,
                       vn_name_prefix, **kwargs):
    if vn_fixture:
        vn_name = vn_fixture.vn_name
        vn_subnets = [x['cidr'] for x in vn_fixture.vn_subnets]
    else:
        vn_name = vn_name or get_random_name(vn_name_prefix)
        vn_subnets = vn_subnets or \
            [get_random_cidr(af=self.inputs.get_af())]
    vn_fixture = vn_fixture or self.config_vn(vn_name, vn_subnets, **kwargs)
    vn_fq_name = vn_fixture.vn_fq_name
    return (vn_name, vn_subnets, vn_fixture, vn_fq_name)
def test_rp_network_static_matrix(self):
    '''
    1. Create a routing policy with a network static-route match and
       different "to" conditions: med, as-path, local-pref, community.
    2. Launch VMs.
    3. Attach policy to VN and confirm if policy takes hold.
    '''
    ret_dict = self.config_basic()
    vn_fixture = ret_dict['vn_fixture']
    test_vm = ret_dict['test_vm']
    test2_vm = ret_dict['test2_vm']
    self.static_table_handle = ContrailVncApi(self.vnc_lib, self.logger)
    random_cidr = get_random_cidr()
    self.nw_handle_to_right = self.static_table_handle.create_route_table(
        prefixes=[random_cidr],
        name="network_table_left_to_right",
        next_hop=test_vm.vm_ip,
        parent_obj=self.project.project_obj,
        next_hop_type='ip-address',
        route_table_type='network',
    )
    self.static_table_handle.bind_network_route_table_to_vn(
        vn_uuid=vn_fixture.uuid,
        nw_route_table_obj=self.nw_handle_to_right)
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'static', 'to_term': 'community',
                    'sub_to': '64512:55555'}
    rp = self.configure_term_routing_policy(config_dicts)
    assert self.verify_policy_in_control(vn_fixture, test_vm,
                                         search_ip=random_cidr,
                                         search_value='55555')
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'static', 'to_term': 'add_ext_community',
                    'sub_to': 'target:64512:44444'}
    rp = self.configure_term_routing_policy(config_dicts)
    assert self.verify_policy_in_control(vn_fixture, test_vm,
                                         search_ip=random_cidr,
                                         search_value='target:64512:44444')
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'static', 'to_term': 'set_ext_community',
                    'sub_to': 'target:64512:33333'}
    rp = self.configure_term_routing_policy(config_dicts)
    assert self.verify_policy_in_control(
        vn_fixture, test_vm, search_ip=random_cidr,
        search_value='target:64512:33333'), \
        'Search term not found in introspect'
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'static', 'to_term': 'med',
                    'sub_to': '444'}
    rp = self.configure_term_routing_policy(config_dicts)
    assert self.verify_policy_in_control(vn_fixture, test_vm,
                                         search_ip=random_cidr,
                                         search_value='444')
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'static', 'to_term': 'local-preference',
                    'sub_to': '555'}
    rp = self.configure_term_routing_policy(config_dicts)
    assert self.verify_policy_in_control(vn_fixture, test_vm,
                                         search_ip=random_cidr,
                                         search_value='555')
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'static', 'to_term': 'as-path',
                    'sub_to': '666'}
    rp = self.configure_term_routing_policy(config_dicts)
    assert self.verify_policy_in_control(vn_fixture, test_vm,
                                         search_ip=random_cidr,
                                         search_value='666')
    assert test_vm.ping_with_certainty(test2_vm.vm_ip)
def create_vn(self, vn_name=None, vn_subnets=None, vxlan_id=None,
              enable_dhcp=True, cleanup=True):
    if not vn_name:
        vn_name = get_random_name('vn')
    if not vn_subnets:
        vn_subnets = [get_random_cidr()]
    vn_fixture = VNFixture(project_name=self.inputs.project_name,
                           connections=self.connections,
                           inputs=self.inputs,
                           vn_name=vn_name,
                           subnets=vn_subnets,
                           vxlan_id=vxlan_id,
                           enable_dhcp=enable_dhcp)
    vn_fixture.setUp()
    if cleanup:
        self.addCleanup(vn_fixture.cleanUp)
    return vn_fixture
def create_only_vn(cls, vn_name=None, vn_subnets=None, vxlan_id=None,
                   enable_dhcp=True, **kwargs):
    '''Classmethod to do only VN creation'''
    if not vn_name:
        vn_name = get_random_name('vn')
    if not vn_subnets:
        vn_subnets = [get_random_cidr()]
    vn_fixture = VNFixture(project_name=cls.inputs.project_name,
                           connections=cls.connections,
                           inputs=cls.inputs,
                           vn_name=vn_name,
                           subnets=vn_subnets,
                           vxlan_id=vxlan_id,
                           enable_dhcp=enable_dhcp,
                           **kwargs)
    vn_fixture.setUp()
    return vn_fixture
def test_rp_bgpaas_matrix(self):
    '''
    1. Create a routing policy with bgpaas match and different "to"
       conditions: med, as-path, local-pref, community.
    2. Launch VMs.
    3. Attach policy to VN and confirm if policy takes hold.
    '''
    vn_name = get_random_name('bgpaas_vn')
    vn_subnets = [get_random_cidr()]
    vn_fixture = self.create_vn(vn_name, vn_subnets)
    test_vm = self.create_vm(vn_fixture, 'test_vm',
                             image_name='ubuntu-traffic')
    assert test_vm.wait_till_vm_is_up()
    bgpaas_vm1 = self.create_vm(vn_fixture, 'bgpaas_vm1',
                                image_name='vsrx')
    assert bgpaas_vm1.wait_till_vm_is_up()
    bgpaas_fixture = self.create_bgpaas(bgpaas_shared=True,
                                        autonomous_system=64500,
                                        bgpaas_ip_address=bgpaas_vm1.vm_ip)
    bgpaas_vm1.wait_for_ssh_on_vm()
    port1 = {}
    port1['id'] = bgpaas_vm1.vmi_ids[bgpaas_vm1.vn_fq_name]
    address_families = ['inet', 'inet6']
    autonomous_system = 64500
    gw_ip = vn_fixture.get_subnets()[0]['gateway_ip']
    dns_ip = vn_fixture.get_subnets()[0]['dns_server_address']
    neighbors = [gw_ip, dns_ip]
    self.logger.info('Configuring BGP on the vSRX')
    self.config_bgp_on_vsrx(src_vm=test_vm, dst_vm=bgpaas_vm1,
                            bgp_ip=bgpaas_vm1.vm_ip,
                            lo_ip=bgpaas_vm1.vm_ip,
                            address_families=address_families,
                            autonomous_system=autonomous_system,
                            neighbors=neighbors,
                            bfd_enabled=False)
    bgpaas_vm1.wait_for_ssh_on_vm()
    self.attach_vmi_to_bgpaas(port1['id'], bgpaas_fixture)
    self.addCleanup(self.detach_vmi_from_bgpaas,
                    port1['id'], bgpaas_fixture)
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'bgpaas', 'to_term': 'med',
                    'sub_to': '444'}
    rp = self.configure_term_routing_policy(config_dicts)
    sleep(90)
    assert self.verify_policy_in_control(
        vn_fixture, test_vm,
        search_ip=str(vn_fixture.get_subnets()[0]['cidr']),
        search_value='444')
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'bgpaas', 'to_term': 'local-preference',
                    'sub_to': '555'}
    rp = self.configure_term_routing_policy(config_dicts)
    sleep(90)
    assert self.verify_policy_in_control(
        vn_fixture, test_vm,
        search_ip=str(vn_fixture.get_subnets()[0]['cidr']),
        search_value='555')
    config_dicts = {'vn_fixture': vn_fixture, 'from_term': 'protocol',
                    'sub_from': 'bgpaas', 'to_term': 'as-path',
                    'sub_to': '666'}
    rp = self.configure_term_routing_policy(config_dicts)
    sleep(90)
    assert self.verify_policy_in_control(
        vn_fixture, test_vm,
        search_ip=str(vn_fixture.get_subnets()[0]['cidr']),
        search_value='666')
def test_ecmp_with_static_routes_intra_node(self):
    """
    Description: Verify disabling policy for ECMP routes with static
        routes on VM
    Steps:
        1. launch 1 VN and launch 3 VMs on the same node
        2. create a static route for a new subnet prefix and add this
           on 2 VMIs. this will create 2 ECMP routes.
        3. Disable the policy on all VMIs.
        4. Now from 3rd VM send traffic to an IP from static route prefix
    Pass criteria:
        1. traffic should go through fine
        2. flows should not be created
        3. load should be distributed among ecmp routes.
    """
    vn_fixtures = self.create_vns(count=1)
    self.verify_vns(vn_fixtures)
    vn1_fixture = vn_fixtures[0]
    prefix = get_random_cidr(af=self.inputs.get_af())
    assert prefix, "Unable to get a random CIDR"
    compute_hosts = self.orch.get_hosts()

    # Launch all VMs on the same node, to test intra node traffic
    image = 'ubuntu-traffic'
    vm_fixtures = self.create_vms(vn_fixture=vn1_fixture, count=3,
                                  node_name=compute_hosts[0],
                                  image_name=image)
    self.verify_vms(vm_fixtures)
    vm1_fixture = vm_fixtures[0]
    vm2_fixture = vm_fixtures[1]
    vm3_fixture = vm_fixtures[2]
    static_ip = self.add_static_routes_on_vms(prefix,
                                              [vm2_fixture, vm3_fixture])
    self.disable_policy_for_vms(vm_fixtures)
    assert self.verify_ecmp_routes([vm2_fixture, vm3_fixture], prefix)
    assert self.verify_traffic_for_ecmp(vm1_fixture,
                                        [vm2_fixture, vm3_fixture],
                                        static_ip)
def verify_multi_inline_svc(self,
                            si_list=[('bridge', 1), ('in-net', 1), ('nat', 1)],
                            flavor='contrail_flavor_2cpu',
                            ordered_interfaces=True,
                            vn1_subnets=None,
                            vn2_subnets=None):
    """Validate in-line multi service chaining in network datapath"""
    vn1_subnets = vn1_subnets or [get_random_cidr(af=self.inputs.get_af())]
    vn2_subnets = vn2_subnets or [get_random_cidr(af=self.inputs.get_af())]
    self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \
        ":" + get_random_name("in_network_vn1")
    self.vn1_name = self.vn1_fq_name.split(':')[2]
    self.vn1_subnets = vn1_subnets
    self.vm1_name = get_random_name("in_network_vm1")
    self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \
        ":" + get_random_name("in_network_vn2")
    self.vn2_name = self.vn2_fq_name.split(':')[2]
    self.vn2_subnets = vn2_subnets
    self.vm2_name = get_random_name("in_network_vm2")
    self.action_list = []
    self.si_list = []
    self.policy_name = get_random_name("policy_in_network")
    self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
    self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
    for si in si_list:
        self.if_list = [['management', False, False],
                        ['left', True, False],
                        ['right', True, False]]
        svc_scaling = False
        si_count = 1
        self.st_name = get_random_name(
            "multi_sc_" + si[0] + "_" + str(si_list.index(si)) + "_st")
        si_prefix = get_random_name(
            "multi_sc_" + si[0] + "_" + str(si_list.index(si)) + "_si") + "_"
        max_inst = si[1]
        left_vn = self.vn1_fq_name
        right_vn = self.vn2_fq_name
        if max_inst > 1:
            svc_scaling = True
        if si[0] == 'nat':
            svc_mode = 'in-network-nat'
            svc_img_name = 'vsrx'
        elif si[0] == 'in-net':
            svc_mode = 'in-network'
            svc_img_name = 'ubuntu-in-net'
        else:
            svc_mode = 'transparent'
            svc_img_name = 'tiny_trans_fw'
            left_vn = None
            right_vn = None
        self.st_fixture, self.si_fixtures = self.config_st_si(
            self.st_name, si_prefix, si_count, svc_scaling, max_inst,
            left_vn=left_vn, right_vn=right_vn, svc_mode=svc_mode,
            flavor=flavor, ordered_interfaces=ordered_interfaces,
            project=self.inputs.project_name, svc_img_name=svc_img_name)
        action_step = self.chain_si(
            si_count, si_prefix, self.inputs.project_name)
        self.action_list += action_step
        self.si_list += self.si_fixtures
    self.rules = [
        {
            'direction': '<>',
            'protocol': 'any',
            'source_network': self.vn1_name,
            'src_ports': [0, -1],
            'dest_network': self.vn2_name,
            'dst_ports': [0, -1],
            'simple_action': None,
            'action_list': {'apply_service': self.action_list}
        },
    ]
    self.policy_fixture = self.config_policy(self.policy_name, self.rules)
    self.vn1_policy_fix = self.attach_policy_to_vn(
        self.policy_fixture, self.vn1_fixture)
    self.vn2_policy_fix = self.attach_policy_to_vn(
        self.policy_fixture, self.vn2_fixture)
    self.vm1_fixture = self.config_and_verify_vm(
        self.vn1_fixture, self.vm1_name)
    self.vm2_fixture = self.config_and_verify_vm(
        self.vn2_fixture, self.vm2_name)
    for si_fix in self.si_fixtures:
        si_fix.verify_on_setup()
    result, msg = self.validate_vn(
        self.vn1_name, project_name=self.inputs.project_name)
    assert result, msg
    result, msg = self.validate_vn(
        self.vn2_name, project_name=self.inputs.project_name)
    assert result, msg
    # Ping from left VM to right VM
    errmsg = "Ping to right VM ip %s from left VM failed" % \
        self.vm2_fixture.vm_ip
    assert self.vm1_fixture.ping_with_certainty(
        self.vm2_fixture.vm_ip), errmsg
    return True
def verify_svc_in_network_datapath(self, si_count=1, svc_scaling=False,
                                   max_inst=1, svc_mode='in-network-nat',
                                   flavor='contrail_flavor_2cpu',
                                   static_route=['None', 'None', 'None'],
                                   ordered_interfaces=True,
                                   svc_img_name='vsrx',
                                   vn1_subnets=None,
                                   vn2_fixture=None,
                                   vn2_subnets=None,
                                   ci=False):
    """Validate the service chaining in network datapath"""
    self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \
        ":" + get_random_name("in_network_vn1")
    self.vn1_name = self.vn1_fq_name.split(':')[2]
    self.vn1_subnets = vn1_subnets or \
        [get_random_cidr(af=self.inputs.get_af())]
    self.vm1_name = get_random_name("in_network_vm1")
    self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \
        ":" + get_random_name("in_network_vn2")
    self.vn2_name = self.vn2_fq_name.split(':')[2]
    self.vn2_subnets = vn2_subnets or \
        [get_random_cidr(af=self.inputs.get_af())]
    self.vm2_name = get_random_name("in_network_vm2")
    self.action_list = []
    self.if_list = [['management', False, False],
                    ['left', True, False],
                    ['right', True, False]]
    for entry in static_route:
        if entry != 'None':
            self.if_list[static_route.index(entry)][2] = True
    self.st_name = get_random_name("in_net_svc_template_1")
    si_prefix = get_random_name("in_net_svc_instance") + "_"
    self.policy_name = get_random_name("policy_in_network")
    self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
    if vn2_fixture is None:
        self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
    else:
        self.vn2_fixture = vn2_fixture
        self.vn2_fq_name = vn2_fixture.vn_fq_name
        self.vn2_name = self.vn2_fq_name.split(':')[2]
    self.st_fixture, self.si_fixtures = self.config_st_si(
        self.st_name, si_prefix, si_count, svc_scaling, max_inst,
        left_vn=self.vn1_fq_name, right_vn=self.vn2_fq_name,
        svc_mode=svc_mode, flavor=flavor, static_route=static_route,
        ordered_interfaces=ordered_interfaces, svc_img_name=svc_img_name,
        project=self.inputs.project_name)
    self.action_list = self.chain_si(
        si_count, si_prefix, self.inputs.project_name)
    self.rules = [
        {
            'direction': '<>',
            'protocol': 'any',
            'source_network': self.vn1_fq_name,
            'src_ports': [0, -1],
            'dest_network': self.vn2_fq_name,
            'dst_ports': [0, -1],
            'simple_action': None,
            'action_list': {'apply_service': self.action_list}
        },
    ]
    self.policy_fixture = self.config_policy(self.policy_name, self.rules)
    self.vn1_policy_fix = self.attach_policy_to_vn(
        self.policy_fixture, self.vn1_fixture)
    self.vn2_policy_fix = self.attach_policy_to_vn(
        self.policy_fixture, self.vn2_fixture)
    if ci and self.inputs.get_af() == 'v4':
        image_name = 'cirros-0.3.0-x86_64-uec'
    else:
        image_name = 'ubuntu-traffic'
    self.vm1_fixture = self.config_and_verify_vm(
        self.vn1_fixture, self.vm1_name, image_name)
    self.vm2_fixture = self.config_and_verify_vm(
        self.vn2_fixture, self.vm2_name, image_name)
    for si_fix in self.si_fixtures:
        si_fix.verify_on_setup()
    result, msg = self.validate_vn(
        self.vn1_name, project_name=self.vn1_fixture.project_name)
    assert result, msg
    result, msg = self.validate_vn(
        self.vn2_name, project_name=self.vn2_fixture.project_name)
    assert result, msg
    # Ping from left VM to right VM
    errmsg = "Ping to right VM ip %s from left VM failed" % \
        self.vm2_fixture.vm_ip
    assert self.vm1_fixture.ping_with_certainty(
        self.vm2_fixture.vm_ip), errmsg
    return True
def test_policy_with_spl_char_in_name(self):
    result = True
    vn1_name = get_random_name('vn1')
    vn1_subnets = [get_random_cidr()]
    vn2_name = get_random_name('vn2')
    vn2_subnets = [get_random_cidr()]
    policy_name = 'policy1' + gen_str_with_spl_char(10)
    rules = [
        {
            'direction': '<>', 'simple_action': 'pass',
            'protocol': 'any',
            'src_ports': 'any',
            'dst_ports': 'any',
            'source_network': 'any',
            'dest_network': 'any',
        },
    ]
    policy_fixture = self.useFixture(
        PolicyFixture(policy_name=policy_name,
                      rules_list=rules,
                      inputs=self.inputs,
                      connections=self.connections))
    vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
    vn1_fixture.bind_policies([policy_fixture.policy_fq_name],
                              vn1_fixture.vn_id)
    vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
    vn2_fixture.bind_policies([policy_fixture.policy_fq_name],
                              vn2_fixture.vn_id)
    self.addCleanup(vn1_fixture.unbind_policies, vn1_fixture.vn_id,
                    [policy_fixture.policy_fq_name])
    assert vn1_fixture.verify_on_setup()
    self.addCleanup(vn2_fixture.unbind_policies, vn2_fixture.vn_id,
                    [policy_fixture.policy_fq_name])
    assert vn2_fixture.verify_on_setup()
    vn1_vm1_name = get_random_name('vn1_vm1')
    vn2_vm1_name = get_random_name('vn2_vm1')
    vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
    vm2_fixture = self.create_vm(vn1_fixture, vn2_vm1_name)
    vm1_fixture.wait_till_vm_is_up()
    vm2_fixture.wait_till_vm_is_up()
    if not vm1_fixture.ping_to_ip(vm2_fixture.vm_ip):
        self.logger.error(
            'Ping from %s to %s failed, expected it to pass' %
            (vm1_fixture.vm_name, vm2_fixture.vm_name))
        result = False
    if not vm2_fixture.ping_to_ip(vm1_fixture.vm_ip):
        self.logger.error(
            'Ping from %s to %s failed, expected it to pass' %
            (vm2_fixture.vm_name, vm1_fixture.vm_name))
        result = False
    self.inputs.restart_service('ifmap', host_ips=self.inputs.cfgm_ips)
    sleep(10)
    if not vm1_fixture.ping_to_ip(vm2_fixture.vm_ip):
        self.logger.error(
            'Ping from %s to %s failed, expected it to pass' %
            (vm1_fixture.vm_name, vm2_fixture.vm_name))
        result = False
    if not vm2_fixture.ping_to_ip(vm1_fixture.vm_ip):
        self.logger.error(
            'Ping from %s to %s failed, expected it to pass' %
            (vm2_fixture.vm_name, vm1_fixture.vm_name))
        result = False
    return result
def verify_add_new_vns(self, svc_chain_info): left_vn_policy_fix = svc_chain_info['left_vn_policy_fix'] right_vn_policy_fix = svc_chain_info['right_vn_policy_fix'] policy_fixture = svc_chain_info['policy_fixture'] left_vm_fixture = svc_chain_info['left_vm_fixture'] right_vm_fixture = svc_chain_info['right_vm_fixture'] si_fixture = svc_chain_info['si_fixture'] # Create one more left and right VN's new_left_vn = "new_left_bridge_vn" new_left_vn_net = [get_random_cidr(af=self.inputs.get_af())] new_right_vn = "new_right_bridge_vn" new_right_vn_net = [get_random_cidr(af=self.inputs.get_af())] new_left_vn_fix = self.config_vn(new_left_vn, new_left_vn_net) new_right_vn_fix = self.config_vn(new_right_vn, new_right_vn_net) # Launch VMs in new left and right VN's new_left_vm = 'new_left_bridge_vm' new_right_vm = 'new_right_bridge_vm' new_left_vm_fix = self.config_vm(vn_fix=new_left_vn_fix, vm_name=new_left_vm) new_right_vm_fix = self.config_vm(vn_fix=new_right_vn_fix, vm_name=new_right_vm) assert new_left_vm_fix.verify_on_setup() assert new_right_vm_fix.verify_on_setup() # Wait for VM's to come up assert new_left_vm_fix.wait_till_vm_is_up() assert new_right_vm_fix.wait_till_vm_is_up() # Add rule to policy to allow traffic from new left_vn to right_vn # through SI mirror_fq_name = si_fixture.fq_name_str rules = [{'direction': '<>', 'protocol': 'icmp', 'source_network': new_left_vn, 'src_ports': [0, 65535], 'dest_network': new_right_vn, 'dst_ports': [0, 65535], 'simple_action': 'pass', 'action_list': {'simple_action': 'pass', 'mirror_to': {'analyzer_name': mirror_fq_name}} }, {'direction': '<>', 'protocol': 'icmp6', 'source_network': new_left_vn, 'src_ports': [0, 65535], 'dest_network': new_right_vn, 'dst_ports': [0, 65535], 'simple_action': 'pass', 'action_list': {'simple_action': 'pass', 'mirror_to': {'analyzer_name': mirror_fq_name}} }] policy_fixture.input_rules_list.extend(rules) policy_fixture.update_policy_api(policy_fixture.input_rules_list) # Create new policy with rule to allow traffic from new VN's self.attach_policy_to_vn(policy_fixture, new_left_vn_fix) self.attach_policy_to_vn(policy_fixture, new_right_vn_fix) assert self.verify_si(si_fixture) self._verify_proto_based_mirror(si_fixture, left_vm_fixture, right_vm_fixture, 'icmp') self._verify_proto_based_mirror(si_fixture, new_left_vm_fix, new_right_vm_fix, 'icmp') return True
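# --- Sketch only: the paired icmp/icmp6 mirror rules appended by
# --- verify_add_new_vns() above, factored into a small builder. The rule keys
# --- are taken from the test; the helper itself is illustrative.

def build_mirror_rules(src_vn, dst_vn, analyzer_fq_name):
    """'<>' pass rules for icmp and icmp6 that also mirror matching traffic
    to the given analyzer service instance."""
    return [{
        'direction': '<>',
        'protocol': proto,
        'source_network': src_vn,
        'src_ports': [0, 65535],
        'dest_network': dst_vn,
        'dst_ports': [0, 65535],
        'simple_action': 'pass',
        'action_list': {'simple_action': 'pass',
                        'mirror_to': {'analyzer_name': analyzer_fq_name}},
    } for proto in ('icmp', 'icmp6')]

# Usage (mirrors the test):
#   policy_fixture.input_rules_list.extend(
#       build_mirror_rules('new_left_bridge_vn', 'new_right_bridge_vn',
#                          si_fixture.fq_name_str))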
def verify_firewall_with_mirroring( self, max_inst=1, firewall_svc_mode='in-network', mirror_svc_mode='transparent'): """Validate the service chaining in network datapath""" #TODO # max_inst cannot be more than one in this method since # analyzer packet count verification logic needs to be updated when # in case of more than one mirror SVM max_inst = 1 vn1_name = get_random_name('left_vn') vn2_name = get_random_name('right_vn') vn1_subnets = [get_random_cidr(af=self.inputs.get_af())] vn2_subnets = [get_random_cidr(af=self.inputs.get_af())] vm1_name = get_random_name("in_network_vm1") vm2_name = get_random_name("in_network_vm2") action_list = [] firewall_st_name = get_random_name("svc_firewall_template_1") firewall_si_prefix = get_random_name("svc_firewall_instance") mirror_st_name = get_random_name("svc_mirror_template_1") mirror_si_prefix = get_random_name("svc_mirror_instance") policy_name = get_random_name("policy_in_network") mgmt_vn_fixture = self.config_vn(get_random_name('mgmt'), [get_random_cidr(af=self.inputs.get_af())]) vn1_fixture = self.config_vn(vn1_name, vn1_subnets) vn2_fixture = self.config_vn(vn2_name, vn2_subnets) vns = [mgmt_vn_fixture, vn1_fixture, vn2_fixture] def firewall_svc_create(vn_list): st_fixture = self.config_st(firewall_st_name, service_type='firewall', service_mode=firewall_svc_mode, mgmt=getattr(mgmt_vn_fixture, 'vn_fq_name', None), left=vn_list[1].vn_fq_name, right=vn_list[2].vn_fq_name) svm_fixtures = self.create_service_vms(vn_list, service_mode=st_fixture.service_mode, service_type=st_fixture.service_type, max_inst=max_inst) firewall_si_fixture = self.config_si(firewall_si_prefix, st_fixture, max_inst=max_inst, mgmt_vn_fq_name=getattr(mgmt_vn_fixture, 'vn_fq_name', None), left_vn_fq_name=vn_list[1].vn_fq_name, right_vn_fq_name=vn_list[2].vn_fq_name, svm_fixtures=svm_fixtures) assert firewall_si_fixture.verify_on_setup() return firewall_si_fixture if firewall_svc_mode == 'transparent': dummy_vn1 = self.config_vn('dummy_vn1', [get_random_cidr(af=self.inputs.get_af())]) dummy_vn2 = self.config_vn('dummy_vn2', [get_random_cidr(af=self.inputs.get_af())]) dummy_vn_list = [mgmt_vn_fixture, dummy_vn1, dummy_vn2] firewall_si_fixture = firewall_svc_create(dummy_vn_list) else: firewall_si_fixture = firewall_svc_create(vns) action_list = [firewall_si_fixture.fq_name_str] mirror_st_fixture = self.config_st(mirror_st_name, service_type='analyzer', service_mode=mirror_svc_mode, left=vn1_fixture.vn_fq_name) mirror_svm_fixtures = self.create_service_vms([vn1_fixture], service_mode=mirror_st_fixture.service_mode, service_type=mirror_st_fixture.service_type, max_inst=max_inst) mirror_si_fixture = self.config_si(mirror_si_prefix, mirror_st_fixture, max_inst=max_inst, left_vn_fq_name=vn1_fixture.vn_fq_name, svm_fixtures=mirror_svm_fixtures) assert mirror_si_fixture.verify_on_setup() action_list += [mirror_si_fixture.fq_name_str] rules = [ { 'direction': '<>', 'protocol': 'any', 'source_network': vn1_name, 'src_ports': [0, 65535], 'dest_network': vn2_name, 'dst_ports': [0, 65535], 'simple_action': 'pass', 'action_list': {'simple_action': 'pass', 'mirror_to': {'analyzer_name': action_list[1]}, 'apply_service': action_list[:1]} }, ] policy_fixture = self.config_policy(policy_name, rules) vn1_policy_fix = self.attach_policy_to_vn( policy_fixture, vn1_fixture) vn2_policy_fix = self.attach_policy_to_vn( policy_fixture, vn2_fixture) vm1_fixture = self.config_vm(vm1_name, vn_fix=vn1_fixture) vm2_fixture = self.config_vm(vm2_name, vn_fix=vn2_fixture) vm1_fixture.wait_till_vm_is_up() 
vm2_fixture.wait_till_vm_is_up() result, msg = self.validate_vn(vn1_fixture.vn_fq_name) assert result, msg result, msg = self.validate_vn(vn2_fixture.vn_fq_name) assert result, msg assert self.verify_si(firewall_si_fixture) assert self.verify_si(mirror_si_fixture) svms = firewall_si_fixture.svm_list svm_node_ip = svms[0].vm_node_ip # Ping from left VM to right VM errmsg = "Ping to right VM ip %s from left VM failed" % vm2_fixture.vm_ip assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip), errmsg # Verify ICMP mirror sessions = self.tcpdump_on_all_analyzer(mirror_si_fixture) errmsg = "Ping to right VM ip %s from left VM failed" % vm2_fixture.vm_ip assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip), errmsg for svm_name, (session, pcap) in sessions.items(): if vm1_fixture.vm_node_ip == vm2_fixture.vm_node_ip: if firewall_svc_mode == 'transparent': count = 20 else: count = 10 if vm1_fixture.vm_node_ip != vm2_fixture.vm_node_ip: if firewall_svc_mode == 'in-network' and vm1_fixture.vm_node_ip == svm_node_ip: count = 10 else: count = 20 self.verify_icmp_mirror(svm_name, session, pcap, count)
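# --- The `count` passed to verify_icmp_mirror() above encodes how many
# --- mirrored ICMP packets the analyzer should capture. A pure-function
# --- sketch of that branching (illustrative, not framework code; the exact
# --- rationale depends on how many vrouters see the mirrored flow):

def expected_icmp_mirror_count(src_node_ip, dst_node_ip, svm_node_ip,
                               firewall_svc_mode, pings=10):
    """Expected analyzer packet count for `pings` echo requests, following
    the same branching as the verification loop above."""
    if src_node_ip == dst_node_ip:
        return 2 * pings if firewall_svc_mode == 'transparent' else pings
    # source and destination VMs on different computes
    if firewall_svc_mode == 'in-network' and src_node_ip == svm_node_ip:
        return pings
    return 2 * pings

assert expected_icmp_mirror_count('10.0.0.1', '10.0.0.1', '10.0.0.2',
                                  'transparent') == 20
assert expected_icmp_mirror_count('10.0.0.1', '10.0.0.2', '10.0.0.1',
                                  'in-network') == 10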
from tcutils.util import get_random_cidr ecmp_pt = { "parameters": {"domain": "default-domain", "dst_port_end": -1, "protocol": "any", "service_template_properties_version": 2, "svm2_name": "pt_svm2", "image": "cirros-0.3.0-x86_64-uec", "dst_port_start": -1, "service_template_properties_service_type": "firewall", "service_template_properties_service_mode": "in-network-nat", "network_ipam_refs_data_ipam_subnets_subnet_ip_prefix_3": '%s' % get_random_cidr().split('/')[0], "network_ipam_refs_data_ipam_subnets_subnet_ip_prefix_2": "%s" % get_random_cidr().split('/')[0], "simple_action": "pass", "flavor": "m1.tiny", "src_port_start": -1, "right_vn_fqdn": "", "service_template_properties_ordered_interfaces": "true", "left_vn": "left_vn", "network_ipam_refs_data_ipam_subnets_addr_from_start_true": "true", "left_vn_fqdn": "", "network_ipam_refs_data_ipam_subnets_subnet_ip_prefix_len_3": 24, "service_template_properties_interface_type_service_interface_type_3": "right", "network_ipam_refs_data_ipam_subnets_subnet_ip_prefix_len_1": 24, "service_template_properties_interface_type_service_interface_type_1": "management",
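# --- The *_subnet_ip_prefix_* parameters above pass only the network address
# --- of a random CIDR. A self-contained illustration of that split, using a
# --- fixed CIDR in place of tcutils' get_random_cidr():
import ipaddress

cidr = '192.168.10.0/24'                    # stand-in for get_random_cidr()
prefix = cidr.split('/')[0]                 # '192.168.10.0' -> ..._subnet_ip_prefix_N
prefix_len = ipaddress.ip_network(cidr).prefixlen   # 24 -> ..._subnet_ip_prefix_len_N
print(prefix, prefix_len)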
def verify_intf_mirroring(self, src_compute, dst_compute, analyzer_compute): """Validate the interface mirroring Test steps: 1. Create vn1/vm1_vn1, vn1/vm2_vn1, vn1/mirror_vm_vn1, vn2/vm2_vn2, vn2/mirror_vm_vn2, vn3/mirror_vm_vn3 2. Create the policies vn1_vn2 and vn1_vn3 for ICMP/UDP and attach to vn's 3. Enable intf mirroring on src vm's port and test the following cases: src vm in vn1, mirror vm in vn1, and dst vm in vn2 src vm in vn1, mirror vm in vn3, and dst vm in vn2 src vm, dst vm and mirror vm all are in vn1 src vm in vn1, dst vm in vn2, and mirror vm in vn2 4. Send the traffic from vm1 to vm2 and verify if the packets gets mirrored to mirror_vm the analyzer Pass criteria : Pkts(based on direction) getting mirrored to mirror_vm """ result = True vn1_subnets = [get_random_cidr(af=self.inputs.get_af())] vn2_subnets = [get_random_cidr(af=self.inputs.get_af())] vn3_subnets = [get_random_cidr(af=self.inputs.get_af())] self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ ":" + get_random_name("vn1") self.vn1_name = self.vn1_fq_name.split(':')[2] self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ ":" + get_random_name("vn2") self.vn2_name = self.vn2_fq_name.split(':')[2] self.vn3_fq_name = "default-domain:" + self.inputs.project_name + \ ":" + get_random_name("vn3") self.vn3_name = self.vn3_fq_name.split(':')[2] self.vm1_name_vn1 = get_random_name("vm1_vn1") self.vm2_name_vn2 = get_random_name("vm2_vn2") self.vm2_name_vn1 = get_random_name("vm2_vn1") self.mirror_vm_name_vn1 = get_random_name("mirror_vm_vn1") self.mirror_vm_name_vn2 = get_random_name("mirror_vm_vn2") self.mirror_vm_name_vn3 = get_random_name("mirror_vm_vn3") self.analyzer_name_vn1 = "default-domain:" + self.inputs.project_name + \ ":" + self.mirror_vm_name_vn1 self.routing_instance_vn1 = self.vn1_fq_name + ':' + self.vn1_name self.analyzer_name_vn3 = "default-domain:" + self.inputs.project_name + \ ":" + self.mirror_vm_name_vn3 self.routing_instance_vn3 = self.vn3_fq_name + ':' + self.vn3_name self.analyzer_name_vn2 = "default-domain:" + self.inputs.project_name + \ ":" + self.mirror_vm_name_vn2 self.routing_instance_vn2 = self.vn2_fq_name + ':' + self.vn2_name self.analyzer_port = 8099 image_name = 'ubuntu-traffic' self.vn1_subnets = vn1_subnets self.vn2_subnets = vn2_subnets self.vn3_subnets = vn3_subnets self.policy_name_vn1_vn2 = get_random_name("vn1_vn2_pass") self.policy_name_vn1_vn3 = get_random_name("vn1_vn3_pass") self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) self.vn3_fixture = self.config_vn(self.vn3_name, self.vn3_subnets) self.rules_vn1_vn2 = [{'direction': '<>', 'protocol': 'icmp', 'source_network': self.vn1_name, 'src_ports': [0, -1], 'dest_network': self.vn2_name, 'dst_ports': [0, -1], 'simple_action': 'pass', 'action_list': {'simple_action': 'pass'} }, {'direction': '<>', 'protocol': 'icmp6', 'source_network': self.vn1_name, 'src_ports': [0, -1], 'dest_network': self.vn2_name, 'dst_ports': [0, -1], 'simple_action': 'pass', 'action_list': {'simple_action': 'pass'} }] self.rules_vn1_vn2.append({'direction': '<>', 'protocol': 'udp', 'source_network': self.vn1_name, 'src_ports': [0, -1], 'dest_network': self.vn2_name, 'dst_ports': [0, -1], 'simple_action': 'pass', 'action_list': {'simple_action': 'pass'} } ) self.rules_vn1_vn3 = [{'direction': '<>', 'protocol': 'icmp', 'source_network': self.vn1_name, 'src_ports': [0, -1], 'dest_network': self.vn3_name, 'dst_ports': [0, -1], 'simple_action': 
'pass', 'action_list': {'simple_action': 'pass'} }, {'direction': '<>', 'protocol': 'icmp6', 'source_network': self.vn1_name, 'src_ports': [0, -1], 'dest_network': self.vn3_name, 'dst_ports': [0, -1], 'simple_action': 'pass', 'action_list': {'simple_action': 'pass'} }] self.rules_vn1_vn3.append({'direction': '<>', 'protocol': 'udp', 'source_network': self.vn1_name, 'src_ports': [0, -1], 'dest_network': self.vn3_name, 'dst_ports': [0, -1], 'simple_action': 'pass', 'action_list': {'simple_action': 'pass'} } ) self.policy_fixture_vn1_vn2 = self.config_policy(self.policy_name_vn1_vn2, self.rules_vn1_vn2) self.policy_fixture_vn1_vn3 = self.config_policy(self.policy_name_vn1_vn3, self.rules_vn1_vn3) self.vn1_policy_fix = self.attach_policy_to_vn( self.policy_fixture_vn1_vn2, self.vn1_fixture) self.vn2_policy_fix = self.attach_policy_to_vn( self.policy_fixture_vn1_vn2, self.vn2_fixture) self.vn1_policy_fix = self.attach_policy_to_vn( self.policy_fixture_vn1_vn3, self.vn1_fixture) self.vn3_policy_fix = self.attach_policy_to_vn( self.policy_fixture_vn1_vn3, self.vn3_fixture) self.vm1_fixture_vn1 = self.config_vm( self.vn1_fixture, self.vm1_name_vn1, node_name=src_compute, image_name=image_name) self.vm2_fixture_vn2 = self.config_vm( self.vn2_fixture, self.vm2_name_vn2, node_name=dst_compute, image_name=image_name) self.mirror_vm_fixture_vn1 = self.config_vm( self.vn1_fixture, self.mirror_vm_name_vn1, node_name=analyzer_compute, image_name=image_name) self.vm2_fixture_vn1 = self.config_vm( self.vn1_fixture, self.vm2_name_vn1, node_name=dst_compute, image_name=image_name) self.mirror_vm_fixture_vn3 = self.config_vm( self.vn3_fixture, self.mirror_vm_name_vn3, node_name=analyzer_compute, image_name=image_name) self.mirror_vm_fixture_vn2 = self.config_vm( self.vn2_fixture, self.mirror_vm_name_vn2, node_name=analyzer_compute, image_name=image_name) assert self.vm1_fixture_vn1.verify_on_setup() assert self.vm2_fixture_vn2.verify_on_setup() assert self.vm2_fixture_vn1.verify_on_setup() assert self.mirror_vm_fixture_vn1.verify_on_setup() assert self.mirror_vm_fixture_vn3.verify_on_setup() assert self.mirror_vm_fixture_vn2.verify_on_setup() self.nova_h.wait_till_vm_is_up(self.vm1_fixture_vn1.vm_obj) self.nova_h.wait_till_vm_is_up(self.vm2_fixture_vn2.vm_obj) self.nova_h.wait_till_vm_is_up(self.vm2_fixture_vn1.vm_obj) self.nova_h.wait_till_vm_is_up(self.mirror_vm_fixture_vn1.vm_obj) self.nova_h.wait_till_vm_is_up(self.mirror_vm_fixture_vn3.vm_obj) self.nova_h.wait_till_vm_is_up(self.mirror_vm_fixture_vn2.vm_obj) result, msg = self.validate_vn( self.vn1_name, project_name=self.inputs.project_name) assert result, msg result, msg = self.validate_vn( self.vn2_name, project_name=self.inputs.project_name) assert result, msg self.mirror_vm_ip_vn1 = self.mirror_vm_fixture_vn1.get_vm_ips(self.vn1_fq_name)[0] self.mirror_vm_ip_vn3 = self.mirror_vm_fixture_vn3.get_vm_ips(self.vn3_fq_name)[0] self.mirror_vm_ip_vn2 = self.mirror_vm_fixture_vn2.get_vm_ips(self.vn2_fq_name)[0] self.logger.info("Verify Port mirroring when src vm in vn1, mirror vm in vn1 and dst vm in vn2..") if not self._verify_intf_mirroring(self.vm1_fixture_vn1, self.vm2_fixture_vn2, self.mirror_vm_fixture_vn1, \ self.vn1_fq_name, self.vn2_fq_name, self.vn1_fq_name, self.mirror_vm_ip_vn1,
self.analyzer_name_vn1, self.routing_instance_vn1) : result = result and False self.logger.info("Verify Port mirroring when src vm in vn1, mirror vm in vn3, and dst vm in vn2") if not self._verify_intf_mirroring(self.vm1_fixture_vn1, self.vm2_fixture_vn2, self.mirror_vm_fixture_vn3, \ self.vn1_fq_name, self.vn2_fq_name, self.vn3_fq_name, self.mirror_vm_ip_vn3, self.analyzer_name_vn3, self.routing_instance_vn3) : result = result and False self.logger.info("Verify Port mirroring when src vm, dst vm and mirror vm all are in vn1") if not self._verify_intf_mirroring(self.vm1_fixture_vn1, self.vm2_fixture_vn1, self.mirror_vm_fixture_vn1, \ self.vn1_fq_name, self.vn1_fq_name, self.vn1_fq_name, self.mirror_vm_ip_vn1, self.analyzer_name_vn1, self.routing_instance_vn1) : result = result and False self.logger.info("Verify Port mirroring when src vm in vn1, dst vm in vn2 and mirror vm in vn2") if not self._verify_intf_mirroring(self.vm1_fixture_vn1, self.vm2_fixture_vn2, self.mirror_vm_fixture_vn2, \ self.vn1_fq_name, self.vn2_fq_name, self.vn2_fq_name, self.mirror_vm_ip_vn2, self.analyzer_name_vn2, self.routing_instance_vn2) : result = result and False return result
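# --- The four mirror placements verified above differ only in which VN hosts
# --- the source, destination and analyzer VMs. A data-driven sketch of that
# --- matrix (fixture names are placeholders, not framework objects):

PLACEMENTS = [
    # (src_vn, dst_vn, analyzer_vn)
    ('vn1', 'vn2', 'vn1'),   # mirror VM shares the source VN
    ('vn1', 'vn2', 'vn3'),   # mirror VM in a third VN
    ('vn1', 'vn1', 'vn1'),   # everything in one VN
    ('vn1', 'vn2', 'vn2'),   # mirror VM shares the destination VN
]

def run_placements(verify_fn, vms_by_vn):
    """Run verify_fn(src_vm, dst_vm, analyzer_vm) for each placement and AND
    the results, mirroring the result-accumulation pattern used above."""
    result = True
    for src, dst, analyzer in PLACEMENTS:
        ok = verify_fn(vms_by_vn[src], vms_by_vn[dst], vms_by_vn[analyzer])
        result = result and ok
    return result

# Example with a stubbed verifier:
print(run_placements(lambda s, d, a: True,
                     {'vn1': 'vm_vn1', 'vn2': 'vm_vn2', 'vn3': 'vm_vn3'}))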
def verify_add_new_vns(self): # Delete policy self.detach_policy(self.vn1_policy_fix) self.detach_policy(self.vn2_policy_fix) self.unconfig_policy(self.policy_fixture) # Create one more left and right VN's new_left_vn = "new_left_bridge_vn" new_left_vn_net = [get_random_cidr(af=self.inputs.get_af())] new_right_vn = "new_right_bridge_vn" new_right_vn_net = [get_random_cidr(af=self.inputs.get_af())] new_left_vn_fix = self.config_vn(new_left_vn, new_left_vn_net) new_right_vn_fix = self.config_vn(new_right_vn, new_right_vn_net) # Launch VMs in new left and right VN's new_left_vm = 'new_left_bridge_vm' new_right_vm = 'new_right_bridge_vm' new_left_vm_fix = self.config_vm(new_left_vn_fix, new_left_vm) new_right_vm_fix = self.config_vm(new_right_vn_fix, new_right_vm) assert new_left_vm_fix.verify_on_setup() assert new_right_vm_fix.verify_on_setup() # Wait for VM's to come up new_left_vm_fix.wait_till_vm_is_up() new_right_vm_fix.wait_till_vm_is_up() # Add rule to policy to allow traffic from new left_vn to right_vn # through SI new_rule = {'direction': '<>', 'protocol': 'any', 'source_network': new_left_vn, 'src_ports': [0, -1], 'dest_network': new_right_vn, 'dst_ports': [0, -1], 'simple_action': None, 'action_list': {'apply_service': self.action_list} } self.rules.append(new_rule) # Create new policy with rule to allow traffic from new VN's self.policy_fixture = self.config_policy(self.policy_name, self.rules) self.vn1_policy_fix = self.attach_policy_to_vn( self.policy_fixture, self.vn1_fixture) self.vn2_policy_fix = self.attach_policy_to_vn( self.policy_fixture, self.vn2_fixture) # attach policy to new VN's new_policy_left_vn_fix = self.attach_policy_to_vn( self.policy_fixture, new_left_vn_fix) new_policy_right_vn_fix = self.attach_policy_to_vn( self.policy_fixture, new_right_vn_fix) self.verify_si(self.si_fixtures) # Ping from left VM to right VM sleep(5) self.logger.info("Verify ICMP traffic between new VN's.") errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip assert new_left_vm_fix.ping_with_certainty( new_right_vm_fix.vm_ip), errmsg self.logger.info( "Verify ICMP traffic between new left VN and existing right VN.") errmsg = "Ping to right VM ip %s from left VM passed; \ Expected to fail" % self.vm2_fixture.vm_ip assert new_left_vm_fix.ping_with_certainty(self.vm2_fixture.vm_ip, expectation=False), errmsg self.logger.info( "Verify ICMP traffic between existing VN's with allow all.") errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip assert self.vm1_fixture.ping_with_certainty( self.vm2_fixture.vm_ip), errmsg self.logger.info( "Verify ICMP traffic between existing left VN and new right VN.") errmsg = "Ping to right VM ip %s from left VM passed; \ Expected to fail" % new_right_vm_fix.vm_ip assert self.vm1_fixture.ping_with_certainty(new_right_vm_fix.vm_ip, expectation=False), errmsg # Ping between left VN's self.logger.info( "Verify ICMP traffic between new left VN and existing left VN.") errmsg = "Ping to left VM ip %s from another left VM in different VN \ passed; Expected to fail" % self.vm1_fixture.vm_ip assert new_left_vm_fix.ping_with_certainty(self.vm1_fixture.vm_ip, expectation=False), errmsg self.logger.info( "Verify ICMP traffic between new right VN and existing right VN.") errmsg = "Ping to right VM ip %s from another right VM in different VN \ passed; Expected to fail" % self.vm2_fixture.vm_ip assert new_right_vm_fix.ping_with_certainty(self.vm2_fixture.vm_ip, expectation=False), errmsg # Delete policy
self.detach_policy(self.vn1_policy_fix) self.detach_policy(self.vn2_policy_fix) self.detach_policy(new_policy_left_vn_fix) self.detach_policy(new_policy_right_vn_fix) self.unconfig_policy(self.policy_fixture) # Add rule to policy to allow only udp traffic from new left_vn to right_vn # through SI self.rules.remove(new_rule) udp_rule = {'direction': '<>', 'protocol': 'udp', 'source_network': new_left_vn, 'src_ports': [8000, 8000], 'dest_network': new_right_vn, 'dst_ports': [9000, 9000], 'simple_action': None, 'action_list': {'apply_service': self.action_list} } self.rules.append(udp_rule) # Create new policy with rule to allow traffic from new VN's self.policy_fixture = self.config_policy(self.policy_name, self.rules) self.vn1_policy_fix = self.attach_policy_to_vn( self.policy_fixture, self.vn1_fixture) self.vn2_policy_fix = self.attach_policy_to_vn( self.policy_fixture, self.vn2_fixture) # attach policy to new VN's new_policy_left_vn_fix = self.attach_policy_to_vn( self.policy_fixture, new_left_vn_fix) new_policy_right_vn_fix = self.attach_policy_to_vn( self.policy_fixture, new_right_vn_fix) self.verify_si(self.si_fixtures) # Ping from left VM to right VM with udp rule self.logger.info( "Verify ICMP traffic with allow udp only rule from new left VN to new right VN") errmsg = "Ping to right VM ip %s from left VM passed; Expected to fail" % new_right_vm_fix.vm_ip assert new_left_vm_fix.ping_with_certainty(new_right_vm_fix.vm_ip, expectation=False), errmsg # Install traffic package in VM self.vm1_fixture.install_pkg("Traffic") self.vm2_fixture.install_pkg("Traffic") new_left_vm_fix.install_pkg("Traffic") new_right_vm_fix.install_pkg("Traffic") self.logger.info( "Verify UDP traffic with allow udp only rule from new left VN to new right VN") sport = 8000 dport = 9000 sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix, 'udp', sport=sport, dport=dport) errmsg = "UDP traffic with src port %s and dst port %s failed" % ( sport, dport) assert sent and recv == sent, errmsg self.logger.info("Verify ICMP traffic with allow all.") errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip assert self.vm1_fixture.ping_with_certainty( self.vm2_fixture.vm_ip), errmsg self.logger.info("Verify UDP traffic with allow all") sport = 8001 dport = 9001 sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, 'udp', sport=sport, dport=dport) errmsg = "UDP traffic with src port %s and dst port %s failed" % ( sport, dport) assert sent and recv == sent, errmsg # Cleanup: delete the new VMs, detach the new policies and delete the new VNs self.delete_vm(new_left_vm_fix) self.delete_vm(new_right_vm_fix) self.detach_policy(new_policy_left_vn_fix) self.detach_policy(new_policy_right_vn_fix) self.delete_vn(new_left_vn_fix) self.delete_vn(new_right_vn_fix) self.verify_si(self.si_fixtures) self.logger.info( "Verify ICMP traffic with allow all after deleting the new left and right VN.") errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip assert self.vm1_fixture.ping_with_certainty( self.vm2_fixture.vm_ip), errmsg return True
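# --- Self-contained illustration of the sent == received check that
# --- verify_traffic() performs above, using a local UDP socket pair instead
# --- of the framework's Traffic package (ports 8000/9000 match the udp_rule):
import socket
import threading

def udp_echo_check(host='127.0.0.1', sport=8000, dport=9000, count=5):
    received = []
    rsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    rsock.bind((host, dport))               # listener bound before sending
    rsock.settimeout(2)

    def listener():
        try:
            while len(received) < count:
                data, _ = rsock.recvfrom(1024)
                received.append(data)
        except socket.timeout:
            pass

    t = threading.Thread(target=listener)
    t.start()
    ssock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ssock.bind((host, sport))               # fix the source port, as the rule does
    for i in range(count):
        ssock.sendto(b'probe-%d' % i, (host, dport))
    t.join()
    ssock.close()
    rsock.close()
    sent, recv = count, len(received)
    assert sent and recv == sent, \
        'UDP sport %s dport %s: sent %s recv %s' % (sport, dport, sent, recv)
    return sent, recv

print(udp_echo_check())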
def verify_firewall_with_mirroring( self, si_count=1, svc_scaling=False, max_inst=1, firewall_svc_mode='in-network', mirror_svc_mode='transparent', flavor='contrail_flavor_2cpu', vn1_subnets=None, vn2_subnets=None): """Validate the service chaining in network datapath""" self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ ":" + get_random_name("in_network_vn1") self.vn1_name = self.vn1_fq_name.split(':')[2] self.vn1_subnets = [vn1_subnets or get_random_cidr(af=self.inputs.get_af())] self.vm1_name = get_random_name("in_network_vm1") self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ ":" + get_random_name("in_network_vn2") self.vn2_name = self.vn2_fq_name.split(':')[2] self.vn2_subnets = [vn2_subnets or get_random_cidr(af=self.inputs.get_af())] self.vm2_name = get_random_name("in_network_vm2") self.action_list = [] self.firewall_st_name = get_random_name("svc_firewall_template_1") firewall_si_prefix = get_random_name("svc_firewall_instance") + "_" self.mirror_st_name = get_random_name("svc_mirror_template_1") mirror_si_prefix = get_random_name("svc_mirror_instance") + "_" self.policy_name = get_random_name("policy_in_network") self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) if firewall_svc_mode == 'transparent': self.if_list = [] self.st_fixture, self.firewall_si_fixtures = self.config_st_si( self.firewall_st_name, firewall_si_prefix, si_count, svc_scaling, max_inst, left_vn=None, right_vn=None, svc_img_name='tiny_trans_fw', svc_mode=firewall_svc_mode, flavor=flavor, project=self.inputs.project_name) if firewall_svc_mode == 'in-network'or firewall_svc_mode == 'in-network-nat': self.st_fixture, self.firewall_si_fixtures = self.config_st_si( self.firewall_st_name, firewall_si_prefix, si_count, svc_scaling, max_inst, left_vn=self.vn1_fq_name, right_vn=self.vn2_fq_name, svc_img_name='ubuntu-in-net', svc_mode=firewall_svc_mode, flavor=flavor, project=self.inputs.project_name) self.action_list = self.chain_si( si_count, firewall_si_prefix, self.inputs.project_name) self.st_fixture, self.mirror_si_fixtures = self.config_st_si( self.mirror_st_name, mirror_si_prefix, si_count, left_vn=self.vn1_fq_name, svc_type='analyzer', svc_mode=mirror_svc_mode, flavor=flavor, project=self.inputs.project_name) self.action_list += (self.chain_si(si_count, mirror_si_prefix, self.inputs.project_name)) self.rules = [ { 'direction': '<>', 'protocol': 'any', 'source_network': self.vn1_name, 'src_ports': [0, -1], 'dest_network': self.vn2_name, 'dst_ports': [0, -1], 'simple_action': 'pass', 'action_list': {'simple_action': 'pass', 'mirror_to': {'analyzer_name': self.action_list[1]}, 'apply_service': self.action_list[:1]} }, ] self.policy_fixture = self.config_policy(self.policy_name, self.rules) self.vn1_policy_fix = self.attach_policy_to_vn( self.policy_fixture, self.vn1_fixture) self.vn2_policy_fix = self.attach_policy_to_vn( self.policy_fixture, self.vn2_fixture) self.vm1_fixture = self.config_vm(self.vn1_fixture, self.vm1_name) self.vm2_fixture = self.config_vm(self.vn2_fixture, self.vm2_name) self.vm1_fixture.wait_till_vm_is_up() self.vm2_fixture.wait_till_vm_is_up() result, msg = self.validate_vn( self.vn1_name, project_name=self.inputs.project_name) assert result, msg result, msg = self.validate_vn( self.vn2_name, project_name=self.inputs.project_name) assert result, msg self.verify_si(self.firewall_si_fixtures) self.verify_si(self.mirror_si_fixtures) for si_fix in self.firewall_si_fixtures: svms = 
self.get_svms_in_si(si_fix, self.inputs.project_name) for svm in svms: svm_name = svm.name host = self.get_svm_compute(svm_name) svm_node_ip = host # Ping from left VM to right VM errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip assert self.vm1_fixture.ping_with_certainty( self.vm2_fixture.vm_ip), errmsg # Verify ICMP mirror sessions = self.tcpdump_on_all_analyzer( self.mirror_si_fixtures, mirror_si_prefix, si_count) errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip assert self.vm1_fixture.ping_with_certainty( self.vm2_fixture.vm_ip), errmsg for svm_name, (session, pcap) in sessions.items(): if self.vm1_fixture.vm_node_ip == self.vm2_fixture.vm_node_ip: if firewall_svc_mode == 'transparent': count = 20 else: count = 10 if self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: if firewall_svc_mode == 'in-network' and self.vm1_fixture.vm_node_ip == svm_node_ip: count = 10 else: count = 20 self.verify_icmp_mirror(svm_name, session, pcap, count) return True
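# --- Sketch of the kind of capture tcpdump_on_all_analyzer() sets up on the
# --- analyzer VMs above. Mirrored packets are delivered to the analyzer over
# --- UDP (8099 is the analyzer port configured in these tests); the exact
# --- filter and pcap handling in the framework may differ.

def build_mirror_capture_cmd(interface='eth0', analyzer_udp_port=8099,
                             pcap='/tmp/mirror.pcap', count=100):
    """tcpdump command that writes mirrored traffic reaching the analyzer
    to a pcap file."""
    return ('sudo tcpdump -i %s -nn udp port %s -c %s -w %s'
            % (interface, analyzer_udp_port, count, pcap))

def count_packets_cmd(pcap='/tmp/mirror.pcap'):
    """Command whose output line count approximates the number of captured
    packets, to be compared against the expected mirror count."""
    return 'tcpdump -nn -r %s | wc -l' % pcap

print(build_mirror_capture_cmd())
print(count_packets_cmd())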
def verify_intf_mirroring(self, compute_nodes, vn_index_list, sub_intf=False, parent_intf=False, nic_mirror=False): """Validate the interface mirroring Test steps: 1. Create vn1/vm1_vn1, vn1/vm2_vn1, vn1/mirror_vm_vn1, vn2/vm2_vn2, vn2/mirror_vm_vn2, vn3/mirror_vm_vn3 2. Create the policies vn1_vn2 and vn1_vn3 for ICMP/UDP and attach to vn's 3. Enable intf mirroring on src vm's port and test the following cases: src vm in vn1, mirror vm in vn1, and dst vm in vn2 src vm in vn1, mirror vm in vn3, and dst vm in vn2 src vm, dst vm and mirror vm all are in vn1 src vm in vn1, dst vm in vn2, and mirror vm in vn2 4. Send the traffic from vm1 to vm2 and verify if the packets gets mirrored to mirror_vm the analyzer Pass criteria : Pkts(based on direction) getting mirrored to mirror_vm """ result = True src_compute = compute_nodes[0] dst_compute = compute_nodes[1] analyzer_compute = compute_nodes[2] analyzer_port = 8099 image_name = 'cirros' if not sub_intf else 'ubuntu' vn1_subnets = [get_random_cidr(af=self.inputs.get_af())] vn2_subnets = [get_random_cidr(af=self.inputs.get_af())] vn3_subnets = [get_random_cidr(af=self.inputs.get_af())] vn1_fq_name = self.connections.domain_name +":" + self.inputs.project_name + \ ":" + get_random_name("vn1") vn2_fq_name = self.connections.domain_name +":" + self.inputs.project_name + \ ":" + get_random_name("vn2") vn3_fq_name = self.connections.domain_name +":" + self.inputs.project_name + \ ":" + get_random_name("vn3") vn1_name = vn1_fq_name.split(':')[2] vn2_name = vn2_fq_name.split(':')[2] vn3_name = vn3_fq_name.split(':')[2] vn1_fixture = self.config_vn(vn1_name, vn1_subnets) vn2_fixture = self.config_vn(vn2_name, vn2_subnets) vn3_fixture = self.config_vn(vn3_name, vn3_subnets) policy_name_vn1_vn2 = get_random_name("vn1_vn2_pass") policy_name_vn1_vn3 = get_random_name("vn1_vn3_pass") policy_name_vn2_vn3 = get_random_name("vn2_vn3_pass") rules_vn1_vn2 = self.create_policy_rule(vn1_name, vn2_name) rules_vn1_vn3 = self.create_policy_rule(vn1_name, vn3_name) rules_vn2_vn3 = self.create_policy_rule(vn2_name, vn3_name) policy_fixture_vn1_vn2 = self.config_policy(policy_name_vn1_vn2, rules_vn1_vn2) policy_fixture_vn1_vn3 = self.config_policy(policy_name_vn1_vn3, rules_vn1_vn3) policy_fixture_vn2_vn3 = self.config_policy(policy_name_vn2_vn3, rules_vn2_vn3) vn1_v2_attach_to_vn1 = self.attach_policy_to_vn( policy_fixture_vn1_vn2, vn1_fixture) vn1_vn2_attach_to_vn2 = self.attach_policy_to_vn( policy_fixture_vn1_vn2, vn2_fixture) vn1_v3_attach_to_vn1 = self.attach_policy_to_vn( policy_fixture_vn1_vn3, vn1_fixture) vn1_v3_attach_to_vn3 = self.attach_policy_to_vn( policy_fixture_vn1_vn3, vn3_fixture) vn2_v3_attach_to_vn2 = self.attach_policy_to_vn( policy_fixture_vn2_vn3, vn2_fixture) vn2_v3_attach_to_vn3 = self.attach_policy_to_vn( policy_fixture_vn2_vn3, vn3_fixture) vn1_vmi_ref, vn2_vmi_ref, vn3_vmi_ref = None, None, None self.vlan = 101 if vn_index_list[0] == 0: src_vn_fixture = vn1_fixture src_vn_fq_name = vn1_fq_name src_vn_name = vn1_fq_name.split(':')[2] vn1_vmi_ref = True if sub_intf: intf_type = 'src' src_port, src_parent_port, src_parent_port_vn_fixture = self.create_sub_intf(vn1_fixture.uuid, intf_type) elif vn_index_list[0] == 1: src_vn_fixture = vn2_fixture src_vn_fq_name = vn2_fq_name src_vn_name = vn2_fq_name.split(':')[2] vn2_vmi_ref = True if sub_intf: intf_type = 'src' src_port, src_parent_port, src_parent_port_vn_fixture = self.create_sub_intf(vn2_fixture.uuid, intf_type) else: src_vn_fixture = vn3_fixture src_vn_fq_name = vn3_fq_name src_vn_name = 
vn3_fq_name.split(':')[2] vn3_vmi_ref = True if sub_intf: intf_type = 'src' src_port, src_parent_port, src_parent_port_vn_fixture = self.create_sub_intf(vn3_fixture.uuid, intf_type) if vn_index_list[1] == 0: dst_vn_fixture = vn1_fixture dst_vn_fq_name = vn1_fq_name dst_vn_name = vn1_fq_name.split(':')[2] vn1_vmi_ref = True if sub_intf: intf_type = 'dst' dst_port, dst_parent_port, dst_parent_port_vn_fixture = self.create_sub_intf(vn1_fixture.uuid, intf_type) elif vn_index_list[1] == 1: dst_vn_fixture = vn2_fixture dst_vn_fq_name = vn2_fq_name dst_vn_name = vn2_fq_name.split(':')[2] vn2_vmi_ref = True if sub_intf: intf_type = 'dst' dst_port, dst_parent_port, dst_parent_port_vn_fixture = self.create_sub_intf(vn2_fixture.uuid, intf_type) else: dst_vn_fixture = vn3_fixture dst_vn_fq_name = vn3_fq_name dst_vn_name = vn3_fq_name.split(':')[2] vn3_vmi_ref = True if sub_intf: intf_type = 'dst' dst_port, dst_parent_port, dst_parent_port_vn_fixture = self.create_sub_intf(vn3_fixture.uuid, intf_type) if vn_index_list[2] == 0: analyzer_vn_fixture = vn1_fixture analyzer_vn_fq_name = vn1_fq_name analyzer_vn_name = vn1_fq_name.split(':')[2] vn1_vmi_ref = True if sub_intf: intf_type = 'analyzer' analyzer_port, analyzer_parent_port, analyzer_parent_port_vn_fixture = self.create_sub_intf(vn1_fixture.uuid, intf_type) elif vn_index_list[2] == 1: analyzer_vn_fixture = vn2_fixture analyzer_vn_fq_name = vn2_fq_name analyzer_vn_name = vn2_fq_name.split(':')[2] vn2_vmi_ref = True if sub_intf: intf_type = 'analyzer' analyzer_port, analyzer_parent_port, analyzer_parent_port_vn_fixture = self.create_sub_intf(vn2_fixture.uuid, intf_type) else: analyzer_vn_fixture = vn3_fixture analyzer_vn_fq_name = vn3_fq_name analyzer_vn_name = vn3_fq_name.split(':')[2] vn3_vmi_ref = True if sub_intf: intf_type = 'analyzer' analyzer_port, analyzer_parent_port, analyzer_parent_port_vn_fixture = self.create_sub_intf(vn3_fixture.uuid, intf_type) if parent_intf: policy_name_src_parent_vn_analyzer_vn = get_random_name("src_parent_to_analyzer_pass") policy_name_dst_parent_vn_analyzer_vn = get_random_name("dst_parent_to_analyzer_pass") src_parent_vn_name = src_parent_port_vn_fixture.vn_name dst_parent_vn_name = dst_parent_port_vn_fixture.vn_name rules_src_parent_vn_analyzer_vn = self.create_policy_rule(src_parent_vn_name, analyzer_vn_name) rules_dst_parent_vn_analyzer_vn = self.create_policy_rule(dst_parent_vn_name, analyzer_vn_name) policy_fixture_src_parent_vn_analyzer_vn = self.config_policy( policy_name_src_parent_vn_analyzer_vn, rules_src_parent_vn_analyzer_vn) policy_fixture_dst_parent_vn_analyzer_vn = self.config_policy( policy_name_dst_parent_vn_analyzer_vn, rules_dst_parent_vn_analyzer_vn) self.attach_policy_to_vn( policy_fixture_src_parent_vn_analyzer_vn, analyzer_vn_fixture) self.attach_policy_to_vn( policy_fixture_src_parent_vn_analyzer_vn, src_parent_port_vn_fixture) self.attach_policy_to_vn( policy_fixture_dst_parent_vn_analyzer_vn, analyzer_vn_fixture) self.attach_policy_to_vn( policy_fixture_dst_parent_vn_analyzer_vn,dst_parent_port_vn_fixture) src_vm_name = get_random_name("src_vm") dst_vm_name = get_random_name("dst_vm") analyzer_vm_name = get_random_name("analyzer_vm") analyzer_fq_name = self.connections.domain_name +":" + self.inputs.project_name + \ ":" + analyzer_vm_name routing_instance = analyzer_vn_fq_name + ':' + analyzer_vn_name src_port_ids, dst_port_ids, analyzer_port_ids = [], [], [] src_vn_objs = [src_vn_fixture.obj] dst_vn_objs = [dst_vn_fixture.obj] analyzer_vn_objs = [analyzer_vn_fixture.obj] if sub_intf: 
src_port_ids.append(src_parent_port.uuid) dst_port_ids.append(dst_parent_port.uuid) analyzer_port_ids.append(analyzer_parent_port.uuid) src_vn_objs = [src_parent_port_vn_fixture.obj] dst_vn_objs = [dst_parent_port_vn_fixture.obj] analyzer_vn_objs = [analyzer_parent_port_vn_fixture.obj] src_vm_fixture = self.create_vm(vn_objs=src_vn_objs, vm_name=src_vm_name, image_name=image_name, node_name=src_compute, port_ids=src_port_ids) dst_vm_fixture = self.create_vm(vn_objs=dst_vn_objs, vm_name=dst_vm_name, image_name=image_name, node_name=dst_compute, port_ids=dst_port_ids) analyzer_vm_fixture = self.create_vm(vn_objs=analyzer_vn_objs, vm_name=analyzer_vm_name, image_name=image_name, node_name=analyzer_compute, port_ids=analyzer_port_ids) assert src_vm_fixture.verify_on_setup() assert dst_vm_fixture.verify_on_setup() assert analyzer_vm_fixture.verify_on_setup() self.nova_h.wait_till_vm_is_up(src_vm_fixture.vm_obj) self.nova_h.wait_till_vm_is_up(dst_vm_fixture.vm_obj) self.nova_h.wait_till_vm_is_up(analyzer_vm_fixture.vm_obj) if vn1_vmi_ref: result, msg = self.validate_vn(vn_fq_name=vn1_fq_name) assert result, msg if vn2_vmi_ref: result, msg = self.validate_vn(vn_fq_name=vn2_fq_name) assert result, msg if vn3_vmi_ref: result, msg = self.validate_vn(vn_fq_name=vn3_fq_name) assert result, msg if sub_intf: src_vm_ip = src_port.obj['fixed_ips'][0]['ip_address'] dst_vm_ip = dst_port.obj['fixed_ips'][0]['ip_address'] analyzer_vm_ip = analyzer_port.obj['fixed_ips'][0]['ip_address'] else: src_vm_ip = src_vm_fixture.get_vm_ips(src_vn_fq_name)[0] dst_vm_ip = dst_vm_fixture.get_vm_ips(dst_vn_fq_name)[0] analyzer_vm_ip = analyzer_vm_fixture.get_vm_ips(analyzer_vn_fq_name)[0] self.logger.info("Compute/VM: SRC: %s / %s, -> DST: %s / %s => ANALYZER: %s / %s" % (src_compute, src_vm_ip, dst_compute, dst_vm_ip, analyzer_compute, analyzer_vm_ip)) if parent_intf: parent_src_vm_ip = src_vm_fixture.get_vm_ips()[0] parent_dst_vm_ip = dst_vm_fixture.get_vm_ips()[0] parent_analyzer_vm_ip = analyzer_vm_fixture.get_vm_ips()[0] sport = None if sub_intf: intf_type = 'src' cmds = ['sudo vconfig add eth0 101','sudo ifconfig eth0.101 up','sudo udhcpc -i eth0.101'] output = src_vm_fixture.run_cmd_on_vm(cmds = cmds) intf_type = 'dst' cmds = ['sudo vconfig add eth0 101','sudo ifconfig eth0.101 up','sudo udhcpc -i eth0.101'] output = dst_vm_fixture.run_cmd_on_vm(cmds = cmds) intf_type = 'analyzer' cmds = ['sudo vconfig add eth0 101','sudo ifconfig eth0.101 up','sudo udhcpc -i eth0.101'] output = analyzer_vm_fixture.run_cmd_on_vm(cmds = cmds) sport = src_port.vmi_obj if not self._verify_intf_mirroring(src_vm_fixture, dst_vm_fixture, analyzer_vm_fixture, \ src_vn_fq_name, dst_vn_fq_name, analyzer_vn_fq_name, analyzer_vm_ip, analyzer_fq_name, routing_instance, src_port=sport, sub_intf=sub_intf, parent_intf=parent_intf, nic_mirror=nic_mirror): result = result and False return result
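# --- The guest-side commands run above to bring up the VLAN 101 sub-interface,
# --- parameterised as a small helper. vconfig/udhcpc match what the cirros and
# --- ubuntu images used here provide; other images may need
# --- `ip link add ... type vlan` and `dhclient` instead.

def vlan_subintf_cmds(device='eth0', vlan_id=101):
    """Commands to create a VLAN sub-interface, bring it up and DHCP an
    address on it inside the guest."""
    sub_if = '%s.%s' % (device, vlan_id)
    return ['sudo vconfig add %s %s' % (device, vlan_id),
            'sudo ifconfig %s up' % sub_if,
            'sudo udhcpc -i %s' % sub_if]

# Usage mirrors the test:
#   output = src_vm_fixture.run_cmd_on_vm(cmds=vlan_subintf_cmds())
print(vlan_subintf_cmds())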