Exemplo n.º 1
0
 def config_aap(self,
                port,
                prefix,
                prefix_len=32,
                mac='',
                aap_mode='active-standby',
                contrail_api=False,
                left_vn_name=None):
     """Configure an allowed-address-pair (AAP) entry for the given port.

     When left_vn_name is supplied, the pair is added through the VNC API
     against a service instance; otherwise it is added either via the VNC
     VMI path (contrail_api=True) or via a neutron port update carrying an
     'allowed_address_pairs' attribute.
     """
     if left_vn_name is None:
         self.logger.info('Configuring AAP on port %s' % port)
         # IPv6 allowed-address-pairs always use a /128 host prefix.
         if is_v6(prefix):
             prefix_len = 128
         if contrail_api:
             self.vnc_h.add_allowed_address_pair(prefix,
                                                 vmi_id=port,
                                                 prefix_len=prefix_len,
                                                 mac=mac,
                                                 mode=aap_mode)
         else:
             pair = {"ip_address": prefix + '/' + str(prefix_len),
                     "mac_address": mac}
             port_rsp = self.update_port(port, {'allowed_address_pairs': [pair]})
     else:
         # Service-instance flavour: 'port' holds the SI fq-name here.
         self.vnc_h.add_allowed_address_pair(prefix,
                                             si_fq_name=port,
                                             prefix_len=prefix_len,
                                             mac=mac,
                                             mode=aap_mode,
                                             left_vn_name=left_vn_name)
Exemplo n.º 2
0
    def get_cn_route_table_entry(self, prefix, ri_name, table=None):
        '''Return the list of path dicts for *prefix* in routing instance *ri_name*.

        Args:
            prefix: route prefix; normalized to '<network>/<prefixlen>'
                form when it parses as an IP network.
            ri_name: fully-qualified routing-instance name.
            table: route table name; defaults to inet.0/inet6.0 based on
                the address family of the prefix.

        Returns:
            The 'paths' entry of the matching route, or None when the
            prefix is not present (or the introspect returned nothing).
        '''
        try:
            net = IPNetwork(prefix)
            prefix = str(net.network) + '/' + str(net.prefixlen)
        except AddrFormatError:
            # Not an IP prefix; match the raw string as-is.
            pass
        if not table:
            table = 'inet6.0' if is_v6(prefix) else 'inet.0'

        # In case, ri is default routing instance, path does not contain ri_name
        if ri_name == "default-domain:default-project:ip-fabric:__default__":
            path = 'Snh_ShowRouteReq?x=%s' % (table)
        else:
            path = 'Snh_ShowRouteReq?x=%s.%s' % (ri_name, table)
        xpath = '/ShowRouteResp/tables/list/ShowRouteTable'
        p = self.dict_get(path)
        rt = EtreeToDict(xpath).get_all_entry(p)
        if rt is None:
            # Nothing parsed from the introspect; previously this raised
            # TypeError when iterating None.
            return None
        # A single table comes back as a dict, several as a list of dicts;
        # normalize so one loop handles both shapes.
        entries = [rt] if isinstance(rt, dict) else rt
        for entry in entries:
            for route in entry['routes']:
                if route['prefix'] == prefix:
                    return route['paths']
Exemplo n.º 3
0
    def get_vna_route(self, vrf_id='', ip=None, prefix=None):
        """Fetch unicast route(s) from the agent introspect for a VRF.

        Args:
            vrf_id: VRF index to query.
            ip: optional route source IP; when omitted, all routes of the
                table are returned.
            prefix: prefix length to match; defaults to the host prefix
                (32 for v4, 128 for v6).

        Returns:
            A dict {'ip': .., 'prefix': .., 'routes': [..]}, or None when
            the requested route is absent.
        """
        # Pick the v4/v6 unicast table from the address family of ip.
        if not ip or not is_v6(ip):
            table = 'Snh_Inet4UcRouteReq'
            plen = 32
        else:
            table = 'Snh_Inet6UcRouteReq'
            plen = 128
        table_resp = table.replace('Req', 'Resp').replace('Snh_', '')
        prefix = plen if prefix is None else prefix
        routes = {'ip': ip, 'prefix': prefix}
        path = '%s?x=%s' % (table, str(vrf_id))
        xpath = 'route_list/list/RouteUcSandeshData'

        p = self.dict_get(path)

        # Different agent builds wrap the list differently; try both shapes.
        routelist = EtreeToDict('./%s/%s' % (table_resp, xpath)).get_all_entry(p) or \
            EtreeToDict('./%s' % (xpath)).get_all_entry(p)
        if not ip:
            routes.update({'routes': routelist})
            return routes
        if routelist is None:
            # Neither xpath matched; previously this raised TypeError
            # when iterating None below.
            return None
        # A single route comes back as a dict; normalize to a list.
        routelist1 = [routelist] if isinstance(routelist, dict) else routelist
        for route in routelist1:
            if route['src_ip'] == ip and route['src_plen'] == str(prefix):
                routes.update({'routes': [route]})
                return routes
Exemplo n.º 4
0
    def get_cn_route_table_entry(self, prefix, ri_name, table=None):
        '''Return the list of path dicts for *prefix* in routing instance *ri_name*.

        Args:
            prefix: route prefix; normalized to '<network>/<prefixlen>'
                form when it parses as an IP network.
            ri_name: fully-qualified routing-instance name.
            table: route table name; defaults to inet.0/inet6.0 based on
                the address family of the prefix.

        Returns:
            The 'paths' entry of the matching route, or None when the
            prefix is not present (or the introspect returned nothing).
        '''
        try:
            net = IPNetwork(prefix)
            prefix = str(net.network) + '/' + str(net.prefixlen)
        except AddrFormatError:
            # Not an IP prefix; match the raw string as-is.
            pass
        if not table:
            table = 'inet6.0' if is_v6(prefix) else 'inet.0'

        # In case, ri is default routing instance, path does not contain ri_name
        if ri_name == "default-domain:default-project:ip-fabric:__default__":
            path = 'Snh_ShowRouteReq?x=%s' % (table)
        else:
            path = 'Snh_ShowRouteReq?x=%s.%s' % (ri_name, table)
        xpath = '/ShowRouteResp/tables/list/ShowRouteTable'
        p = self.dict_get(path)
        rt = EtreeToDict(xpath).get_all_entry(p)
        if rt is None:
            # Nothing parsed from the introspect; previously this raised
            # TypeError when iterating None.
            return None
        # A single table comes back as a dict, several as a list of dicts;
        # normalize so one loop handles both shapes.
        entries = [rt] if isinstance(rt, dict) else rt
        for entry in entries:
            for route in entry['routes']:
                if route['prefix'] == prefix:
                    return route['paths']
Exemplo n.º 5
0
def _extract_subnet_from_network_ipam(data):
    """Translate a contrail network-ipam subnet dict into neutron form.

    Copies over CIDR, IP version, DNS servers, DHCP flag, gateway,
    allocation pools and host routes when present in *data*.
    """
    ip = data['subnet']
    cidr = ip['ip_prefix'] + '/' + str(ip['ip_prefix_len'])
    subnet = {
        'cidr': cidr,
        'ip_version': 6 if is_v6(cidr) else 4,
    }
    if 'dns_nameservers' in data:
        subnet['dns_nameservers'] = data['dns_nameservers']
    if 'enable_dhcp' in data:
        subnet['enable_dhcp'] = data['enable_dhcp']
    if 'default_gateway' in data:
        subnet['gateway_ip'] = data['default_gateway']
    if 'allocation_pools' in data:
        subnet['allocation_pools'] = [
            {'allocation_pool_start': pool['start'],
             'allocation_pool_end': pool['end']}
            for pool in data['allocation_pools']
        ]
    if 'host_routes' in data:
        subnet['host_routes'] = [
            {'route_destination': hr['prefix'],
             'route_nexthop': hr['next_hop']}
            for hr in data['host_routes']['route']
        ]
    return subnet
    def get_vna_route(self, vrf_id='', ip=None, prefix=None):
        """Fetch unicast route(s) from the agent introspect for a VRF.

        Args:
            vrf_id: VRF index to query.
            ip: optional route source IP; when omitted, all routes of the
                table are returned.
            prefix: prefix length to match; defaults to the host prefix
                (32 for v4, 128 for v6).

        Returns:
            A dict {'ip': .., 'prefix': .., 'routes': [..]}, or None when
            the requested route is absent.
        """
        # Pick the v4/v6 unicast table from the address family of ip.
        if not ip or not is_v6(ip):
            table = 'Snh_Inet4UcRouteReq'
            plen = 32
        else:
            table = 'Snh_Inet6UcRouteReq'
            plen = 128
        table_resp = table.replace('Req', 'Resp').replace('Snh_', '')
        prefix = plen if prefix is None else prefix
        routes = {'ip': ip, 'prefix': prefix}
        path = '%s?x=%s' % (table, str(vrf_id))
        xpath = 'route_list/list/RouteUcSandeshData'

        p = self.dict_get(path)

        # Different agent builds wrap the list differently; try both shapes.
        routelist = EtreeToDict('./%s/%s' % (table_resp, xpath)).get_all_entry(p) or \
            EtreeToDict('./%s' % (xpath)).get_all_entry(p)
        if not ip:
            routes.update({'routes': routelist})
            return routes
        if routelist is None:
            # Neither xpath matched; previously this raised TypeError
            # when iterating None below.
            return None
        # A single route comes back as a dict; normalize to a list.
        routelist1 = [routelist] if isinstance(routelist, dict) else routelist
        for route in routelist1:
            if route['src_ip'] == ip and route['src_plen'] == str(prefix):
                routes.update({'routes': [route]})
                return routes
 def _create_subnet(self, subnet):
     """Create a DHCP-enabled subnet under this network.

     :param subnet: CIDR string; the IP version is derived from it.
     """
     ip_version = '6' if is_v6(subnet) else '4'
     params = dict(network=self.uuid,
                   cidr=subnet,
                   ip_version=ip_version,
                   enable_dhcp=True)
     self._qh.create_subnet({'subnet': params})
Exemplo n.º 8
0
 def add_static_route_in_svm(self, si, vn_fixture, device):
     """Add a static route for every VN subnet on each service VM.

     Runs 'ip route add <cidr> dev <device>' (with -6 for IPv6 subnets)
     inside every VM of the service instance.
     """
     for subnet_info in vn_fixture.vn_subnets:
         cidr = subnet_info['cidr']
         family_flag = '-6' if is_v6(cidr) else ''
         # The command is identical for every VM of this subnet.
         route_cmd = 'sudo ip %s route add %s dev %s' % (family_flag, cidr, device)
         for vm in si.svm_list:
             vm.run_cmd_on_vm([route_cmd])
Exemplo n.º 9
0
 def add_static_route_in_svm(self, si, vn_fixture, device):
     """Install each VN subnet as a static route on all service VMs."""
     for entry in vn_fixture.vn_subnets:
         cidr = entry['cidr']
         # Use the -6 family flag for IPv6 subnets, none for IPv4.
         af = '-6' if is_v6(cidr) else ''
         for svm in si.svm_list:
             svm.run_cmd_on_vm(
                 ['sudo ip %s route add %s dev %s' % (af, cidr, device)])
Exemplo n.º 10
0
 def __init__(self, sender_vm_fixture, host, *args, **kwargs):
     """Prepare a ping session from sender_vm_fixture towards host.

     Derives per-run log/result/pid file names from a random suffix and
     selects ping6 when the destination is an IPv6 address.
     """
     self.logger = sender_vm_fixture.logger
     self.sender_vm_fixture = sender_vm_fixture
     self.host = host
     self.args_string = self.get_cmd_args(**kwargs)
     self.rnd_str = get_random_name()
     suffix = '_' + self.rnd_str
     self.log_file = result_file + suffix + '.log'
     self.result_file = result_file + suffix + '.result'
     self.pid_file = '/tmp/ping_%s.pid' % (self.rnd_str)
     self.ping_cmd = 'ping6' if is_v6(self.host) else 'ping'
Exemplo n.º 11
0
 def config_aap(self, port, prefix, prefix_len=32, mac='', aap_mode='active-standby', contrail_api=False):
     """Configure an allowed-address-pair on a port and return True.

     Uses the VNC API when contrail_api is set; otherwise updates the
     neutron port's 'allowed_address_pairs' attribute.
     """
     self.logger.info('Configuring AAP on port %s' % port)
     # IPv6 pairs always use a /128 host prefix.
     if is_v6(prefix):
         prefix_len = 128
     if not contrail_api:
         aap_entry = {"ip_address": prefix + '/' + str(prefix_len),
                      "mac_address": mac}
         port_rsp = self.update_port(port, {'allowed_address_pairs': [aap_entry]})
     else:
         self.vnc_h.add_allowed_address_pair(
             port, prefix, prefix_len, mac, aap_mode)
     return True
Exemplo n.º 12
0
 def get_cn_vpn_table(self, prefix):
     """Check whether *prefix* is present in the control node's L3VPN table.

     Returns True when a route whose prefix contains *prefix* is found
     (also when the table has no routes at all), False otherwise.
     """
     if is_v6(prefix):
         path = 'Snh_ShowRouteReq?x=bgp.l3vpn-inet6.0'
     else:
         path = 'Snh_ShowRouteReq?x=bgp.l3vpn.0'
     xpath = '/ShowRouteResp/tables/list/ShowRouteTable'
     resp = self.dict_get(path)
     rt = EtreeToDict(xpath).get_all_entry(resp)
     result = True
     for route in rt['routes']:
         if prefix in route['prefix']:
             return True
         result = False
     return result
Exemplo n.º 13
0
 def get_cn_vpn_table(self, prefix):
     """Look up *prefix* in the control node's bgp l3vpn table.

     Returns True when a route containing the prefix is found (or when
     the table reports no routes), False otherwise.
     """
     path = ('Snh_ShowRouteReq?x=bgp.l3vpn-inet6.0' if is_v6(prefix)
             else 'Snh_ShowRouteReq?x=bgp.l3vpn.0')
     xpath = '/ShowRouteResp/tables/list/ShowRouteTable'
     table = EtreeToDict(xpath).get_all_entry(self.dict_get(path))
     result = True
     for route in table['routes']:
         if prefix in route['prefix']:
             return True
         result = False
     return result
Exemplo n.º 14
0
    def vrrp_mas_chk(self,
                     src_vm=None,
                     dst_vm=None,
                     vn=None,
                     ip=None,
                     vsrx=False):
        """Verify dst_vm is the VRRP master for *ip* and the agent agrees.

        Args:
            src_vm: source VM used to reach a vSRX over netconf.
            dst_vm: VM expected to hold VRRP mastership.
            vn: virtual network of the virtual IP.
            ip: the VRRP virtual IP (v4 or v6).
            vsrx: when True, mastership is read via 'show vrrp' over
                netconf instead of listing addresses on the VM.

        Returns:
            True when dst_vm is master and the agent route check passes.

        Raises:
            ValueError: if *ip* is neither a valid v4 nor v6 address.
        """
        self.logger.info(
            'Will verify who the VRRP master is and the corresponding route entries in the Agent'
        )
        if is_v4(ip):
            prefix = '32'
            vrrp_mas_chk_cmd = 'ip -4 addr ls'
        elif is_v6(ip):
            prefix = '128'
            vrrp_mas_chk_cmd = 'ip -6 addr ls'
        else:
            # Previously an unrecognized ip left 'prefix' and the command
            # unbound and raised UnboundLocalError later; fail fast with a
            # clear message instead.
            raise ValueError('vrrp_mas_chk: invalid ip %r' % (ip,))

        if vsrx:
            # vSRX mastership is read from 'show vrrp' over netconf.
            vrrp_mas_chk_cmd = 'show vrrp'
            result = self.get_config_via_netconf(src_vm,
                                                 dst_vm,
                                                 vrrp_mas_chk_cmd,
                                                 timeout=10,
                                                 device='junos',
                                                 hostkey_verify="False",
                                                 format='text')
            if result == False:
                return result
            if 'master' in result:
                self.logger.info('%s is selected as the VRRP Master' %
                                 dst_vm.vm_name)
                result = True
            else:
                result = False
                self.logger.error('VRRP Master not selected')
        else:
            # On a regular VM the master owns the VIP, so it shows up in
            # the address listing.
            dst_vm.run_cmd_on_vm(cmds=[vrrp_mas_chk_cmd], as_sudo=True)
            output = dst_vm.return_output_cmd_dict[vrrp_mas_chk_cmd]
            result = False
            if ip in output:
                self.logger.info('%s is selected as the VRRP Master' %
                                 dst_vm.vm_name)
                result = True
            else:
                result = False
                self.logger.error('VRRP Master not selected')
        # Also require the agent to point the VIP route at the master.
        result = result and self.check_master_in_agent(
            dst_vm, vn, ip, prefix_len=prefix)
        return result
Exemplo n.º 15
0
 def __init__(self, sender_vm_fixture, host, *args, **kwargs):
     """Set up a ping run from sender_vm_fixture to host.

     File names for the log/result/pid artefacts are made unique with a
     random suffix; ping6 is chosen for IPv6 destinations.
     """
     self.logger = sender_vm_fixture.logger
     self.sender_vm_fixture = sender_vm_fixture
     self.host = host
     self.args_string = self.get_cmd_args(**kwargs)
     self.rnd_str = get_random_name()
     unique = '_' + self.rnd_str
     self.log_file = result_file + unique + '.log'
     self.result_file = result_file + unique + '.result'
     self.pid_file = '/tmp/ping_%s.pid' % (self.rnd_str)
     self.ping_cmd = 'ping6' if is_v6(self.host) else 'ping'
Exemplo n.º 16
0
        def create_virtual_machine (self, **kwargs):
            """Boot a nova server from an 'openstack'-style argument dict.

            The 'networks' list is translated into nova 'nics' entries
            (fixed-ip / port-id / net-id); returns the new server's id.
            """
            assert kwargs['type'] == 'openstack', "Unsupport argument type"

            vm_args = kwargs.copy()
            del vm_args['type']
            nics = []
            for nic in vm_args['networks']:
                entry = {}
                if 'fixed_ip' in nic:
                    family = 'v6' if is_v6(nic['fixed_ip']) else 'v4'
                    entry = {'%s-fixed-ip' % family: nic['fixed_ip']}
                if 'port' in nic:
                    entry['port-id'] = nic['port']
                if 'network' in nic:
                    entry['net-id'] = nic['network']
                nics.append(entry)

            vm_args['nics'] = nics
            del vm_args['networks']
            return self._nh.servers.create(**vm_args).id
Exemplo n.º 17
0
    def _construct_quantum_params(self, name, prj_fqn, kwargs):
        """Build the quantum/neutron network parameters and pending subnets.

        Populates self._params, self._policies, self._subnets and
        self._subnets_pending from the supplied keyword dict.
        """
        self._params = {
            'type': 'OS::Neutron::Net',
            'name': name,
            'shared': kwargs.get('shared', False),
            'router:external': kwargs.get('router_external', False),
        }

        # SR-IOV networks carry provider network/vlan attributes.
        if kwargs.get('sriov_enable'):
            self._params['provider:physical_network'] = \
                    kwargs['sriov_provider_network']
            self._params['provider:segmentation_id'] = kwargs['sriov_vlan']

        policy_refs = [policy.fq_name
                       for policy in kwargs.get('policy_objs', [])]
        if policy_refs:
            self._params['policys'] = policy_refs
        self._policies = policy_refs

        ipam_fqn = kwargs.get('ipam_fq_name') or NetworkIpam().get_fq_name()
        gw = kwargs.get('disable_gateway')
        dhcp = kwargs.get('enable_dhcp', True)
        # NOTE(review): this fallback looks inert since ipam_fqn was set
        # just above; kept to preserve original behavior.
        ipam_fqn = ipam_fqn or prj_fqn + ':' + 'default-network-ipam'
        self._subnets = kwargs.get('subnets', [])
        self._subnets_pending = []
        for subnet in self._subnets:
            pending = {
                'enable_dhcp': dhcp,
                'ip_version': '6' if is_v6(subnet) else '4',
                'cidr': subnet,
                'ipam_fq_name': ipam_fqn,
            }
            if gw:
                pending['gateway_ip'] = None
            self._subnets_pending.append(pending)
Exemplo n.º 18
0
    def test_fat_flow_with_aap(self):
        """
        Description: Verify Fat flows with allowed address pair
        Steps:
            1. launch 1 VN and launch 4 VMs in it.2 client VMs and 2 server VMs on different node.
            2. on server VMs, config Fat flow for udp with port 0
            3. from client VMs,send udp traffic to servers and
                verify mastership and Fat flow
            4. Induce mastership switch and verify the Fat flow again
        Pass criteria:
            1. Fat flow and mastership verification should pass
        """
        # Two computes are required: server VMs on host[0], clients on host[1].
        compute_hosts = self.orch.get_hosts()
        if len(compute_hosts) < 2:
            raise self.skipTest("Skipping test case,"
                                "this test needs atleast 2 compute nodes")

        vn1_fixture = self.create_vns(count=1)[0]
        vm1_name = get_random_name('vm1')
        vm2_name = get_random_name('vm2')
        result = False
        # vIP is the VRRP virtual IP shared by the two server VMs.
        vIP = self.get_random_ip_from_vn(vn1_fixture)[0]
        image = 'ubuntu-traffic'

        # Pre-create the ports so AAP and fat-flow can be set on the VMIs.
        port1_obj = self.create_port(net_id=vn1_fixture.vn_id)
        port2_obj = self.create_port(net_id=vn1_fixture.vn_id)
        vm1_fixture = self.create_vm(vn1_fixture,
                                     vm1_name,
                                     image_name=image,
                                     port_ids=[port1_obj['id']],
                                     node_name=compute_hosts[0])
        vm2_fixture = self.create_vm(vn1_fixture,
                                     vm2_name,
                                     image_name=image,
                                     port_ids=[port2_obj['id']],
                                     node_name=compute_hosts[0])

        client_fixtures = self.create_vms(vn_fixture=vn1_fixture,
                                          count=2,
                                          node_name=compute_hosts[1],
                                          image_name=image)
        assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up'
        assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up'
        self.verify_vms(client_fixtures)

        # Fat-flow config: udp with port 0 collapses flows on any dport.
        proto = 'udp'
        dport = 53
        fat_port = 0
        baseport = random.randint(12000, 65000)
        sport = [str(baseport), str(baseport + 1)]
        fat_flow_config = {'proto': proto, 'port': fat_port}
        self.add_fat_flow_to_vmis([port1_obj['id'], port2_obj['id']],
                                  fat_flow_config)

        # Allow the virtual IP on both server ports, then start VRRP with
        # vm1 as the preferred master (higher priority).
        port_list = [port1_obj, port2_obj]
        for port in port_list:
            self.config_aap(port['id'], vIP, mac=port['mac_address'])
        self.config_vrrp(vm1_fixture, vIP, '20')
        self.config_vrrp(vm2_fixture, vIP, '10')
        vrrp_master = vm1_fixture
        if is_v6(vIP):
            #current version of vrrpd does not support IPv6, as a workaround add the vIP
            #    on one of the VM and start ping6 to make the VM as master
            assert vm1_fixture.add_ip_on_vm(vIP)
            assert client_fixtures[0].ping_with_certainty(
                vIP), 'Ping to vIP failure'

        assert self.vrrp_mas_chk(dst_vm=vrrp_master, vn=vn1_fixture, ip=vIP)

        # Send udp traffic from both clients using two source ports each.
        for vm in client_fixtures:
            for port in sport:
                assert self.send_nc_traffic(vm,
                                            vrrp_master,
                                            port,
                                            dport,
                                            proto,
                                            ip=vIP)

        # Both source ports should be collapsed into one fat flow per client.
        dst_compute_fix = self.compute_fixtures_dict[vrrp_master.vm_node_ip]
        vrf_id_dst = dst_compute_fix.get_vrf_id(vrrp_master.vn_fq_names[0])
        for vm in client_fixtures:
            self.verify_fat_flow_on_compute(dst_compute_fix,
                                            vm.vm_ip,
                                            vIP,
                                            fat_port,
                                            proto,
                                            vrf_id_dst,
                                            fat_flow_count=1)

        if is_v6(vIP):
            #Skip further verification as current version of vrrpd does not support IPv6
            return True
        # Take vm1's port down so VRRP mastership fails over to vm2.
        self.logger.info('We will induce a mastership switch')
        port_dict = {'admin_state_up': False}
        self.update_port(port1_obj['id'], port_dict)
        self.logger.info('%s should become the new VRRP master' %
                         vm2_fixture.vm_name)
        vrrp_master = vm2_fixture
        assert self.vrrp_mas_chk(dst_vm=vrrp_master, vn=vn1_fixture, ip=vIP)

        # Re-run the traffic and fat-flow checks against the new master.
        for vm in client_fixtures:
            for port in sport:
                assert self.send_nc_traffic(vm,
                                            vrrp_master,
                                            port,
                                            dport,
                                            proto,
                                            ip=vIP)

        dst_compute_fix = self.compute_fixtures_dict[vrrp_master.vm_node_ip]
        vrf_id_dst = dst_compute_fix.get_vrf_id(vrrp_master.vn_fq_names[0])
        for vm in client_fixtures:
            self.verify_fat_flow_on_compute(dst_compute_fix,
                                            vm.vm_ip,
                                            vIP,
                                            fat_port,
                                            proto,
                                            vrf_id_dst,
                                            fat_flow_count=1)
Exemplo n.º 19
0
    def test_fat_flow_with_aap(self):
        """
        Description: Verify Fat flows with allowed address pair
        Steps:
            1. launch 1 VN and launch 4 VMs in it.2 client VMs and 2 server VMs on different node.
            2. on server VMs, config Fat flow for udp with port 0
            3. from client VMs,send udp traffic to servers and
                verify mastership and Fat flow
            4. Induce mastership switch and verify the Fat flow again
        Pass criteria:
            1. Fat flow and mastership verification should pass
        """
        # Two computes are required: server VMs on host[0], clients on host[1].
        compute_hosts = self.orch.get_hosts()
        if len(compute_hosts) < 2:
            raise self.skipTest("Skipping test case,"
                                    "this test needs atleast 2 compute nodes")

        vn1_fixture = self.create_vns(count=1)[0]
        vm1_name = get_random_name('vm1')
        vm2_name = get_random_name('vm2')
        result = False
        # vIP is the VRRP virtual IP shared by the two server VMs.
        vIP = self.get_random_ip_from_vn(vn1_fixture)[0]
        image = 'ubuntu-traffic'

        # Pre-create the ports so AAP and fat-flow can be set on the VMIs.
        port1_obj = self.create_port(net_id=vn1_fixture.vn_id)
        port2_obj = self.create_port(net_id=vn1_fixture.vn_id)
        vm1_fixture = self.create_vm(vn1_fixture, vm1_name,
                                     image_name=image,
                                     port_ids=[port1_obj['id']],
                                     node_name=compute_hosts[0])
        vm2_fixture = self.create_vm(vn1_fixture, vm2_name,
                                     image_name=image,
                                     port_ids=[port2_obj['id']],
                                     node_name=compute_hosts[0])

        client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=2,
            node_name=compute_hosts[1], image_name=image)
        assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up'
        assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up'
        self.verify_vms(client_fixtures)

        # Fat-flow config: udp with port 0 collapses flows on any dport.
        proto = 'udp'
        dport = 53
        fat_port = 0
        baseport = random.randint(12000, 65000)
        sport = [str(baseport), str(baseport+1)]
        fat_flow_config = {'proto':proto,'port':fat_port}
        self.add_fat_flow_to_vmis([port1_obj['id'], port2_obj['id']], fat_flow_config)

        # Allow the virtual IP on both server ports, then start VRRP with
        # vm1 as the preferred master (higher priority).
        port_list = [port1_obj, port2_obj]
        for port in port_list:
            self.config_aap(port['id'], vIP, mac=port['mac_address'])
        self.config_vrrp(vm1_fixture, vIP, '20')
        self.config_vrrp(vm2_fixture, vIP, '10')
        vrrp_master = vm1_fixture
        if is_v6(vIP):
            #current version of vrrpd does not support IPv6, as a workaround add the vIP
            #    on one of the VM and start ping6 to make the VM as master
            assert vm1_fixture.add_ip_on_vm(vIP)
            assert client_fixtures[0].ping_with_certainty(vIP), 'Ping to vIP failure'

        assert self.vrrp_mas_chk(dst_vm=vrrp_master, vn=vn1_fixture, ip=vIP)

        # Send udp traffic from both clients using two source ports each.
        for vm in client_fixtures:
            for port in sport:
                assert self.send_nc_traffic(vm, vrrp_master,
                    port, dport, proto, ip=vIP)

        # Both source ports should be collapsed into one fat flow per client.
        dst_compute_fix = self.compute_fixtures_dict[vrrp_master.vm_node_ip]
        vrf_id_dst = dst_compute_fix.get_vrf_id(vrrp_master.vn_fq_names[0])
        for vm in client_fixtures:
            self.verify_fat_flow_on_compute(dst_compute_fix, vm.vm_ip,
                        vIP, fat_port, proto, vrf_id_dst,
                        fat_flow_count=1)

        if is_v6(vIP):
            #Skip further verification as current version of vrrpd does not support IPv6
            return True
        # Take vm1's port down so VRRP mastership fails over to vm2.
        self.logger.info('We will induce a mastership switch')
        port_dict = {'admin_state_up': False}
        self.update_port(port1_obj['id'], port_dict)
        self.logger.info(
            '%s should become the new VRRP master' % vm2_fixture.vm_name)
        vrrp_master = vm2_fixture
        assert self.vrrp_mas_chk(dst_vm=vrrp_master, vn=vn1_fixture, ip=vIP)

        # Re-run the traffic and fat-flow checks against the new master.
        for vm in client_fixtures:
            for port in sport:
                assert self.send_nc_traffic(vm, vrrp_master,
                    port, dport, proto, ip=vIP)

        dst_compute_fix = self.compute_fixtures_dict[vrrp_master.vm_node_ip]
        vrf_id_dst = dst_compute_fix.get_vrf_id(vrrp_master.vn_fq_names[0])
        for vm in client_fixtures:
            self.verify_fat_flow_on_compute(dst_compute_fix, vm.vm_ip,
                        vIP, fat_port, proto, vrf_id_dst,
                        fat_flow_count=1)
Exemplo n.º 20
0
    def test_disable_policy_with_aap(self):
        """
        Description: Verify disabling policy with allowed address pair
        Steps:
            1. launch 1 VN and launch 3 VMs in it.1 client VMs and 2 server VMs.
            2. disable the policy on all the VMIs.
            3. from client VMs,send udp traffic to servers and
                verify mastership and no flow
            4. Induce mastership switch and verify no flow again
        Pass criteria:
            1. flow and mastership verification should pass
        """
        vn1_fixture = self.create_vns(count=1)[0]
        vm1_name = get_random_name('vm1')
        vm2_name = get_random_name('vm2')
        result = False
        # vIP is the VRRP virtual IP shared by the two server VMs.
        vIP = self.get_random_ip_from_vn(vn1_fixture)[0]
        image = 'ubuntu-traffic'

        # Pre-create the ports so AAP can be configured on the VMIs.
        port1_obj = self.create_port(net_id=vn1_fixture.vn_id)
        port2_obj = self.create_port(net_id=vn1_fixture.vn_id)
        vm1_fixture = self.create_vm(vn1_fixture, vm1_name,
                                     image_name=image,
                                     port_ids=[port1_obj['id']])
        vm2_fixture = self.create_vm(vn1_fixture, vm2_name,
                                     image_name=image,
                                     port_ids=[port2_obj['id']])

        client_fixture = self.create_vms(vn_fixture= vn1_fixture,count=1,
            image_name=image)[0]
        vm_fix_list = [client_fixture, vm1_fixture, vm2_fixture]
        self.verify_vms(vm_fix_list)

        proto = 'udp'
        dport = 53
        baseport = random.randint(12000, 65000)
        sport = str(baseport)
        compute_node_ips = []
        compute_fixtures = []

        #Get all the VMs compute IPs
        for vm in vm_fix_list:
            if vm.vm_node_ip not in compute_node_ips:
                compute_node_ips.append(vm.vm_node_ip)

        #Get the compute fixture for all the concerned computes
        for ip in compute_node_ips:
            compute_fixtures.append(self.compute_fixtures_dict[ip])

        # With policy disabled, no flows should be created for the traffic.
        self.disable_policy_for_vms(vm_fix_list)

        port_list = [port1_obj, port2_obj]
        for port in port_list:
            # Fix: pass the port id, not the whole port dict — consistent
            # with the other AAP tests and with config_aap's use of the
            # argument as a port/VMI id.
            self.config_aap(port['id'], vIP, mac=port['mac_address'])

        # Start VRRP with vm1 as the preferred master (higher priority).
        self.config_vrrp(vm1_fixture, vIP, '20')
        self.config_vrrp(vm2_fixture, vIP, '10')
        vrrp_master = vm1_fixture
        if is_v6(vIP):
            #current version of vrrpd does not support IPv6, as a workaround add the vIP
            #    on one of the VM and start ping6 to make the VM as master
            assert vm1_fixture.add_ip_on_vm(vIP)
            assert client_fixture.ping_with_certainty(vIP), 'Ping to vIP failure'


        assert self.vrrp_mas_chk(vrrp_master, vn1_fixture, vIP)

        assert self.send_nc_traffic(client_fixture, vrrp_master,
            sport, dport, proto, ip=vIP)

        # Policy is disabled, so expect zero forward/reverse flows.
        for fixture in compute_fixtures:
            vrf_id = fixture.get_vrf_id(vrrp_master.vn_fq_names[0])
            self.verify_flow_on_compute(fixture, client_fixture.vm_ip,
                        vIP, vrf_id, vrf_id, sport, dport, proto,
                        ff_exp=0, rf_exp=0)

        if is_v6(vIP):
            #Skip further verification as current version of vrrpd does not support IPv6
            return True
        # Take vm1's port down so VRRP mastership fails over to vm2.
        self.logger.info('We will induce a mastership switch')
        port_dict = {'admin_state_up': False}
        self.update_port(port1_obj['id'], port_dict)
        self.logger.info(
            '%s should become the new VRRP master' % vm2_fixture.vm_name)
        vrrp_master = vm2_fixture
        assert self.vrrp_mas_chk(vrrp_master, vn1_fixture, vIP)

        assert self.send_nc_traffic(client_fixture, vrrp_master,
            sport, dport, proto, ip=vIP)

        # Still no flows expected while policy stays disabled.
        for fixture in compute_fixtures:
            vrf_id = fixture.get_vrf_id(vrrp_master.vn_fq_names[0])
            self.verify_flow_on_compute(fixture, client_fixture.vm_ip,
                        vIP, vrf_id, vrf_id, sport, dport, proto,
                        ff_exp=0, rf_exp=0)

        # Re-enable policy: the same traffic must now create flows.
        self.disable_policy_for_vms(vm_fix_list, disable=False)

        assert self.send_nc_traffic(client_fixture, vrrp_master,
            sport, dport, proto, ip=vIP)

        for fixture in compute_fixtures:
            vrf_id = fixture.get_vrf_id(vrrp_master.vn_fq_names[0])
            self.verify_flow_on_compute(fixture, client_fixture.vm_ip,
                        vIP, vrf_id, vrf_id, sport, dport, proto,
                        ff_exp=1, rf_exp=1)
Exemplo n.º 21
0
    def test_fat_flow_with_aap_ignore_addrs(self):
        """
        Description: Verify Fat flows with ignore addrs with allowed address pair
        Steps:
            1. launch 1 VN and launch 4 VMs in it.2 client VMs and 2 server VMs on different node.
            2. on server VMs, config Fat flow for udp with port 0
            3. from client VMs,send udp traffic to servers and
                verify mastership and Fat flow
            4. Induce mastership switch and verify the Fat flow again
        Pass criteria:
            1. Fat flow and mastership verification should pass
        """
        compute_hosts = self.orch.get_hosts()
        if len(compute_hosts) < 2:
            raise self.skipTest("Skipping test case,"
                                    "this test needs atleast 2 compute nodes")

        vn1_fixture = self.create_vns(count=1)[0]
        vm1_name = get_random_name('vm1')
        vm2_name = get_random_name('vm2')
        # Virtual IP shared by the two server VMs through AAP + VRRP.
        vIP = self.get_random_ip_from_vn(vn1_fixture)[0]
        image = 'ubuntu-traffic'

        port1_obj = self.create_port(net_id=vn1_fixture.vn_id)
        port2_obj = self.create_port(net_id=vn1_fixture.vn_id)
        # Both server VMs are pinned to compute_hosts[0]; the clients go to
        # compute_hosts[1] so traffic crosses computes.
        vm1_fixture = self.create_vm(vn1_fixture, vm1_name,
                                     image_name=image,
                                     port_ids=[port1_obj['id']],
                                     node_name=compute_hosts[0])
        vm2_fixture = self.create_vm(vn1_fixture, vm2_name,
                                     image_name=image,
                                     port_ids=[port2_obj['id']],
                                     node_name=compute_hosts[0])

        client_fixtures = self.create_vms(vn_fixture=vn1_fixture, count=2,
            node_name=compute_hosts[1], image_name=image)
        assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up'
        assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up'
        self.verify_vms(client_fixtures)

        proto = 'udp'
        dport_list = [53, 54]
        baseport = random.randint(12000, 65000)
        sport_list = [str(baseport), str(baseport+1)]
        port_list = [port1_obj, port2_obj]

        # Allow the vIP on both server VMIs, then elect a VRRP master
        # (vm1 wins with the higher priority, 20 > 10).
        for port in port_list:
            self.config_aap(port['id'], vIP, mac=port['mac_address'])
        self.config_vrrp(vm1_fixture, vIP, '20')
        self.config_vrrp(vm2_fixture, vIP, '10')
        vrrp_master = vm1_fixture
        if is_v6(vIP):
            #current version of vrrpd does not support IPv6, as a workaround add the vIP
            #    on one of the VM and start ping6 to make the VM as master
            assert vm1_fixture.add_ip_on_vm(vIP)
            assert client_fixtures[0].ping_with_certainty(vIP), 'Ping to vIP failure'

        assert self.vrrp_mas_chk(dst_vm=vrrp_master, vn=vn1_fixture, ip=vIP)

        # Three Fat-flow configs: ignore source addr, ignore destination
        # addr, and ignore destination addr with wildcard port 0.
        fat_ignore_src = {'proto':proto,'port':dport_list[0],
            'ignore_address':'source'}
        fat_ignore_dst = {'proto':proto,'port':dport_list[1],
            'ignore_address':'destination'}
        fat_ignore_dst_port_0 = {'proto':proto,'port':0,
            'ignore_address':'destination'}
        fat_config_list = [fat_ignore_src, fat_ignore_dst,
            fat_ignore_dst_port_0]

        dst_compute_fix = self.compute_fixtures_dict[vrrp_master.vm_node_ip]
        vrf_id_dst = dst_compute_fix.get_vrf_id(vrrp_master.vn_fq_names[0])
        for fat_config in fat_config_list:
            self.add_fat_flow_to_vmis([port1_obj['id'], port2_obj['id']],
                fat_config)
            # Send from every client, every sport/dport combination; the
            # ignored address should collapse them into a single Fat flow.
            for vm in client_fixtures:
                for sport in sport_list:
                    for dport in dport_list:
                        assert self.send_nc_traffic(vm, vrrp_master,
                            sport, dport, proto, ip=vIP)

            if fat_config['ignore_address'] == 'source':
                # Ignored address shows up as the all-zero address in the flow.
                fat_src_ip = '0.0.0.0' if self.inputs.get_af() == 'v4' else '::'
                for vm in client_fixtures:
                    self.verify_fat_flow_on_compute(dst_compute_fix, vm.vm_ip,
                                fat_src_ip, fat_config['port'], proto, vrf_id_dst,
                                fat_flow_count=1)
            if fat_config['ignore_address'] == 'destination':
                fat_dst_ip = '0.0.0.0' if self.inputs.get_af() == 'v4' else '::'
                self.verify_fat_flow_on_compute(dst_compute_fix, fat_dst_ip,
                            vIP, fat_config['port'], proto, vrf_id_dst,
                            fat_flow_count=1)
            # Clean up before trying the next Fat-flow configuration.
            self.remove_fat_flow_on_vmis([port1_obj['id'], port2_obj['id']],
                fat_config)
Exemplo n.º 22
0
    def test_disable_policy_with_aap(self):
        """
        Description: Verify disabling policy with allowed address pair
        Steps:
            1. launch 1 VN and launch 3 VMs in it.1 client VMs and 2 server VMs.
            2. disable the policy on all the VMIs.
            3. from client VMs,send udp traffic to servers and
                verify mastership and no flow
            4. Induce mastership switch and verify no flow again
        Pass criteria:
            1. flow and mastership verification should pass
        """
        vn1_fixture = self.create_vns(count=1)[0]
        vm1_name = get_random_name('vm1')
        vm2_name = get_random_name('vm2')
        # Virtual IP shared by the two server VMs through AAP + VRRP.
        vIP = self.get_random_ip_from_vn(vn1_fixture)[0]
        image = 'ubuntu-traffic'

        port1_obj = self.create_port(net_id=vn1_fixture.vn_id)
        port2_obj = self.create_port(net_id=vn1_fixture.vn_id)
        vm1_fixture = self.create_vm(vn1_fixture,
                                     vm1_name,
                                     image_name=image,
                                     port_ids=[port1_obj['id']])
        vm2_fixture = self.create_vm(vn1_fixture,
                                     vm2_name,
                                     image_name=image,
                                     port_ids=[port2_obj['id']])

        client_fixture = self.create_vms(vn_fixture=vn1_fixture,
                                         count=1,
                                         image_name=image)[0]
        vm_fix_list = [client_fixture, vm1_fixture, vm2_fixture]
        self.verify_vms(vm_fix_list)

        proto = 'udp'
        dport = 53
        baseport = random.randint(12000, 65000)
        sport = str(baseport)
        compute_node_ips = []
        compute_fixtures = []

        #Get all the VMs compute IPs
        for vm in vm_fix_list:
            if vm.vm_node_ip not in compute_node_ips:
                compute_node_ips.append(vm.vm_node_ip)

        #Get the compute fixture for all the concerned computes
        for ip in compute_node_ips:
            compute_fixtures.append(self.compute_fixtures_dict[ip])

        self.disable_policy_for_vms(vm_fix_list)

        port_list = [port1_obj, port2_obj]
        for port in port_list:
            # BUG FIX: config_aap expects the port UUID, not the whole port
            # dict — it forwards the value to update_port()/vmi_id. The
            # sibling AAP tests pass port['id'] here as well.
            self.config_aap(port['id'], vIP, mac=port['mac_address'])

        # vm1 becomes VRRP master (priority 20 > 10).
        self.config_vrrp(vm1_fixture, vIP, '20')
        self.config_vrrp(vm2_fixture, vIP, '10')
        vrrp_master = vm1_fixture
        if is_v6(vIP):
            #current version of vrrpd does not support IPv6, as a workaround add the vIP
            #    on one of the VM and start ping6 to make the VM as master
            assert vm1_fixture.add_ip_on_vm(vIP)
            assert client_fixture.ping_with_certainty(
                vIP), 'Ping to vIP failure'

        assert self.vrrp_mas_chk(vrrp_master, vn1_fixture, vIP)

        assert self.send_nc_traffic(client_fixture,
                                    vrrp_master,
                                    sport,
                                    dport,
                                    proto,
                                    ip=vIP)

        # Policy disabled: expect no forward/reverse flows on any compute.
        for fixture in compute_fixtures:
            vrf_id = fixture.get_vrf_id(vrrp_master.vn_fq_names[0])
            self.verify_flow_on_compute(fixture,
                                        client_fixture.vm_ip,
                                        vIP,
                                        vrf_id,
                                        vrf_id,
                                        sport,
                                        dport,
                                        proto,
                                        ff_exp=0,
                                        rf_exp=0)

        if is_v6(vIP):
            #Skip further verification as current version of vrrpd does not support IPv6
            return True
        self.logger.info('We will induce a mastership switch')
        # Bring the master's port admin-down so VRRP fails over to vm2.
        port_dict = {'admin_state_up': False}
        self.update_port(port1_obj['id'], port_dict)
        self.logger.info('%s should become the new VRRP master' %
                         vm2_fixture.vm_name)
        vrrp_master = vm2_fixture
        assert self.vrrp_mas_chk(vrrp_master, vn1_fixture, vIP)

        assert self.send_nc_traffic(client_fixture,
                                    vrrp_master,
                                    sport,
                                    dport,
                                    proto,
                                    ip=vIP)

        # Still no flows expected after the failover while policy is off.
        for fixture in compute_fixtures:
            vrf_id = fixture.get_vrf_id(vrrp_master.vn_fq_names[0])
            self.verify_flow_on_compute(fixture,
                                        client_fixture.vm_ip,
                                        vIP,
                                        vrf_id,
                                        vrf_id,
                                        sport,
                                        dport,
                                        proto,
                                        ff_exp=0,
                                        rf_exp=0)

        # Re-enable policy: flows must now be created again.
        self.disable_policy_for_vms(vm_fix_list, disable=False)

        assert self.send_nc_traffic(client_fixture,
                                    vrrp_master,
                                    sport,
                                    dport,
                                    proto,
                                    ip=vIP)

        for fixture in compute_fixtures:
            vrf_id = fixture.get_vrf_id(vrrp_master.vn_fq_names[0])
            self.verify_flow_on_compute(fixture,
                                        client_fixture.vm_ip,
                                        vIP,
                                        vrf_id,
                                        vrf_id,
                                        sport,
                                        dport,
                                        proto,
                                        ff_exp=1,
                                        rf_exp=1)
Exemplo n.º 23
0
    def test_fat_flow_with_aap_ignore_addrs(self):
        """
        Description: Verify Fat flows with ignore addrs with allowed address pair
        Steps:
            1. launch 1 VN and launch 4 VMs in it.2 client VMs and 2 server VMs on different node.
            2. on server VMs, config Fat flow for udp with port 0
            3. from client VMs,send udp traffic to servers and
                verify mastership and Fat flow
            4. Induce mastership switch and verify the Fat flow again
        Pass criteria:
            1. Fat flow and mastership verification should pass
        """
        compute_hosts = self.orch.get_hosts()
        if len(compute_hosts) < 2:
            raise self.skipTest("Skipping test case,"
                                "this test needs atleast 2 compute nodes")

        vn1_fixture = self.create_vns(count=1)[0]
        vm1_name = get_random_name('vm1')
        vm2_name = get_random_name('vm2')
        # Virtual IP shared by the two server VMs through AAP + VRRP.
        vIP = self.get_random_ip_from_vn(vn1_fixture)[0]
        image = 'ubuntu-traffic'

        port1_obj = self.create_port(net_id=vn1_fixture.vn_id)
        port2_obj = self.create_port(net_id=vn1_fixture.vn_id)
        # Both server VMs are pinned to compute_hosts[0]; the clients go to
        # compute_hosts[1] so traffic crosses computes.
        vm1_fixture = self.create_vm(vn1_fixture,
                                     vm1_name,
                                     image_name=image,
                                     port_ids=[port1_obj['id']],
                                     node_name=compute_hosts[0])
        vm2_fixture = self.create_vm(vn1_fixture,
                                     vm2_name,
                                     image_name=image,
                                     port_ids=[port2_obj['id']],
                                     node_name=compute_hosts[0])

        client_fixtures = self.create_vms(vn_fixture=vn1_fixture,
                                          count=2,
                                          node_name=compute_hosts[1],
                                          image_name=image)
        assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up'
        assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up'
        self.verify_vms(client_fixtures)

        proto = 'udp'
        dport_list = [53, 54]
        baseport = random.randint(12000, 65000)
        sport_list = [str(baseport), str(baseport + 1)]
        port_list = [port1_obj, port2_obj]

        # Allow the vIP on both server VMIs, then elect a VRRP master
        # (vm1 wins with the higher priority, 20 > 10).
        for port in port_list:
            self.config_aap(port['id'], vIP, mac=port['mac_address'])
        self.config_vrrp(vm1_fixture, vIP, '20')
        self.config_vrrp(vm2_fixture, vIP, '10')
        vrrp_master = vm1_fixture
        if is_v6(vIP):
            #current version of vrrpd does not support IPv6, as a workaround add the vIP
            #    on one of the VM and start ping6 to make the VM as master
            assert vm1_fixture.add_ip_on_vm(vIP)
            assert client_fixtures[0].ping_with_certainty(
                vIP), 'Ping to vIP failure'

        assert self.vrrp_mas_chk(dst_vm=vrrp_master, vn=vn1_fixture, ip=vIP)

        # Three Fat-flow configs: ignore source addr, ignore destination
        # addr, and ignore destination addr with wildcard port 0.
        fat_ignore_src = {
            'proto': proto,
            'port': dport_list[0],
            'ignore_address': 'source'
        }
        fat_ignore_dst = {
            'proto': proto,
            'port': dport_list[1],
            'ignore_address': 'destination'
        }
        fat_ignore_dst_port_0 = {
            'proto': proto,
            'port': 0,
            'ignore_address': 'destination'
        }
        fat_config_list = [
            fat_ignore_src, fat_ignore_dst, fat_ignore_dst_port_0
        ]

        dst_compute_fix = self.compute_fixtures_dict[vrrp_master.vm_node_ip]
        vrf_id_dst = dst_compute_fix.get_vrf_id(vrrp_master.vn_fq_names[0])
        for fat_config in fat_config_list:
            self.add_fat_flow_to_vmis([port1_obj['id'], port2_obj['id']],
                                      fat_config)
            # Send from every client, every sport/dport combination; the
            # ignored address should collapse them into a single Fat flow.
            for vm in client_fixtures:
                for sport in sport_list:
                    for dport in dport_list:
                        assert self.send_nc_traffic(vm,
                                                    vrrp_master,
                                                    sport,
                                                    dport,
                                                    proto,
                                                    ip=vIP)

            if fat_config['ignore_address'] == 'source':
                # Ignored address shows up as the all-zero address in the flow.
                fat_src_ip = '0.0.0.0' if self.inputs.get_af(
                ) == 'v4' else '::'
                for vm in client_fixtures:
                    self.verify_fat_flow_on_compute(dst_compute_fix,
                                                    vm.vm_ip,
                                                    fat_src_ip,
                                                    fat_config['port'],
                                                    proto,
                                                    vrf_id_dst,
                                                    fat_flow_count=1)
            if fat_config['ignore_address'] == 'destination':
                fat_dst_ip = '0.0.0.0' if self.inputs.get_af(
                ) == 'v4' else '::'
                self.verify_fat_flow_on_compute(dst_compute_fix,
                                                fat_dst_ip,
                                                vIP,
                                                fat_config['port'],
                                                proto,
                                                vrf_id_dst,
                                                fat_flow_count=1)
            # Clean up before trying the next Fat-flow configuration.
            self.remove_fat_flow_on_vmis([port1_obj['id'], port2_obj['id']],
                                         fat_config)