def test_fwp_on_vhost0(self):
     src_node = self.inputs.compute_names[0]
     dst_node = self.inputs.compute_names[1]
     encrypt_nodes = self.inputs.compute_names[:2]
     self.enable_encryption(encrypt_nodes)
     self.validate_tunnels(encrypt_nodes, encrypt_nodes)
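     # Read the vhost0 interface (VMI) of each compute node and enable policy on it so that
     # the firewall policy is enforced on fabric (vhost0) traffic; disable it again on cleanup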
     src_vhost0_fqname = ['default-global-system-config',
                          src_node, 'vhost0']
     src_vhost0_uuid = self.vnc_h.virtual_machine_interface_read(
                       fq_name=src_vhost0_fqname).uuid
     src_vhost0 = PortFixture(connections=self.connections,
                              uuid=src_vhost0_uuid)
     src_vhost0.setUp()
     src_vhost0.enable_policy()
     self.addCleanup(src_vhost0.disable_policy)
     dst_vhost0_fqname = ['default-global-system-config',
                          dst_node, 'vhost0']
     dst_vhost0_uuid = self.vnc_h.virtual_machine_interface_read(
                       fq_name=dst_vhost0_fqname).uuid
     dst_vhost0 = PortFixture(connections=self.connections,
                              uuid=dst_vhost0_uuid)
     dst_vhost0.setUp()
     dst_vhost0.enable_policy()
     self.addCleanup(dst_vhost0.disable_policy)
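     # Attach the firewall policy to the ip-fabric VN, verify encrypted TCP traffic on port 7777
     # between the hosts, then verify traffic is blocked once the rule action is changed to 'deny'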
     vn_fqname = ["default-domain", "default-project", "ip-fabric"]
     fab_vn_uuid = self.vnc_h.virtual_network_read(fq_name=vn_fqname).uuid
     fab_vn = VNFixture(connections=self.connections, uuid=fab_vn_uuid)
     fab_vn.setUp()
     self.setup_firewall_policy(fab_vn, src_vhost0, dst_vhost0)
     self.fwr.update(protocol='tcp', dports=(7777, 7777))
     self.verify_encrypt_traffic_bw_hosts(src_node, dst_node, '7777')
     self.fwr.update(action='deny', match='None')
     self.verify_encrypt_traffic_bw_hosts(src_node, dst_node, '7777',
                                          expectation=False)
Example #2
 def create_vn(self, connections=None, verify=True, option='contrail', **kwargs):
     connections = connections or self.connections
     vn_fixture = self.create_fixture(VNFixture, connections=connections,
                                      option=option, **kwargs)
     if vn_fixture and verify:
          #Workaround: verify using admin creds since RI and other system objects
          #won't be visible to the regular user
         vn_admin_fixture = VNFixture(connections=self.connections,
                                      option=option, uuid=vn_fixture.uuid)
         vn_admin_fixture.read()
         assert vn_admin_fixture.verify_on_setup(), 'VN verification failed'
     return vn_fixture
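A minimal usage sketch of the helper above (hypothetical test body; it assumes the kwargs are forwarded to VNFixture and that get_random_name/get_random_cidr are imported, as in the other examples on this page):

 def test_create_vn_example(self):
     # Create a VN with a random name and subnet; create_vn() re-verifies it with admin creds
     vn_fixture = self.create_vn(vn_name=get_random_name('demo-vn'),
                                 subnets=[get_random_cidr()])
     assert vn_fixture.uuid, 'VN creation failed'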
Example #3
 def test_service_custom_isolation(self):
     """
      Verify reachability of a Service in and out of a custom isolated namespace.
      Verify the following reachability:
      1. A pod inside the custom isolated namespace should not be able to reach a service within the same
         namespace or any service outside the namespace.
      2. After creating a Contrail network policy between the custom VN and the service VN, a pod inside the
         custom isolated namespace should be able to reach services within and outside this namespace.
      3. Pods inside a non-isolated namespace should be able to reach a service inside the custom isolated namespace.
     """
     client1, client2 = self.setup_common_namespaces_pods(prov_service = True)
     #check 1
     assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
                                   test_pod=client2[2], expectation = False)
     assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
                                   test_pod=client2[2],  expectation = False)
     #check 2
      #Create the policy between the custom VN and the default service VN
     policy_name='allow-btw-custom-ns-and-service'
     if self.inputs.slave_orchestrator == 'kubernetes':
         k8s_default_service_vn_name = self.connections.project_name + '-default-service-network'
     else:
         k8s_default_service_vn_name = "k8s-default-service-network"
     k8s_default_service_vn_fq_name = [self.connections.domain_name,
                                     self.connections.project_name,
                                     k8s_default_service_vn_name]
     k8s_default_service_vn_obj = self.vnc_lib.virtual_network_read(
                                 fq_name = k8s_default_service_vn_fq_name)
     k8s_service_vn_fixt = VNFixture(connections = self.connections,
                                    vn_name = k8s_default_service_vn_name,
                                    option="contrail",
                                    uuid = k8s_default_service_vn_obj.uuid)
     k8s_service_vn_fixt.setUp()
     vn_service_policy = self.setup_policy_between_vns(client2[7],
                                                       k8s_service_vn_fixt,
                                                       api="contrail",
                                                       connections=self.connections)
     assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
                                   test_pod=client2[2])
     assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
                                   test_pod=client2[2])
     #check 3
      # Is disabling service isolation required? For now, it works without disabling it.
     #client2[4].disable_service_isolation()
     assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
                                   test_pod=client1[2])
Example #4
 @classmethod
 def create_only_vn(cls, vn_name=None, vn_subnets=None, vxlan_id=None,
                enable_dhcp=True, **kwargs):
     '''Classmethod to do only VN creation
     '''
     if not vn_name:
         vn_name = get_random_name('vn')
     connections = kwargs.pop('connections', None) or cls.connections
     project_name = kwargs.pop('project_name', None) or connections.project_name
     vn_fixture = VNFixture(project_name=project_name,
                   connections=connections,
                   inputs=connections.inputs,
                   vn_name=vn_name,
                   subnets=vn_subnets,
                   vxlan_id=vxlan_id,
                   enable_dhcp=enable_dhcp,
                   **kwargs)
     vn_fixture.setUp()
     return vn_fixture
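A minimal usage sketch (hypothetical; MyTest stands for a test class that defines the classmethod above and has cls.connections initialized; the caller owns cleanup since only setUp() is invoked):

 vn_fixture = MyTest.create_only_vn(vn_name='demo-vn',
                                    vn_subnets=['192.168.10.0/24'])
 # ... use the VN ...
 vn_fixture.cleanUp()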
Example #5
    def create_vn(self, vn_name=None, vn_subnets=None, vxlan_id=None,
        enable_dhcp=True, cleanup=True):
        if not vn_name:
            vn_name = get_random_name('vn')
        if not vn_subnets:
            vn_subnets = [get_random_cidr()]
        vn_fixture = VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      inputs=self.inputs,
                      vn_name=vn_name,
                      subnets=vn_subnets,
                      vxlan_id=vxlan_id,
                      enable_dhcp=enable_dhcp)
        vn_fixture.setUp()
        if cleanup:
            self.addCleanup(vn_fixture.cleanUp)

        return vn_fixture
Example #6
 @classmethod
 def create_only_vn(cls, vn_name=None, vn_subnets=None, vxlan_id=None, enable_dhcp=True, **kwargs):
     """Classmethod to do only VN creation
     """
     if not vn_name:
         vn_name = get_random_name("vn")
     if not vn_subnets:
         vn_subnets = [get_random_cidr()]
     vn_fixture = VNFixture(
         project_name=cls.inputs.project_name,
         connections=cls.connections,
         inputs=cls.inputs,
         vn_name=vn_name,
         subnets=vn_subnets,
         vxlan_id=vxlan_id,
         enable_dhcp=enable_dhcp,
         **kwargs
     )
     vn_fixture.setUp()
     return vn_fixture
Example #7
 def test_services_custom_isolation_post_kube_manager_restart(self):
     """
      Verify that after a restart of contrail-kube-manager, service reachability to
      and from the custom isolated namespace/pod is not affected.
      Verify the following:
      1. Verify reachability between pods and services
      2. Restart contrail-kube-manager
      3. Verify reachability between pods and services
     """
     client1, client2 = self.setup_common_namespaces_pods(prov_service = True)
     policy_name='allow-btw-custom-ns-and-service'
     if self.inputs.slave_orchestrator == 'kubernetes':
         k8s_default_service_vn_name = self.connections.project_name + '-default-service-network'
     else:
         k8s_default_service_vn_name = "k8s-default-service-network"
     k8s_default_service_vn_fq_name = self.connections.inputs.project_fq_name + \
                                         [k8s_default_service_vn_name]
     k8s_default_service_vn_obj = self.vnc_lib.virtual_network_read(
                                 fq_name = k8s_default_service_vn_fq_name)
     k8s_service_vn_fixt = VNFixture(connections = self.connections,
                                    vn_name = k8s_default_service_vn_name,
                                    option="contrail",
                                    uuid = k8s_default_service_vn_obj.uuid)
     k8s_service_vn_fixt.setUp()
     vn_service_policy = self.setup_policy_between_vns(client2[6],
                                                       k8s_service_vn_fixt,
                                                       api="contrail",
                                                       connections=self.connections)
     assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
                                   test_pod=client2[2])
     assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
                                   test_pod=client2[2])
     assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
                                   test_pod=client1[2])
     self.restart_kube_manager()
     assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
                                   test_pod=client2[2])
     assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
                                   test_pod=client2[2])
     assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
                                   test_pod=client1[2])
Example #8
def main():
    import sys
    from vn_test import VNFixture
    from vm_test import VMFixture
#    sys.settrace(tracefunc)
#    obj = LBaasFixture(api_type='neutron', name='LB', connections=setup_test_infra(), network_id='4b39a2bd-4528-40e8-b848-28084e59c944', members={'vms': ['a72ad607-f1ca-44f2-b31e-e825a3f2d408'], 'address': ['192.168.1.10']}, vip_net_id='4b39a2bd-4528-40e8-b848-28084e59c944', protocol='TCP', port='22', healthmonitors=[{'delay':5, 'timeout':5, 'max_retries':5, 'probe_type':'PING'}])
    conn = setup_test_infra()
    vnfix = VNFixture(connections=conn, vn_name='admin-33688095')
    vnfix.setUp()
    fip_fix = VNFixture(connections=conn, router_external=True, vn_name='fip-vn')
    fip_fix.setUp()
    subnet = vnfix.get_cidrs()[0]
    vm_fix = VMFixture(connections=conn, vn_obj=vnfix.obj, vm_name='member-vm')
    vm_fix.setUp()
    obj = LBaasV2Fixture(lb_name='LB-Test', connections=conn, network_id=vnfix.uuid,
                         fip_net_id=fip_fix.uuid, listener_name='Listener-Test', vip_port='80',
                         vip_protocol='HTTP', pool_name='Pool-Test', pool_port='80', pool_protocol='HTTP',
                         pool_algorithm='ROUND_ROBIN', members={'vms': [vm_fix.vm_id]},
                         hm_delay=5, hm_timeout=5, hm_max_retries=5, hm_probe_type='PING',
                        )
    obj.setUp()
    import pdb; pdb.set_trace()
    obj.verify_on_setup()
    obj.cleanUp()
    exit()
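    # NOTE: the statements below this exit() are unreachable; they are kept as manual debugging snippets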
    import pdb; pdb.set_trace()
#    obj = LBaasFixture(api_type='neutron', uuid='58e5fb2c-ec47-4eb8-b4bf-9c66b0473f78', connections=setup_test_infra())
    obj.verify_on_setup()
    obj.delete_custom_attr('max_sess_rate')
    obj.add_custom_attr('client_timeout', 20000)
    obj.delete_custom_attr('server_timeout')
    obj.add_custom_attr('max_sess_rate', 20000)
    obj.delete_custom_attr('rate_limit_sessions')
    obj.add_custom_attr('rate_limit_sessions', 20)
    obj.delete_custom_attr('max_conn')
    obj.add_custom_attr('max_conn', 20)
    obj.delete_custom_attr('http_server_close')
    obj.add_custom_attr('http_server_close', "False")
    obj.verify_on_setup()
    obj.create_fip_on_vip()
    obj.verify_on_setup()
    obj.delete_fip_on_vip()
    obj.verify_on_setup()
    obj.delete_vip()
    obj.verify_on_setup()
    obj.check_and_create_vip()
    obj.verify_on_setup()
    obj.delete_member(address=obj.member_ips[1])
    obj.verify_on_setup()
    obj.create_member(address=get_random_ip(subnet))
    obj.verify_on_setup()
    obj.delete_hmon(obj.hmons.keys()[0])
    obj.verify_on_setup()
    obj.create_hmon({'delay': 5, 'max_retries': 5, 'probe_type': 'PING', 'timeout': 10})
    obj.verify_on_setup()
    obj.cleanUp()
    vm_fix.cleanUp()
    vnfix.cleanUp()
    # vip_fix.cleanUp()  # vip_fix is not created in this main(); calling it would raise a NameError
    fip_fix.cleanUp()
Example #9
    def test_lbaas_with_different_lb(self):
        '''Create LB, LISTENER, POOL and MEMBER.
           Create a FIP and associate it to the VIP, and create a VM in the FIP network.
           Verify: the pool, member and VIP get created; after VIP creation a netns is
           created on the compute node and the haproxy process starts, fail otherwise.
           Verify the different LB methods.
        '''
        result = True
        pool_members = {}
        members = []

        fip_fix = self.useFixture(
            VNFixture(connections=self.connections, router_external=True))
        client_vm1_fixture = self.create_vm(fip_fix,
                                            flavor='contrail_flavor_small',
                                            image_name='ubuntu')

        vn_vm_fix = self.create_vn_and_its_vms(no_of_vm=3)

        vn_vip_fixture = vn_vm_fix[0]
        lb_pool_servers = vn_vm_fix[1]

        assert client_vm1_fixture.wait_till_vm_is_up()
        for VMs in lb_pool_servers:
            members.append(VMs.vm_ip)

        pool_members.update({'address': members})

        pool_name = get_random_name('mypool')
        lb_method = 'ROUND_ROBIN'
        protocol = 'HTTP'
        protocol_port = 80
        vip_name = get_random_name('myvip')
        listener_name = get_random_name('RR')

        self.logger.info("Verify Round Robin Method")
        rr_listener = self.create_lbaas(vip_name,
                                        vn_vip_fixture.uuid,
                                        pool_name=pool_name,
                                        pool_algorithm=lb_method,
                                        pool_protocol=protocol,
                                        pool_port=HTTP_PORT,
                                        members=pool_members,
                                        listener_name=listener_name,
                                        fip_net_id=fip_fix.uuid,
                                        vip_port=HTTP_PORT,
                                        vip_protocol='HTTP',
                                        hm_delay=5,
                                        hm_timeout=5,
                                        hm_max_retries=5,
                                        hm_probe_type=HTTP_PROBE)

        rr_listener.verify_on_setup()
        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers, rr_listener.fip_ip),\
            "Verify LB Method failed for ROUND ROBIN"

        self.logger.info("Delete Round Robin Listener")
        rr_listener.delete()

        listener_name = get_random_name('SI')
        lb_method = 'SOURCE_IP'
        pool_name = get_random_name('mypool')

        self.logger.info("Verify Source IP LB Method")
        self.logger.info("Add new Source IP  listener")
        si_listener = self.create_lbaas(vip_name,
                                        vn_vip_fixture.uuid,
                                        pool_name=pool_name,
                                        pool_algorithm=lb_method,
                                        pool_protocol=protocol,
                                        pool_port=HTTP_PORT,
                                        members=pool_members,
                                        listener_name=listener_name,
                                        fip_net_id=fip_fix.uuid,
                                        vip_port=HTTP_PORT,
                                        vip_protocol='HTTP',
                                        hm_delay=5,
                                        hm_timeout=5,
                                        hm_max_retries=5,
                                        hm_probe_type=HTTP_PROBE)

        #si_listener.add_custom_attr('max_conn', 20)
        si_listener.verify_on_setup()

        assert self.verify_lb_method(
            client_vm1_fixture, lb_pool_servers, si_listener.fip_ip,
            "SOURCE_IP"), "Verify LB Method for SOURCE IP failed"

        self.logger.info(
            "Verify Least Connections LB Method, by modifying the lb_algorithm"
        )
        si_listener.network_h.update_lbaas_pool(
            si_listener.pool_uuid, lb_algorithm='LEAST_CONNECTIONS')

        assert self.verify_lb_method(
            client_vm1_fixture, lb_pool_servers, si_listener.fip_ip,
            "LEAST_CONNECTIONS"
        ), "Verify LB Method failed for LEAST_CONNECTIONS"
    def test_config_add_change_while_control_nodes_go_down(self):
        """Tests related to configuration add, change, and delete while switching from normal mode
           to headless and back i.e. control nodes go down and come online."""

        if len(self.inputs.compute_ips) < 2:
            raise unittest.SkipTest("This test needs at least 2 compute nodes.")
        else:
            self.logger.info("Required resources are in place to run the test.")

        result = True
        topology_class_name = None

        self.compute_fixture_dict = {}
        for each_compute in self.inputs.compute_ips:
            self.compute_fixture_dict[each_compute] = self.useFixture(
                ComputeNodeFixture(
                    connections=self.connections,
                    node_ip=each_compute,
                    username=self.inputs.username,
                    password=self.inputs.password))
            mode = self.compute_fixture_dict[
                each_compute].get_agent_headless_mode()
            if mode is False:
                self.compute_fixture_dict[
                    each_compute].set_agent_headless_mode()
        #
        # Get config for test from topology
        result = True
        msg = []
        if not topology_class_name:
            topology_class_name = test_headless_vrouter_topo.sdn_headless_vrouter_topo

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IPs and pass it to topo if you want to pin
        # a VM to a particular node
        topo_obj = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        topo = {}
        topo_objs = {}
        config_topo = {}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo_obj))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_objs, config_topo, vm_fip_info = out['data']

        # Start Test
        proj = config_topo.keys()
        vms = config_topo[proj[0]]['vm'].keys()
        src_vm = config_topo[proj[0]]['vm'][vms[0]]
        dest_vm = config_topo[proj[0]]['vm'][vms[1]]
        flow_cache_timeout = 180

        # Setup Traffic.
        stream = Stream(protocol="ip", proto="icmp",
                        src=src_vm.vm_ip, dst=dest_vm.vm_ip)
        profile = ContinuousProfile(stream=stream, count=0, capfilter="icmp")

        tx_vm_node_ip = src_vm.vm_node_ip
        rx_vm_node_ip = dest_vm.vm_node_ip

        tx_local_host = Host(
            tx_vm_node_ip,
            self.inputs.username,
            self.inputs.password)
        rx_local_host = Host(
            rx_vm_node_ip,
            self.inputs.username,
            self.inputs.password)

        send_host = Host(
            src_vm.local_ip,
            src_vm.vm_username,
            src_vm.vm_password)
        recv_host = Host(
            dest_vm.local_ip,
            dest_vm.vm_username,
            dest_vm.vm_password)

        sender = Sender("icmp", profile, tx_local_host,
                        send_host, self.inputs.logger)
        receiver = Receiver("icmp", profile, rx_local_host,
                            recv_host, self.inputs.logger)

        receiver.start()
        sender.start()
        self.logger.info("Waiting for 5 sec for traffic to be setup ...")
        time.sleep(5)

        #self.start_ping(src_vm, dest_vm)

        flow_index_list = headless_vr_utils.get_flow_index_list(
            self,
            src_vm,
            dest_vm)

        headless_vr_utils.stop_all_control_services(self)
        self.addCleanup(self.inputs.start_service, 'supervisor-control', self.inputs.bgp_ips)
        time.sleep(10)
        headless_vr_utils.check_through_tcpdump(self, dest_vm, src_vm)

        flow_index_list2 = headless_vr_utils.get_flow_index_list(
            self,
            src_vm,
            dest_vm)

        if set(flow_index_list) == set(flow_index_list2):
            self.logger.info("Flow indexes have not changed.")
        else:
            self.logger.error(
                "Flow indexes have changed. Test Failed, Exiting")
            return False

        receiver.stop()
        sender.stop()
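        # Add and modify configuration (VMs, IPAMs, VNs, policies) while the control nodes are still down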
        project1_instance = config_topo['project1']['project']['project1']
        project1_instance.get_project_connections()
        vnet2_instance = config_topo['project1']['vn']['vnet2']

        # add VM to existing VN
        VM22_fixture = self.useFixture(
            VMFixture(
                connections=project1_instance.project_connections,
                vn_obj=vnet2_instance.obj,
                vm_name='VM22',
                project_name=project1_instance.project_name))

        # create new IPAM
        ipam3_obj = self.useFixture(
            IPAMFixture(
                project_obj=project1_instance,
                name='ipam3'))
        ipam4_obj = self.useFixture(
            IPAMFixture(
                project_obj=project1_instance,
                name='ipam4'))

        # create new VN
        VN3_fixture = self.useFixture(
            VNFixture(
                project_name=project1_instance.project_name,
                connections=project1_instance.project_connections,
                vn_name='VN3',
                inputs=project1_instance.inputs,
                subnets=['10.3.1.0/24'],
                ipam_fq_name=ipam3_obj.fq_name))

        VN4_fixture = self.useFixture(
            VNFixture(
                project_name=project1_instance.project_name,
                connections=project1_instance.project_connections,
                vn_name='VN4',
                inputs=project1_instance.inputs,
                subnets=['10.4.1.0/24'],
                ipam_fq_name=ipam4_obj.fq_name))

        # create policy
        policy_name = 'policy34'
        rules = []
        rules = [{'direction': '<>',
                  'protocol': 'icmp',
                  'dest_network': VN4_fixture.vn_fq_name,
                  'source_network': VN3_fixture.vn_fq_name,
                  'dst_ports': 'any',
                  'simple_action': 'pass',
                  'src_ports': 'any'},
                 {'direction': '<>',
                  'protocol': 'icmp',
                  'dest_network': VN3_fixture.vn_fq_name,
                  'source_network': VN4_fixture.vn_fq_name,
                  'dst_ports': 'any',
                  'simple_action': 'pass',
                  'src_ports': 'any'}]

        policy34_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy_name,
                rules_list=rules,
                inputs=project1_instance.inputs,
                connections=project1_instance.project_connections,
                project_fixture=project1_instance))

        # create VN to policy mapping in a dict of policy list.
        vn_policys = {
            VN3_fixture.vn_name: [policy_name],
            VN4_fixture.vn_name: [policy_name]}

        # create a policy object list of policies to be attached to a vm
        policy_obj_dict = {}
        policy_obj_dict[VN3_fixture.vn_name] = [policy34_fixture.policy_obj]
        policy_obj_dict[VN4_fixture.vn_name] = [policy34_fixture.policy_obj]

        # vn fixture dictionary.
        vn_obj_dict = {}
        vn_obj_dict[VN3_fixture.vn_name] = VN3_fixture
        vn_obj_dict[VN4_fixture.vn_name] = VN4_fixture

        # attach policy to VN
        VN3_policy_fixture = self.useFixture(
            VN_Policy_Fixture(
                connections=project1_instance.project_connections,
                vn_name=VN3_fixture.vn_name,
                policy_obj=policy_obj_dict,
                vn_obj=vn_obj_dict,
                vn_policys=vn_policys[
                    VN3_fixture.vn_name],
                project_name=project1_instance.project_name))

        VN4_policy_fixture = self.useFixture(
            VN_Policy_Fixture(
                connections=project1_instance.project_connections,
                vn_name=VN4_fixture.vn_name,
                policy_obj=policy_obj_dict,
                vn_obj=vn_obj_dict,
                vn_policys=vn_policys[
                    VN4_fixture.vn_name],
                project_name=project1_instance.project_name))

        # add VM to new VN
        VM31_fixture = self.useFixture(
            VMFixture(
                connections=project1_instance.project_connections,
                vn_obj=VN3_fixture.obj,
                vm_name='VM31',
                project_name=project1_instance.project_name))

        VM41_fixture = self.useFixture(
            VMFixture(
                connections=project1_instance.project_connections,
                vn_obj=VN4_fixture.obj,
                vm_name='VM41',
                project_name=project1_instance.project_name))

        # verification routines.
        test_flag = 0
        if ((VN3_fixture.verify_vn_in_api_server()) and
                (VN3_fixture.verify_vn_not_in_agent()) and
                (VN3_fixture.verify_vn_policy_in_api_server()['result'])):
            self.logger.info(
                "Verification of VN3 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VN3 FAILED while control nodes down.")
            test_flag = 1

        if ((VN4_fixture.verify_vn_in_api_server()) and
                (VN4_fixture.verify_vn_not_in_agent()) and
                (VN4_fixture.verify_vn_policy_in_api_server()['result'])):
            self.logger.info(
                "Verification of VN4 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VN4 FAILED while control nodes down.")
            test_flag = 1

        if ((VM22_fixture.verify_vm_launched()) and
                (VM22_fixture.verify_vm_in_api_server())):
            self.logger.info(
                "Verification of VM22 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VM22 FAILED while control nodes down.")
            test_flag = 1

        if ((VM31_fixture.verify_vm_launched()) and
                (VM31_fixture.verify_vm_in_api_server())):
            self.logger.info(
                "Verification of VM31 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VM31 FAILED while control nodes down.")
            test_flag = 1

        if ((VM41_fixture.verify_vm_launched()) and
                (VM41_fixture.verify_vm_in_api_server())):
            self.logger.info(
                "Verification of VM41 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VM41 FAILED while control nodes down.")
            test_flag = 1

        # start all control services.
        headless_vr_utils.start_all_control_services(self)

        # if something went wrong in the controller down state bail out here.
        if test_flag == 1:
            self.logger.error("Verifications and Test failed while the controllers were down in \
                               headless state of agent. Check earlier error logs")
            return False

        # wait for 3 to 5 sec for configuration sync from control nodes to the
        # agents.
        time.sleep(5)

        # wait till VM's are up.
        VM22_fixture.wait_till_vm_is_up()
        VM31_fixture.wait_till_vm_is_up()
        VM41_fixture.wait_till_vm_is_up()

        # verify vm config gets downloaded to the agents.
        if ((VM22_fixture.verify_vm_in_agent()) and
                (VM31_fixture.verify_vm_in_agent()) and
                (VM41_fixture.verify_vm_in_agent())):
            self.logger.info("VM verification on the agent PASSED")
        else:
            self.logger.error("VM verification on the agent FAILED")
            return False

        # check ping success between the two VM's
        assert config_topo['project1']['vm']['VM11'].ping_with_certainty(
            VM22_fixture.vm_ip, expectation=True)
        assert VM31_fixture.ping_with_certainty(
            VM41_fixture.vm_ip,
            expectation=True)
        assert VM41_fixture.ping_with_certainty(
            VM31_fixture.vm_ip,
            expectation=True)

        # verification routines.
        if ((VN3_fixture.verify_on_setup()) and
                (VN4_fixture.verify_on_setup()) and
                (VM22_fixture.verify_on_setup()) and
                (VM31_fixture.verify_on_setup()) and
                (VM41_fixture.verify_on_setup())):
            self.logger.info(
                "All verifications passed after controllers came up in headless agent mode")
        else:
            self.logger.error(
                "Verifications FAILED after controllers came up in headless agent mode")
            return False

        return True
Example #11
    def test_lbaas_health_monitor(self):
        '''Create LB, LISTENER, POOL and MEMBER.
           Create a FIP and associate it to the VIP, and create a VM in the FIP network.
           Verify: the pool, member and VIP get created; after VIP creation a netns is
           created on the compute node and the haproxy process starts, fail otherwise.
           Verify the different LB methods.
        '''
        result = True
        pool_members = {}
        members = []

        fip_fix = self.useFixture(
            VNFixture(connections=self.connections, router_external=True))
        client_vm1_fixture = self.create_vm(fip_fix,
                                            flavor='contrail_flavor_small',
                                            image_name='ubuntu')

        vn_vm_fix = self.create_vn_and_its_vms(no_of_vm=3)

        vn_vip_fixture = vn_vm_fix[0]
        lb_pool_servers = vn_vm_fix[1]

        assert client_vm1_fixture.wait_till_vm_is_up()
        for VMs in lb_pool_servers:
            members.append(VMs.vm_ip)

        pool_members.update({'address': members})

        pool_name = get_random_name('mypool')
        lb_method = 'ROUND_ROBIN'
        protocol = 'HTTP'
        protocol_port = 80
        vip_name = get_random_name('myvip')
        listener_name = get_random_name('RR')

        self.logger.info("Verify Round Robin Method")
        rr_listener = self.create_lbaas(vip_name,
                                        vn_vip_fixture.uuid,
                                        pool_name=pool_name,
                                        pool_algorithm=lb_method,
                                        pool_protocol=protocol,
                                        pool_port=HTTP_PORT,
                                        members=pool_members,
                                        listener_name=listener_name,
                                        fip_net_id=fip_fix.uuid,
                                        vip_port=HTTP_PORT,
                                        vip_protocol='HTTP',
                                        hm_delay=5,
                                        hm_timeout=5,
                                        hm_max_retries=5,
                                        hm_probe_type=HTTP_PROBE)

        rr_listener.verify_on_setup()
        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers, rr_listener.fip_ip),\
            "Verify LB Method failed for ROUND ROBIN"

        self.logger.info(
            "Verify after stopping the webserver on one of the servers")
        lb_pool_servers[0].stop_webserver()
        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers[1:], rr_listener.fip_ip),\
            "Verify LB Method failed for ROUND ROBIN"

        self.logger.info(
            "Verify after adding a few more members, without starting the webserver on them"
        )
        for no_of_vm in range(3):
            lb_pool_servers.append(
                self.create_vm(vn_vip_fixture,
                               flavor='contrail_flavor_small',
                               image_name='ubuntu'))
            lb_pool_servers[-1].wait_till_vm_is_up()
            ##lb_pool_servers[-1].start_webserver(listen_port=80)
            rr_listener.create_member(address=lb_pool_servers[-1].vm_ip)

        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers[1:-3], rr_listener.fip_ip),\
            "Verify LB Method failed for ROUND ROBIN"
Example #12
          if frame.f_code.co_name.startswith('verify_'):
              print "-" * indent[0] + "> call function", frame.f_code.co_name
      elif event == "return":
#          if frame.f_code.co_name.startswith('verify_'):
#              print "<" + "-" * indent[0], "exit function", frame.f_code.co_name, frame.f_code.co_names
          indent[0] -= 2
      return tracefunc

if __name__ == "__main__":
    import sys
    from vn_test import VNFixture
    from vm_test import VMFixture
#    sys.settrace(tracefunc)
#    obj = LBaasFixture(api_type='neutron', name='LB', connections=setup_test_infra(), network_id='4b39a2bd-4528-40e8-b848-28084e59c944', members={'vms': ['a72ad607-f1ca-44f2-b31e-e825a3f2d408'], 'address': ['192.168.1.10']}, vip_net_id='4b39a2bd-4528-40e8-b848-28084e59c944', protocol='TCP', port='22', healthmonitors=[{'delay':5, 'timeout':5, 'max_retries':5, 'probe_type':'PING'}])
    conn = setup_test_infra()
    vnfix = VNFixture(connections=conn)
    vnfix.setUp()
    vip_fix = VNFixture(connections=conn)
    vip_fix.setUp()
    fip_fix = VNFixture(connections=conn, router_external=True)
    fip_fix.setUp()
    subnet = vnfix.get_cidrs()[0]
    vm_fix = VMFixture(connections=conn, vn_obj=vnfix.obj)
    vm_fix.setUp()
    obj = LBaasFixture(api_type='neutron', name='LB', connections=conn, network_id=vnfix.uuid,
                       members={'address': [get_random_ip(subnet)], 'vms': [vm_fix.vm_id]},
                       vip_net_id=vip_fix.uuid, fip_net_id=fip_fix.uuid, protocol='TCP', port='22',
                       healthmonitors=[{'delay':5, 'timeout':5, 'max_retries':5, 'probe_type':'PING'}],
                       custom_attr={'max_conn': 100, 'max_sess_rate': 20, 'server_timeout': 50000, 'rate_limit_sessions': 10, 'http_server_close': "True"})
    obj.setUp()
#    obj = LBaasFixture(api_type='neutron', uuid='58e5fb2c-ec47-4eb8-b4bf-9c66b0473f78', connections=setup_test_infra())
Example #13
    def test_policy_protocol_summary(self):
        ''' Test to validate that a policy created with multiple rules can be summarized by protocol.
        '''
        proj_name = self.inputs.project_name
        vn1_name = 'vn40'
        vn1_subnets = ['10.1.1.0/24']
        policy1_name = 'policy1'
        policy2_name = 'policy2'

        rules2 = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn1_name,
                'dest_network': vn1_name,
            },
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn1_name,
                'dest_network': vn1_name,
            },
        ]
        rules1 = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn1_name,
                'dest_network': vn1_name,
            },
        ]
        policy1_fixture = self.useFixture(
            PolicyFixture(policy_name=policy1_name,
                          rules_list=rules1,
                          inputs=self.inputs,
                          connections=self.connections))
        policy2_fixture = self.useFixture(
            PolicyFixture(policy_name=policy2_name,
                          rules_list=rules2,
                          inputs=self.inputs,
                          connections=self.connections))

        vn1_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_name=vn1_name,
                      inputs=self.inputs,
                      subnets=vn1_subnets,
                      policy_objs=[policy1_fixture.policy_obj]))
        assert vn1_fixture.verify_on_setup()

        vn1_vm1_name = 'vm1'
        vm1_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn1_fixture.obj,
                      vm_name=vn1_vm1_name))
        assert vm1_fixture.verify_on_setup()

        inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip]
        vn_fq_name = inspect_h.get_vna_vn(domain='default-domain',
                                          project=proj_name,
                                          vn_name=vn1_name)['name']

        vna_acl1 = inspect_h.get_vna_acl_by_vn(vn_fq_name)

        policy1_fixture.verify_policy_in_api_server()

        if vn1_fixture.policy_objs:
            policy_fq_names = [
                self.quantum_h.get_policy_fq_name(x)
                for x in vn1_fixture.policy_objs
            ]

        policy_fq_name2 = self.quantum_h.get_policy_fq_name(
            policy2_fixture.policy_obj)
        policy_fq_names.append(policy_fq_name2)
        vn1_fixture.bind_policies(policy_fq_names, vn1_fixture.vn_id)

        vna_acl2 = inspect_h.get_vna_acl_by_vn(vn_fq_name)
        out = policy_test_utils.compare_args('policy_rules',
                                             vna_acl1['entries'],
                                             vna_acl2['entries'])

        if out:
            self.logger.info(
                "policy rules do not match: expected %s, actual %s"
                % (vna_acl1['entries'], vna_acl2['entries']))
            self.assertIsNone(out, "policy compare failed")

        return True
Example #14
 def get_ip_fabric_vn_fixture(self):
     fabric_vn = self.vnc_h.virtual_network_read(
         fq_name=['default-domain', 'default-project', 'ip-fabric'])
     fabric_vn = VNFixture(self.connections, uuid=fabric_vn.uuid)
     fabric_vn.read()
     return fabric_vn
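A minimal usage sketch (hypothetical test body; assumes VMFixture, get_random_name and self.useFixture are available as in the other examples, and that fabric_vn.obj is populated by the read() above):

 def test_vm_on_ip_fabric(self):
     # Launch a VM directly on the ip-fabric network and wait for it to come up
     fabric_vn = self.get_ip_fabric_vn_fixture()
     vm_fixture = self.useFixture(VMFixture(connections=self.connections,
                                            vn_obj=fabric_vn.obj,
                                            vm_name=get_random_name('fabric-vm')))
     assert vm_fixture.wait_till_vm_is_up()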
Example #15
            print "-" * indent[0] + "> call function", frame.f_code.co_name
    elif event == "return":
        #          if frame.f_code.co_name.startswith('verify_'):
        #              print "<" + "-" * indent[0], "exit function", frame.f_code.co_name, frame.f_code.co_names
        indent[0] -= 2
    return tracefunc


if __name__ == "__main__":
    import sys
    from vn_test import VNFixture
    from vm_test import VMFixture
    #    sys.settrace(tracefunc)
    #    obj = LBaasFixture(api_type='neutron', name='LB', connections=setup_test_infra(), network_id='4b39a2bd-4528-40e8-b848-28084e59c944', members={'vms': ['a72ad607-f1ca-44f2-b31e-e825a3f2d408'], 'address': ['192.168.1.10']}, vip_net_id='4b39a2bd-4528-40e8-b848-28084e59c944', protocol='TCP', port='22', healthmonitors=[{'delay':5, 'timeout':5, 'max_retries':5, 'probe_type':'PING'}])
    conn = setup_test_infra()
    vnfix = VNFixture(connections=conn)
    vnfix.setUp()
    vip_fix = VNFixture(connections=conn)
    vip_fix.setUp()
    fip_fix = VNFixture(connections=conn, router_external=True)
    fip_fix.setUp()
    subnet = vnfix.get_cidrs()[0]
    vm_fix = VMFixture(connections=conn, vn_obj=vnfix.obj)
    vm_fix.setUp()
    obj = LBaasFixture(api_type='neutron',
                       name='LB',
                       connections=conn,
                       network_id=vnfix.uuid,
                       members={
                           'address': [get_random_ip(subnet)],
                           'vms': [vm_fix.vm_id]
Example #16
    def config_basic(self, check_dm):
        # MX config using Device Manager
        # both the dm_mx and use_device_manager knobs are required for DM
        # this check is present in is_test_applicable
        if check_dm:
            if self.inputs.use_devicemanager_for_md5:
                for i in range(len(list(self.inputs.dm_mx.values()))):
                    router_params = list(self.inputs.dm_mx.values())[i]
                    if router_params['model'] == 'mx':
                        self.phy_router_fixture = self.useFixture(
                            PhysicalRouterFixture(
                                router_params['name'],
                                router_params['control_ip'],
                                model=router_params['model'],
                                vendor=router_params['vendor'],
                                asn=router_params['asn'],
                                ssh_username=router_params['ssh_username'],
                                ssh_password=router_params['ssh_password'],
                                mgmt_ip=router_params['control_ip'],
                                connections=self.connections,
                                dm_managed=True))
                        physical_dev = self.vnc_lib.physical_router_read(
                            id=self.phy_router_fixture.phy_device.uuid)
                        physical_dev.set_physical_router_management_ip(
                            router_params['mgmt_ip'])
                        physical_dev._pending_field_updates
                        self.vnc_lib.physical_router_update(physical_dev)
        else:
            if self.inputs.ext_routers:
                as4_ext_router_dict = dict(self.inputs.as4_ext_routers)
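                # Push an md5_tests BGP group to every non-AS4 MX via Netconf, peering with all control nodes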
                for i in range(
                        len(list(self.inputs.physical_routers_data.values()))):
                    router_params = list(
                        self.inputs.physical_routers_data.values())[i]
                    if router_params['name'] in as4_ext_router_dict:
                        continue
                    if router_params['model'] == 'mx':
                        cmd = []
                        cmd.append(
                            'set groups md5_tests routing-options router-id %s'
                            % router_params['mgmt_ip'])
                        cmd.append(
                            'set groups md5_tests routing-options route-distinguisher-id %s'
                            % router_params['mgmt_ip'])
                        cmd.append(
                            'set groups md5_tests routing-options autonomous-system %s'
                            % router_params['asn'])
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests type internal'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests multihop'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests local-address %s'
                            % router_params['mgmt_ip'])
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests hold-time 90'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests keep all'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests family inet-vpn unicast'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests family inet6-vpn unicast'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests family evpn signaling'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests family route-target'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests local-as %s'
                            % router_params['asn'])
                        for node in self.inputs.bgp_control_ips:
                            cmd.append(
                                'set groups md5_tests protocols bgp group md5_tests neighbor %s peer-as %s'
                                % (node, router_params['asn']))
                        cmd.append('set apply-groups md5_tests')
                        mx_handle = NetconfConnection(
                            host=router_params['mgmt_ip'])
                        mx_handle.connect()
                        cli_output = mx_handle.config(stmts=cmd, timeout=120)

        # IPv6 is not supported for vcenter, so skip this config
        if self.inputs.orchestrator != 'vcenter':
            vn61_name = "test_vnv6sr"
            vn61_net = ['2001::101:0/120']
            #vn1_fixture = self.config_vn(vn1_name, vn1_net)
            vn61_fixture = self.useFixture(
                VNFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_name=vn61_name,
                          inputs=self.inputs,
                          subnets=vn61_net))
            vn62_name = "test_vnv6dn"
            vn62_net = ['2001::201:0/120']
            #vn2_fixture = self.config_vn(vn2_name, vn2_net)
            vn62_fixture = self.useFixture(
                VNFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_name=vn62_name,
                          inputs=self.inputs,
                          subnets=vn62_net))
            vm61_name = 'source_vm'
            vm62_name = 'dest_vm'
            #vm1_fixture = self.config_vm(vn1_fixture, vm1_name)
            #vm2_fixture = self.config_vm(vn2_fixture, vm2_name)
            vm61_fixture = self.useFixture(
                VMFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_obj=vn61_fixture.obj,
                          vm_name=vm61_name,
                          node_name=None,
                          image_name='cirros',
                          flavor='m1.tiny'))

            vm62_fixture = self.useFixture(
                VMFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_obj=vn62_fixture.obj,
                          vm_name=vm62_name,
                          node_name=None,
                          image_name='cirros',
                          flavor='m1.tiny'))
            vm61_fixture.wait_till_vm_is_up()
            vm62_fixture.wait_till_vm_is_up()

            rule = [
                {
                    'direction': '<>',
                    'protocol': 'any',
                    'source_network': vn61_name,
                    'src_ports': [0, -1],
                    'dest_network': vn62_name,
                    'dst_ports': [0, -1],
                    'simple_action': 'pass',
                },
            ]
            policy_name = 'allow_all'
            policy_fixture = self.config_policy(policy_name, rule)

            vn61_policy_fix = self.attach_policy_to_vn(policy_fixture,
                                                       vn61_fixture)
            vn62_policy_fix = self.attach_policy_to_vn(policy_fixture,
                                                       vn62_fixture)

        vn1 = "vn1"
        vn2 = "vn2"
        vn_s = {'vn1': '10.1.1.0/24', 'vn2': ['20.1.1.0/24']}
        rules = [
            {
                'direction': '<>',
                'protocol': 'any',
                'source_network': vn1,
                'src_ports': [0, -1],
                'dest_network': vn2,
                'dst_ports': [0, -1],
                'simple_action': 'pass',
            },
        ]
        image_name = 'cirros'
        if self.inputs.orchestrator == 'vcenter':
            image_name = 'vcenter_tiny_vm'
        self.logger.info("Configure the policy with allow any")
        self.multi_vn_fixture = self.useFixture(
            MultipleVNFixture(connections=self.connections,
                              inputs=self.inputs,
                              subnet_count=2,
                              vn_name_net=vn_s,
                              project_name=self.inputs.project_name))
        vns = self.multi_vn_fixture.get_all_fixture_obj()
        (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0]
        (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1]
        self.config_policy_and_attach_to_vn(rules)

        self.multi_vm_fixture = self.useFixture(
            MultipleVMFixture(project_name=self.inputs.project_name,
                              connections=self.connections,
                              vm_count_per_vn=1,
                              vn_objs=vns,
                              image_name=image_name,
                              flavor='m1.tiny'))
        vms = self.multi_vm_fixture.get_all_fixture()
        (self.vm1_name, self.vm1_fix) = vms[0]
        (self.vm2_name, self.vm2_fix) = vms[1]
Example #17
 def config_vn(self, vn_name, vn_net, **kwargs):
     vn_fixture = self.useFixture(VNFixture(
         project_name=self.inputs.project_name, connections=self.connections,
         vn_name=vn_name, inputs=self.inputs, subnets=vn_net, **kwargs))
     assert vn_fixture.verify_on_setup()
     return vn_fixture
Example #18
    def test_update_attr_verify_haproxy_conf(self):
        '''Create LB, LISTENER, POOL and MEMBER.
           Create a FIP and associate it to the VIP, and create a VM in the FIP network.
           Verify: the pool, member and VIP get created; after VIP creation a netns is
           created on the compute node and the haproxy process starts, fail otherwise.
           Verify the different LB methods.
        '''
        result = True
        pool_members = {}
        members = []

        fip_fix = self.useFixture(
            VNFixture(connections=self.connections, router_external=True))
        client_vm1_fixture = self.create_vm(fip_fix,
                                            flavor='contrail_flavor_small',
                                            image_name='ubuntu')

        vn_vm_fix = self.create_vn_and_its_vms(no_of_vm=3)

        vn_vip_fixture = vn_vm_fix[0]
        lb_pool_servers = vn_vm_fix[1]

        assert client_vm1_fixture.wait_till_vm_is_up()
        for VMs in lb_pool_servers:
            members.append(VMs.vm_ip)

        pool_members.update({'address': members})

        pool_name = get_random_name('mypool')
        lb_method = 'ROUND_ROBIN'
        protocol = 'HTTP'
        protocol_port = 80
        vip_name = get_random_name('myvip')
        listener_name = get_random_name('RR')

        self.logger.info("Verify Round Robin Method")
        rr_listener = self.create_lbaas(vip_name,
                                        vn_vip_fixture.uuid,
                                        pool_name=pool_name,
                                        pool_algorithm=lb_method,
                                        pool_protocol=protocol,
                                        pool_port=HTTP_PORT,
                                        members=pool_members,
                                        listener_name=listener_name,
                                        fip_net_id=fip_fix.uuid,
                                        vip_port=HTTP_PORT,
                                        vip_protocol='HTTP',
                                        hm_delay=5,
                                        hm_timeout=5,
                                        hm_max_retries=5,
                                        hm_probe_type=HTTP_PROBE)

        rr_listener.verify_on_setup()
        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers, rr_listener.fip_ip),\
            "Verify LB Method failed for ROUND ROBIN"

        self.logger.info(
            "Verify haproxy config file after modifying the delay attribute")
        rr_listener.update_hmon(delay=5)
        assert rr_listener.verify_haproxy_configs_on_setup(),\
            "Verify haproxy config file after modifying the delay attribute failed"

        self.logger.info(
            "Verify haproxy config file after modifying the max_retries attribute"
        )
        rr_listener.update_hmon(max_retries=6)
        assert rr_listener.verify_haproxy_configs_on_setup(),\
            "Verify haproxy config file after modifying the max_retries failed"

        self.logger.info(
            "Verify haproxy config file after modifying the timeout attribute"
        )
        rr_listener.update_hmon(timeout=7)
        assert rr_listener.verify_haproxy_configs_on_setup(),\
            "Verify haproxy config file failed, after modifying the timeout attribute"

        rr_listener.update_member(rr_listener.member_ids[0], weight=5)
        assert rr_listener.verify_haproxy_configs_on_setup(),\
            "Verify haproxy config file failed, after modifying the member weight attribute"
Example #19
 def get_ip_fabric_vn_fixture(self):
     fabric_vn =  self.vnc_h.virtual_network_read(fq_name=['default-domain', 'default-project', 'ip-fabric'])
     fabric_vn = VNFixture(self.connections, uuid=fabric_vn.uuid)
     fabric_vn.read()
     return fabric_vn
Example #20
    def test_policy_RT_import_export(self):
        ''' Test to validate the RTs imported/exported in the control node.
        Verification is implemented in vn_fixture to compare the fixture route data with the data in the control node.
        Verification expects the test code to compile the policy-allowed VN info, which is used to validate data in the CN.
        The test calls get_policy_peer_vns [which internally calls get_allowed_peer_vns_by_policy for each VN]. This data is
        fed to verify_vn_route_target, which internally calls get_rt_info to build the expected list. This is compared
        against the actual data by calling cn_ref.get_cn_routing_instance and getting the RT info. '''

        vn1_name = 'vn40'
        vn1_subnets = ['40.1.1.0/24']
        vn2_name = 'vn41'
        vn2_subnets = ['41.1.1.0/24']
        vn3_name = 'vn42'
        vn3_subnets = ['42.1.1.0/24']
        policy1_name = 'policy1'
        policy2_name = 'policy2'
        policy3_name = 'policy3'
        policy4_name = 'policy4'
        # Cover all combinations of rules for this test:
        # 1. both VNs allow each other 2. one VN allows the peer, while the other denies 3. the policy rule doesn't list the local VN
        # 4. allow or deny "any" VN is not handled now.
        rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn1_name,
                'dest_network': vn2_name
            },
            {
                'direction': '<>',
                'simple_action': 'deny',
                'protocol': 'icmp',
                'source_network': vn1_name,
                'dest_network': vn3_name
            },
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn2_name,
                'dest_network': vn3_name
            },
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': 'any',
                'dest_network': vn3_name
            },
        ]
        rev_rules2 = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn1_name,
                'dest_network': vn3_name,
            },
        ]

        rev_rules1 = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn2_name,
                'dest_network': vn1_name,
            },
        ]
        rules2 = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn3_name,
                'dest_network': vn1_name,
            },
        ]

        policy1_fixture = self.useFixture(
            PolicyFixture(policy_name=policy1_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))
        policy2_fixture = self.useFixture(
            PolicyFixture(policy_name=policy2_name,
                          rules_list=rev_rules1,
                          inputs=self.inputs,
                          connections=self.connections))
        policy3_fixture = self.useFixture(
            PolicyFixture(policy_name=policy3_name,
                          rules_list=rules2,
                          inputs=self.inputs,
                          connections=self.connections))
        policy4_fixture = self.useFixture(
            PolicyFixture(policy_name=policy4_name,
                          rules_list=rev_rules2,
                          inputs=self.inputs,
                          connections=self.connections))
        vn1_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_name=vn1_name,
                      inputs=self.inputs,
                      subnets=vn1_subnets))
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_name=vn2_name,
                      inputs=self.inputs,
                      subnets=vn2_subnets))
        assert vn2_fixture.verify_on_setup()
        vn3_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_name=vn3_name,
                      inputs=self.inputs,
                      subnets=vn3_subnets))
        assert vn3_fixture.verify_on_setup()

        self.logger.info("TEST STEP: End of setup")
        vn_fixture = {
            vn1_name: vn1_fixture,
            vn2_name: vn2_fixture,
            vn3_name: vn3_fixture
        }
        vnet_list = [vn1_name, vn2_name, vn3_name]

        self.logger.info("TEST STEP: Route verification for VN after setup")
        actual_peer_vns_by_policy = policy_test_utils.get_policy_peer_vns(
            self, vnet_list, vn_fixture)
        for vn in vnet_list:
            err_msg_on_fail = "route verification failed for vn %s" % (vn)
            assert (vn_fixture[vn].verify_vn_route_target(
                policy_peer_vns=actual_peer_vns_by_policy[vn])
                    ), err_msg_on_fail

        self.logger.info(
            "TEST STEP: Bind policys to VN and verify import and export RT values"
        )
        policy_fq_name1 = [policy1_fixture.policy_fq_name]
        policy_fq_name2 = [policy2_fixture.policy_fq_name]
        vn1_fixture.bind_policies(policy_fq_name1, vn1_fixture.vn_id)
        vn1_pol = vn1_fixture.get_policy_attached_to_vn()
        vn2_fixture.bind_policies(policy_fq_name2, vn2_fixture.vn_id)
        vn2_pol = vn2_fixture.get_policy_attached_to_vn()
        vn3_fixture.bind_policies([policy3_fixture.policy_fq_name],
                                  vn3_fixture.vn_id)
        vn3_pol = vn3_fixture.get_policy_attached_to_vn()
        self.logger.info("vn: %s policys: %s" % (vn1_name, vn1_pol))
        self.logger.info("vn: %s policys: %s" % (vn2_name, vn2_pol))
        self.logger.info("vn: %s policys: %s" % (vn3_name, vn3_pol))

        actual_peer_vns_by_policy = policy_test_utils.get_policy_peer_vns(
            self, vnet_list, vn_fixture)
        for vn in vnet_list:
            err_msg_on_fail = "route verification failed for vn %s" % (vn)
            out = vn_fixture[vn].verify_vn_route_target(
                policy_peer_vns=actual_peer_vns_by_policy[vn])
            # The control node may not have picked up the config changes right away,
            # depending on system load (one scenario being multiple tests running in
            # parallel), so wait and retry one more time if the result is not as expected.
            if not out:
                self.logger.info("wait and verify VN RT again...")
                time.sleep(5)
                out = vn_fixture[vn].verify_vn_route_target(
                    policy_peer_vns=actual_peer_vns_by_policy[vn])
            assert (out), err_msg_on_fail

        self.logger.info(
            "TEST STEP: Bind one more policy to VN and verify RT import values updated"
        )
        vn1_fixture.bind_policies(
            [policy1_fixture.policy_fq_name, policy4_fixture.policy_fq_name],
            vn1_fixture.vn_id)

        actual_peer_vns_by_policy = policy_test_utils.get_policy_peer_vns(
            self, vnet_list, vn_fixture)
        for vn in vnet_list:
            err_msg_on_fail = "route verification failed for vn %s" % (vn)
            assert (vn_fixture[vn].verify_vn_route_target(
                policy_peer_vns=actual_peer_vns_by_policy[vn])
                    ), err_msg_on_fail

        self.logger.info(
            "TEST STEP: Unbind policy which was added earlier and verify RT import/export values are updated accordingly"
        )
        vn1_fixture.unbind_policies(vn1_fixture.vn_id,
                                    [policy4_fixture.policy_fq_name])
        vn3_fixture.unbind_policies(vn3_fixture.vn_id,
                                    [policy3_fixture.policy_fq_name])

        actual_peer_vns_by_policy = policy_test_utils.get_policy_peer_vns(
            self, vnet_list, vn_fixture)
        for vn in vnet_list:
            err_msg_on_fail = "route verification failed for vn %s" % (vn)
            assert (vn_fixture[vn].verify_vn_route_target(
                policy_peer_vns=actual_peer_vns_by_policy[vn])
                    ), err_msg_on_fail
        return True
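
The wait-and-retry step above is worth calling out: under load (or with tests running in parallel) the control node can lag behind config changes, so a failed first verification is retried once after a short pause instead of failing immediately. A standalone sketch of that pattern (the helper name is illustrative and not part of the test framework):

import time

def retry_once(check, delay=5):
    """Run check(); on failure, wait `delay` seconds and try exactly one more time.

    A single bounded retry absorbs control-node propagation delay without
    masking genuine verification failures.
    """
    if check():
        return True
    time.sleep(delay)
    return check()

With such a helper, the verification loop could be written as
assert retry_once(lambda: vn_fixture[vn].verify_vn_route_target(policy_peer_vns=actual_peer_vns_by_policy[vn])), err_msg_on_fail.
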
Example #21
    def setup_common_objects(self, inputs, connections):
        self.inputs = inputs
        self.connections = connections
        self.base_rel = get_release()
        (self.vn11_name, self.vn11_subnets) = ("vn11", ["192.168.1.0/24"])
        (self.vn22_name, self.vn22_subnets) = ("vn22", ["192.168.2.0/24"])
        (self.fip_vn_name, self.fip_vn_subnets) = ("fip_vn", ['200.1.1.0/24'])
        (self.vn11_vm1_name, self.vn11_vm2_name, self.vn11_vm3_name,
         self.vn11_vm4_name) = ('vn11_vm1', 'vn11_vm2', 'vn11_vm3', 'vn11_vm4')
        self.vn22_vm1_name = 'vn22_vm1'
        self.vn22_vm2_name = 'vn22_vm2'
        self.fvn_vm1_name = 'fvn_vm1'

        # Configure 3 VNs, 2 of them vn11, vn22 and 1 fip_vn
        self.vn11_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      inputs=self.inputs,
                      vn_name=self.vn11_name,
                      subnets=self.vn11_subnets))
        assert self.vn11_fixture.verify_on_setup()
        self.vn22_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      inputs=self.inputs,
                      vn_name=self.vn22_name,
                      subnets=self.vn22_subnets))
        self.fvn_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      inputs=self.inputs,
                      vn_name=self.fip_vn_name,
                      subnets=self.fip_vn_subnets))

        # Configure 4 VMs in VN11, 2 VM in VN22, and 1 VM in FVN
        self.vn11_vm1_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.vn11_fixture.obj,
                      vm_name=self.vn11_vm1_name,
                      image_name='ubuntu'))
        self.vn11_vm2_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.vn11_fixture.obj,
                      vm_name=self.vn11_vm2_name,
                      image_name='ubuntu'))
        self.vn11_vm3_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.vn11_fixture.obj,
                      vm_name=self.vn11_vm3_name,
                      image_name='ubuntu'))
        self.vn11_vm4_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.vn11_fixture.obj,
                      vm_name=self.vn11_vm4_name,
                      image_name='ubuntu'))
        self.vn22_vm1_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.vn22_fixture.obj,
                      vm_name=self.vn22_vm1_name,
                      image_name='ubuntu'))
        self.vn22_vm2_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.vn22_fixture.obj,
                      vm_name=self.vn22_vm2_name,
                      image_name='ubuntu'))
        self.fvn_vm1_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.fvn_fixture.obj,
                      vm_name=self.fvn_vm1_name,
                      image_name='ubuntu'))

        # Adding Policy between vn11 and vn22  ######
        assert self.vn11_fixture.verify_on_setup()
        assert self.vn22_fixture.verify_on_setup()
        rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
        ]
        policy_name = 'p1'
        self.policy_fixture = self.useFixture(
            PolicyFixture(policy_name=policy_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))

        policy_fq_name = [self.policy_fixture.policy_fq_name]
        self.vn11_fixture.bind_policies(policy_fq_name,
                                        self.vn11_fixture.vn_id)
        self.addCleanup(self.vn11_fixture.unbind_policies,
                        self.vn11_fixture.vn_id,
                        [self.policy_fixture.policy_fq_name])
        self.vn22_fixture.bind_policies(policy_fq_name,
                                        self.vn22_fixture.vn_id)
        self.addCleanup(self.vn22_fixture.unbind_policies,
                        self.vn22_fixture.vn_id,
                        [self.policy_fixture.policy_fq_name])

        # Adding Floating ip ###

        assert self.fvn_fixture.verify_on_setup()

        fip_pool_name = 'some-pool1'
        self.fip_fixture = self.useFixture(
            FloatingIPFixture(project_name=self.inputs.project_name,
                              inputs=self.inputs,
                              connections=self.connections,
                              pool_name=fip_pool_name,
                              vn_id=self.fvn_fixture.vn_id))

        self.vn11_vm1_fixture.verify_on_setup()
        self.vn11_vm1_fixture.wait_till_vm_is_up()
        self.fip_id = self.fip_fixture.create_and_assoc_fip(
            self.fvn_fixture.vn_id, self.vn11_vm1_fixture.vm_id)
        self.addCleanup(self.fip_fixture.disassoc_and_delete_fip, self.fip_id)
        assert self.fip_fixture.verify_fip(self.fip_id, self.vn11_vm1_fixture,
                                           self.fvn_fixture)

        self.vn22_vm1_fixture.verify_on_setup()
        self.vn22_vm1_fixture.wait_till_vm_is_up()
        self.fip_id1 = self.fip_fixture.create_and_assoc_fip(
            self.fvn_fixture.vn_id, self.vn22_vm1_fixture.vm_id)
        assert self.fip_fixture.verify_fip(self.fip_id1, self.vn22_vm1_fixture,
                                           self.fvn_fixture)
        self.addCleanup(self.fip_fixture.disassoc_and_delete_fip, self.fip_id1)

        # Adding  the service chaining resources for firewall  ###
        si_count = 1
        svc_scaling = False
        max_inst = 1
        svc_mode = 'in-network'
        flavor = 'm1.medium'
        self.vn1_fq_name = "default-domain:" + self.inputs.project_name + ":in_network_vn1"
        self.vn1_name = "in_network_vn1"
        self.vn1_subnets = ['10.1.1.0/24']
        self.vm1_name = 'in_network_vm1'
        self.vn2_fq_name = "default-domain:" + self.inputs.project_name + ":in_network_vn2"
        self.vn2_name = "in_network_vn2"
        self.vn2_subnets = ['20.2.2.0/24']
        self.vm2_name = 'in_network_vm2'

        self.action_list = []
        self.if_list = [['management', False], ['left', True], ['right', True]]
        self.st_name = 'in_net_svc_template_1'
        si_prefix = 'in_net_svc_instance_'
        self.policy_name = 'policy_in_network'

        self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
        self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
        self.vm1_fixture = self.config_vm(vn_fix=self.vn1_fixture,
                                          vm_name=self.vm1_name)
        self.vm2_fixture = self.config_vm(vn_fix=self.vn2_fixture,
                                          vm_name=self.vm2_name)
        svc_chain_info = self.config_svc_chain(
            left_vn_fixture=self.vn1_fixture,
            right_vn_fixture=self.vn2_fixture,
            service_mode=svc_mode,
            max_inst=max_inst,
            left_vm_fixture=self.vm1_fixture,
            right_vm_fixture=self.vm2_fixture)
        self.st_fixture = svc_chain_info['st_fixture']
        self.si_fixture = svc_chain_info['si_fixture']
        self.policy_fixture = svc_chain_info['policy_fixture']

        # non-admin tenant config
        result = True
        msg = []
        self.topo_obj = sdn_topo_with_multi_project()
        self.setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, self.topo_obj))
        out = self.setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            self.topo_objs, self.config_topo, vm_fip_info = out['data']
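
The floating-IP handling above repeats the same create/associate/verify/cleanup sequence for two VMs. A hedged sketch of that sequence pulled into one helper (the method name is illustrative; the fixture calls mirror the ones already used in this setup):

    def _assoc_and_verify_fip(self, fip_fixture, fvn_fixture, vm_fixture):
        # Wait for the VM, attach a floating IP from the FIP VN, register
        # cleanup, and verify the association end to end.
        vm_fixture.verify_on_setup()
        vm_fixture.wait_till_vm_is_up()
        fip_id = fip_fixture.create_and_assoc_fip(fvn_fixture.vn_id,
                                                  vm_fixture.vm_id)
        self.addCleanup(fip_fixture.disassoc_and_delete_fip, fip_id)
        assert fip_fixture.verify_fip(fip_id, vm_fixture, fvn_fixture)
        return fip_id
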
Example #22
    def test_lbaas_with_sg_vip(self):
        '''Create LB, LISTENER, POOL and MEMBER
           Create a FIP and associate it to the VIP, create a VM in the FIP network
           Verify: pool, member and vip get created
           After vip creation, a netns is created on the compute node and the haproxy
           process starts; fail otherwise
           Verify the different LB methods
        '''
        result = True
        pool_members = {}
        members = []

        fip_fix = self.useFixture(
            VNFixture(connections=self.connections, router_external=True))
        client_vm1_fixture = self.create_vm(fip_fix,
                                            flavor='contrail_flavor_small',
                                            image_name='ubuntu')

        vn_vm_fix = self.create_vn_and_its_vms(no_of_vm=3)

        vn_vip_fixture = vn_vm_fix[0]
        lb_pool_servers = vn_vm_fix[1]

        assert client_vm1_fixture.wait_till_vm_is_up()
        for VMs in lb_pool_servers:
            members.append(VMs.vm_ip)

        pool_members.update({'address': members})

        pool_name = get_random_name('mypool')
        lb_method = 'ROUND_ROBIN'
        protocol = 'HTTP'
        protocol_port = 80
        vip_name = get_random_name('myvip')
        listener_name = get_random_name('RR')

        self.logger.info("Verify Round Robin Method")
        rr_listener = self.create_lbaas(vip_name,
                                        vn_vip_fixture.uuid,
                                        pool_name=pool_name,
                                        pool_algorithm=lb_method,
                                        pool_protocol=protocol,
                                        pool_port=HTTP_PORT,
                                        members=pool_members,
                                        listener_name=listener_name,
                                        fip_net_id=fip_fix.uuid,
                                        vip_port=HTTP_PORT,
                                        vip_protocol='HTTP',
                                        hm_delay=5,
                                        hm_timeout=5,
                                        hm_max_retries=5,
                                        hm_probe_type=HTTP_PROBE)

        assert rr_listener.verify_on_setup(), \
            "Verify on setup failed after the new FIP was associated"
        assert client_vm1_fixture.ping_with_certainty(rr_listener.fip_ip)
        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers, rr_listener.fip_ip),\
            "Verify LB Method failed for ROUND ROBIN"

        self.logger.info(
            "Apply security group to allow only TCP and verify ping fails")
        default_sg = self.get_default_sg()
        vip_sg = self.create_sg()
        rr_listener.apply_sg_to_vip_vmi([vip_sg.get_uuid()])
        assert client_vm1_fixture.ping_with_certainty(rr_listener.fip_ip,
                                                      expectation=False)
        #assert not self.verify_lb_method(client_vm1_fixture, lb_pool_servers, rr_listener.fip_ip),\
        #"Expected LB verification to fail, because the flow from the netns to members has to fail "

        self.logger.info(
            "Apply security group to allow only TCP to the member VMs and verify the LB works"
        )
        for server in lb_pool_servers:
            server.add_security_group(vip_sg.get_uuid())
        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers, rr_listener.fip_ip),\
            "Verify LB Method failed for ROUND ROBIN"

        self.logger.info(
            "Remove the security group and apply the default and verify again")
        rr_listener.apply_sg_to_vip_vmi([default_sg.get_sg_id()])
        for server in lb_pool_servers:
            server.remove_security_group(vip_sg.get_uuid())
            server.add_security_group(default_sg.get_sg_id())
        assert client_vm1_fixture.ping_with_certainty(rr_listener.fip_ip)
        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers, rr_listener.fip_ip),\
            "Verify LB Method failed for ROUND ROBIN"
Example #23
    def test_check_vxlan_id_reuse(self):
        '''
            Create a VN X
            Create another VN Y and check that its vxlan id is the next number
            Delete the two VNs
            On creating a VN again, verify that the vxlan id of X is used
            (i.e. the vxlan id gets reused)
        '''
        vn1_name = get_random_name('vn')
        vn1_subnets = [get_random_cidr()]
        vn2_name = get_random_name('vn')
        vn2_subnets = [get_random_cidr()]

        # First VN
        vn1_obj = VNFixture(project_name=self.inputs.project_name,
            connections=self.connections,
            inputs=self.inputs,
            vn_name=vn1_name,
            subnets=vn1_subnets)
        vn1_obj.setUp()
        vxlan_id1 = vn1_obj.get_vxlan_id()

        # Second VN
        vn2_obj = VNFixture(project_name=self.inputs.project_name,
            connections=self.connections,
            inputs=self.inputs,
            vn_name=vn2_name,
            subnets=vn2_subnets)
        vn2_obj.setUp()
        vxlan_id2 = vn2_obj.get_vxlan_id()

        assert vxlan_id2 == (vxlan_id1+1), (
            "Vxlan ID allocation is not incremental, "
            "Two VNs were seen to have vxlan ids %s, %s" % (
                vxlan_id1, vxlan_id2))
        # Delete the vns
        vn1_obj.cleanUp()
        vn2_obj.cleanUp()

        vn3_fixture = self.create_vn()
        assert vn3_fixture.verify_on_setup(), "VNFixture verify failed!"
        new_vxlan_id = vn3_fixture.get_vxlan_id()
        assert new_vxlan_id == vxlan_id1, (
            "Vxlan ID reuse does not seem to happen, "
            "Expected : %s, Got : %s" % (vxlan_id1, new_vxlan_id))
        self.logger.info('Vxlan ids are reused as expected')
Example #25
 def test_vdns_ping_same_vn(self):
     '''
     Test: validate vDNS functionality. On VM launch, the agent should dynamically update DNS records on the DNS agent.
         1.  Create a vDNS server
         2.  Create an IPAM using the above vDNS data
         3.  Create a VN using the above IPAM and launch 2 VMs within it
         4.  Ping between these 2 VMs using the DNS name
         5.  Try to delete the vDNS server which has an IPAM back-reference [negative case]
         6.  Add a CNAME vDNS record for vm1-test and verify we are able to ping by the alias name
     Pass criteria: Steps 4, 5 and 6 should pass

     Maintainer: [email protected]
     '''
     vn1_ip = '10.10.10.0/24'
     vm1_name = get_random_name('vm1-test')
     vm2_name = get_random_name('vm2-test')
     vm_list = [vm1_name, vm2_name]
     vn_name = get_random_name('vn1-vdns')
     dns_server_name = get_random_name('vdns1')
     domain_name = 'juniper.net'
     cname_rec = 'vm1-test-alias'
     ttl = 100
     ipam_name = 'ipam1'
     rev_zone = vn1_ip.split('.')
     rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2]))
     rev_zone = rev_zone + '.in-addr.arpa'
     proj_fixt = self.useFixture(ProjectFixture(
         project_name=self.inputs.project_name, connections=self.connections))
     proj_connections = proj_fixt.get_project_connections()
     dns_data = VirtualDnsType(
         domain_name=domain_name, dynamic_records_from_client=True,
         default_ttl_seconds=ttl, record_order='random', reverse_resolution=True)
     # Create VDNS server object.
     vdns_fixt1 = self.useFixture(VdnsFixture(
         self.inputs, self.connections, vdns_name=dns_server_name, dns_data=dns_data))
     result, msg = vdns_fixt1.verify_on_setup()
     self.assertTrue(result, msg)
     dns_server = IpamDnsAddressType(
         virtual_dns_server_name=vdns_fixt1.vdns_fq_name)
     ipam_mgmt_obj = IpamType(
         ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server)
     # Associate VDNS with IPAM.
      ipam_fixt1 = self.useFixture(IPAMFixture(
          ipam_name, vdns_obj=vdns_fixt1.obj,
          connections=proj_connections, ipamtype=ipam_mgmt_obj))
     vn_fixt = self.useFixture(
         VNFixture(
             self.connections, self.inputs,
             vn_name=vn_name, subnets=[vn1_ip], ipam_fq_name=ipam_fixt1.fq_name))
     vm_fixture = {}
      # Launch VMs in the VN created above. On VM launch, the agent should
      # update the DNS 'A' and 'PTR' records; the following code verifies this.
      # We should also be able to ping using the VM name.
     for vm_name in vm_list:
         vn_quantum_obj = self.orch.get_vn_obj_if_present(
             vn_name=vn_fixt.vn_name, project_id=proj_fixt.uuid)
         vm_fixture[vm_name] = self.useFixture(
             VMFixture(project_name=self.inputs.project_name, connections=self.connections, vn_obj=vn_quantum_obj, vm_name=vm_name))
          assert vm_fixture[vm_name].verify_vm_launched(), ('VM %s does not '
              'seem to have launched correctly' % (vm_name))
          assert vm_fixture[vm_name].verify_on_setup(), ('VM %s verification '
              'failed' % (vm_name))
         assert vm_fixture[vm_name].wait_till_vm_is_up(), ('VM %s'
             ' failed to come up' % (vm_name))
         vm_ip = vm_fixture[vm_name].get_vm_ip_from_vm(
             vn_fq_name=vm_fixture[vm_name].vn_fq_name)
         vm_rev_ip = vm_ip.split('.')
         vm_rev_ip = '.'.join(
             (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0]))
         vm_rev_ip = vm_rev_ip + '.in-addr.arpa'
         msg = "Ping by using name %s is failed. Dns server should resolve VM name to IP" % (
             vm_name)
         self.assertTrue(vm_fixture[vm_name]
                         .ping_with_certainty(ip=vm_name), msg)
         # Frame the Expected DNS data for VM, one for 'A' record and
         # another 'PTR' record.
         rec_name = vm_name + "." + domain_name
         agent_inspect_h = self.agent_inspect[vm_fixture[vm_name].vm_node_ip]
         assigned_dns_ips = agent_inspect_h.get_vna_dns_server()
          vm_dns_exp_data = [
              {'rec_data': vm_ip, 'rec_type': 'A', 'rec_class': 'IN',
               'rec_ttl': str(ttl), 'rec_name': rec_name,
               'installed': 'yes', 'zone': domain_name},
              {'rec_data': rec_name, 'rec_type': 'PTR', 'rec_class': 'IN',
               'rec_ttl': str(ttl), 'rec_name': vm_rev_ip,
               'installed': 'yes', 'zone': rev_zone}]
         self.verify_vm_dns_data(vm_dns_exp_data, assigned_dns_ips[0])
         vm_dns_exp_data = []
      # Ping between the two VMs, which are in the same subnet, using the DNS name.
     self.assertTrue(vm_fixture[vm1_name]
                     .ping_with_certainty(ip=vm_list[1]))
      # Try to delete the vDNS server while the IPAM still holds a back-reference.
      self.logger.info(
          "Try deleting the VDNS entry %s which has a back-reference from the ipam.", dns_server_name)
     try:
         self.vnc_lib.virtual_DNS_delete(
             fq_name=vdns_fixt1.obj.get_fq_name())
         errmsg = "VDNS entry deleted which is not expected, when it has back refrence of ipam."
         self.logger.error(errmsg)
         assert False, errmsg
      except Exception as msg:
          self.logger.debug(msg)
          self.logger.info(
              "Deletion of the vDNS entry failed due to the ipam back-reference, as expected")
Example #26
 def test_policy_with_multi_vn_in_vm(self):
      ''' Test to validate policy action for a VM with vNICs in multiple VNs carrying different policies.
      Test flow: vm1 in vn1 and vn2; vm2 in vn3. Policy allows traffic from vn2 to vn3 and denies it from vn1 to vn3.
      The default route of vm1 points into vn1, which has no reachability to vn3 - verify traffic - should fail.
      Add a specific route to direct vn3 traffic through vn2 - verify traffic - should pass.
      '''
     vm1_name = 'vm_mine1'
     vm2_name = 'vm_mine2'
     vn1_name = 'vn221'
     vn1_subnets = ['11.1.1.0/24']
     vn2_name = 'vn222'
     vn2_subnets = ['22.1.1.0/24']
     vn3_gateway = '22.1.1.254'
     vn3_name = 'vn223'
     vn3_subnets = ['33.1.1.0/24']
     rules1 = [
         {
             'direction': '>',
             'simple_action': 'deny',
             'protocol': 'icmp',
             'src_ports': 'any',
             'dst_ports': 'any',
             'source_network': 'any',
             'dest_network': 'any',
         },
     ]
     rules2 = [
         {
             'direction': '<>',
             'simple_action': 'pass',
             'protocol': 'any',
             'src_ports': 'any',
             'dst_ports': 'any',
             'source_network': 'any',
             'dest_network': 'any',
         },
     ]
     policy1_name = 'p1'
     policy2_name = 'p2'
     policy1_fixture = self.useFixture(
         PolicyFixture(policy_name=policy1_name,
                       rules_list=rules1,
                       inputs=self.inputs,
                       connections=self.connections))
     policy2_fixture = self.useFixture(
         PolicyFixture(policy_name=policy2_name,
                       rules_list=rules2,
                       inputs=self.inputs,
                       connections=self.connections))
     vn1_fixture = self.useFixture(
         VNFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_name=vn1_name,
                   inputs=self.inputs,
                   subnets=vn1_subnets,
                   policy_objs=[policy1_fixture.policy_obj]))
     vn2_fixture = self.useFixture(
         VNFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_name=vn2_name,
                   inputs=self.inputs,
                   subnets=vn2_subnets,
                   disable_gateway=True,
                   policy_objs=[policy2_fixture.policy_obj]))
     vn3_fixture = self.useFixture(
         VNFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_name=vn3_name,
                   inputs=self.inputs,
                   subnets=vn3_subnets,
                   policy_objs=[policy2_fixture.policy_obj]))
     assert vn1_fixture.verify_on_setup()
     assert vn2_fixture.verify_on_setup()
     assert vn3_fixture.verify_on_setup()
     assert vn1_fixture.verify_vn_policy_in_api_server()
     assert vn2_fixture.verify_vn_policy_in_api_server()
     assert vn3_fixture.verify_vn_policy_in_api_server()
     vm1_fixture = self.useFixture(
         VMFixture(connections=self.connections,
                   vn_objs=[vn1_fixture.obj, vn2_fixture.obj],
                   vm_name=vm1_name,
                   project_name=self.inputs.project_name))
     vm2_fixture = self.useFixture(
         VMFixture(connections=self.connections,
                   vn_objs=[vn3_fixture.obj],
                   vm_name=vm2_name,
                   project_name=self.inputs.project_name))
     assert vm1_fixture.verify_on_setup()
     assert vm2_fixture.verify_on_setup()
     self.nova_h.wait_till_vm_is_up(vm1_fixture.vm_obj)
     self.nova_h.wait_till_vm_is_up(vm2_fixture.vm_obj)
     # For multi-vn vm, configure ip address for 2nd interface
     multivn_vm_ip_list = vm1_fixture.vm_ips
     interfaces = vm1_fixture.get_vm_interface_list()
     interface1 = vm1_fixture.get_vm_interface_list(
         ip=multivn_vm_ip_list[0])[0]
     interfaces.remove(interface1)
     interface2 = interfaces[0]
     if 'dual' == self.inputs.get_af():
         intf_conf_cmd = "ifconfig %s inet6 add %s" % (
             interface2, multivn_vm_ip_list[3])
     else:
         intf_conf_cmd = "ifconfig %s %s netmask 255.255.255.0" % (
             interface2, multivn_vm_ip_list[1])
     vm_cmds = (intf_conf_cmd, 'ifconfig -a')
     for cmd in vm_cmds:
         cmd_to_output = [cmd]
         vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
         output = vm1_fixture.return_output_cmd_dict[cmd]
     for ip in multivn_vm_ip_list:
         if ip not in output:
             self.logger.error("IP %s not assigned to any eth intf of %s" %
                               (ip, vm1_fixture.vm_name))
             assert False
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw of VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with no allow rule in the VN where default gw is part of, traffic should fail"
     )
     result = vm1_fixture.ping_with_certainty(expectation=False,
                                              dst_vm_fixture=vm2_fixture)
     assertEqual(result, True, "ping passed which is not expected")
     # Configure VM to reroute traffic to interface belonging to different
     # VN
     self.logger.info(
         "Direct traffic to gw which is part of VN with allow policy to destination VN, traffic should pass now"
     )
     cmd_to_output = []
     if 'dual' == self.inputs.get_af():
         cmd = ' route add -net %s netmask 255.255.255.0 gw %s dev %s' % (
             vn3_subnets[0].split('/')[0], multivn_vm_ip_list[2],
             interface2)
         cmd_to_output.append(' ip -6 route add %s dev %s' %
                              (vn3_subnets[1], interface2))
     else:
         cmd = ' route add -net %s netmask 255.255.255.0 gw %s dev %s' % (
             vn3_subnets[0].split('/')[0], multivn_vm_ip_list[1],
             interface2)
     cmd_to_output.append(cmd)
     vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
     output = vm1_fixture.return_output_cmd_dict[cmd]
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw for VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with allow rule in the VN where network gw is part of, traffic should pass"
     )
     result = vm1_fixture.ping_with_certainty(expectation=True,
                                              dst_vm_fixture=vm2_fixture)
     assertEqual(result, True, "ping failed which is not expected")
     return True
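
One subtle point in the test above: ping_with_certainty() takes an expectation flag, so the boolean it returns means "behaviour matched the expectation", not "ping succeeded". A small wrapper sketch that makes this explicit (illustrative only; the wrapper name is not part of the framework):

 def assert_ping_behaviour(self, src_vm, dst_vm, should_pass, msg):
      # True from ping_with_certainty() means the observed result matched
      # `should_pass`, whether that was a successful or a failed ping.
      matched = src_vm.ping_with_certainty(expectation=should_pass,
                                           dst_vm_fixture=dst_vm)
      self.assertEqual(matched, True, msg)
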
Example #27
    def test_lbaas_add_remove_members(self):
        '''Create LB, LISTENER, POOL and MEMBER
           Create a FIP and associate it to the VIP, create a VM in the FIP network
           Verify: pool, member and vip get created
           After vip creation, a netns is created on the compute node and the haproxy
           process starts; fail otherwise
           Verify HTTP traffic after adding and removing pool members
        '''
        result = True
        pool_members = {}
        members = []

        fip_fix = self.useFixture(
            VNFixture(connections=self.connections, router_external=True))
        client_vm1_fixture = self.create_vm(fip_fix,
                                            flavor='contrail_flavor_small',
                                            image_name='ubuntu')

        vn_vm_fix = self.create_vn_and_its_vms(no_of_vm=3)

        vn_vip_fixture = vn_vm_fix[0]
        lb_pool_servers = vn_vm_fix[1]

        assert client_vm1_fixture.wait_till_vm_is_up()
        for VMs in lb_pool_servers:
            members.append(VMs.vm_ip)

        pool_members.update({'address': members})
        pool_name = get_random_name('mypool')
        lb_method = 'ROUND_ROBIN'
        protocol = 'HTTP'
        protocol_port = 80
        vip_name = get_random_name('myvip')
        listener_name = get_random_name('HTTP')

        http_listener = self.create_lbaas(vip_name,
                                          vn_vip_fixture.uuid,
                                          pool_name=pool_name,
                                          pool_algorithm=lb_method,
                                          pool_protocol=protocol,
                                          pool_port=HTTP_PORT,
                                          members=pool_members,
                                          listener_name=listener_name,
                                          fip_net_id=fip_fix.uuid,
                                          vip_port=HTTP_PORT,
                                          vip_protocol='HTTP',
                                          hm_delay=5,
                                          hm_timeout=5,
                                          hm_max_retries=5,
                                          hm_probe_type=HTTP_PROBE)

        http_listener.verify_on_setup()

        tcp_listener = self.create_lbaas(vip_name,
                                         vn_vip_fixture.uuid,
                                         pool_name=pool_name,
                                         pool_algorithm=lb_method,
                                         pool_protocol='TCP',
                                         pool_port=TCP_PORT,
                                         members=pool_members,
                                         listener_name=get_random_name('TCP'),
                                         fip_net_id=fip_fix.uuid,
                                         vip_port=TCP_PORT,
                                         vip_protocol='TCP',
                                         hm_delay=5,
                                         hm_timeout=5,
                                         hm_max_retries=5,
                                         hm_probe_type=PING_PROBE)

        tcp_listener.verify_on_setup()

        https_listener = self.create_lbaas(
            vip_name,
            vn_vip_fixture.uuid,
            pool_name=pool_name,
            pool_algorithm=lb_method,
            pool_protocol='HTTPS',
            pool_port=HTTPS_PORT,
            members=pool_members,
            listener_name=get_random_name('HTTPS'),
            fip_net_id=fip_fix.uuid,
            vip_port=HTTPS_PORT,
            vip_protocol='HTTPS',
            hm_delay=5,
            hm_timeout=5,
            hm_max_retries=5,
            hm_probe_type=PING_PROBE)

        https_listener.verify_on_setup()

        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers, http_listener.fip_ip),\
            "Verify LB failed for ROUND ROBIN"

        self.logger.info("Verify after adding few more members")
        for no_of_vm in range(3):
            lb_pool_servers.append(
                self.create_vm(vn_vip_fixture,
                               flavor='contrail_flavor_small',
                               image_name='ubuntu'))
            lb_pool_servers[-1].wait_till_vm_is_up()
            lb_pool_servers[-1].start_webserver(listen_port=80)
            http_listener.create_member(address=lb_pool_servers[-1].vm_ip)

        time.sleep(50)
        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers, http_listener.fip_ip),\
            "Verify LB failed for ROUND ROBIN"

        self.logger.info("Verify after deleting the one of the member VM")
        http_listener.delete_member(address=lb_pool_servers[-1].vm_ip)

        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers[:-1], http_listener.fip_ip),\
            "Verify LB failed for ROUND ROBIN"

        for server in lb_pool_servers[:3]:
            server.start_webserver(listen_port=TCP_PORT)
            time.sleep(15)

        assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers[:3], http_listener.fip_ip, port=TCP_PORT),\
            "Verify LB failed for ROUND ROBIN"