Example #1
 def test_repeated_policy_modify(self):
     """ Configure policies based on topology; Replace VN's existing policy [same policy name but with different rule set] multiple times and verify.
     """
     ###
     # Get config for test from topology
     # very simple topo will do, one vn, one vm, multiple policies with n
     # rules
     topology_class_name = sdn_single_vm_multiple_policy_topology.sdn_single_vm_multiple_policy_config
     self.logger.info("Scenario for the test used is: %s" %
                      (topology_class_name))
     # set project name
     try:
         # provided by wrapper module if run in parallel test env
         topo = topology_class_name(project=self.project.project_name,
                                    username=self.project.username,
                                    password=self.project.password)
     except NameError:
         topo = topology_class_name()
     ###
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(sdnTopoSetupFixture(
         self.connections, topo))
     out = setup_obj.topo_setup()
     self.assertEqual(out['result'], True, out['msg'])
     if out['result']:
         topo, config_topo = out['data']
     ###
     # Verify [and assert on fail] after setup
     # Calling system policy verification, pick any policy fixture to
     # access fixture verification
     policy_name = topo.policy_list[0]
     system_vna_verify_policy(self, config_topo['policy'][policy_name],
                              topo, 'setup')
     ###
     # Test procedure:
     # Test repeated update of a policy attached to a VM
     test_vm = topo.vmc_list[0]
     test_vn = topo.vn_of_vm[test_vm]
     test_vn_fix = config_topo['vn'][test_vn]
     test_vn_id = test_vn_fix.vn_id
     for policy in topo.policy_list:
         # set new policy for test_vn to policy
         test_policy_fq_names = []
         name = config_topo['policy'][policy].policy_fq_name
         test_policy_fq_names.append(name)
         state = "policy for %s updated to %s" % (test_vn, policy)
         test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
         # wait for tables update before checking after making changes to
         # system
         time.sleep(5)
         self.logger.info("new policy list of vn %s is %s" %
                          (test_vn, policy))
         # update expected topology with this new info for verification
         updated_topo = policy_test_utils.update_topo(topo, test_vn, policy)
         system_vna_verify_policy(self, config_topo['policy'][policy],
                                  updated_topo, state)
     return True
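
The helper policy_test_utils.update_topo used above comes from the shared test utilities; its body is not shown here. A minimal sketch of what it is assumed to do (rewire the expected vn_policy and policy_vn maps so that verification matches the single new binding), with attribute names taken from the topology objects used above:

import copy

def update_topo(topo, test_vn, policy):
    # Sketch: return an expected-topology copy in which test_vn is bound
    # to exactly one policy. Assumes topo carries vn_policy (vn -> list of
    # policies) and policy_vn (policy -> list of vns) dicts, as the tests
    # above imply.
    updated = copy.deepcopy(topo)
    # Detach test_vn from every policy it was previously mapped to.
    for old_policy in updated.vn_policy.get(test_vn, []):
        if test_vn in updated.policy_vn.get(old_policy, []):
            updated.policy_vn[old_policy].remove(test_vn)
    # Record the single new binding in both directions.
    updated.vn_policy[test_vn] = [policy]
    updated.policy_vn.setdefault(policy, []).append(test_vn)
    return updated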
Example #2
 def policy_scale_test_with_ping(self, topo):
     """ Setup multiple VM, VN and policies to allow traffic. From one VM, send ping to all VMs to test..
     Test focus is on the scale of VM/VN created..
     """
     result = True
     msg = []
     #
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(sdnTopoSetupFixture(
         self.connections, topo))
     out = setup_obj.topo_setup()
     #out= setup_obj.topo_setup(vm_verify='yes', skip_cleanup='yes')
     self.logger.info("Setup completed with result %s" % (out['result']))
     self.assertEqual(out['result'], True, out['msg'])
     if out['result']:
         topo, config_topo = out['data']
     # 1. Define Traffic Params
     test_vm1 = topo.vmc_list[0]  # 'vmc0'
     test_vm1_fixture = config_topo['vm'][test_vm1]
     test_vn = topo.vn_of_vm[test_vm1]  # 'vnet0'
     test_vn_fix = config_topo['vn'][test_vn]
     test_vn_id = test_vn_fix.vn_id
     test_proto = 'icmp'
     # Assumption: one policy per VN
     policy = topo.vn_policy[test_vn][0]
     policy_info = "policy in effect is : " + str(topo.rules[policy])
     self.logger.info(policy_info)
     for vmi in range(1, len(topo.vn_of_vm)):
         test_vm2 = topo.vmc_list[vmi]
         test_vm2_fixture = config_topo['vm'][test_vm2]
         # 2. set expectation to verify..
         # Topology guide: One policy attached to VN has one rule for protocol under test
         # For ping test, set expected result based on action - pass or deny
         # if action = 'pass', expectedResult= True, else Fail;
         matching_rule_action = {}
         num_rules = len(topo.rules[policy])
         for i in range(num_rules):
             proto = topo.rules[policy][i]['protocol']
             matching_rule_action[proto] = topo.rules[policy][i][
                 'simple_action']
         self.logger.info("matching_rule_action: %s" % matching_rule_action)
         # 3. Test with ping
         self.logger.info("Verify ping to vm %s" % (vmi))
         expectedResult = (matching_rule_action[test_proto] == 'pass')
         ret = test_vm1_fixture.ping_with_certainty(
             test_vm2_fixture.vm_ip,
             expectation=expectedResult,
             dst_vm_fixture=test_vm2_fixture)
         result_msg = "vm ping test result to vm %s is: %s" % (vmi, ret)
         self.logger.info(result_msg)
         if not ret:
             result = False
             msg.extend([result_msg, policy_info])
     self.assertEqual(result, True, msg)
     return result
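
The rule-to-expectation step above also appears in Example #8; lifting it into a helper keeps it testable on its own. A minimal sketch, assuming rules are dicts with 'protocol' and 'simple_action' keys as in the topologies used here:

def expected_ping_result(rules, proto='icmp'):
    # Sketch: map a policy's rule list to the expected ping outcome.
    # An empty rule set, or no rule for the protocol, means implicit deny.
    matching_rule_action = {
        rule['protocol']: rule['simple_action'] for rule in rules}
    return matching_rule_action.get(proto) == 'pass'

With it, the loop body reduces to expectedResult = expected_ping_result(topo.rules[policy], test_proto).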
Example #3
    def test_policy(self):
        """ Configure policies based on topology and run policy related verifications.
        """
        result = True
        #
        # Get config for test from topology
        topology_class_name = sdn_basic_topology.sdn_basic_config
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))
        # set project name
        try:
            # provided by wrapper module if run in parallel test env
            topo = topology_class_name(
                project=self.project.project_name,
                username=self.project.project_username,
                password=self.project.project_user_password)
        except NameError:
            topo = topology_class_name()
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data']
        #
        # Verify [and assert on fail] after setup
        # Calling system policy verification, pick any policy fixture to
        # access fixture verification
        policy_name = topo.policy_list[0]
        system_vna_verify_policy(
            self,
            config_topo['policy'][policy_name],
            topo,
            'setup')

        # Verify ICMP traffic between the two VM's.
        if not config_topo['vm'][topo.vmc_list[0]].ping_with_certainty(
            expectation=True,
            dst_vm_fixture=config_topo['vm'][topo.vmc_list[1]]):
            self.logger.error(
                'Ping from %s to %s failed, expected it to pass' %
                (config_topo['vm'][topo.vmc_list[0]].vm_name,
                 config_topo['vm'][topo.vmc_list[1]].vm_name))
            return False

        return True
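
system_vna_verify_policy is shared by most of these examples and its body lives in the policy test utilities. The calling convention (test instance, policy fixture, expected topology, stage label) suggests a wrapper of roughly this shape; verify_policy_in_vna and its return format are assumptions here, not a confirmed fixture API:

def system_vna_verify_policy(test, policy_fixt, topo, state):
    # Sketch only: run the fixture's system-level policy verification
    # against the expected topology and fail the calling test with the
    # stage label on mismatch. The method name and {'result': bool,
    # 'msg': str} return shape are assumed.
    ret = policy_fixt.verify_policy_in_vna(topo)
    test.assertEqual(ret['result'], True,
                     "Policy verification failed after %s: %s" %
                     (state, ret['msg']))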
Example #4
    def test_policy_modify_vn_policy(self):
        """ Configure policies based on topology;
        """
        ###
        # Get config for test from topology
        # very simple topo will do, one vn, one vm, one policy, 3 rules
        topology_class_name = sdn_single_vm_policy_topology.sdn_single_vm_policy_config

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        # set project name
        try:
            # provided by wrapper module if run in parallel test env
            topo = topology_class_name(project=self.project.project_name,
                                       username=self.project.username,
                                       password=self.project.password)
        except NameError:
            topo = topology_class_name()
        ###
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data']
        ###
        # Verify [and assert on fail] after setup
        # Calling system policy verification, pick any policy fixture to
        # access fixture verification
        policy_name = topo.policy_list[0]
        system_vna_verify_policy(self, config_topo['policy'][policy_name],
                                 topo, 'setup')
        ###
        # Test procedure:
        # Test adding new policy to VN's existing policy list
        state = "add policy: "
        test_vm = topo.vmc_list[0]
        test_vn = topo.vn_of_vm[test_vm]
        # Init test data, take backup of current topology
        initial_vn_policy_list = copy.copy(topo.vn_policy[test_vn])
        new_policy_to_add = policy_test_utils.get_policy_not_in_vn(
            initial_vn_policy_list, topo.policy_list)
        if not new_policy_to_add:
            result = False
            msg = ("Test cannot be run as the required config is not "
                   "available in the topology; aborting test")
            self.logger.info(msg)
            self.assertEqual(result, True, msg)
        initial_policy_vn_list = copy.copy(topo.policy_vn[new_policy_to_add])
        new_vn_policy_list = copy.copy(initial_vn_policy_list)
        new_policy_vn_list = copy.copy(initial_policy_vn_list)
        new_vn_policy_list.append(new_policy_to_add)
        new_policy_vn_list.append(test_vn)
        test_vn_fix = config_topo['vn'][test_vn]
        test_vn_id = test_vn_fix.vn_id
        # configure new policy
        config_topo['policy'][new_policy_to_add] = self.useFixture(
            PolicyFixture(policy_name=new_policy_to_add,
                          rules_list=topo.rules[new_policy_to_add],
                          inputs=self.inputs,
                          connections=self.connections))
        # get new policy_set to be pushed for the vn
        test_policy_fq_names = []
        for policy in new_vn_policy_list:
            name = config_topo['policy'][policy].policy_fq_name
            test_policy_fq_names.append(name)
        self.logger.info("adding policy %s to vn %s" %
                         (new_policy_to_add, test_vn))
        test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
        # wait for tables update before checking after making changes to system
        time.sleep(5)
        self.logger.info("New policy list of VN %s is %s" %
                         (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        topo.policy_vn[new_policy_to_add] = new_policy_vn_list
        system_vna_verify_policy(self,
                                 config_topo['policy'][new_policy_to_add],
                                 topo, state)
        # Test unbinding all policies from VN
        state = "unbinding all policies"
        test_vn_fix.unbind_policies(test_vn_id)
        # wait for tables update before checking after making changes to system
        time.sleep(5)
        current_vn_policy_list = new_vn_policy_list
        new_vn_policy_list = []
        self.logger.info("New policy list of VN %s is %s" %
                         (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        for policy in current_vn_policy_list:
            topo.policy_vn[policy].remove(test_vn)
        system_vna_verify_policy(self,
                                 config_topo['policy'][new_policy_to_add],
                                 topo, state)
        return True
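
policy_test_utils.get_policy_not_in_vn only needs to find a policy that the VN is not already using; a minimal sketch consistent with how it is called above:

def get_policy_not_in_vn(vn_policy_list, policy_list):
    # Sketch: return the first policy not already attached to the VN,
    # or None when every policy in the topology is in use.
    for policy in policy_list:
        if policy not in vn_policy_list:
            return policy
    return None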
Example #5
    def test_traffic_connections_while_control_nodes_go_down(self):
        """Tests related to connections and traffic while switching from normal mode to headless and back
           i.e. control nodes go down and come online."""

        if len(self.inputs.compute_ips) < 2:
            raise unittest.SkipTest("This test needs at least 2 compute nodes.")
        else:
            self.logger.info(
                "Required resources are in place to run the test.")

        result = True
        topology_class_name = None

        self.compute_fixture_dict = {}
        for each_compute in self.inputs.compute_ips:
            self.compute_fixture_dict[each_compute] = self.useFixture(
                ComputeNodeFixture(connections=self.connections,
                                   node_ip=each_compute,
                                   username=self.inputs.username,
                                   password=self.inputs.password))
            mode = self.compute_fixture_dict[
                each_compute].get_agent_headless_mode()
            if mode is False:
                self.compute_fixture_dict[
                    each_compute].set_agent_headless_mode()
        #
        # Get config for test from topology
        result = True
        msg = []
        if not topology_class_name:
            topology_class_name = test_headless_vrouter_topo.sdn_headless_vrouter_topo

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IP's and pass it to topo if you want to pin
        # a vm to a particular node
        topo_obj = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        topo = {}
        topo_objs = {}
        config_topo = {}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo_obj))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_objs, config_topo, vm_fip_info = out['data']

        # Start Test
        proj = list(config_topo.keys())
        vms = list(config_topo[proj[0]]['vm'].keys())
        src_vm = config_topo[proj[0]]['vm'][vms[0]]
        dest_vm = config_topo[proj[0]]['vm'][vms[1]]
        flow_cache_timeout = 180

        # Setup Traffic.
        stream = Stream(protocol="ip",
                        proto="icmp",
                        src=src_vm.vm_ip,
                        dst=dest_vm.vm_ip)
        profile = ContinuousProfile(stream=stream, count=0, capfilter="icmp")

        tx_vm_node_ip = src_vm.vm_node_ip
        rx_vm_node_ip = dest_vm.vm_node_ip

        tx_local_host = Host(tx_vm_node_ip, self.inputs.username,
                             self.inputs.password)
        rx_local_host = Host(rx_vm_node_ip, self.inputs.username,
                             self.inputs.password)

        send_host = Host(src_vm.local_ip, src_vm.vm_username,
                         src_vm.vm_password)
        recv_host = Host(dest_vm.local_ip, dest_vm.vm_username,
                         dest_vm.vm_password)

        sender = Sender("icmp", profile, tx_local_host, send_host,
                        self.inputs.logger)
        receiver = Receiver("icmp", profile, rx_local_host, recv_host,
                            self.inputs.logger)

        receiver.start()
        sender.start()
        self.logger.info("Waiting for 5 sec for traffic to be setup ...")
        time.sleep(5)

        flow_index_list = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)

        headless_vr_utils.stop_all_control_services(self)
        self.addCleanup(self.inputs.start_service,
                        'contrail-control',
                        self.inputs.bgp_ips,
                        container='control')

        headless_vr_utils.check_through_tcpdump(self, dest_vm, src_vm)

        flow_index_list2 = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)

        if flow_index_list == flow_index_list2:
            self.logger.info("Flow indexes have not changed.")
        else:
            self.logger.error(
                "Flow indexes have changed. Test Failed, Exiting")
            return False

        # wait_for_flow_cache_timeout
        time.sleep(flow_cache_timeout)

        # verify_flow_is_not_recreated
        flow_index_list = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)

        if flow_index_list == flow_index_list2:
            self.logger.info("Flow indexes have not changed.")
        else:
            self.logger.error(
                "Flow indexes have changed. Test Failed, Exiting")
            return False

        receiver.stop()
        sender.stop()

        # wait_for_flow_cache_timeout
        time.sleep(flow_cache_timeout + 5)

        # verify_flow_is_cleared
        flow_index_list = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)
        if not flow_index_list:
            self.logger.info("No flows are present")
        else:
            self.logger.error("Flows are still present.")
            return False

        # start_ping
        receiver.start()
        sender.start()
        self.logger.info("Waiting for 5 sec for traffic to be setup ...")
        time.sleep(5)
        # verify_flow_is_recreated
        flow_index_list = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)
        if len(flow_index_list) >= 2 and all(flow_index_list[:2]):
            self.logger.info("Flows are recreated.")
        else:
            self.logger.error("Flows are still absent.")
            return False

        headless_vr_utils.start_all_control_services(self)

        headless_vr_utils.check_through_tcpdump(self, dest_vm, src_vm)

        # wait_for_flow_cache_timeout
        time.sleep(flow_cache_timeout + 5)

        flow_index_list2 = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)

        if flow_index_list == flow_index_list2:
            self.logger.info("Flow indexes have not changed.")
        else:
            self.logger.error(
                "Flow indexes have changed. Test Failed, Exiting")
            return False

        receiver.stop()
        sender.stop()

        # wait_for_flow_cache_timeout
        time.sleep(flow_cache_timeout + 5)

        # verify_flow_is_cleared
        flow_index_list = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)
        if not flow_index_list:
            self.logger.info("No flows are present")
        else:
            self.logger.error("Flows are still present.")
            return False

        # start_ping
        receiver.start()
        sender.start()
        self.logger.info("Waiting for 5 sec for traffic to be setup ...")
        time.sleep(5)

        # verify_flow_is_recreated
        flow_index_list = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)
        if len(flow_index_list) >= 2 and all(flow_index_list[:2]):
            self.logger.info("Flows are recreated.")
        else:
            self.logger.error("Flows are still absent.")
            return False

        receiver.stop()
        sender.stop()

        return True
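
The fixed time.sleep(flow_cache_timeout) waits above are the simplest way to let flows age out, but they always pay the full timeout. A polling variant, reusing headless_vr_utils.get_flow_index_list and assuming nothing else, could look like this sketch:

import time

def wait_for_flows_cleared(test, src_vm, dest_vm, timeout, interval=10):
    # Sketch: poll the flow table instead of sleeping for the full flow
    # cache timeout; return True as soon as no flows remain, False if
    # flows still linger at the deadline.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not headless_vr_utils.get_flow_index_list(test, src_vm, dest_vm):
            return True
        time.sleep(interval)
    return False

Each sleep-then-check pair in the test would then become a single call that fails fast when flows persist.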
Example #6
    def test_config_add_change_while_control_nodes_go_down(self):
        """Tests related to configuration add, change, and delete while switching from normal mode
           to headless and back i.e. control nodes go down and come online."""

        if len(self.inputs.compute_ips) < 2:
            raise unittest.SkipTest("This test needs at least 2 compute nodes.")
        else:
            self.logger.info(
                "Required resources are in place to run the test.")

        result = True
        topology_class_name = None

        self.compute_fixture_dict = {}
        for each_compute in self.inputs.compute_ips:
            self.compute_fixture_dict[each_compute] = self.useFixture(
                ComputeNodeFixture(connections=self.connections,
                                   node_ip=each_compute,
                                   username=self.inputs.username,
                                   password=self.inputs.password))
            mode = self.compute_fixture_dict[
                each_compute].get_agent_headless_mode()
            if mode is False:
                self.compute_fixture_dict[
                    each_compute].set_agent_headless_mode()
        #
        # Get config for test from topology
        result = True
        msg = []
        if not topology_class_name:
            topology_class_name = test_headless_vrouter_topo.sdn_headless_vrouter_topo

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IP's and pass it to topo if you want to pin
        # a vm to a particular node
        topo_obj = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        topo = {}
        topo_objs = {}
        config_topo = {}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo_obj))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_objs, config_topo, vm_fip_info = out['data']

        # Start Test
        proj = list(config_topo.keys())
        vms = list(config_topo[proj[0]]['vm'].keys())
        src_vm = config_topo[proj[0]]['vm'][vms[0]]
        dest_vm = config_topo[proj[0]]['vm'][vms[1]]
        flow_cache_timeout = 180

        # Setup Traffic.
        stream = Stream(protocol="ip",
                        proto="icmp",
                        src=src_vm.vm_ip,
                        dst=dest_vm.vm_ip)
        profile = ContinuousProfile(stream=stream, count=0, capfilter="icmp")

        tx_vm_node_ip = src_vm.vm_node_ip
        rx_vm_node_ip = dest_vm.vm_node_ip

        tx_local_host = Host(tx_vm_node_ip, self.inputs.username,
                             self.inputs.password)
        rx_local_host = Host(rx_vm_node_ip, self.inputs.username,
                             self.inputs.password)

        send_host = Host(src_vm.local_ip, src_vm.vm_username,
                         src_vm.vm_password)
        recv_host = Host(dest_vm.local_ip, dest_vm.vm_username,
                         dest_vm.vm_password)

        sender = Sender("icmp", profile, tx_local_host, send_host,
                        self.inputs.logger)
        receiver = Receiver("icmp", profile, rx_local_host, recv_host,
                            self.inputs.logger)

        receiver.start()
        sender.start()
        self.logger.info("Waiting for 5 sec for traffic to be setup ...")
        time.sleep(5)

        #self.start_ping(src_vm, dest_vm)

        flow_index_list = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)

        headless_vr_utils.stop_all_control_services(self)
        self.addCleanup(self.inputs.start_service,
                        'contrail-control',
                        self.inputs.bgp_ips,
                        container='control')
        time.sleep(10)
        headless_vr_utils.check_through_tcpdump(self, dest_vm, src_vm)

        flow_index_list2 = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)

        if set(flow_index_list) == set(flow_index_list2):
            self.logger.info("Flow indexes have not changed.")
        else:
            self.logger.error(
                "Flow indexes have changed. Test Failed, Exiting")
            return False

        receiver.stop()
        sender.stop()
        project1_instance = config_topo['project1']['project']['project1']
        project1_instance.get_project_connections()
        vnet2_instance = config_topo['project1']['vn']['vnet2']
        # add VM to existing VN
        VM22_fixture = self.useFixture(
            VMFixture(
                connections=project1_instance.project_connections['juniper'],
                vn_obj=vnet2_instance.obj,
                vm_name='VM22',
                project_name=project1_instance.project_name))

        # create new IPAM
        ipam3_obj = self.useFixture(
            IPAMFixture(project_obj=project1_instance, name='ipam3'))
        ipam4_obj = self.useFixture(
            IPAMFixture(project_obj=project1_instance, name='ipam4'))

        # create new VN
        VN3_fixture = self.useFixture(
            VNFixture(
                project_name=project1_instance.project_name,
                connections=project1_instance.project_connections['juniper'],
                vn_name='VN3',
                inputs=project1_instance.inputs,
                subnets=['10.3.1.0/24'],
                ipam_fq_name=ipam3_obj.fq_name))

        VN4_fixture = self.useFixture(
            VNFixture(
                project_name=project1_instance.project_name,
                connections=project1_instance.project_connections['juniper'],
                vn_name='VN4',
                inputs=project1_instance.inputs,
                subnets=['10.4.1.0/24'],
                ipam_fq_name=ipam4_obj.fq_name))

        # create policy
        policy_name = 'policy34'
        rules = []
        rules = [{
            'direction': '<>',
            'protocol': 'icmp',
            'dest_network': VN4_fixture.vn_fq_name,
            'source_network': VN3_fixture.vn_fq_name,
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }, {
            'direction': '<>',
            'protocol': 'icmp',
            'dest_network': VN3_fixture.vn_fq_name,
            'source_network': VN4_fixture.vn_fq_name,
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }]

        policy34_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy_name,
                rules_list=rules,
                inputs=project1_instance.inputs,
                connections=project1_instance.project_connections['juniper'],
                project_fixture=project1_instance))

        # create VN to policy mapping in a dict of policy list.
        vn_policys = {
            VN3_fixture.vn_name: [policy_name],
            VN4_fixture.vn_name: [policy_name]
        }

        # create a policy object list of policies to be attached to a vm
        policy_obj_dict = {}
        policy_obj_dict[VN3_fixture.vn_name] = [policy34_fixture.policy_obj]
        policy_obj_dict[VN4_fixture.vn_name] = [policy34_fixture.policy_obj]

        # vn fixture dictionary.
        vn_obj_dict = {}
        vn_obj_dict[VN3_fixture.vn_name] = VN3_fixture
        vn_obj_dict[VN4_fixture.vn_name] = VN4_fixture

        # attach policy to VN
        VN3_policy_fixture = self.useFixture(
            VN_Policy_Fixture(
                connections=project1_instance.project_connections['juniper'],
                vn_name=VN3_fixture.vn_name,
                policy_obj=policy_obj_dict,
                vn_obj=vn_obj_dict,
                vn_policys=vn_policys[VN3_fixture.vn_name],
                project_name=project1_instance.project_name))

        VN4_policy_fixture = self.useFixture(
            VN_Policy_Fixture(
                connections=project1_instance.project_connections['juniper'],
                vn_name=VN4_fixture.vn_name,
                policy_obj=policy_obj_dict,
                vn_obj=vn_obj_dict,
                vn_policys=vn_policys[VN4_fixture.vn_name],
                project_name=project1_instance.project_name))

        # add VM to new VN
        VM31_fixture = self.useFixture(
            VMFixture(
                connections=project1_instance.project_connections['juniper'],
                vn_obj=VN3_fixture.obj,
                vm_name='VM31',
                project_name=project1_instance.project_name))

        VM41_fixture = self.useFixture(
            VMFixture(
                connections=project1_instance.project_connections['juniper'],
                vn_obj=VN4_fixture.obj,
                vm_name='VM41',
                project_name=project1_instance.project_name))

        # verification routines.
        test_flag = 0
        if ((VN3_fixture.verify_vn_in_api_server())
                and (VN3_fixture.verify_vn_not_in_agent())
                and (VN3_fixture.verify_vn_policy_in_api_server()['result'])):
            self.logger.info(
                "Verification of VN3 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VN3 FAILED while control nodes down.")
            test_flag = 1

        if ((VN4_fixture.verify_vn_in_api_server())
                and (VN4_fixture.verify_vn_not_in_agent())
                and (VN4_fixture.verify_vn_policy_in_api_server()['result'])):
            self.logger.info(
                "Verification of VN4 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VN4 FAILED while control nodes down.")
            test_flag = 1

        if ((VM22_fixture.verify_vm_launched())
                and (VM22_fixture.verify_vm_in_api_server())):
            self.logger.info(
                "Verification of VM22 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VM22 FAILED while control nodes down.")
            test_flag = 1

        if ((VM31_fixture.verify_vm_launched())
                and (VM31_fixture.verify_vm_in_api_server())):
            self.logger.info(
                "Verification of VM31 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VM31 FAILED while control nodes down.")
            test_flag = 1

        if ((VM41_fixture.verify_vm_launched())
                and (VM41_fixture.verify_vm_in_api_server())):
            self.logger.info(
                "Verification of VM41 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VM41 FAILED while control nodes down.")
            test_flag = 1

        # start all control services.
        headless_vr_utils.start_all_control_services(self)

        # if something went wrong in the controller down state bail out here.
        if test_flag == 1:
            self.logger.error(
                "Verifications and test failed while the controllers were "
                "down in headless state of agent. Check earlier error logs.")
            return False

        # wait for 3 to 5 sec for configuration sync from control nodes to the
        # agents.
        time.sleep(5)

        # wait till VM's are up.
        VM22_fixture.wait_till_vm_is_up()
        VM31_fixture.wait_till_vm_is_up()
        VM41_fixture.wait_till_vm_is_up()

        # verify vm config gets downloaded to the agents.
        if ((VM22_fixture.verify_vm_in_agent())
                and (VM31_fixture.verify_vm_in_agent())
                and (VM41_fixture.verify_vm_in_agent())):
            self.logger.info("VM verification on the agent PASSED")
        else:
            self.logger.error("VM verification on the agent FAILED")
            return False

        # check ping success between the two VM's
        assert config_topo['project1']['vm']['VM11'].ping_with_certainty(
            VM22_fixture.vm_ip, expectation=True)
        assert VM31_fixture.ping_with_certainty(VM41_fixture.vm_ip,
                                                expectation=True)
        assert VM41_fixture.ping_with_certainty(VM31_fixture.vm_ip,
                                                expectation=True)

        # verification routines.
        if ((VN3_fixture.verify_on_setup()) and (VN4_fixture.verify_on_setup())
                and (VM22_fixture.verify_on_setup())
                and (VM31_fixture.verify_on_setup())
                and (VM41_fixture.verify_on_setup())):
            self.logger.info(
                "All verifications passed after controllers came up in headless agent mode"
            )
        else:
            self.logger.error(
                "Verifications FAILED after controllers came up in headless agent mode"
            )
            return False

        return True
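
The two-rule list for policy34 is a symmetric pair that differs only in source and destination; a small builder makes the pattern explicit. A sketch using the same rule-dict keys as above:

def symmetric_allow_rules(vn_a_fq_name, vn_b_fq_name, protocol='icmp'):
    # Sketch: build a bidirectional pass-rule pair between two VNs,
    # with the same keys as the policy34 rules above.
    def rule(src, dst):
        return {'direction': '<>', 'protocol': protocol,
                'source_network': src, 'dest_network': dst,
                'src_ports': 'any', 'dst_ports': 'any',
                'simple_action': 'pass'}
    return [rule(vn_a_fq_name, vn_b_fq_name),
            rule(vn_b_fq_name, vn_a_fq_name)]

rules = symmetric_allow_rules(VN3_fixture.vn_fq_name, VN4_fixture.vn_fq_name) would then replace the literal list.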
Example #7
    def setup_common_objects(self, inputs, connections):
        self.inputs = inputs
        self.connections = connections
        self.base_rel = get_release()
        (self.vn11_name, self.vn11_subnets) = ("vn11", ["192.168.1.0/24"])
        (self.vn22_name, self.vn22_subnets) = ("vn22", ["192.168.2.0/24"])
        (self.fip_vn_name, self.fip_vn_subnets) = ("fip_vn", ['200.1.1.0/24'])
        (self.vn11_vm1_name, self.vn11_vm2_name, self.vn11_vm3_name,
         self.vn11_vm4_name) = ('vn11_vm1', 'vn11_vm2', 'vn11_vm3', 'vn11_vm4')
        self.vn22_vm1_name = 'vn22_vm1'
        self.vn22_vm2_name = 'vn22_vm2'
        self.fvn_vm1_name = 'fvn_vm1'

        # Configure 3 VNs: vn11, vn22, and fip_vn
        self.vn11_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections, inputs=self.inputs, vn_name=self.vn11_name, subnets=self.vn11_subnets))
        assert self.vn11_fixture.verify_on_setup()
        self.vn22_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections, inputs=self.inputs, vn_name=self.vn22_name, subnets=self.vn22_subnets))
        self.fvn_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name, connections=self.connections,
                inputs=self.inputs, vn_name=self.fip_vn_name, subnets=self.fip_vn_subnets))

        # Configure 4 VMs in VN11, 2 VMs in VN22, and 1 VM in FVN
        self.vn11_vm1_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn11_fixture.obj, vm_name=self.vn11_vm1_name, image_name='ubuntu'))
        self.vn11_vm2_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn11_fixture.obj, vm_name=self.vn11_vm2_name, image_name='ubuntu'))
        self.vn11_vm3_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn11_fixture.obj, vm_name=self.vn11_vm3_name, image_name='ubuntu'))
        self.vn11_vm4_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn11_fixture.obj, vm_name=self.vn11_vm4_name, image_name='ubuntu'))
        self.vn22_vm1_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn22_fixture.obj, vm_name=self.vn22_vm1_name, image_name='ubuntu'))
        self.vn22_vm2_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn22_fixture.obj, vm_name=self.vn22_vm2_name, image_name='ubuntu'))
        self.fvn_vm1_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.fvn_fixture.obj, vm_name=self.fvn_vm1_name, image_name='ubuntu'))

        # Adding Policy between vn11 and vn22  ######
        assert self.vn11_fixture.verify_on_setup()
        assert self.vn22_fixture.verify_on_setup()
        rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'any', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
        ]
        policy_name = 'p1'
        self.policy_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy_name, rules_list=rules, inputs=self.inputs,
                connections=self.connections))

        policy_fq_name = [self.policy_fixture.policy_fq_name]
        self.vn11_fixture.bind_policies(
            policy_fq_name, self.vn11_fixture.vn_id)
        self.addCleanup(self.vn11_fixture.unbind_policies,
                        self.vn11_fixture.vn_id, [self.policy_fixture.policy_fq_name])
        self.vn22_fixture.bind_policies(
            policy_fq_name, self.vn22_fixture.vn_id)
        self.addCleanup(self.vn22_fixture.unbind_policies,
                        self.vn22_fixture.vn_id, [self.policy_fixture.policy_fq_name])

        # Adding Floating ip ###

        assert self.fvn_fixture.verify_on_setup()

        fip_pool_name = 'some-pool1'
        self.fip_fixture = self.useFixture(
            FloatingIPFixture(
                project_name=self.inputs.project_name, inputs=self.inputs,
                connections=self.connections, pool_name=fip_pool_name, vn_id=self.fvn_fixture.vn_id))

        self.vn11_vm1_fixture.verify_on_setup()
        self.vn11_vm1_fixture.wait_till_vm_is_up()
        self.fip_id = self.fip_fixture.create_and_assoc_fip(
            self.fvn_fixture.vn_id, self.vn11_vm1_fixture.vm_id)
        self.addCleanup(self.fip_fixture.disassoc_and_delete_fip, self.fip_id)
        assert self.fip_fixture.verify_fip(
            self.fip_id, self.vn11_vm1_fixture, self.fvn_fixture)

        self.vn22_vm1_fixture.verify_on_setup()
        self.vn22_vm1_fixture.wait_till_vm_is_up()
        self.fip_id1 = self.fip_fixture.create_and_assoc_fip(
            self.fvn_fixture.vn_id, self.vn22_vm1_fixture.vm_id)
        assert self.fip_fixture.verify_fip(
            self.fip_id1, self.vn22_vm1_fixture, self.fvn_fixture)
        self.addCleanup(self.fip_fixture.disassoc_and_delete_fip, self.fip_id1)

        # Adding  the service chaining resources for firewall  ###
        si_count = 1
        svc_scaling = False
        max_inst = 1
        svc_mode = 'in-network'
        flavor = 'm1.medium'
        self.vn1_fq_name = "default-domain:" + self.inputs.project_name + ":in_network_vn1"
        self.vn1_name = "in_network_vn1"
        self.vn1_subnets = ['10.1.1.0/24']
        self.vm1_name = 'in_network_vm1'
        self.vn2_fq_name = "default-domain:" + self.inputs.project_name + ":in_network_vn2"
        self.vn2_name = "in_network_vn2"
        self.vn2_subnets = ['20.2.2.0/24']
        self.vm2_name = 'in_network_vm2'

        self.action_list = []
        self.if_list = [['management', False], ['left', True], ['right', True]]
        self.st_name = 'in_net_svc_template_1'
        si_prefix = 'in_net_svc_instance_'
        self.policy_name = 'policy_in_network'

        self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
        self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
        self.vm1_fixture = self.config_vm(vn_fix=self.vn1_fixture, vm_name=self.vm1_name)
        self.vm2_fixture = self.config_vm(vn_fix=self.vn2_fixture, vm_name=self.vm2_name)
        svc_chain_info = self.config_svc_chain(
            left_vn_fixture=self.vn1_fixture,
            right_vn_fixture=self.vn2_fixture,
            service_mode=svc_mode,
            max_inst=max_inst,
            left_vm_fixture=self.vm1_fixture,
            right_vm_fixture=self.vm2_fixture)
        self.st_fixture = svc_chain_info['st_fixture']
        self.si_fixture = svc_chain_info['si_fixture']
        self.policy_fixture = svc_chain_info['policy_fixture']

        # non-admin tenant config
        result = True
        msg = []
        self.topo_obj = sdn_topo_with_multi_project()
        self.setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, self.topo_obj))
        out = self.setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            self.topo_objs, self.config_topo, vm_fip_info = out['data']
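
config_vn and config_vm are assumed here to be thin wrappers over the same fixtures used at the top of this method; a sketch consistent with that usage (the 'ubuntu' image default is an assumption):

def config_vn(self, vn_name, vn_subnets):
    # Sketch: create and verify a VN, mirroring the direct VNFixture
    # calls earlier in setup_common_objects.
    vn_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections, inputs=self.inputs,
                  vn_name=vn_name, subnets=vn_subnets))
    assert vn_fixture.verify_on_setup()
    return vn_fixture

def config_vm(self, vn_fix, vm_name):
    # Sketch: launch a VM on the given VN, as done for the vn11/vn22 VMs.
    return self.useFixture(
        VMFixture(project_name=self.inputs.project_name,
                  connections=self.connections, vn_obj=vn_fix.obj,
                  vm_name=vm_name, image_name='ubuntu'))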
Example #8
 def repeated_policy_update_test_with_ping(self, topo):
     """ Pick 2 VM's for testing, test with ping; modify policy of one VN [in which VM is
     present] and verify the rule functionality with ping.
     """
     result = True
     msg = []
     #
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(
         sdnTopoSetupFixture(self.connections, topo))
     out = setup_obj.topo_setup()
     #out= setup_obj.topo_setup(vm_verify='yes', skip_cleanup='yes')
     self.logger.info("Setup completed with result %s" % (out['result']))
     self.assertEqual(out['result'], True, out['msg'])
     if out['result']:
         topo, config_topo = out['data']
     # 1. Define Traffic Params
     test_vm1 = topo.vmc_list[0]  # 'vmc0'
     test_vm2 = topo.vmc_list[1]  # 'vmc1'
     test_vm1_fixture = config_topo['vm'][test_vm1]
     test_vm2_fixture = config_topo['vm'][test_vm2]
     test_vn = topo.vn_of_vm[test_vm1]  # 'vnet0'
     test_vn_fix = config_topo['vn'][test_vn]
     test_vn_id = test_vn_fix.vn_id
     test_proto = 'icmp'
     for policy in topo.policy_test_order:
         # 2. set new policy for test_vn to policy
         test_policy_fq_names = []
         name = config_topo['policy'][policy].policy_fq_name
         test_policy_fq_names.append(name)
         state = "policy for " + test_vn + " updated to " + policy
         test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
         # wait for tables update before checking after making changes to
         # system
         time.sleep(5)
         self.logger.info("new policy list of vn %s is %s" %
                          (test_vn, policy))
         # update expected topology with this new info for verification
         updated_topo = policy_test_utils.update_topo(topo, test_vn, policy)
         self.logger.info("Starting Verifications after %s" % (state))
         policy_info = "policy in effect is : %s" % (topo.rules[policy])
         self.logger.info(policy_info)
         # 3. set expectation to verify..
         matching_rule_action = {}
         # Topology guide: There is only one policy assigned to test_vn and there is one rule affecting traffic proto.
         # For ping test, set expected result based on action - pass or deny
         # if action = 'pass', expectedResult= True, else Fail;
         num_rules = len(topo.rules[policy])
         for i in range(num_rules):
             proto = topo.rules[policy][i]['protocol']
             matching_rule_action[proto] = topo.rules[
                 policy][i]['simple_action']
         if num_rules == 0:
             matching_rule_action[test_proto] = 'deny'
         self.logger.info("matching_rule_action: %s" %
                          matching_rule_action)
         # 4. Test with ping
         expectedResult = (matching_rule_action[test_proto] == 'pass')
         ret = test_vm1_fixture.ping_with_certainty(
             test_vm2_fixture.vm_ip, expectation=expectedResult,
             dst_vm_fixture=test_vm2_fixture)
         result_msg = "vm ping test result after %s is: %s" % (state, ret)
         self.logger.info(result_msg)
         if not ret:
             result = False
             msg.extend([result_msg, policy_info])
             all_policy_verify(
                 self, config_topo, updated_topo, state, fixture_only='yes')
     self.assertEqual(result, True, msg)
     test_vn_fix.unbind_policies(test_vn_id)
     return result
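
One caveat in this last example: self.assertEqual fires before unbind_policies, so a failing run leaves the last policy bound to the VN. Registering the unbind as cleanup, as Example #7 does for its policy bindings, avoids that; a sketch:

# Sketch: register once, before the policy loop; calling unbind_policies
# with only the VN id detaches everything, matching the test's final call.
self.addCleanup(test_vn_fix.unbind_policies, test_vn_id)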