Example #1
 def test_qos_queueing_on_vmi_of_si(self):
     '''Test that qos queueing happens when a qos config is applied on the
        vmi interface of a service instance.
        Steps:
        1.Create a logical queue for each configured HW queue on the node
        2.Create forwarding classes mapped to those queues and a DSCP map
          covering each FC
        3.Apply the qos config on the right VMI of the service instance and
          validate that traffic for each DSCP value is queued on the
          expected HW queue on the SI compute node
     '''
     self.skip_tc_if_no_queue_config()
     # The function below returns the logical-to-HW queue mapping table
     queue_mapping = self.get_configured_queue_mapping(
                                             self.qos_node_ip)
     # Dynamically create the queues list, taking one logical queue from each mapping entry
     queues = []
     for logical_id in queue_mapping[1]:
         entry = {'queue_id': logical_id}
         queues.append(entry)
     queue_fixtures = self.setup_queues(queues)
     # Dynamically creating FC list based on number of logical qos queues present
     fcs, logical_ids = self.configure_fc_list_dynamically(queue_fixtures)
     fc_fixtures = self.setup_fcs(fcs)
     # Dynamically creating DSCP map based on FCs present
     dscp_map = self.configure_map_dynamically("dscp", fcs)
     qos_fixture = self.setup_qos_config(dscp_map=dscp_map,
                                         default_fc_id=0)
     # Getting the VMI of Service Instance
     cs_si = self.si_fixture.api_s_inspect.get_cs_si(
         project=self.inputs.project_name,
         si=self.si_fixture.si_name,
         refresh=True)
     vm_refs = cs_si['service-instance']['virtual_machine_back_refs']
     svm_ids = [vm_ref['to'][0] for vm_ref in vm_refs]
     cs_svm = self.si_fixture.api_s_inspect.get_cs_vm(
         vm_id=svm_ids[0], refresh=True)
     cs_svmis = cs_svm[
         'virtual-machine']['virtual_machine_interface_back_refs']
     for svmi in cs_svmis:
         if 'right' in svmi['to'][2]:
             right_svmi = svmi['uuid']
             break
     # Getting the SI node IP to check traffic flow on that node
     vm_obj = self.connections.orch.get_vm_by_id(svm_ids[0])
     si_vm_node = self.connections.orch.get_host_of_vm(vm_obj)
     si_vm_node_ip = self.inputs.get_host_ip(si_vm_node)
     si_source_compute_fixture = self.useFixture(ComputeNodeFixture(
                                                 self.connections,
                                                 si_vm_node_ip))
     # Applying qos-config on right VMI of service instance
     self.setup_qos_config_on_vmi(qos_fixture, right_svmi)
     for dscp, fc_id in dscp_map.iteritems():
         hw_queue = self.get_hw_queue_from_fc_id(fc_id, fcs, logical_ids)
         validate_method_args = {
                 'src_vm_fixture': self.vn1_vm1_fixture,
                 'dest_vm_fixture': self.vn2_vm1_fixture,
                 'dscp': dscp,
                 'src_compute_fixture': si_source_compute_fixture,
                 'queue_id': hw_queue,
                 'interval': 0.001,
                 'min_expected_pkts': 5000,
                 'traffic_duration': 5}
         assert self.validate_packet_qos_marking(**validate_method_args)
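The helpers used above (setup_queues, configure_fc_list_dynamically, configure_map_dynamically) are test-suite utilities whose bodies are not shown. A minimal sketch of what the "dscp" branch of configure_map_dynamically could look like, assuming each FC dict carries an 'fc_id' key and one DSCP code point is mapped per forwarding class; the one-to-one mapping is an assumption, not the suite's actual implementation:

# Hypothetical sketch: build a DSCP-to-FC map, one code point per FC.
def configure_map_dynamically_sketch(map_type, fcs):
    assert map_type == "dscp"
    dscp_map = {}
    for dscp_value, fc in enumerate(fcs):
        dscp_map[dscp_value] = fc['fc_id']
    return dscp_map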
Example #2
 def test_qos_remark_dscp_on_vmi_of_si(self):
     '''Test that qos marking happens when a qos config is applied on the
        vmi interface of a service instance.
        Steps:
        1.Create a forwarding class (with a free FC ID) to mark dscp as 62
        2.Create a qos config remarking dscp 0-9 traffic to dscp 62.
        3.Validate that packets on the fabric from the service instance VMI
          to the destination node have DSCP marked to 62
     '''
     fc_ids = self.fc_id_obj.get_free_fc_ids(1)
     fcs = [{
         'name': "FC_Test",
         'fc_id': fc_ids[0],
         'dscp': 62,
         'dot1p': 7,
         'exp': 3
     }]
     fc_fixtures = self.setup_fcs(fcs)
     dscp_map = {
         0: fc_ids[0],
         1: fc_ids[0],
         2: fc_ids[0],
         3: fc_ids[0],
         4: fc_ids[0],
         5: fc_ids[0],
         6: fc_ids[0],
         7: fc_ids[0],
         8: fc_ids[0],
         9: fc_ids[0]
     }
     qos_fixture = self.setup_qos_config(dscp_map=dscp_map)
     # Getting the VMI of Service Instance
     cs_si = self.si_fixture.api_s_inspect.get_cs_si(
         project=self.inputs.project_name,
         si=self.si_fixture.si_name,
         refresh=True)
     vm_refs = cs_si['service-instance']['virtual_machine_back_refs']
     svm_ids = [vm_ref['to'][0] for vm_ref in vm_refs]
     cs_svm = self.si_fixture.api_s_inspect.get_cs_vm(vm_id=svm_ids[0],
                                                      refresh=True)
     cs_svmis = cs_svm['virtual-machine'][
         'virtual_machine_interface_back_refs']
     for svmi in cs_svmis:
         if 'right' in svmi['to'][2]:
             right_svmi = svmi['uuid']
             break
     # Getting the SI node IP to check traffic flow on that node
     vm_obj = self.connections.orch.get_vm_by_id(svm_ids[0])
     si_vm_node = self.connections.orch.get_host_of_vm(vm_obj)
     si_vm_node_ip = self.inputs.get_host_ip(si_vm_node)
     si_source_compute_fixture = self.useFixture(
         ComputeNodeFixture(self.connections, si_vm_node_ip))
     # Applying qos-config on right VMI of service instance
     self.setup_qos_config_on_vmi(qos_fixture, right_svmi)
     si_right_vrf_id = self.agent_inspect[si_vm_node_ip].get_vna_vrf_objs(
         project=self.project.project_name,
         vn_name=self.vn2_fixture.vn_name)['vrf_list'][0]['ucindex']
     assert self.validate_packet_qos_marking(
         src_vm_fixture=self.vn1_vm1_fixture,
         dest_vm_fixture=self.vn2_vm1_fixture,
         dscp=sorted(dscp_map.keys())[9],
         expected_dscp=fcs[0]['dscp'],
         expected_exp=fcs[0]['exp'],
         expected_dot1p=fcs[0]['dot1p'],
         src_compute_fixture=si_source_compute_fixture,
         vrf_id=si_right_vrf_id)
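The ten-entry dscp_map literal above maps DSCP values 0 through 9 to the same forwarding class; an equivalent, more compact construction is a dict comprehension:

dscp_map = {dscp: fc_ids[0] for dscp in range(10)}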
Example #3
    def test_flow_multi_projects(self):
        """Tests related to flow setup rate and flow table stability accross
           various triggers for verification accross VN's and accross multiple
           projects.
        """
        result = True
        self.comp_node_fixt = {}
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
                self.connections, cmp_node))

        # 1. Start Traffic
        num_flows = 15000
        flow_gen_rate = 1000
        proto = 'udp'
        profile = 'TrafficProfile1'
        details = self.topo[self.topo.keys()[0]].traffic_profile[profile]
        self.traffic_setup(profile, details, num_flows, flow_gen_rate, proto)
        self.traffic_obj = self.useFixture(
            traffic_tests.trafficTestFixture(self.connections))

        # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
        # stream_proto= 'udp', start_sport= 8000,
        # total_single_instance_streams= 20):
        startStatus = self.traffic_obj.startTraffic(
            total_single_instance_streams=num_flows,
            pps=self.flow_gen_rate,
            start_sport=1000,
            cfg_profile='ContinuousSportRange',
            tx_vm_fixture=self.src_vm_fixture,
            rx_vm_fixture=self.dst_vm_fixture,
            stream_proto=self.proto)

        msg1 = "Status of start traffic : %s, %s, %s" % (
            self.proto, self.src_vm_fixture.vm_ip, startStatus['status'])
        self.logger.info(msg1)
        assert startStatus['status'], msg1
        # 2. Poll live traffic & verify VM flow count
        flow_test_utils.verify_node_flow_setup(self)
        # 3. Stop Traffic
        self.logger.info("Proceed to stop traffic..")
        self.traffic_obj.stopTraffic(wait_for_stop=False)
        start_time = time.time()
        # 4. Verify flow ageing
        self.logger.info(
            "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing")
        sleep(self.flow_cache_timeout)
        while True:
            begin_flow_count = self.comp_node_fixt[
                self.cmp_node].get_vrouter_matching_flow_count(
                self.flow_data)
            self.logger.debug('begin_flow_count: %s' % (begin_flow_count))
            if begin_flow_count['all'] == 0:
                break
            flow_teardown_time = math.ceil(
                flow_test_utils.get_max_flow_removal_time(
                    begin_flow_count['all'],
                    self.flow_cache_timeout))
            # flow_teardown_time is an upper bound, not the actual removal
            # time; it is derived from the flow count at this instant, and
            # the actual time varies with the agent's periodic poll.
            self.logger.info('Sleeping for %s secs' % (flow_teardown_time))
            sleep(flow_teardown_time)
            # At the end of the wait, the remaining flows should be below
            # 50% of the count seen before teardown started.
            current_flow_count = self.comp_node_fixt[
                self.cmp_node].get_vrouter_matching_flow_count(
                self.flow_data)
            self.logger.debug('current_flow_count: %s' % (current_flow_count))
            if current_flow_count['all'] > (0.5 * begin_flow_count['all']):
                msg = [
                    'Flow removal not happening as expected in node %s' %
                    self.cmp_node]
                msg.append(
                    'Flow count before wait: %s, after wait of %s secs, it is: %s' %
                    (begin_flow_count['all'],
                     flow_teardown_time,
                     current_flow_count['all']))
                assert False, msg
            if current_flow_count['all'] < (0.1 * begin_flow_count['all']):
                break
        # end of while loop
        elapsed_time = time.time() - start_time
        self.logger.info(
            "Flows aged out as expected in configured flow_cache_timeout")
        return True
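The ageing wait above reappears verbatim in test_flow_multi_projects later in this section; a sketch of the same logic factored into a helper (the function name is an assumption, while the thresholds and calls mirror the loop above; flow_test_utils is the suite module already imported by these tests):

import math
from time import sleep

def wait_for_flow_ageing(compute_fixture, flow_data, flow_cache_timeout):
    # Return True once flows have aged out, False if removal stalls.
    while True:
        begin = compute_fixture.get_vrouter_matching_flow_count(flow_data)['all']
        if begin == 0:
            return True
        teardown = math.ceil(flow_test_utils.get_max_flow_removal_time(
            begin, flow_cache_timeout))
        sleep(teardown)
        current = compute_fixture.get_vrouter_matching_flow_count(flow_data)['all']
        if current > 0.5 * begin:
            return False  # less than half the flows removed within the bound
        if current < 0.1 * begin:
            return True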
Example #4
    def test_max_vm_flows(self):
        ''' Test to validate that setting the max_vm_flows parameter in the
            agent config file has the expected effect on flows in the system.
            1. Set VM flow cache time and max_vm_flows to 0.01% of max system
               flows (512K).
            2. Create 2 VNs and connect them using a policy.
            3. Launch 2 VMs in the respective VNs.
            4. Start traffic with around 20000 flows.
            5. Restart the vrouter agent service and check that the flows are
               limited to 0.01% of max system flows.
        Pass criteria: Step 5 should pass
        '''
        result = True

        # Set VM flow cache time to 10 and max_vm_flows to 0.01% of max
        # system flows (512K).
        self.comp_node_fixt = {}
        self.flow_cache_timeout = 10
        self.max_system_flows = 0
        self.max_vm_flows = 0.01
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(
                ComputeNodeFixture(self.connections, cmp_node))
            self.comp_node_fixt[cmp_node].set_flow_aging_time(
                self.flow_cache_timeout)
            self.comp_node_fixt[cmp_node].get_config_per_vm_flow_limit()
            self.comp_node_fixt[cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()
            if self.max_system_flows < self.comp_node_fixt[
                    cmp_node].max_system_flows:
                self.max_system_flows = self.comp_node_fixt[
                    cmp_node].max_system_flows
        self.addCleanup(self.cleanup_test_max_vm_flows_vrouter_config,
                        self.inputs.compute_ips, self.comp_node_fixt)

        # Define resources for this test.
        vn1_name = get_random_name('VN1')
        vn1_subnets = ['10.1.1.0/24']
        vn2_name = get_random_name('VN2')
        vn2_subnets = ['10.2.1.0/24']
        vn1_vm1_name = get_random_name('VM1')
        vn2_vm2_name = get_random_name('VM2')
        policy1_name = 'policy1'
        policy2_name = 'policy2'
        rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn1_name,
                'dest_network': vn2_name,
            },
        ]
        rev_rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn2_name,
                'dest_network': vn1_name,
            },
        ]

        # Create 2 VN's and connect them using a policy.
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
        assert vn2_fixture.verify_on_setup()

        policy1_fixture = self.useFixture(
            PolicyFixture(policy_name=policy1_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))
        policy2_fixture = self.useFixture(
            PolicyFixture(policy_name=policy2_name,
                          rules_list=rev_rules,
                          inputs=self.inputs,
                          connections=self.connections))

        vn1_fixture.bind_policies([policy1_fixture.policy_fq_name],
                                  vn1_fixture.vn_id)
        self.addCleanup(vn1_fixture.unbind_policies, vn1_fixture.vn_id,
                        [policy1_fixture.policy_fq_name])
        vn2_fixture.bind_policies([policy2_fixture.policy_fq_name],
                                  vn2_fixture.vn_id)
        self.addCleanup(vn2_fixture.unbind_policies, vn2_fixture.vn_id,
                        [policy2_fixture.policy_fq_name])

        # Launch 2 VM's in the respective VN's.
        vm1_fixture = self.create_vm(vn1_fixture,
                                     vm_name=vn1_vm1_name,
                                     flavor='contrail_flavor_small',
                                     image_name='ubuntu-traffic')
        vm2_fixture = self.create_vm(vn2_fixture,
                                     vm_name=vn2_vm2_name,
                                     flavor='contrail_flavor_small',
                                     image_name='ubuntu-traffic')
        assert vm1_fixture.verify_on_setup(), 'VM1 verifications FAILED'
        assert vm2_fixture.verify_on_setup(), 'VM2 verifications FAILED'
        assert vm1_fixture.wait_till_vm_is_up(), 'VM1 does not seem to be up'
        assert vm2_fixture.wait_till_vm_is_up(), 'VM2 does not seem to be up'
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip), \
            'Ping from VM1 to VM2 FAILED'

        # Set num_flows to a fixed, small value just above the per-VM
        # flow limit.
        max_system_flows = self.max_system_flows
        vm_flow_limit = int((self.max_vm_flows / 100.0) * max_system_flows)
        num_flows = vm_flow_limit + 30
        generated_flows = 2 * num_flows
        flow_gen_rate = 5
        proto = 'udp'

        # Start Traffic.
        self.traffic_obj = self.useFixture(
            traffic_tests.trafficTestFixture(self.connections))
        startStatus = self.traffic_obj.startTraffic(
            total_single_instance_streams=int(num_flows),
            pps=flow_gen_rate,
            start_sport=5000,
            cfg_profile='ContinuousSportRange',
            tx_vm_fixture=vm1_fixture,
            rx_vm_fixture=vm2_fixture,
            stream_proto=proto)
        msg1 = "Status of start traffic : %s, %s, %s" % (
            proto, vm1_fixture.vm_ip, startStatus['status'])
        self.logger.info(msg1)
        assert startStatus['status'], msg1
        self.logger.info("Wait for 3 sec for flows to be setup.")
        sleep(3)

        # 4. Poll live traffic & verify VM flow count
        flow_cmd = 'flow -l | grep %s -A2 |' % vm1_fixture.vm_ip
        flow_cmd = flow_cmd + ' grep "Action" | grep -v "Action:D(FlowLim)" | wc -l'
        sample_time = 2
        vm_flow_list = []
        for i in range(5):
            sleep(sample_time)
            vm_flow_record = self.inputs.run_cmd_on_server(
                vm1_fixture.vm_node_ip, flow_cmd,
                self.inputs.host_data[vm1_fixture.vm_node_ip]['username'],
                self.inputs.host_data[vm1_fixture.vm_node_ip]['password'])
            vm_flow_record = vm_flow_record.strip()
            vm_flow_list.append(int(vm_flow_record))
            self.logger.info("%s iteration DONE." % i)
            self.logger.info("VM flow count = %s." % vm_flow_list[i])
            self.logger.info("Sleeping for %s sec before next iteration." %
                             sample_time)

        vm_flow_list.sort(reverse=True)
        if vm_flow_list[0] > int(1.1 * vm_flow_limit):
            self.logger.error("TEST FAILED.")
            self.logger.error("VM flow count seen is greater than configured.")
            result = False
        elif vm_flow_list[0] < int(0.9 * vm_flow_limit):
            self.logger.error("TEST FAILED.")
            self.logger.error("VM flow count seen is much lower than config.")
            self.logger.error(
                "Something is stopping flow creation. Please debug")
            result = False
        else:
            self.logger.info("TEST PASSED")
            self.logger.info("Expected range of vm flows seen.")
            self.logger.info("Max VM flows = %s" % vm_flow_list[0])

        # Stop Traffic.
        self.logger.info("Proceed to stop traffic..")
        try:
            self.traffic_obj.stopTraffic(wait_for_stop=False)
        except Exception:
            self.logger.warn("Failed to get a VM handle and stop traffic.")

        self.logger.info("Wait for the flows to get purged.")
        sleep(self.flow_cache_timeout)

        return result
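With max_vm_flows set to 0.01 (interpreted as a percentage) and the 512K system flow table, the per-VM limit exercised above works out to roughly 52 flows, and the traffic is sized to overshoot it deliberately:

max_system_flows = 512 * 1024                                   # 524288 entries
max_vm_flows = 0.01                                             # percent of the table
vm_flow_limit = int((max_vm_flows / 100.0) * max_system_flows)  # 52
num_flows = vm_flow_limit + 30                                  # 82 streams, above the cap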
Example #5
    def test_max_vm_flows(self):
        ''' Test to validate that setting the max_vm_flows parameter in the
            agent config file has the expected effect on flows in the system.
            1. Set VM flow cache time and max_vm_flows to 0.1% of max system
               flows (512K), i.e. about 500 flows.
            2. Create 2 VNs and connect them using a policy.
            3. Launch 2 VMs in the respective VNs.
            4. Start traffic with connections exceeding the VM flow limit
            5. Check that the flows are limited to about 500 flows
        '''
        result = True

        # Set VM flow cache time to 20 and max_vm_flows to 0.1% of max system
        # flows(512K).
        comp_node_fixt = {}
        flow_cache_timeout = 20
        max_system_flows = 0
        max_vm_flows = 0.1
        compute_ips = [self.inputs.compute_ips[0], self.inputs.compute_ips[0]]
        if len(self.inputs.compute_ips) > 1:
            compute_ips[1] = self.inputs.compute_ips[1]

        for cmp_node in compute_ips:
            comp_node_fixt[cmp_node] = self.useFixture(
                ComputeNodeFixture(self.connections, cmp_node))
            comp_node_fixt[cmp_node].set_flow_aging_time(flow_cache_timeout)
            comp_node_fixt[cmp_node].get_config_per_vm_flow_limit()
            comp_node_fixt[cmp_node].set_per_vm_flow_limit(max_vm_flows)
            comp_node_fixt[cmp_node].sup_vrouter_process_restart()
            if max_system_flows < comp_node_fixt[cmp_node].max_system_flows:
                max_system_flows = comp_node_fixt[cmp_node].max_system_flows
        self.addCleanup(self.cleanup_test_max_vm_flows_vrouter_config,
                        compute_ips, comp_node_fixt)

        # Define resources for this test.
        vn1_name = get_random_name('VN1')
        vn1_subnets = ['10.1.1.0/24']
        vn2_name = get_random_name('VN2')
        vn2_subnets = ['10.2.1.0/24']
        vn1_vm1_name = get_random_name('VM1')
        vn2_vm2_name = get_random_name('VM2')
        policy1_name = 'policy1'
        policy2_name = 'policy2'
        rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn1_name,
                'dest_network': vn2_name,
            },
        ]
        rev_rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn2_name,
                'dest_network': vn1_name,
            },
        ]

        # Create 2 VN's and connect them using a policy.
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
        assert vn2_fixture.verify_on_setup()

        policy1_fixture = self.useFixture(
            PolicyFixture(policy_name=policy1_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))
        policy2_fixture = self.useFixture(
            PolicyFixture(policy_name=policy2_name,
                          rules_list=rev_rules,
                          inputs=self.inputs,
                          connections=self.connections))

        vn1_fixture.bind_policies([policy1_fixture.policy_fq_name],
                                  vn1_fixture.vn_id)
        self.addCleanup(vn1_fixture.unbind_policies, vn1_fixture.vn_id,
                        [policy1_fixture.policy_fq_name])
        vn2_fixture.bind_policies([policy2_fixture.policy_fq_name],
                                  vn2_fixture.vn_id)
        self.addCleanup(vn2_fixture.unbind_policies, vn2_fixture.vn_id,
                        [policy2_fixture.policy_fq_name])

        # Launch 2 VM's in the respective VN's.
        vm1_fixture = self.create_vm(vn1_fixture,
                                     vm_name=vn1_vm1_name,
                                     flavor='contrail_flavor_small',
                                     image_name='ubuntu-traffic',
                                     node_name=self.inputs.compute_names[0])
        vm2_fixture = self.create_vm(vn2_fixture,
                                     vm_name=vn2_vm2_name,
                                     flavor='contrail_flavor_small',
                                     image_name='ubuntu-traffic',
                                     node_name=self.inputs.compute_names[1])
        assert vm1_fixture.wait_till_vm_is_up(), 'VM1 does not seem to be up'
        assert vm2_fixture.wait_till_vm_is_up(), 'VM2 does not seem to be up'
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip, count=1), \
            'Ping from VM1 to VM2 FAILED'

        # Set num_flows to a fixed, small value just above the per-VM
        # flow limit.
        vm_flow_limit = int((max_vm_flows / 100.0) * max_system_flows)
        num_flows = vm_flow_limit + 30
        interval = 'u10000'
        proto = 'udp'
        # Try UDP echo
        dest_port = 7

        hping_h = Hping3(vm1_fixture,
                         vm2_fixture.vm_ip,
                         destport=dest_port,
                         count=num_flows,
                         interval=interval,
                         udp=True)
        self.sleep(flow_cache_timeout * 2)
        # No need to stop hping
        hping_h.start(wait=False)
        self.sleep(5)

        computes = [
            comp_node_fixt[vm1_fixture.vm_node_ip],
            comp_node_fixt[vm2_fixture.vm_node_ip]
        ]
        for compute in computes:
            (fwd_flow_cnt, rev_flow_cnt) = compute.get_flow_count(
                source_ip=vm1_fixture.vm_ip,
                dest_ip=vm2_fixture.vm_ip,
                dest_port=dest_port,
                proto=proto)
            current_flow_cnt = fwd_flow_cnt + rev_flow_cnt
            msg = 'VM flow count : Expected:%s, Seen: %s' % (vm_flow_limit,
                                                             current_flow_cnt)
            assert is_almost_same(current_flow_cnt, vm_flow_limit, 25), msg
            self.logger.info('On compute %s, %s..OK' % (compute.ip, msg))
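is_almost_same is imported from the test utilities; a plausible sketch of its check, assuming the third argument is a tolerance in percent (the real helper may differ):

def is_almost_same(value, expected, tolerance_pct=10):
    # True when value is within tolerance_pct percent of expected.
    if expected == 0:
        return value == 0
    return abs(value - expected) <= (tolerance_pct / 100.0) * expected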
Example #6
 def validate_packet_qos_marking(self,
                                 src_vm_fixture,
                                 dest_vm_fixture,
                                 traffic_generator="hping",
                                 dest_ip=None,
                                 count=30000,
                                 dscp=None,
                                 dot1p=None,
                                 exp=None,
                                 protocol='udp',
                                 src_port=None,
                                 dest_port=None,
                                 src_compute_fixture=None,
                                 expected_dscp=None,
                                 expected_dot1p=None,
                                 expected_exp=None,
                                 encap=None,
                                 vrf_id=None,
                                 af="ipv4",
                                 **kwargs):
     '''
         dest_compute_fixture should be supplied if underlay traffic is
         being checked.
         dest_vm_fixture should be supplied if traffic is being checked for
         a specific destination VM.

         A few things to note:
         1. traffic_generator can be "scapy" or "hping"
         2. "scapy" is specifically used here to test L2 and IPv6 traffic
            only. For all other traffic, hping is used.
     '''
     interval = kwargs.get('interval', 1)
     src_mac = kwargs.get('src_mac', "11:22:33:44:55:66")
     dst_mac = kwargs.get('dst_mac', "ff:ff:ff:ff:ff:ff")
     ipv6_src = kwargs.get('ipv6_src', None)
     ipv6_dst = kwargs.get('ipv6_dst', None)
     src_vm_cidr = src_vm_fixture.vn_objs[0]['network']\
                     ['contrail:subnet_ipam'][0]['subnet_cidr']
     dest_vm_cidr = dest_vm_fixture.vn_objs[0]['network']\
                     ['contrail:subnet_ipam'][0]['subnet_cidr']
     if IPNetwork(src_vm_cidr) == IPNetwork(dest_vm_cidr):
         traffic_between_diff_networks = False
     else:
         traffic_between_diff_networks = True
     #src_vm_interface = kwargs.get('src_vm_interface', "eth0")
     # TCP is anyway the default for hping3
     icmp = False
     tcp = False
     udp = False
     if protocol == 'icmp': icmp = True
     if protocol == 'udp': udp = True
     if isinstance(dscp, int):
         tos = format(dscp << 2, 'x')
     else:
         tos = None
     if not src_compute_fixture and src_vm_fixture:
         src_compute_fixture = self.useFixture(
             ComputeNodeFixture(self.connections,
                                src_vm_fixture.vm_node_ip))
     username = self.inputs.host_data[src_compute_fixture.ip]['username']
     password = self.inputs.host_data[src_compute_fixture.ip]['password']
     interface = src_compute_fixture.agent_physical_interface
     src_ip = src_vm_fixture.vm_ip
     dest_ip = dest_ip or dest_vm_fixture.vm_ip
     if traffic_generator == "scapy":
         self.logger.debug("Generating L2 only stream and ignoring all"
                           " other parameters of layers above L2")
         dot1p = dot1p or 0
         ether = {'src': src_mac, 'dst': dst_mac}
         dot1q = {'prio': dot1p, 'vlan': 100}
         ipv6 = {}
         udp_header = {}
         if af == "ipv6":
             tos = int(tos, 16) if dscp else 0
             ipv6 = {'tc': tos, 'src': ipv6_src, 'dst': ipv6_dst}
             # Workaround for Bug 1614472: an inner protocol header inside IPv6 is a must
             udp_header = {'sport': 1234}
         offset = 156 if ipv6 else 100
         traffic_obj, scapy_obj = self._generate_scapy_traffic(
             src_vm_fixture,
             src_compute_fixture,
             interface,
             encap=encap,
             interval=interval,
             count=count,
             ether=ether,
             dot1q=dot1q,
             ipv6=ipv6,
             udp=udp_header)
         session, pcap = traffic_obj.packet_capture_start(
             capture_on_payload=True,
             signature_string='5a5a5a5a5a5a5a5a',
             offset=offset,
             bytes_to_match=8,
             min_length=100,
             max_length=250)
     elif traffic_generator == "hping":
         traffic_obj, hping_obj = self._generate_hping_traffic(
             src_vm_fixture,
             src_compute_fixture,
             interface,
             dest_ip=dest_ip,
             src_port=src_port,
             dest_port=dest_port,
             encap=encap,
             interval=interval,
             count=count,
             proto=protocol,
             vrf_id=vrf_id,
             udp=udp,
             tos=tos)
         session, pcap = traffic_obj.packet_capture_start(
             traffic_between_diff_networks=traffic_between_diff_networks)
     sleep(5)
     traffic_obj.packet_capture_stop()
     if traffic_generator == "scapy":
         scapy_obj.stop()
     elif traffic_generator == "hping":
         (stats, hping_log) = hping_obj.stop()
     if isinstance(expected_dscp, int):
         result = traffic_obj.verify_packets('dscp',
                                             pcap_path_with_file_name=pcap,
                                             expected_count=1,
                                             dscp=expected_dscp)
         assert result, 'DSCP remarking checks failed. Please check logs'
     if isinstance(expected_dot1p, int):
         result = traffic_obj.verify_packets('dot1p',
                                             pcap_path_with_file_name=pcap,
                                             expected_count=1,
                                             dot1p=expected_dot1p)
         assert result, '802.1p remarking checks failed. Please check logs'
     if isinstance(expected_exp, int):
         result = traffic_obj.verify_packets('exp',
                                             pcap_path_with_file_name=pcap,
                                             expected_count=1,
                                             mpls_exp=expected_exp)
         assert result, 'MPLS exp remarking checks failed. Please check logs'
     self.inputs.run_cmd_on_server(
         src_compute_fixture.ip,
         "rm %s" % pcap,
     )
     return True
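The tos value computed above packs the DSCP code point into the upper six bits of the IP ToS byte, which is what the dscp << 2 shift does before the hex string is handed to the hping helper; a worked example for the DSCP 62 used throughout these tests:

dscp = 62
tos = format(dscp << 2, 'x')   # 62 << 2 == 248, so tos == 'f8'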
    def test_agent_flow_settings(self):
        """Basic systest with single project with many features & traffic..
        """
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to
        # run this test; else report that a minimum of 2 compute nodes is
        # needed and exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        # import mini_flow_test_topo
        # topology_class_name = mini_flow_test_topo.systest_topo_single_project
        topology_class_name = flow_test_topo.systest_topo_single_project
        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))

        topo = topology_class_name(compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            config_topo = out['data'][1]
        self.proj = list(config_topo.keys())[0]
        self.topo, self.config_topo = topo, config_topo

        # 2. set agent flow_cache_timeout to 60s
        # set max_vm_flows to 1% of 500k, comes to 5000
        self.comp_node_fixt = {}
        self.flow_cache_timeout = 60
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(
                ComputeNodeFixture(self.connections, cmp_node))
            self.comp_node_fixt[cmp_node].set_flow_aging_time(
                self.flow_cache_timeout)
            self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()

        # 3. Start Traffic
        for profile, details in self.topo.traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %
                             (profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = self.proj
            self.dst_proj = self.proj
            # Set num_flows to fixed, smaller value but > 1% of
            # system max flows
            num_flows = 5555
            self.generated_flows = 2 * num_flows
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            # 3a. Set max_vm_flows to 1% in TX VM node
            self.max_vm_flows = 1
            self.comp_node_fixt[self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
            flow_test_utils.update_vm_mdata_ip(self.cmp_node, self)
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 4. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 5. Increase max_vm_flows to 50% in TX VM node
            self.max_vm_flows = 50
            self.comp_node_fixt[self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
            # 6. Poll live traffic
            self.verify_node_flow_setup()
            # 7. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 8. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing"
            )
            sleep(self.flow_cache_timeout)
            retries = 0
            retry_wait_time = 10
            flow_teardown_time = math.ceil(
                flow_test_utils.get_max_flow_removal_time(
                    self.generated_flows, self.flow_cache_timeout))
            self.logger.debug("flow tear down time based on calcualtion: %s" %
                              flow_teardown_time)
            max_retries = math.ceil(
                self.flow_cache_timeout / float(retry_wait_time))
            while retries < max_retries:
                actual_flows = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                        self.flow_data)
                actual_flows = int(actual_flows['all'])
                if actual_flows > 10:
                    self.logger.info("Waiting for flows to age out")
                    sleep(retry_wait_time)
                    retries += 1
                else:
                    break
            elapsed_time = time.time() - start_time
            if actual_flows > 50:
                msg = "Expected flows to age-out as configured, Seeing flows still active after elapsed time %s in node: %s, actual_flows: %s" % (
                    elapsed_time, self.cmp_node, actual_flows)
                assert False, msg
            else:
                self.logger.info(
                    "Flows aged out as expected in configured flow_cache_timeout"
                )
                self.logger.info(
                    "elapsed_time after stopping traffic is %s, flow_count is %s"
                    % (elapsed_time, actual_flows))
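With flow_cache_timeout at 60s and retry_wait_time at 10s, the retry loop above polls at most six more times after the initial sleep. Note that under Python 2 an all-integer division inside math.ceil truncates before the ceiling is taken, so one operand is made a float to keep the bound correct for timeouts that are not multiples of the wait time:

import math
flow_cache_timeout = 60
retry_wait_time = 10
max_retries = math.ceil(flow_cache_timeout / float(retry_wait_time))  # 6.0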
    def test_flow_multi_projects(self):
        """Tests related to flow setup rate and flow table stability accross various triggers for verification
           accross VN's and accross multiple projects"""
        result = True
        self.comp_node_fixt = {}
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(
                ComputeNodeFixture(self.connections, cmp_node))
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to
        # run this test; else report that a minimum of 2 compute nodes is
        # needed and exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        topology_class_name = sdn_flow_test_topo_multiple_projects.multi_project_topo

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IP's and pass it to topo if you want to pin
        # a vm to a particular node
        topo = topology_class_name(compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        self.topo, self.config_topo = out['data'][0], out['data'][1]
        self.proj = list(self.topo.keys())[0]
        # 2. Start Traffic
        for profile, details in self.topo[self.proj].traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %
                             (profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = details['src_proj']
            self.dst_proj = details['dst_proj']
            # Not a flow scaling test, so limit num_flows to a modest number.
            num_flows = 15000
            self.generated_flows = 2 * num_flows
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.src_proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.src_proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.dst_proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            self.comp_node_fixt[self.cmp_node].get_config_per_vm_flow_limit()
            self.comp_node_fixt[self.cmp_node].get_config_flow_aging_time()
            self.max_vm_flows = self.comp_node_fixt[self.cmp_node].max_vm_flows
            self.flow_cache_timeout = self.comp_node_fixt[
                self.cmp_node].flow_cache_timeout
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 3. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 4. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 5. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing"
            )
            sleep(self.flow_cache_timeout)
            while True:
                begin_flow_count = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                        self.flow_data)
                self.logger.debug('begin_flow_count: %s' % (begin_flow_count))
                if begin_flow_count['all'] == 0:
                    break
                flow_teardown_time = math.ceil(
                    flow_test_utils.get_max_flow_removal_time(
                        begin_flow_count['all'], self.flow_cache_timeout))
                # flow_teardown_time is an upper bound, not the actual removal
                # time; it is derived from the flow count at this instant, and
                # the actual time varies with the agent's periodic poll.
                self.logger.info('Sleeping for %s secs' % (flow_teardown_time))
                sleep(flow_teardown_time)
                # At the end of the wait, the remaining flows should be below
                # 50% of the count seen before teardown started.
                current_flow_count = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                        self.flow_data)
                self.logger.debug('current_flow_count: %s' %
                                  (current_flow_count))
                if current_flow_count['all'] > (0.5 * begin_flow_count['all']):
                    msg = [
                        'Flow removal not happening as expected in node %s' %
                        self.cmp_node
                    ]
                    msg.append(
                        'Flow count before wait: %s, after wait of %s secs, it is: %s'
                        % (begin_flow_count['all'], flow_teardown_time,
                           current_flow_count['all']))
                    assert False, msg
                if current_flow_count['all'] < (0.1 * begin_flow_count['all']):
                    break
            # end of while loop
            elapsed_time = time.time() - start_time
            self.logger.info(
                "Flows aged out as expected in configured flow_cache_timeout")
        # end of profile for loop
        return True
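flow_test_utils.get_max_flow_removal_time is a suite helper whose formula is not shown here; the ageing loops only need it to be an upper bound. A toy stand-in, under the assumption that the agent waits out one full aging interval and then deletes entries at a bounded rate (both the signature and the rate are assumptions):

import math

def get_max_flow_removal_time(flow_count, flow_cache_timeout,
                              deletes_per_sec=4096):
    # Hypothetical upper bound: one aging interval plus the time to delete
    # flow_count entries at a bounded deletion rate.
    return flow_cache_timeout + math.ceil(flow_count / float(deletes_per_sec))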
    def generate_udp_flows_and_do_verification(self, traffic_profile,
                                               build_version):
        """ Routine to generate UDP flows by calling the start_traffic routine in a thread and do parallel verification of
            flow setup rate.
            @inputs :
            traffic_profile - a list of traffic generation parameters as explained in test_flow_single_project and test_flow_multi_project routines.
            build_version - os_version, release_version and build_version for logging purposes.
        """
        for cmp_node in self.inputs.compute_ips:
            comp_node_fixt = self.useFixture(
                ComputeNodeFixture(self.connections, cmp_node))
            flows_now = comp_node_fixt.get_vrouter_flow_count()
            for action, count in flows_now.iteritems():
                # Any flows set by previous traffic tests should have retired
                # by now..
                if int(count) > 1000:
                    self.logger.error(
                        "unexpected flow count of %s with action as %s" %
                        (count, action))
                    return False

        Shost = socket.gethostbyaddr(traffic_profile[0].vm_node_ip)
        Dhost = socket.gethostbyaddr(traffic_profile[7].vm_node_ip)
        self.logger.info(
            "Src_VM = %s, Src_IP_Range = %s to %s, Dest_VM = %s, Dest_IP = %s, Src_VN = %s, Dest_VN = %s,"
            " Port_Range = %s to %s, Src_Node = %s, Dst_Node = %s." %
            (traffic_profile[0].vm_name, traffic_profile[1],
             traffic_profile[2], traffic_profile[7].vm_name,
             traffic_profile[3], traffic_profile[0].vn_name,
             traffic_profile[7].vn_name, traffic_profile[4],
             traffic_profile[5], Shost[0], Dhost[0]))

        th = threading.Thread(target=self.start_traffic,
                              args=(traffic_profile[0], traffic_profile[1],
                                    traffic_profile[2], traffic_profile[3],
                                    traffic_profile[4], traffic_profile[5],
                                    traffic_profile[6]))
        th.start()

        #
        # Flow setup rate calculation.
        NoOfFlows = []
        FlowRatePerInterval = []
        AverageFlowSetupRate = 0
        default_setup_rate = 7000  # A default value of 7K flows per second.
        src_vm_obj = traffic_profile[0]
        dst_vm_obj = traffic_profile[7]

        #
        # Decide whether the test is for a NAT flow or a policy flow.
        PolNatSI = 'NONE'
        srcFIP = src_vm_obj.chk_vmi_for_fip(src_vm_obj.vn_fq_name)
        dstFIP = dst_vm_obj.chk_vmi_for_fip(dst_vm_obj.vn_fq_name)
        if srcFIP is None:
            if dstFIP is None:
                PolNatSI = 'Policy Flow'
        else:
            PolNatSI = 'NAT Flow'

        #
        # Get or calculate the sleep_interval/wait time before sampling the
        # number of flows in the vrouter, based on a file that maps each
        # release to an expected flow setup rate. The mapping holds separate
        # thresholds for policy flows and NAT flows.
        RelVer = build_version.split('-')[1]
        import ReleaseToFlowSetupRateMapping
        #from ReleaseToFlowSetupRateMapping import *
        try:
            DefinedSetupRate = ReleaseToFlowSetupRateMapping.expected_flow_setup_rate[
                'policy'][RelVer]
        except KeyError:
            # A default value of 7K flows per second is set.
            DefinedSetupRate = default_setup_rate

        #
        # Set Expected NAT Flow Rate
        if PolNatSI == 'NAT Flow':
            DefinedSetupRate = ReleaseToFlowSetupRateMapping.expected_flow_setup_rate[
                'nat'][RelVer]
        #
        # The flow setup rate is calculated from the time required to set up
        # the first 100K flows, so TotalFlows is set to 100K and 5 samples
        # (NoOfIterations) are taken within that window. The sampling interval
        # (sleep_interval) is derived from DefinedSetupRate for the release
        # version under test.
        TotalFlows = 100000
        NoOfIterations = 5
        sleep_interval = (float(TotalFlows) / float(DefinedSetupRate)) / \
            float(NoOfIterations)

        # For scaled flows & low profile VM, it takes time for VM/tool to start sending packets...
        #self.logger.info("Sleeping for 20 sec, for VM to start sending packets.")
        #time.sleep(20)
        #
        # After each sleep_interval, sample the number of active forward/NAT
        # flows set up on the vrouter; this repeats NoOfIterations times, and
        # the running average is updated on each iteration.
        for ind in range(NoOfIterations):
            time.sleep(sleep_interval)
            flows_now = flow_test_utils.vm_vrouter_flow_count(src_vm_obj)
            NoOfFlows.append(flows_now)
            if ind == 0:
                FlowRatePerInterval.append(NoOfFlows[ind])
                AverageFlowSetupRate = FlowRatePerInterval[ind]
            elif ind > 0:
                FlowRatePerInterval.append(NoOfFlows[ind] - NoOfFlows[ind - 1])
                AverageFlowSetupRate = (AverageFlowSetupRate +
                                        FlowRatePerInterval[ind]) / 2
            self.logger.info("Flows setup in last %s sec = %s" %
                             (sleep_interval, FlowRatePerInterval[ind]))
            self.logger.info(
                "Average flow setup rate per %s sec till this iteration = %s" %
                (sleep_interval, AverageFlowSetupRate))
            self.logger.info("Flow samples so far: %s" % (NoOfFlows))
            self.logger.info(" ")
            if flows_now > 90000:
                self.logger.info("Flows setup so far: %s" % (flows_now))
                self.logger.info("Close to 100k flows setup, no need to wait")
                break

        # @setup rate of 9000 flows per sec, 30*9000=270k flows can be setup
        # with ~10s over with above loop, wait for another 20s
        # self.logger.info("Sleeping for 20 sec, for all the flows to be setup.")
        # time.sleep(20)
        # Calculate the flow setup rate per second = average flow setup in
        # sleep interval over the above iterations / sleep interval.
        AverageFlowSetupRate = int(AverageFlowSetupRate / sleep_interval)
        self.logger.info("Flow setup rate seen in this test is = %s" %
                         (AverageFlowSetupRate))
        if (AverageFlowSetupRate < (0.9 * DefinedSetupRate)):
            self.logger.warn(
                "Flow setup rate seen in this test fell below 90 percent of the defined flow setup rate for this release - %s."
                % (DefinedSetupRate))
        else:
            self.logger.info(
                "Flow setup rate seen in this test is close to or above the defined flow setup rate for this release - %s."
                % (DefinedSetupRate))

        # write to a file to do record keeping of the flow rate on a particular
        # node.
        ts = time.time()
        mtime = datetime.datetime.fromtimestamp(ts).strftime(
            '%Y-%m-%d %H:%M:%S')

        fh = open("Flow_Test_Data.xls", "a")
        localflow = 'Remote Flow'
        # Check if it's a remote or local flow to log the data accordingly.
        if Shost[0] == Dhost[0]:
            localflow = 'Local Flow'
        # if source and destination VN are same then it's not a NAT/Policy flow
        # else it is a NAT/Policy flow and needs to be logged accordingly.
        if src_vm_obj.vn_name == dst_vm_obj.vn_name:
            mystr = "%s\t%s\t%s\t%s\t%s\n" % (build_version, mtime, Shost[0],
                                              AverageFlowSetupRate, localflow)
        else:
            mystr = "%s\t%s\t%s\t%s\t%s\t%s\n" % (
                build_version, mtime, Shost[0], AverageFlowSetupRate,
                localflow, PolNatSI)

        fh.write(mystr)
        fh.close()

        self.logger.info("Joining thread")
        th.join()

        #
        # Fail the test if the actual flow setup rate is < 60% of the defined
        # flow setup rate for the release.
        if (AverageFlowSetupRate < (0.6 * DefinedSetupRate)):
            self.logger.error(
                "The flow setup rate seen in this test is below 60% of the defined (expected) flow setup rate for this release."
            )
            self.logger.error(
                "The Actual Flow setup rate = %s and the Defined Flow setup rate = %s."
                % (AverageFlowSetupRate, DefinedSetupRate))
            self.logger.error(
                "This clearly indicates something is wrong here, so no further test cases will be executed."
            )
            self.logger.error("Exiting Now!!!")
            return False

        return True
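A side note on the averaging in the sampling loop above: AverageFlowSetupRate = (AverageFlowSetupRate + FlowRatePerInterval[ind]) / 2 halves the weight of older samples on each iteration, so the result is an exponentially weighted average biased toward recent intervals. If a plain arithmetic mean over the same samples were wanted instead, it would look like this:

def mean_setup_rate(flow_rate_per_interval, sleep_interval):
    # Total flows set up across all sampled intervals / total sampled time.
    return int(sum(flow_rate_per_interval) /
               (len(flow_rate_per_interval) * sleep_interval))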