Example #1
    def test_traffic_with_control_node_switchover(self):
        ''' Stop the control node and check peering with agent fallback to other control node.

        '''
        if len(set(self.inputs.bgp_ips)) < 2:
            raise self.skipTest(
                "Skipping test. At least 2 control nodes are required to run the test"
            )
        result = True
        fip_pool_name = get_random_name('some-pool1')

        (self.vn1_name, self.vn1_subnets) = (get_random_name("vn1"),
                                             ["11.1.1.0/24"])
        (self.vn2_name, self.vn2_subnets) = (get_random_name("vn2"),
                                             ["22.1.1.0/24"])
        (self.fvn_public_name,
         self.fvn_public_subnets) = (get_random_name("fip_vn_public"),
                                     ['10.204.219.16/28'])
        (self.fvn1_name, self.fvn1_subnets) = (get_random_name("fip_vn1"),
                                               ['100.1.1.0/24'])
        (self.fvn2_name, self.fvn2_subnets) = (get_random_name("fip_vn2"),
                                               ['200.1.1.0/24'])
        (self.fvn3_name, self.fvn3_subnets) = (get_random_name("fip_vn3"),
                                               ['170.1.1.0/29'])
        (self.vn1_vm1_name, self.vn1_vm2_name) = (get_random_name('vn1_vm1'),
                                                  get_random_name('vn1_vm2'))
        (self.vn2_vm1_name, self.vn2_vm2_name) = (get_random_name('vn2_vm1'),
                                                  get_random_name('vn2_vm2'))
        self.fvn_public_vm1_name = get_random_name('fvn_public_vm1')
        self.fvn1_vm1_name = get_random_name('fvn1_vm1')
        self.fvn2_vm1_name = get_random_name('fvn2_vm1')
        self.fvn3_vm1_name = get_random_name('fvn3_vm1')
        self.vn1_vm1_traffic_name = get_random_name('VN1_VM1_traffic')
        self.fvn1_vm1_traffic_name = get_random_name('FVN1_VM1_traffic')
        fip_pool_name1 = get_random_name('some-pool1')
        fip_pool_name2 = get_random_name('some-pool2')

        # Get all compute hosts; use two different hosts when available
        host_list = self.connections.nova_h.get_hosts()
        compute_1 = host_list[0]
        compute_2 = host_list[1] if len(host_list) > 1 else host_list[0]
        self.fvn1_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      inputs=self.inputs,
                      vn_name=self.fvn1_name,
                      subnets=self.fvn1_subnets))
        self.vn1_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      inputs=self.inputs,
                      vn_name=self.vn1_name,
                      subnets=self.vn1_subnets))

        self.fvn1_vm1_traffic_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.fvn1_fixture.obj,
                      flavor='contrail_flavor_small',
                      image_name='ubuntu-traffic',
                      vm_name=self.fvn1_vm1_traffic_name,
                      node_name=compute_2))
        self.vn1_vm1_traffic_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.vn1_fixture.obj,
                      flavor='contrail_flavor_small',
                      image_name='ubuntu-traffic',
                      vm_name=self.vn1_vm1_traffic_name,
                      node_name=compute_1))

        fvn1_fixture = self.fvn1_fixture
        fvn1_vm1_traffic_fixture = self.fvn1_vm1_traffic_fixture
        fvn1_subnets = self.fvn1_subnets
        fvn1_vm1_traffic_name = self.fvn1_vm1_traffic_name
        vn1_fixture = self.vn1_fixture
        vn1_vm1_traffic_fixture = self.vn1_vm1_traffic_fixture
        vn1_subnets = self.vn1_subnets
        vn1_vm1_traffic_name = self.vn1_vm1_traffic_name

        assert fvn1_fixture.verify_on_setup()
        assert vn1_fixture.verify_on_setup()
        assert fvn1_vm1_traffic_fixture.verify_on_setup()
        assert vn1_vm1_traffic_fixture.verify_on_setup()

        fip_fixture1 = self.useFixture(
            FloatingIPFixture(project_name=self.inputs.project_name,
                              inputs=self.inputs,
                              connections=self.connections,
                              pool_name=fip_pool_name1,
                              vn_id=fvn1_fixture.vn_id))
        assert fip_fixture1.verify_on_setup()

        fip_id1 = fip_fixture1.create_and_assoc_fip(
            fvn1_fixture.vn_id, vn1_vm1_traffic_fixture.vm_id)
        self.addCleanup(fip_fixture1.disassoc_and_delete_fip, fip_id1)
        assert fip_fixture1.verify_fip(fip_id1, vn1_vm1_traffic_fixture,
                                       fvn1_fixture)
        if not vn1_vm1_traffic_fixture.ping_with_certainty(
                fvn1_vm1_traffic_fixture.vm_ip):
            result = False

        # Figure out the active control node
        active_controller = None
        inspect_h = self.agent_inspect[vn1_vm1_traffic_fixture.vm_node_ip]
        agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
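        # The agent reports one entry per control-node XMPP session; the one
        # with 'cfg_controller' == 'Yes' is the control node currently acting
        # as this agent's config controller.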
        for entry in agent_xmpp_status:
            if entry['cfg_controller'] == 'Yes':
                active_controller = entry['controller_ip']
        self.logger.info(
            'Active control node from the Agent %s is %s' %
            (vn1_vm1_traffic_fixture.vm_node_ip, active_controller))

        fvn1_vm1_traffic_fixture.wait_till_vm_is_up()
        vn1_vm1_traffic_fixture.wait_till_vm_is_up()
        # Install traffic pkg in VM
        vn1_vm1_traffic_fixture.install_pkg("Traffic")
        fvn1_vm1_traffic_fixture.install_pkg("Traffic")
        # Start Traffic
        traffic_obj = {}
        startStatus = {}
        stopStatus = {}
        traffic_proto_l = ['icmp']
        total_streams = {}
        total_streams['icmp'] = 1
        dpi = 9100
        proto = 'icmp'
        for proto in traffic_proto_l:
            traffic_obj[proto] = {}
            startStatus[proto] = {}
            traffic_obj[proto] = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            startStatus[proto] = traffic_obj[proto].startTraffic(
                num_streams=total_streams[proto],
                start_port=dpi,
                tx_vm_fixture=vn1_vm1_traffic_fixture,
                rx_vm_fixture=fvn1_vm1_traffic_fixture,
                stream_proto=proto)
            self.logger.info(
                "Status of start traffic : %s, %s, %s" %
                (proto, vn1_vm1_traffic_fixture.vm_ip, startStatus[proto]))
            if not startStatus[proto]['status']:
                result = False
        self.logger.info("-" * 80)

        # Poll live traffic
        traffic_stats = {}
        self.logger.info("Poll live traffic and get status..")
        for proto in traffic_proto_l:
            traffic_stats = traffic_obj[proto].getLiveTrafficStats()
            err_msg = "Traffic disruption seen; details: %s" % traffic_stats
            assert traffic_stats['status'], err_msg
        self.logger.info("-" * 80)

        # Stop on Active node
        self.logger.info('Stopping the contrail-control service on %s' %
                         (active_controller))
        self.inputs.stop_service('contrail-control', [active_controller],
                                 container='control')
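        # Give the agent a few seconds to detect the lost XMPP session and
        # fail over to the standby control node.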
        sleep(5)

        # Check the control node shifted to other control node
        new_active_controller = None
        new_active_controller_state = None
        inspect_h = self.agent_inspect[vn1_vm1_traffic_fixture.vm_node_ip]
        agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
        for entry in agent_xmpp_status:
            if entry['cfg_controller'] == 'Yes':
                new_active_controller = entry['controller_ip']
                new_active_controller_state = entry['state']

        self.logger.info(
            'Active control node from the Agent %s is %s' %
            (vn1_vm1_traffic_fixture.vm_node_ip, new_active_controller))
        if new_active_controller == active_controller:
            self.logger.error(
                'Control node switchover failed. Old active control node was %s and new active control node is %s'
                % (active_controller, new_active_controller))
            result = False
        if new_active_controller_state != 'Established':
            self.logger.error(
                'Agent does not have an Established XMPP connection with the active control node'
            )
            result = False

        # Verify Flow records here
        inspect_h1 = self.agent_inspect[vn1_vm1_traffic_fixture.vm_node_ip]
        inspect_h2 = self.agent_inspect[fvn1_vm1_traffic_fixture.vm_node_ip]
        flow_rec1 = None
        udp_src = str(8000)
        dpi = str(dpi)

        # Verify Ingress Traffic
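        # Flow lookups below use string keys: protocol '1' is ICMP and
        # dport '0' matches the ICMP flow entries.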
        self.logger.info('Verifying Ingress Flow Record')
        vn_fq_name = vn1_vm1_traffic_fixture.vn_fq_name
        compute_node_fixture = self.useFixture(
            ComputeNodeFixture(self.connections,
                               vn1_vm1_traffic_fixture.vm_node_ip))
        fwd_flow, rev_flow = compute_node_fixture.get_flow_entry(
            source_ip=vn1_vm1_traffic_fixture.vm_ip,
            dest_ip=fvn1_vm1_traffic_fixture.vm_ip,
            dest_port='0',
            proto='1')
        if fwd_flow:
            sport = fwd_flow.source_port
            flow_rec1 = inspect_h1.get_vna_fetchflowrecord(
                nh=vn1_vm1_traffic_fixture.tap_intf[vn_fq_name]
                ['flow_key_idx'],
                sip=vn1_vm1_traffic_fixture.vm_ip,
                dip=fvn1_vm1_traffic_fixture.vm_ip,
                sport=sport,
                dport='0',
                protocol='1')
        else:
            flow_rec1 = None
        if flow_rec1 is not None:
            self.logger.info('Verifying NAT in flow records')
            match = inspect_h1.match_item_in_flowrecord(
                flow_rec1, 'nat', 'enabled')
            if match is False:
                self.logger.error(
                    'Test Failed. NAT is not enabled in the given flow. Flow details: %s'
                    % (flow_rec1))
                result = False
            self.logger.info('Verifying traffic direction in flow records')
            match = inspect_h1.match_item_in_flowrecord(
                flow_rec1, 'direction', 'ingress')
            if match is False:
                self.logger.error(
                    'Test Failed. Traffic direction is wrong; it should be ingress. Flow details: %s'
                    % (flow_rec1))
                result = False
        else:
            self.logger.error(
                'Test Failed. Required ingress traffic flow not found')
            result = False

        # Verify Egress Traffic
        # Check VMs are in same agent or not. Need to compute source vrf
        # accordingly
        self.logger.info('Verifying Egress Flow Records')
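        # On the return path the destination is the floating IP, since the
        # sender's private address was NATed to the FIP; hence dip is
        # fip_fixture1.fip[fip_id1] here.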
        if fwd_flow:
            flow_rec2 = inspect_h1.get_vna_fetchflowrecord(
                nh=vn1_vm1_traffic_fixture.tap_intf[vn_fq_name]
                ['flow_key_idx'],
                sip=fvn1_vm1_traffic_fixture.vm_ip,
                dip=fip_fixture1.fip[fip_id1],
                sport=sport,
                dport='0',
                protocol='1')
        else:
            flow_rec2 = None
        if flow_rec2 is not None:
            self.logger.info('Verifying NAT in flow records')
            match = inspect_h1.match_item_in_flowrecord(
                flow_rec2, 'nat', 'enabled')
            if match is False:
                self.logger.error(
                    'Test Failed. NAT is not enabled in the given flow. Flow details: %s'
                    % (flow_rec2))
                result = False
            self.logger.info('Verifying traffic direction in flow records')
            match = inspect_h1.match_item_in_flowrecord(
                flow_rec2, 'direction', 'egress')
            if match is False:
                self.logger.error(
                    'Test Failed. Traffic direction is wrong; it should be egress. Flow details: %s'
                    % (flow_rec2))
                result = False
        else:
            self.logger.error(
                'Test Failed. Required egress traffic flow not found')
            result = False

        # Stop Traffic
        self.logger.info("Proceed to stop traffic..")
        self.logger.info("-" * 80)
        for proto in traffic_proto_l:
            stopStatus[proto] = {}
            stopStatus[proto] = traffic_obj[proto].stopTraffic()
            #if stopStatus[proto] != []: msg.append(stopStatus[proto]); result= False
            if stopStatus[proto] != []:
                result = False
            self.logger.info("Status of stop traffic for proto %s is %s" %
                             (proto, stopStatus[proto]))
        self.logger.info("-" * 80)

        # Start the control node service again
        self.logger.info('Starting the contrail-control service on %s' %
                         (active_controller))
        self.inputs.start_service('contrail-control', [active_controller],
                                  container='control')

        sleep(10)
        # Check the BGP peering status from the currently active control node
        self.logger.info(
            'Checking BGP peering from the new active controller %s' %
            (new_active_controller))
        cn_bgp_entry = self.cn_inspect[
            new_active_controller].get_cn_bgp_neigh_entry()
        sleep(5)
        for entry in cn_bgp_entry:
            if entry['state'] != 'Established':
                result = False
                self.logger.error(
                    'Peering with peer %s is not Established. Current state: %s'
                    % (entry['peer'], entry['state']))

        # fip_fixture1.disassoc_and_delete_fip(fip_id1)
        if not result:
            self.logger.error('Switchover of control node failed')
            assert result
        return True
    def test_max_vm_flows(self):
        ''' Test to validate that setting the max_vm_flows parameter in the
            agent config file has the expected effect on the flows in the
            system.
            1. Set VM flow cache time and max_vm_flows to 0.01% of max system
               flows (512K).
            2. Create 2 VNs and connect them using a policy.
            3. Launch 2 VMs in the respective VNs.
            4. Start traffic with around 20000 flows.
            5. Restart the vrouter agent service and check that the flows are
               limited to 0.01% of max system flows.
        Pass criteria: Step 5 should pass
        '''
        result = True

        # Set VM flow cache time to 10 and max_vm_flows to 0.01% of max
        # system flows (512K).
        self.comp_node_fixt = {}
        self.flow_cache_timeout = 10
        self.max_system_flows = 0
        self.max_vm_flows = 0.01
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
                self.connections, cmp_node))
            self.comp_node_fixt[cmp_node].set_flow_aging_time(
                self.flow_cache_timeout)
            self.comp_node_fixt[cmp_node].get_config_per_vm_flow_limit()
            self.comp_node_fixt[cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
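            # Restart the vrouter agent so the new aging time and per-VM
            # flow limit take effect.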
            self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()
            if self.max_system_flows < self.comp_node_fixt[
                cmp_node].max_system_flows:
                self.max_system_flows = self.comp_node_fixt[
                    cmp_node].max_system_flows
        self.addCleanup(self.cleanup_test_max_vm_flows_vrouter_config,
            self.inputs.compute_ips,
            self.comp_node_fixt)

        # Define resources for this test.
        vn1_name = get_random_name('VN1')
        vn1_subnets = ['10.1.1.0/24']
        vn2_name = get_random_name('VN2')
        vn2_subnets = ['10.2.1.0/24']
        vn1_vm1_name = get_random_name('VM1')
        vn2_vm2_name = get_random_name('VM2')
        policy1_name = 'policy1'
        policy2_name = 'policy2'
        rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn1_name,
                'dest_network': vn2_name,
            },
        ]
        rev_rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn2_name,
                'dest_network': vn1_name,
            },
        ]

        # Create 2 VN's and connect them using a policy.
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
        assert vn2_fixture.verify_on_setup()

        policy1_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy1_name,
                rules_list=rules, inputs=self.inputs,
                connections=self.connections))
        policy2_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy2_name,
                rules_list=rev_rules, inputs=self.inputs,
                connections=self.connections))

        vn1_fixture.bind_policies(
            [policy1_fixture.policy_fq_name], vn1_fixture.vn_id)
        self.addCleanup(vn1_fixture.unbind_policies,
                        vn1_fixture.vn_id, [policy1_fixture.policy_fq_name])
        vn2_fixture.bind_policies(
            [policy2_fixture.policy_fq_name], vn2_fixture.vn_id)
        self.addCleanup(vn2_fixture.unbind_policies,
                        vn2_fixture.vn_id, [policy2_fixture.policy_fq_name])

        # Launch 2 VM's in the respective VN's.
        vm1_fixture = self.create_vm(vn1_fixture,vm_name=vn1_vm1_name,
                flavor='contrail_flavor_small', image_name='ubuntu-traffic')
        vm2_fixture = self.create_vm(vn2_fixture,vm_name=vn2_vm2_name,
                flavor='contrail_flavor_small', image_name='ubuntu-traffic')
        assert vm1_fixture.verify_on_setup(), 'VM1 verifications FAILED'
        assert vm2_fixture.verify_on_setup(), 'VM2 verifications FAILED'
        assert vm1_fixture.wait_till_vm_is_up(), 'VM1 does not seem to be up'
        assert vm2_fixture.wait_till_vm_is_up(), 'VM2 does not seem to be up'
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip), \
            'Ping from VM1 to VM2 FAILED'

        # Set num_flows to a fixed, smaller value that still exceeds the
        # per-VM flow limit
        max_system_flows = self.max_system_flows
        vm_flow_limit = int((self.max_vm_flows/100.0)*max_system_flows)
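        # e.g. with 512K max system flows, 0.01% works out to a per-VM limit
        # of about 52 flows; num_flows deliberately exceeds that limit.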
        num_flows = vm_flow_limit + 30
        generated_flows = 2*num_flows
        flow_gen_rate = 5
        proto = 'udp'

        # Start Traffic.
        self.traffic_obj = self.useFixture(
            traffic_tests.trafficTestFixture(self.connections))
        startStatus = self.traffic_obj.startTraffic(
            total_single_instance_streams=int(num_flows),
            pps=flow_gen_rate,
            start_sport=5000,
            cfg_profile='ContinuousSportRange',
            tx_vm_fixture=vm1_fixture,
            rx_vm_fixture=vm2_fixture,
            stream_proto=proto)
        msg1 = "Status of start traffic : %s, %s, %s" % (
            proto, vm1_fixture.vm_ip, startStatus['status'])
        self.logger.info(msg1)
        assert startStatus['status'], msg1
        self.logger.info("Wait for 3 sec for flows to be setup.")
        sleep(3)

        # 4. Poll live traffic & verify VM flow count
        flow_cmd = 'flow -l | grep %s -A1 |' % vm1_fixture.vm_ip
        flow_cmd = flow_cmd + ' grep "Action" | grep -v "Action:D(FlowLim)" | wc -l'
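        # 'flow -l' lists the vrouter flow table; counting 'Action' lines
        # while excluding entries dropped by the per-VM limiter
        # (Action:D(FlowLim)) approximates the VM's active flow count.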
        sample_time = 2
        vm_flow_list=[]
        for i in range(5):
            sleep(sample_time)
            vm_flow_record = self.inputs.run_cmd_on_server(
                vm1_fixture.vm_node_ip,
                flow_cmd,
                self.inputs.host_data[vm1_fixture.vm_node_ip]['username'],
                self.inputs.host_data[vm1_fixture.vm_node_ip]['password'])
            vm_flow_record = vm_flow_record.strip()
            vm_flow_list.append(int(vm_flow_record))
            self.logger.info("%s iteration DONE." % i)
            self.logger.info("VM flow count = %s." % vm_flow_list[i])
            self.logger.info("Sleeping for %s sec before next iteration."
                % sample_time)

        vm_flow_list.sort(reverse=True)
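        # Allow roughly 10% tolerance around the configured per-VM limit.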
        if vm_flow_list[0] > int(1.1*vm_flow_limit):
            self.logger.error("TEST FAILED.")
            self.logger.error("VM flow count seen is greater than configured.")
            result = False
        elif vm_flow_list[0] < int(0.9*vm_flow_limit):
            self.logger.error("TEST FAILED.")
            self.logger.error("VM flow count seen is much lower than configured.")
            self.logger.error("Something is stopping flow creation. Please debug.")
            result = False
        else:
            self.logger.info("TEST PASSED")
            self.logger.info("Expected range of vm flows seen.")
            self.logger.info("Max VM flows = %s" % vm_flow_list[0])

        # Stop Traffic.
        self.logger.info("Proceed to stop traffic..")
        try:
            self.traffic_obj.stopTraffic(wait_for_stop=False)
        except Exception:
            self.logger.warn("Failed to get a VM handle and stop traffic.")

        self.logger.info("Wait for the flows to get purged.")
        sleep(self.flow_cache_timeout)

        return result
Example #3
    def test_flow_multi_projects(self):
        """Tests related to flow setup rate and flow table stability accross
           various triggers for verification accross VN's and accross multiple
           projects.
        """
        result = True
        self.comp_node_fixt = {}
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
                self.connections, cmp_node))

        # 1. Start Traffic
        num_flows = 15000
        flow_gen_rate = 1000
        proto = 'udp'
        profile = 'TrafficProfile1'
        details = self.topo[list(self.topo.keys())[0]].traffic_profile[profile]
        self.traffic_setup(profile, details, num_flows, flow_gen_rate, proto)
        self.traffic_obj = self.useFixture(
            traffic_tests.trafficTestFixture(self.connections))

        # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
        # stream_proto= 'udp', start_sport= 8000,
        # total_single_instance_streams= 20):
        startStatus = self.traffic_obj.startTraffic(
            total_single_instance_streams=num_flows,
            pps=self.flow_gen_rate,
            start_sport=1000,
            cfg_profile='ContinuousSportRange',
            tx_vm_fixture=self.src_vm_fixture,
            rx_vm_fixture=self.dst_vm_fixture,
            stream_proto=self.proto)

        msg1 = "Status of start traffic : %s, %s, %s" % (
            self.proto, self.src_vm_fixture.vm_ip, startStatus['status'])
        self.logger.info(msg1)
        assert startStatus['status'], msg1
        # 2. Poll live traffic & verify VM flow count
        flow_test_utils.verify_node_flow_setup(self)
        # 3. Stop Traffic
        self.logger.info("Proceed to stop traffic..")
        self.traffic_obj.stopTraffic(wait_for_stop=False)
        start_time = time.time()
        # 4. Verify flow ageing
        self.logger.info(
            "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing")
        sleep(self.flow_cache_timeout)
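        # Poll until the flows drain: each pass estimates an upper-bound
        # teardown time from the current flow count, sleeps that long, and
        # rechecks.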
        while True:
            begin_flow_count = self.comp_node_fixt[
                self.cmp_node].get_vrouter_matching_flow_count(
                self.flow_data)
            self.logger.debug('begin_flow_count: %s' % (begin_flow_count))
            if begin_flow_count['all'] == 0:
                break
            flow_teardown_time = math.ceil(
                flow_test_utils.get_max_flow_removal_time(
                    begin_flow_count['all'],
                    self.flow_cache_timeout))
            # flow_teardown_time is not the exact time needed to remove the
            # flows; it is an upper bound computed from the current flow
            # count, and actual removal varies with the agent's periodic
            # polling interval.
            self.logger.info('Sleeping for %s secs' % (flow_teardown_time))
            sleep(flow_teardown_time)
            # At the end of the wait, the remaining flow count should be
            # below 50% of the count seen before teardown started
            current_flow_count = self.comp_node_fixt[
                self.cmp_node].get_vrouter_matching_flow_count(
                self.flow_data)
            self.logger.debug('current_flow_count: %s' % (current_flow_count))
            if current_flow_count['all'] > (0.5 * begin_flow_count['all']):
                msg = [
                    'Flow removal not happening as expected in node %s' %
                    self.cmp_node]
                msg.append(
                    'Flow count before wait: %s, after wait of %s secs, its: %s' %
                    (begin_flow_count['all'],
                     flow_teardown_time,
                     current_flow_count['all']))
                assert False, msg
            if current_flow_count['all'] < (0.1 * begin_flow_count['all']):
                break
        # end of while loop
        elapsed_time = time.time() - start_time
        self.logger.info(
            "Flows aged out as expected within the configured "
            "flow_cache_timeout (elapsed time: %s secs)" % elapsed_time)
        return True
Example #4
    def test_max_vm_flows(self):
        ''' Test to validate that setting the max_vm_flows parameter in the
            agent config file has the expected effect on the flows in the
            system.
            1. Set VM flow cache time and max_vm_flows to 0.01% of max system
               flows (512K).
            2. Create 2 VNs and connect them using a policy.
            3. Launch 2 VMs in the respective VNs.
            4. Start traffic with around 20000 flows.
            5. Restart the vrouter agent service and check that the flows are
               limited to 0.01% of max system flows.
        Pass criteria: Step 5 should pass
        '''
        result = True

        # Set VM flow cache time to 10 and max_vm_flows to 0.01% of max
        # system flows (512K).
        self.comp_node_fixt = {}
        self.flow_cache_timeout = 10
        self.max_system_flows = 0
        self.max_vm_flows = 0.01
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(
                ComputeNodeFixture(self.connections, cmp_node))
            self.comp_node_fixt[cmp_node].set_flow_aging_time(
                self.flow_cache_timeout)
            self.comp_node_fixt[cmp_node].get_config_per_vm_flow_limit()
            self.comp_node_fixt[cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()
            if self.max_system_flows < self.comp_node_fixt[
                    cmp_node].max_system_flows:
                self.max_system_flows = self.comp_node_fixt[
                    cmp_node].max_system_flows
        self.addCleanup(self.cleanup_test_max_vm_flows_vrouter_config,
                        self.inputs.compute_ips, self.comp_node_fixt)

        # Define resources for this test.
        vn1_name = get_random_name('VN1')
        vn1_subnets = ['10.1.1.0/24']
        vn2_name = get_random_name('VN2')
        vn2_subnets = ['10.2.1.0/24']
        vn1_vm1_name = get_random_name('VM1')
        vn2_vm2_name = get_random_name('VM2')
        policy1_name = 'policy1'
        policy2_name = 'policy2'
        rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn1_name,
                'dest_network': vn2_name,
            },
        ]
        rev_rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn2_name,
                'dest_network': vn1_name,
            },
        ]

        # Create 2 VN's and connect them using a policy.
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
        assert vn2_fixture.verify_on_setup()

        policy1_fixture = self.useFixture(
            PolicyFixture(policy_name=policy1_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))
        policy2_fixture = self.useFixture(
            PolicyFixture(policy_name=policy2_name,
                          rules_list=rev_rules,
                          inputs=self.inputs,
                          connections=self.connections))

        vn1_fixture.bind_policies([policy1_fixture.policy_fq_name],
                                  vn1_fixture.vn_id)
        self.addCleanup(vn1_fixture.unbind_policies, vn1_fixture.vn_id,
                        [policy1_fixture.policy_fq_name])
        vn2_fixture.bind_policies([policy2_fixture.policy_fq_name],
                                  vn2_fixture.vn_id)
        self.addCleanup(vn2_fixture.unbind_policies, vn2_fixture.vn_id,
                        [policy2_fixture.policy_fq_name])

        # Launch 2 VM's in the respective VN's.
        vm1_fixture = self.create_vm(vn1_fixture,
                                     vm_name=vn1_vm1_name,
                                     flavor='contrail_flavor_small',
                                     image_name='ubuntu-traffic')
        vm2_fixture = self.create_vm(vn2_fixture,
                                     vm_name=vn2_vm2_name,
                                     flavor='contrail_flavor_small',
                                     image_name='ubuntu-traffic')
        assert vm1_fixture.verify_on_setup(), 'VM1 verifications FAILED'
        assert vm2_fixture.verify_on_setup(), 'VM2 verifications FAILED'
        assert vm1_fixture.wait_till_vm_is_up(), 'VM1 does not seem to be up'
        assert vm2_fixture.wait_till_vm_is_up(), 'VM2 does not seem to be up'
        assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip), \
            'Ping from VM1 to VM2 FAILED'

        # Set num_flows to a fixed, smaller value that still exceeds the
        # per-VM flow limit
        max_system_flows = self.max_system_flows
        vm_flow_limit = int((self.max_vm_flows / 100.0) * max_system_flows)
        num_flows = vm_flow_limit + 30
        generated_flows = 2 * num_flows
        flow_gen_rate = 5
        proto = 'udp'

        # Start Traffic.
        self.traffic_obj = self.useFixture(
            traffic_tests.trafficTestFixture(self.connections))
        startStatus = self.traffic_obj.startTraffic(
            total_single_instance_streams=int(num_flows),
            pps=flow_gen_rate,
            start_sport=5000,
            cfg_profile='ContinuousSportRange',
            tx_vm_fixture=vm1_fixture,
            rx_vm_fixture=vm2_fixture,
            stream_proto=proto)
        msg1 = "Status of start traffic : %s, %s, %s" % (
            proto, vm1_fixture.vm_ip, startStatus['status'])
        self.logger.info(msg1)
        assert startStatus['status'], msg1
        self.logger.info("Wait for 3 sec for flows to be setup.")
        sleep(3)

        # 4. Poll live traffic & verify VM flow count
        flow_cmd = 'flow -l | grep %s -A2 |' % vm1_fixture.vm_ip
        flow_cmd = flow_cmd + ' grep "Action" | grep -v "Action:D(FlowLim)" | wc -l'
        sample_time = 2
        vm_flow_list = []
        for i in range(5):
            sleep(sample_time)
            vm_flow_record = self.inputs.run_cmd_on_server(
                vm1_fixture.vm_node_ip, flow_cmd,
                self.inputs.host_data[vm1_fixture.vm_node_ip]['username'],
                self.inputs.host_data[vm1_fixture.vm_node_ip]['password'])
            vm_flow_record = vm_flow_record.strip()
            vm_flow_list.append(int(vm_flow_record))
            self.logger.info("%s iteration DONE." % i)
            self.logger.info("VM flow count = %s." % vm_flow_list[i])
            self.logger.info("Sleeping for %s sec before next iteration." %
                             sample_time)

        vm_flow_list.sort(reverse=True)
        if vm_flow_list[0] > int(1.1 * vm_flow_limit):
            self.logger.error("TEST FAILED.")
            self.logger.error("VM flow count seen is greater than configured.")
            result = False
        elif vm_flow_list[0] < int(0.9 * vm_flow_limit):
            self.logger.error("TEST FAILED.")
            self.logger.error("VM flow count seen is much lower than configured.")
            self.logger.error(
                "Something is stopping flow creation. Please debug.")
            result = False
        else:
            self.logger.info("TEST PASSED")
            self.logger.info("Expected range of vm flows seen.")
            self.logger.info("Max VM flows = %s" % vm_flow_list[0])

        # Stop Traffic.
        self.logger.info("Proceed to stop traffic..")
        try:
            self.traffic_obj.stopTraffic(wait_for_stop=False)
        except Exception:
            self.logger.warn("Failed to get a VM handle and stop traffic.")

        self.logger.info("Wait for the flows to get purged.")
        sleep(self.flow_cache_timeout)

        return result
    def test_agent_flow_settings(self):
        """Basic systest with single project with many features & traffic..
        """
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to
        # run this test; else report that a minimum of 2 compute nodes is
        # needed and exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on a single compute node")
            return True
        #
        # Get config for test from topology
        # import mini_flow_test_topo
        # topology_class_name = mini_flow_test_topo.systest_topo_single_project
        topology_class_name = flow_test_topo.systest_topo_single_project
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))

        topo = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            config_topo = out['data'][1]
        self.proj = list(config_topo.keys())[0]
        self.topo, self.config_topo = topo, config_topo

        # 2. set agent flow_cache_timeout to 60s
        # set max_vm_flows to 1% of 500k, comes to 5000
        self.comp_node_fixt = {}
        self.flow_cache_timeout = 60
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
                self.connections, cmp_node))
            self.comp_node_fixt[cmp_node].set_flow_aging_time(
                self.flow_cache_timeout)
            self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()

        # 3. Start Traffic
        for profile, details in self.topo.traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %(profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = self.proj
            self.dst_proj = self.proj
            # Set num_flows to fixed, smaller value but > 1% of
            # system max flows
            num_flows = 5555
            self.generated_flows = 2*num_flows
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            # 3a. Set max_vm_flows to 1% in TX VM node
            self.max_vm_flows = 1
            self.comp_node_fixt[
                self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
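            # Refresh the VM metadata IP bookkeeping, which may change across
            # an agent restart, before starting traffic.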
            flow_test_utils.update_vm_mdata_ip(self.cmp_node, self)
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 4. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 5. Increase max_vm_flows to 50% in TX VM node
            self.max_vm_flows = 50
            self.comp_node_fixt[
                self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
            # 6. Poll live traffic
            self.verify_node_flow_setup()
            # 7. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 8. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing")
            sleep(self.flow_cache_timeout)
            retries = 0
            retry_wait_time = 10
            flow_teardown_time = math.ceil(
                flow_test_utils.get_max_flow_removal_time(
                    self.generated_flows, self.flow_cache_timeout))
            self.logger.debug(
                "Flow teardown time based on calculation: %s" %
                flow_teardown_time)
            max_retries = math.ceil(self.flow_cache_timeout / retry_wait_time)
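            # Retry for roughly one more flow_cache_timeout period in total.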
            while retries < max_retries:
                actual_flows = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                    self.flow_data)
                actual_flows = int(actual_flows['all'])
                if actual_flows > 10:
                    self.logger.info("Waiting for flows to age out")
                    sleep(retry_wait_time)
                    retries += 1
                else:
                    break
            elapsed_time = time.time() - start_time
            if actual_flows > 50:
                msg = "Expected flows to age-out as configured, Seeing flows still active after elapsed time %s in node: %s, actual_flows: %s" % (
                    elapsed_time, self.cmp_node, actual_flows)
                assert False, msg
            else:
                self.logger.info(
                    "Flows aged out as expected in configured flow_cache_timeout")
                self.logger.info(
                    "elapsed_time after stopping traffic is %s, flow_count is %s" %
                    (elapsed_time, actual_flows))
    def test_traffic_with_control_node_switchover(self):
        ''' Stop the control node and check peering with agent fallback to other control node.

        '''
        if len(set(self.inputs.bgp_ips)) < 2:
            raise self.skipTest(
                "Skipping test. At least 2 control nodes are required to run the test")
        result = True
        fip_pool_name = get_random_name('some-pool1')

        (self.vn1_name, self.vn1_subnets) = (
            get_random_name("vn1"), ["11.1.1.0/24"])
        (self.vn2_name, self.vn2_subnets) = (
            get_random_name("vn2"), ["22.1.1.0/24"])
        (self.fvn_public_name, self.fvn_public_subnets) = (
            get_random_name("fip_vn_public"), ['10.204.219.16/28'])
        (self.fvn1_name, self.fvn1_subnets) = (
            get_random_name("fip_vn1"), ['100.1.1.0/24'])
        (self.fvn2_name, self.fvn2_subnets) = (
            get_random_name("fip_vn2"), ['200.1.1.0/24'])
        (self.fvn3_name, self.fvn3_subnets) = (
            get_random_name("fip_vn3"), ['170.1.1.0/29'])
        (self.vn1_vm1_name, self.vn1_vm2_name) = (
            get_random_name('vn1_vm1'), get_random_name('vn1_vm2'))
        (self.vn2_vm1_name, self.vn2_vm2_name) = (
            get_random_name('vn2_vm1'), get_random_name('vn2_vm2'))
        self.fvn_public_vm1_name = get_random_name('fvn_public_vm1')
        self.fvn1_vm1_name = get_random_name('fvn1_vm1')
        self.fvn2_vm1_name = get_random_name('fvn2_vm1')
        self.fvn3_vm1_name = get_random_name('fvn3_vm1')
        self.vn1_vm1_traffic_name = get_random_name('VN1_VM1_traffic')
        self.fvn1_vm1_traffic_name = get_random_name('FVN1_VM1_traffic')
        fip_pool_name1 = get_random_name('some-pool1')
        fip_pool_name2 = get_random_name('some-pool2')

        # Get all compute hosts; use two different hosts when available
        host_list = self.connections.nova_h.get_hosts()
        compute_1 = host_list[0]
        compute_2 = host_list[1] if len(host_list) > 1 else host_list[0]
        self.fvn1_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                inputs=self.inputs,
                vn_name=self.fvn1_name,
                subnets=self.fvn1_subnets))
        self.vn1_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                inputs=self.inputs,
                vn_name=self.vn1_name,
                subnets=self.vn1_subnets))

        self.fvn1_vm1_traffic_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=self.fvn1_fixture.obj,
                flavor='contrail_flavor_small',
                image_name='ubuntu-traffic',
                vm_name=self.fvn1_vm1_traffic_name,
                node_name=compute_2))
        self.vn1_vm1_traffic_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=self.vn1_fixture.obj,
                flavor='contrail_flavor_small',
                image_name='ubuntu-traffic',
                vm_name=self.vn1_vm1_traffic_name,
                node_name=compute_1))

        fvn1_fixture = self.fvn1_fixture
        fvn1_vm1_traffic_fixture = self.fvn1_vm1_traffic_fixture
        fvn1_subnets = self.fvn1_subnets
        fvn1_vm1_traffic_name = self.fvn1_vm1_traffic_name
        vn1_fixture = self.vn1_fixture
        vn1_vm1_traffic_fixture = self.vn1_vm1_traffic_fixture
        vn1_subnets = self.vn1_subnets
        vn1_vm1_traffic_name = self.vn1_vm1_traffic_name

        assert fvn1_fixture.verify_on_setup()
        assert vn1_fixture.verify_on_setup()
        assert fvn1_vm1_traffic_fixture.verify_on_setup()
        assert vn1_vm1_traffic_fixture.verify_on_setup()

        fip_fixture1 = self.useFixture(
            FloatingIPFixture(
                project_name=self.inputs.project_name,
                inputs=self.inputs,
                connections=self.connections,
                pool_name=fip_pool_name1,
                vn_id=fvn1_fixture.vn_id))
        assert fip_fixture1.verify_on_setup()

        fip_id1 = fip_fixture1.create_and_assoc_fip(
            fvn1_fixture.vn_id, vn1_vm1_traffic_fixture.vm_id)
        self.addCleanup(fip_fixture1.disassoc_and_delete_fip, fip_id1)
        assert fip_fixture1.verify_fip(
            fip_id1, vn1_vm1_traffic_fixture, fvn1_fixture)
        if not vn1_vm1_traffic_fixture.ping_with_certainty(
                fvn1_vm1_traffic_fixture.vm_ip):
            result = False

        # Figure out the active control node
        active_controller = None
        inspect_h = self.agent_inspect[vn1_vm1_traffic_fixture.vm_node_ip]
        agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
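        # The session with 'cfg_controller' == 'Yes' belongs to the agent's
        # current config controller.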
        for entry in agent_xmpp_status:
            if entry['cfg_controller'] == 'Yes':
                active_controller = entry['controller_ip']
        self.logger.info(
            'Active control node from the Agent %s is %s' %
            (vn1_vm1_traffic_fixture.vm_node_ip, active_controller))

        fvn1_vm1_traffic_fixture.wait_till_vm_is_up()
        vn1_vm1_traffic_fixture.wait_till_vm_is_up()
        # Install traffic pkg in VM
        vn1_vm1_traffic_fixture.install_pkg("Traffic")
        fvn1_vm1_traffic_fixture.install_pkg("Traffic")
        # Start Traffic
        traffic_obj = {}
        startStatus = {}
        stopStatus = {}
        traffic_proto_l = ['icmp']
        total_streams = {}
        total_streams['icmp'] = 1
        dpi = 9100
        proto = 'icmp'
        for proto in traffic_proto_l:
            traffic_obj[proto] = {}
            startStatus[proto] = {}
            traffic_obj[proto] = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            startStatus[proto] = traffic_obj[proto].startTraffic(
                num_streams=total_streams[proto],
                start_port=dpi,
                tx_vm_fixture=vn1_vm1_traffic_fixture,
                rx_vm_fixture=fvn1_vm1_traffic_fixture,
                stream_proto=proto)
            self.logger.info(
                "Status of start traffic : %s, %s, %s" %
                (proto, vn1_vm1_traffic_fixture.vm_ip, startStatus[proto]))
            if not startStatus[proto]['status']:
                result = False
        self.logger.info("-" * 80)

        # Poll live traffic
        traffic_stats = {}
        self.logger.info("Poll live traffic and get status..")
        for proto in traffic_proto_l:
            traffic_stats = traffic_obj[proto].getLiveTrafficStats()
            err_msg = "Traffic disruption seen; details: %s" % traffic_stats
            assert traffic_stats['status'], err_msg
        self.logger.info("-" * 80)

        # Stop on Active node
        self.logger.info('Stopping the contrail-control service on %s' %
                         (active_controller))
        self.inputs.stop_service('contrail-control', [active_controller],
                                 container='control')
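        # Give the agent a few seconds to detect the lost XMPP session and
        # fail over to the standby control node.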
        sleep(5)

        # Check the control node shifted to other control node
        new_active_controller = None
        new_active_controller_state = None
        inspect_h = self.agent_inspect[vn1_vm1_traffic_fixture.vm_node_ip]
        agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
        for entry in agent_xmpp_status:
            if entry['cfg_controller'] == 'Yes':
                new_active_controller = entry['controller_ip']
                new_active_controller_state = entry['state']

        self.logger.info(
            'Active control node from the Agent %s is %s' %
            (vn1_vm1_traffic_fixture.vm_node_ip, new_active_controller))
        if new_active_controller == active_controller:
            self.logger.error(
                'Control node switchover failed. Old active control node was %s and new active control node is %s' %
                (active_controller, new_active_controller))
            result = False
        if new_active_controller_state != 'Established':
            self.logger.error(
                'Agent does not have an Established XMPP connection with the active control node')
            result = False

        # Verify Flow records here
        inspect_h1 = self.agent_inspect[vn1_vm1_traffic_fixture.vm_node_ip]
        inspect_h2 = self.agent_inspect[fvn1_vm1_traffic_fixture.vm_node_ip]
        flow_rec1 = None
        udp_src = str(8000)
        dpi = str(dpi)

        # Verify Ingress Traffic
        self.logger.info('Verifying Ingress Flow Record')
        vn_fq_name = vn1_vm1_traffic_fixture.vn_fq_name
        compute_node_fixture = self.useFixture(ComputeNodeFixture(
            self.connections, vn1_vm1_traffic_fixture.vm_node_ip))
        fwd_flow, rev_flow = compute_node_fixture.get_flow_entry(
            source_ip=vn1_vm1_traffic_fixture.vm_ip,
            dest_ip=fvn1_vm1_traffic_fixture.vm_ip,
            dest_port='0',
            proto='1')
        if fwd_flow:
            sport = fwd_flow.source_port
            flow_rec1 = inspect_h1.get_vna_fetchflowrecord(
                nh=vn1_vm1_traffic_fixture.tap_intf[vn_fq_name]['flow_key_idx'],
                sip=vn1_vm1_traffic_fixture.vm_ip,
                dip=fvn1_vm1_traffic_fixture.vm_ip,
                sport=sport,
                dport='0',
                protocol='1')
        else:
            flow_rec1 = None
        if flow_rec1 is not None:
            self.logger.info('Verifying NAT in flow records')
            match = inspect_h1.match_item_in_flowrecord(
                flow_rec1, 'nat', 'enabled')
            if match is False:
                self.logger.error(
                    'Test Failed. NAT is not enabled in the given flow. Flow details: %s' %
                    (flow_rec1))
                result = False
            self.logger.info('Verifying traffic direction in flow records')
            match = inspect_h1.match_item_in_flowrecord(
                flow_rec1, 'direction', 'ingress')
            if match is False:
                self.logger.error(
                    'Test Failed. Traffic direction is wrong; it should be ingress. Flow details: %s' %
                    (flow_rec1))
                result = False
        else:
            self.logger.error(
                'Test Failed. Required ingress traffic flow not found')
            result = False

        # Verify Egress Traffic
        # Check VMs are in same agent or not. Need to compute source vrf
        # accordingly
        self.logger.info('Verifying Egress Flow Records')
        if fwd_flow:
            flow_rec2 = inspect_h1.get_vna_fetchflowrecord(
                nh=vn1_vm1_traffic_fixture.tap_intf[vn_fq_name]['flow_key_idx'],
                sip=fvn1_vm1_traffic_fixture.vm_ip,
                dip=fip_fixture1.fip[fip_id1],
                sport=sport,
                dport='0',
                protocol='1')
        else:
            flow_rec2 = None
        if flow_rec2 is not None:
            self.logger.info('Verifying NAT in flow records')
            match = inspect_h1.match_item_in_flowrecord(
                flow_rec2, 'nat', 'enabled')
            if match is False:
                self.logger.error(
                    'Test Failed. NAT is not enabled in the given flow. Flow details: %s' %
                    (flow_rec2))
                result = False
            self.logger.info('Verifying traffic direction in flow records')
            match = inspect_h1.match_item_in_flowrecord(
                flow_rec2, 'direction', 'egress')
            if match is False:
                self.logger.error(
                    'Test Failed. Traffic direction is wrong; it should be egress. Flow details: %s' %
                    (flow_rec2))
                result = False
        else:
            self.logger.error(
                'Test Failed. Required egress traffic flow not found')
            result = False

        # Stop Traffic
        self.logger.info("Proceed to stop traffic..")
        self.logger.info("-" * 80)
        for proto in traffic_proto_l:
            stopStatus[proto] = {}
            stopStatus[proto] = traffic_obj[proto].stopTraffic()
            #if stopStatus[proto] != []: msg.append(stopStatus[proto]); result= False
            if stopStatus[proto] != []:
                result = False
            self.logger.info("Status of stop traffic for proto %s is %s" %
                             (proto, stopStatus[proto]))
        self.logger.info("-" * 80)

        # Start the control node service again
        self.logger.info('Starting the contrail-control service on %s' %
                         (active_controller))
        self.inputs.start_service('contrail-control', [active_controller],
                                  container='control')

        sleep(10)
        # Check the BGP peering status from the currently active control node
        self.logger.info(
            'Checking BGP peering from the new active controller %s' %
            (new_active_controller))
        cn_bgp_entry = self.cn_inspect[
            new_active_controller].get_cn_bgp_neigh_entry()
        sleep(5)
        for entry in cn_bgp_entry:
            if entry['state'] != 'Established':
                result = False
                self.logger.error(
                    'Peering with peer %s is not Established. Current state: %s' %
                    (entry['peer'], entry['state']))

        # fip_fixture1.disassoc_and_delete_fip(fip_id1)
        if not result:
            self.logger.error('Switchover of control node failed')
            assert result
        return True
    def test_flow_multi_projects(self):
        """Tests related to flow setup rate and flow table stability accross various triggers for verification
           accross VN's and accross multiple projects"""
        result = True
        self.comp_node_fixt = {}
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
                self.connections, cmp_node))
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to
        # run this test; else report that a minimum of 2 compute nodes is
        # needed and exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on a single compute node")
            return True
        #
        # Get config for test from topology
        msg = []
        topology_class_name = sdn_flow_test_topo_multiple_projects.multi_project_topo

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IP's and pass it to topo if you want to pin
        # a vm to a particular node
        topo = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        self.topo, self.config_topo = out['data'][0], out['data'][1]
        self.proj = list(self.topo.keys())[0]
        # 2. Start Traffic
        for profile, details in self.topo[self.proj].traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %(profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = details['src_proj']
            self.dst_proj = details['dst_proj']
            # This is not a flow-scaling test, so limit num_flows to a modest number.
            num_flows = 15000
            self.generated_flows = 2*num_flows
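            # Each stream is expected to set up a forward and a reverse flow
            # entry in the vrouter, hence generated_flows = 2 * num_flows.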
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.src_proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.src_proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.dst_proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            self.comp_node_fixt[self.cmp_node].get_config_per_vm_flow_limit()
            self.comp_node_fixt[self.cmp_node].get_config_flow_aging_time()
            self.max_vm_flows = self.comp_node_fixt[self.cmp_node].max_vm_flows
            self.flow_cache_timeout = self.comp_node_fixt[self.cmp_node].flow_cache_timeout
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 3. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 4. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 5. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing")
            sleep(self.flow_cache_timeout)
            while True:
                begin_flow_count = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                    self.flow_data)
                self.logger.debug('begin_flow_count: %s' %(begin_flow_count))
                if begin_flow_count['all'] == 0:
                    break
                flow_teardown_time = math.ceil(flow_test_utils.get_max_flow_removal_time(begin_flow_count['all'], self.flow_cache_timeout))
                # flow_teardown_time is not the exact time needed to remove the
                # flows; it is an estimate derived from the current flow count,
                # and the actual time varies with the agent's periodic poll.
                self.logger.info('Sleeping for %s secs' %(flow_teardown_time))
                sleep(flow_teardown_time)
                # After the wait, the flow count should have dropped to at most
                # 50% of the count seen before teardown started.
                current_flow_count = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                    self.flow_data)
                self.logger.debug('current_flow_count: %s' %(current_flow_count))
                if current_flow_count['all'] > (0.5*begin_flow_count['all']):
                    msg = ['Flow removal not happening as expected in node %s' %self.cmp_node]
                    msg.append('Flow count before wait: %s; after waiting %s secs, it is: %s' %
                        (begin_flow_count['all'], flow_teardown_time, current_flow_count['all']))
                    assert False, msg
                if current_flow_count['all'] < (0.1*begin_flow_count['all']):
                    break
            # end of while loop
            elapsed_time = time.time() - start_time
            self.logger.info(
                "Flows aged out as expected in configured flow_cache_timeout")
        # end of profile for loop
        return True
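
    # Illustrative refactoring (not part of the original suite): the ageing
    # loop above could be factored into a helper like this sketch, assuming
    # the ComputeNodeFixture and flow_test_utils APIs already used above.
    def wait_for_flows_to_age_out(self, comp_fixture, flow_data,
                                  flow_cache_timeout, drain_fraction=0.1):
        '''Wait until matching vrouter flows fall below drain_fraction of
           the starting count; fail if fewer than half age out per wait.'''
        while True:
            begin = comp_fixture.get_vrouter_matching_flow_count(flow_data)
            if begin['all'] == 0:
                return
            wait = math.ceil(flow_test_utils.get_max_flow_removal_time(
                begin['all'], flow_cache_timeout))
            sleep(wait)
            current = comp_fixture.get_vrouter_matching_flow_count(flow_data)
            assert current['all'] <= 0.5 * begin['all'], (
                'Flow removal slower than expected: %s -> %s after %s secs' %
                (begin['all'], current['all'], wait))
            if current['all'] < drain_fraction * begin['all']:
                return
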
    def test_agent_flow_settings(self):
        """Basic systest with single project with many features & traffic..
        """
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run
        # this test; else report that a minimum of 2 compute nodes is needed and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        # import mini_flow_test_topo
        # topology_class_name = mini_flow_test_topo.systest_topo_single_project
        topology_class_name = flow_test_topo.systest_topo_single_project
        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))

        topo = topology_class_name(compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        config_topo = out['data'][1]
        self.proj = list(config_topo.keys())[0]
        self.topo, self.config_topo = topo, config_topo

        # 2. set agent flow_cache_timeout to 60s
        # set max_vm_flows to 1% of 500k, comes to 5000
        self.comp_node_fixt = {}
        self.flow_cache_timeout = 60
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(
                ComputeNodeFixture(self.connections, cmp_node))
            self.comp_node_fixt[cmp_node].set_flow_aging_time(
                self.flow_cache_timeout)
            self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()

        # 3. Start Traffic
        for profile, details in self.topo.traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %
                             (profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = self.proj
            self.dst_proj = self.proj
            # Set num_flows to a fixed, smaller value that is still > 1% of
            # the system max flows
            num_flows = 5555
            self.generated_flows = 2 * num_flows
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            # 3a. Set max_vm_flows to 1% in TX VM node
            self.max_vm_flows = 1
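            # max_vm_flows is interpreted as a percentage of the vrouter
            # flow-table size; per the note in step 2 above, 1% of ~500k
            # entries allows roughly 5000 flows per VM, which is below the
            # 2 * 5555 flows this profile generates, so the limit is hit.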
            self.comp_node_fixt[self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
            flow_test_utils.update_vm_mdata_ip(self.cmp_node, self)
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 4. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 5. Increase max_vm_flows to 50% in TX VM node
            self.max_vm_flows = 50
            self.comp_node_fixt[self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
            # 6. Poll live traffic
            self.verify_node_flow_setup()
            # 7. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 8. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing"
            )
            sleep(self.flow_cache_timeout)
            retries = 0
            retry_wait_time = 10
            flow_teardown_time = math.ceil(
                flow_test_utils.get_max_flow_removal_time(
                    self.generated_flows, self.flow_cache_timeout))
            self.logger.debug("flow tear down time based on calcualtion: %s" %
                              flow_teardown_time)
            max_retries = math.ceil(self.flow_cache_timeout / retry_wait_time)
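            # With flow_cache_timeout=60 and retry_wait_time=10, this allows
            # up to 6 polls, i.e. roughly one more flow_cache_timeout
            # interval before the ageing check below gives up.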
            while retries < max_retries:
                actual_flows = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                        self.flow_data)
                actual_flows = int(actual_flows['all'])
                if actual_flows > 10:
                    self.logger.info("Waiting for flows to age out")
                    sleep(retry_wait_time)
                    retries += 1
                else:
                    break
            elapsed_time = time.time() - start_time
            if actual_flows > 50:
                msg = "Expected flows to age-out as configured, Seeing flows still active after elapsed time %s in node: %s, actual_flows: %s" % (
                    elapsed_time, self.cmp_node, actual_flows)
                assert False, msg
            else:
                self.logger.info(
                    "Flows aged out as expected in configured flow_cache_timeout"
                )
                self.logger.info(
                    "elapsed_time after stopping traffic is %s, flow_count is %s"
                    % (elapsed_time, actual_flows))
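
    # Illustrative helper (an assumption, not part of the original suite):
    # the 1% / 50% steps above could be made self-documenting by converting
    # the percentage into an absolute per-VM limit. 512K is the commonly
    # cited default vrouter flow-table size and is assumed here.
    @staticmethod
    def expected_vm_flow_limit(max_vm_flows_pct, flow_table_size=512 * 1024):
        '''Translate the max_vm_flows percentage into an absolute per-VM
           flow-entry limit, e.g. 1 -> 5242 with the default table size.'''
        return int(flow_table_size * max_vm_flows_pct / 100.0)
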
Example #10
0
    def test_traffic_across_projects(self):
        """Traffic test with policy applied across multiple projects"""
        result = True
        topology_class_name = None

        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run
        # this test; else report that a minimum of 2 compute nodes is needed and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True

        #
        # Get config for test from topology
        import sdn_policy_topo_with_multi_project
        result = True
        msg = []
        if not topology_class_name:
            topology_class_name = sdn_policy_topo_with_multi_project.sdn_basic_policy_topo_with_fip

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IPs and pass it to topo if you want to pin
        # a VM to a particular node
        topo_obj = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        topo = {}
        topo_objs = {}
        config_topo = {}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo_obj))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        topo_objs, config_topo, vm_fip_info = out['data']

        p_lst = topo_obj.project_list  # projects
        p1vm1 = topo_objs[p_lst[0]].vmc_list[0]  # 'vmc1'
        p2vm2 = topo_objs[p_lst[1]].vmc_list[0]  # 'vmc2'
        p3vm3 = topo_objs[p_lst[2]].vmc_list[0]  # 'vmc3'
        adminvm = topo_objs[p_lst[3]].vmc_list[0]  # 'vmc-admin'

        result = True
        msg = []
        traffic_obj = {}
        startStatus = {}
        stopStatus = {}
        traffic_proto_l = ['tcp', 'icmp']
        total_streams = {}
        total_streams['icmp'] = 1
        total_streams['udp'] = 1
        total_streams['tcp'] = 1
        dpi = 9100
        expectedResult = {}
        for proto in traffic_proto_l:
            expectedResult[proto] = True
            traffic_obj[proto] = {}
            startStatus[proto] = {}
            tx_vm_fixt = config_topo[p_lst[0]]['vm'][p1vm1]
            if proto == 'icmp':
                rx_vm_fixt = config_topo[p_lst[1]]['vm'][p2vm2]
            else:
                rx_vm_fixt = config_topo[p_lst[2]]['vm'][p3vm3]

            traffic_obj[proto] = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (self, name, num_streams= 1, start_port= 9100, tx_vm_fixture= None, rx_vm_fixture= None, stream_proto= 'udp', \
            # packet_size= 100, start_sport= 8000,
            # total_single_instance_streams= 20):
            if vm_fip_info[0]:
                startStatus[proto] = traffic_obj[proto].startTraffic(
                    num_streams=total_streams[proto], start_port=dpi,
                    tx_vm_fixture=tx_vm_fixt, rx_vm_fixture=rx_vm_fixt, stream_proto=proto, vm_fip_info=vm_fip_info[1])
            else:
                startStatus[proto] = traffic_obj[proto].startTraffic(
                    num_streams=total_streams[proto], start_port=dpi,
                    tx_vm_fixture=tx_vm_fixt, rx_vm_fixture=rx_vm_fixt, stream_proto=proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                proto, tx_vm_fixt.vm_ip, startStatus[proto]['status'])
            if startStatus[proto]['status'] == False:
                self.logger.error(msg1)
                msg.extend(
                    [msg1, 'More info on failure: ', startStatus[proto]['msg']])
            else:
                self.logger.info(msg1)
            self.assertEqual(startStatus[proto]['status'], True, msg)
        self.logger.info("-" * 80)
        # Poll live traffic
        traffic_stats = {}
        err_msg = []
        self.logger.info("Poll live traffic and get status..")
        for proto in traffic_proto_l:
            self.logger.info("Poll live traffic %s and get status.." %
                             (proto))
            traffic_stats = traffic_obj[proto].getLiveTrafficStats()
            if not traffic_stats['status']:
                err_msg.extend(
                    ["Traffic disruption is seen:", traffic_stats['msg']])
            self.assertEqual(traffic_stats['status'],
                             expectedResult[proto], err_msg)
        self.logger.info("-" * 80)

        # Stop Traffic
        self.logger.info("Proceed to stop traffic..")
        self.logger.info("-" * 80)
        for proto in traffic_proto_l:
            stopStatus[proto] = traffic_obj[proto].stopTraffic()
            status = True if stopStatus[proto] == [] else False
            if status != expectedResult[proto]:
                msg.append(stopStatus[proto])
                result = False
            self.logger.info("Status of stop traffic for proto %s is %s" %
                             (proto, stopStatus[proto]))
        self.logger.info("-" * 80)
        self.assertEqual(result, True, msg)

        result = True
        msg = []
        dst_vm = p3vm3  # 'vmc3'
        dst_vm_fixture = config_topo[p_lst[2]]['vm'][p3vm3]
        dst_vm_ip = dst_vm_fixture.vm_ip
        src_vm = p1vm1  # 'vmc1'
        src_vm_fixture = config_topo[p_lst[0]]['vm'][p1vm1]
        self.logger.info(
            "With proto tcp allowed between %s and %s, trying to send icmp traffic" % (p1vm1, p3vm3))
        expectedResult = False
        self.logger.info(
            "Verify ping to vm %s from vm %s, expecting it to fail" %
            (dst_vm, src_vm))
        ret = src_vm_fixture.ping_with_certainty(
            dst_vm_ip, expectation=expectedResult)
        result_msg = "vm ping test result to vm %s is: %s" % (dst_vm, ret)
        self.logger.info(result_msg)
        if ret != True:
            result = False
            msg.extend(
                ["icmp traffic passed with deny rule:", result_msg])
        self.assertEqual(result, True, msg)

        result = True
        msg = []
        expectedResult = True
        dst_vm = p2vm2  # 'vmc2'
        dst_vm_fixture = config_topo[p_lst[1]]['vm'][p2vm2]
        dst_vm_ip = dst_vm_fixture.vm_ip
        src_vm_fixture = config_topo[p_lst[3]]['vm'][adminvm]  # 'vmc-admin'
        self.logger.info(
            "Now testing ICMP traffic between admin VM %s and non-default project VM %s" %
            (adminvm, p2vm2))
        ret = src_vm_fixture.ping_with_certainty(
            dst_vm_ip, expectation=expectedResult)
        result_msg = "vm ping test result to vm %s is: %s" % (dst_vm, ret)
        self.logger.info(result_msg)
        if ret != True:
            result = False
            msg.extend(
                ["ICMP traffic failed between default and non-default project with policy:", result_msg])
        self.assertEqual(result, True, msg)
        return True
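
    # Illustrative sketch (not part of the original suite): both ping checks
    # above follow the same pattern, which a helper like this one could
    # capture, assuming the ping_with_certainty API used above.
    def check_ping_expectation(self, src_vm_fixture, dst_vm_fixture,
                               expectation, failure_note):
        '''Ping dst from src with a stated expectation; return (ok, msg).'''
        ret = src_vm_fixture.ping_with_certainty(
            dst_vm_fixture.vm_ip, expectation=expectation)
        result_msg = 'vm ping test result to vm %s is: %s' % (
            dst_vm_fixture.vm_name, ret)
        self.logger.info(result_msg)
        if not ret:
            return False, [failure_note, result_msg]
        return True, []
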
Example #11
0
    def policy_test_with_scaled_udp_flows(self, topo, num_udp_streams=100, pps=100, wait_time_after_start_traffic=300, vms_on_single_compute=False, setup_only=False):
        """Pick 2n VM's for testing, have rules affecting udp protocol..
        pick 2 VN's, source and destination. each VM in src will send to a unique VM in dest.
        Generate traffic streams matching policy rules - udp for now..
        Check for system stability with flow scaling and traffic behavior as expected.
        """
        result = True
        msg = []
        #
        # Test setup: Configure policy, VN, & VM
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        if vms_on_single_compute:
            out = setup_obj.topo_setup(vms_on_single_compute=True)
        else:
            out = setup_obj.topo_setup()
        #out= setup_obj.topo_setup(vm_verify='yes', skip_cleanup='yes')
        self.logger.info("Setup completed with result %s" % (out['result']))
        self.assertEqual(out['result'], True, out['msg'])
        if out['result'] == True:
            topo, config_topo = out['data']
        # Setup/Verify Traffic ---
        # 1. Define Traffic Params
        # This will be source_vn for traffic test
        test_vn = topo.vnet_list[0]
        dest_vn = topo.vnet_list[1]
        topo_helper_obj = topology_helper(topo)
        vms_in_vn = topo_helper_obj.get_vm_of_vn()
        src_vms = vms_in_vn[test_vn]
        dest_vms = vms_in_vn[dest_vn]
        self.logger.info("----" * 20)
        self.logger.info("num_udp_streams: %s, pps: %s, src_vms: %s" %
                         (num_udp_streams, pps, len(src_vms)))
        self.logger.info("----" * 20)
        # using default protocol udp, traffic_proto_l= ['udp']
        total_single_instance_streams = num_udp_streams
        # 2. set expectation to verify..
        matching_rule_action = {}
        # Assumption made here: one policy assigned to test_vn
        policy = topo.vn_policy[test_vn][0]
        policy_info = "policy in effect is : " + str(topo.rules[policy])
        num_rules = len(topo.rules[policy])
        # Assumption made here: one rule for each dest_vn
        for i in range(num_rules):
            dvn = topo.rules[policy][i]['dest_network']
            matching_rule_action[dvn] = topo.rules[policy][i]['simple_action']
        if num_rules == 0:
            matching_rule_action[dest_vn] = 'deny'
        self.logger.info("matching_rule_action: %s" % matching_rule_action)
        # 3. Start Traffic
        traffic_obj = {}
        startStatus = {}
        stopStatus = {}
        expectedResult = {}
        for i in range(len(src_vms)):
            test_vm1 = src_vms[i]
            test_vm2 = dest_vms[i]
            test_vm1_fixture = config_topo['vm'][test_vm1]
            test_vm2_fixture = config_topo['vm'][test_vm2]
            expectedResult[i] = (matching_rule_action[dest_vn] == 'pass')
            startStatus[i] = {}
            traffic_obj[i] = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (self, name=name, num_streams= 1, start_port= 9100, tx_vm_fixture, rx_vm_fixture, \
            # stream_proto, packet_size= 100, start_sport= 8000,
            # total_single_instance_streams= 20)
            startStatus[i] = traffic_obj[i].startTraffic(
                tx_vm_fixture=test_vm1_fixture,
                rx_vm_fixture=test_vm2_fixture, total_single_instance_streams=total_single_instance_streams,
                cfg_profile='ContinuousSportRange', pps=pps)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                i, test_vm1_fixture.vm_ip, startStatus[i]['status'])
            if startStatus[i]['status'] == False:
                self.logger.error(msg1)
                msg.extend(
                    [msg1, 'More info on failure: ', startStatus[i]['msg']])
            else:
                self.logger.info(msg1)
            self.assertEqual(startStatus[i]['status'], True, msg)
        self.logger.info("-" * 80)
        sessions = self.tcpdump_on_analyzer(topo.si_list[0])
        for svm_name, (session, pcap) in sessions.items():
            self.verify_mirror(svm_name, session, pcap)

        if setup_only:
            self.logger.info("Test called with setup only..")
            return True
        else:
            # The wait should exceed 3 mins (the aging time) so flows can peak
            # and stabilise
            time.sleep(wait_time_after_start_traffic)
        # 4. Stop Traffic & validate received packets..
        self.logger.info("Proceed to stop traffic..")
        self.logger.info("-" * 80)
        for i in range(len(src_vms)):
            stopStatus[i] = traffic_obj[i].stopTraffic(loose='yes')
            status = True if stopStatus[i] == [] else False
            if status != expectedResult[i]:
                msg.append(stopStatus[i])
            self.logger.info("Status of stop traffic for instance %s is %s" %
                             (i, stopStatus[i]))
        if msg != []:
            result = False
        self.assertEqual(result, True, msg)
        self.logger.info("-" * 80)
        # 5. Verify kernel flows after stopping traffic; we don't expect any
        # stuck HOLD flows.
        for compNode in self.inputs.compute_ips:
            retry = 0
            while retry < 5:
                kflows = self.agent_inspect[compNode].get_vna_kflowresp()
                wait_time = 60 if len(kflows) > 1000 else 30
                if len(kflows) == 0:
                    break
                else:
                    self.logger.info(
                        "Waiting for Kernel flows to drain in Compute %s, attempt %s, num_flows %s" %
                        (compNode, retry, len(kflows)))
                    time.sleep(wait_time)
                    retry += 1
        for compNode in self.inputs.compute_ips:
            out = self.check_exception_flows_in_kernel(compNode)
            self.logger.info("out is: %s" % out)
            self.assertEqual(out['status'], True, out['msg'])
        # end checking flows
        return result
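
    # Illustrative helper (not part of the original suite): the kernel-flow
    # drain loop in step 5 could be expressed as below, assuming the
    # agent_inspect / get_vna_kflowresp() API used above.
    def wait_for_kernel_flows_to_drain(self, compute_ip, max_attempts=5):
        '''Poll kernel flows on one compute node until none remain or
           max_attempts polls have been made; return the remaining count.'''
        kflows = self.agent_inspect[compute_ip].get_vna_kflowresp()
        for attempt in range(max_attempts):
            if not kflows:
                return 0
            # a larger flow table drains more slowly, so wait longer
            wait_time = 60 if len(kflows) > 1000 else 30
            self.logger.info(
                'Waiting for kernel flows to drain in Compute %s, '
                'attempt %s, num_flows %s' %
                (compute_ip, attempt, len(kflows)))
            time.sleep(wait_time)
            kflows = self.agent_inspect[compute_ip].get_vna_kflowresp()
        return len(kflows)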