Example #1
 def test_repeated_policy_modify(self):
     """ Configure policies based on topology; Replace VN's existing policy [same policy name but with different rule set] multiple times and verify.
     """
     ###
     # Get config for test from topology
     # very simple topo will do, one vn, one vm, multiple policies with n
     # rules
     topology_class_name = sdn_single_vm_multiple_policy_topology.sdn_single_vm_multiple_policy_config
     self.logger.info("Scenario for the test used is: %s" %
                      (topology_class_name))
     # set project name
     try:
         # provided by wrapper module if run in parallel test env
         topo = topology_class_name(project=self.project.project_name,
                                    username=self.project.username,
                                    password=self.project.password)
     except NameError:
         topo = topology_class_name()
     ###
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(sdnTopoSetupFixture(
         self.connections, topo))
     out = setup_obj.topo_setup()
     assertEqual(out['result'], True, out['msg'])
     if out['result']:
         topo, config_topo = out['data']
     ###
     # Verify [and assert on fail] after setup
     # Calling system policy verification, pick any policy fixture to
     # access fixture verification
     policy_name = topo.policy_list[0]
     system_vna_verify_policy(self, config_topo['policy'][policy_name],
                              topo, 'setup')
     ###
     # Test procedure:
     # Test repeated update of a policy attached to a VM
     test_vm = topo.vmc_list[0]
     test_vn = topo.vn_of_vm[test_vm]
     test_vn_fix = config_topo['vn'][test_vn]
     test_vn_id = test_vn_fix.vn_id
     for policy in topo.policy_list:
         # set new policy for test_vn to policy
         test_policy_fq_names = []
         name = config_topo['policy'][policy].policy_fq_name
         test_policy_fq_names.append(name)
         state = "policy for %s updated to %s" % (test_vn, policy)
         test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
         # wait for tables update before checking after making changes to
         # system
         time.sleep(5)
         self.logger.info("new policy list of vn %s is %s" %
                          (test_vn, policy))
         # update expected topology with this new info for verification
         updated_topo = policy_test_utils.update_topo(topo, test_vn, policy)
         system_vna_verify_policy(self, config_topo['policy'][policy],
                                  updated_topo, state)
     return True
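
The loop above repeats the same bind-wait-verify step for every policy in the list. Below is a minimal sketch of that step factored into a helper; it only uses fixture members already exercised in this example (bind_policies, vn_id, policy_fq_name), and the helper name itself is illustrative, not part of the test repo.

import time


def rebind_single_policy(vn_fixture, policy_fixture, settle_time=5):
    """Replace a VN's policy list with a single policy and wait for tables to settle.

    Sketch only: vn_fixture and policy_fixture are assumed to expose the same
    bind_policies(), vn_id and policy_fq_name members used in the test above.
    """
    vn_fixture.bind_policies([policy_fixture.policy_fq_name], vn_fixture.vn_id)
    # Give control node and agent tables a moment to converge before verification.
    time.sleep(settle_time)

With such a helper, each loop iteration reduces to rebind_single_policy(test_vn_fix, config_topo['policy'][policy]) followed by policy_test_utils.update_topo() and system_vna_verify_policy().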
Example #2
    def test_flow_single_project(self):
        """Tests related to flow setup rate and flow table stability across various triggers,
           verified across VNs within a single project."""
        result = True
        #self.agent_objs = {}
        #self.set_flow_tear_time()
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        topology_class_name = flow_test_topo.systest_topo_single_project
        # mini topo for testing script
        # topology_class_name = mini_flow_test_topo.systest_topo_single_project
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))

        topo = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data'][0], out['data'][1]
        proj = list(topo.keys())[0]

        # Get the vrouter build version for logging purposes.
        BuildTag = get_OS_Release_BuildVersion(self)

        # Create traffic profile with all details like IP addresses, port
        # numbers and no of flows, from the profile defined in the topology.
        traffic_profiles = self.create_traffic_profiles(
            topo[proj],
            config_topo)

        self.topo, self.config_topo = topo, config_topo
        for each_profile in traffic_profiles:
            result = self.generate_udp_flows_and_do_verification(
                traffic_profiles[each_profile], str(BuildTag))
            # verify_system_parameters(self, out)
            self.delete_agent_flows()
            if not result:
                return False

        return True
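
The two-compute-node guard at the top of this test recurs in several of the examples below. A hypothetical helper that centralizes the check could look like the sketch below; the function name is illustrative, while inputs.compute_ips and the logger are the same attributes used above.

def have_min_compute_nodes(inputs, logger, minimum=2):
    """Return True only if the testbed has at least `minimum` compute nodes.

    Illustrative helper; not part of the test repo.
    """
    if len(inputs.compute_ips) < minimum:
        logger.warn("Minimum %d compute nodes are needed for this test to run" % minimum)
        logger.warn("Exiting since this test can't be run on %d compute node(s)" %
                    len(inputs.compute_ips))
        return False
    return True

A test would then start with: if not have_min_compute_nodes(self.inputs, self.logger): return True.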
Example #3
 def test_policy(self):
     """ Configure policies based on topology and run policy related verifications.
     """
     result = True
     #
     # Get config for test from topology
     topology_class_name = sdn_basic_topology.sdn_basic_config
     self.logger.info("Scenario for the test used is: %s" % (topology_class_name))
     # set project name
     try:
         # provided by wrapper module if run in parallel test env
         topo = topology_class_name(
             project=self.project.project_name,
             username=self.project.project_username,
             password=self.project.project_user_password,
         )
     except NameError:
         topo = topology_class_name()
     #
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(sdnTopoSetupFixture(self.connections, topo))
     out = setup_obj.topo_setup()
     assertEqual(out["result"], True, out["msg"])
     if out["result"]:
         topo, config_topo = out["data"]
     #
     # Verify [and assert on fail] after setup
     # Calling system policy verification, pick any policy fixture to
     # access fixture verification
     policy_name = topo.policy_list[0]
     system_vna_verify_policy(self, config_topo["policy"][policy_name], topo, "setup")
     return True
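
Every example in this collection checks the dict returned by topo_setup()/sdn_topo_setup() in the same way. Here is a small sketch of that unpacking, assuming only the {'result', 'msg', 'data'} contract documented in the comments above; the helper name is illustrative.

def unpack_sdn_setup(out):
    """Assert that topology setup succeeded and return (topo, config_topo).

    Assumes the return contract shown above:
    {'result': bool, 'msg': err_msg, 'data': [topo, config_topo]}.
    """
    assert out['result'], out['msg']
    topo, config_topo = out['data'][0], out['data'][1]
    return topo, config_topo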
Example #4
def all_policy_verify(
        self,
        config_topo,
        topo,
        state='unspecified',
        fixture_only='no'):
    '''Call all policy-related verifications.
    Useful for debugging failures; call this on failure.
    Verify & assert on fail'''
    self.logger.debug("Starting Verifications after %s" % (state))
    # calling policy fixture verifications
    for policy_name, policy_fixt in config_topo['policy'].items():
        ret = policy_fixt.verify_on_setup()
        assertEqual(ret['result'], True, ret['msg'])
    # calling vn-policy verification
    for vn_name, vn_fixt in config_topo['vn'].items():
        ret = vn_fixt.verify_vn_policy_in_api_server()
        assertEqual(ret['result'], True, ret['msg'])
    if fixture_only == 'no':
        # This is not a fixture verification;
        # requires runtime [config_topo] & user-defined [topo] topology to be in sync to verify
        # calling vna-acl verification
        # pick any policy configured
        policy_fixt = config_topo['policy'][str(topo.policy_list[0])]
        system_vna_verify_policy(self, policy_fixt, topo, state)
Example #5
def all_policy_verify(
        self,
        config_topo,
        topo,
        state='unspecified',
        fixture_only='no'):
    '''Call all policy-related verifications.
    Useful for debugging failures; call this on failure.
    Verify & assert on fail'''
    self.logger.debug("Starting Verifications after %s" % (state))
    # calling policy fixture verifications
    for policy_name, policy_fixt in list(config_topo['policy'].items()):
        ret = policy_fixt.verify_on_setup()
        assertEqual(ret['result'], True, ret['msg'])
    # calling vn-policy verification
    for vn_name, vn_fixt in list(config_topo['vn'].items()):
        ret = vn_fixt.verify_vn_policy_in_api_server()
        assertEqual(ret['result'], True, ret['msg'])
    if fixture_only == 'no':
        # This is not a fixture verification;
        # requires runtime [config_topo] & user-defined [topo] topology to be in sync to verify
        # calling vna-acl verification
        # pick any policy configured
        policy_fixt = config_topo['policy'][str(topo.policy_list[0])]
        system_vna_verify_policy(self, policy_fixt, topo, state)
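
all_policy_verify() is meant as a debugging aid when a traffic check fails (Example #17 below calls it exactly that way). A hedged usage sketch, using only calls that appear elsewhere in these examples; the wrapper itself is illustrative.

def verify_ping_or_debug(self, src_vm_fixture, dst_vm_fixture, config_topo, topo):
    """Sketch: run a ping check and, on failure, dump policy/VN state for debugging.

    src_vm_fixture/dst_vm_fixture are VM fixtures as used in the tests above.
    """
    ret = src_vm_fixture.ping_with_certainty(dst_vm_fixture.vm_ip,
                                             expectation=True,
                                             dst_vm_fixture=dst_vm_fixture)
    if not ret:
        # fixture_only='yes' limits the check to fixture-level verifications.
        all_policy_verify(self, config_topo, topo,
                          state='ping failure', fixture_only='yes')
    return ret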
Example #6
    def test_flow_scaling_interNode_interVN(self):
        """Basic systest with single project with many features & traffic..
        """
        result = False
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        topology_class_name = flow_scale_topo.FlowScaleTopology
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))

        try:
            topo = topology_class_name(
                compute_node_list=self.inputs.compute_ips,
                project=self.project.project_name,
                username=self.project.username,
                password=self.project.password)
        except NameError:
            topo = topology_class_name(
                compute_node_list=self.inputs.compute_ips)

        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup(VmToNodeMapping=topo.vm_node_map)
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data']

        self.traffic_scenarios = self.build_traffic_scenarios(
                                     topo,
                                     config_topo)

        src_vm_obj=self.traffic_scenarios['1to2'][0]
        vn_fq_name=config_topo['vn']['vnet1'].vn_fq_name
        src_vm_intf_id=src_vm_obj.cs_vmi_obj[
            vn_fq_name]['virtual-machine-interface']['uuid']
        src_vm_obj.provision_static_route(
            prefix='111.1.0.0/16',
            virtual_machine_interface_id=src_vm_intf_id)

        result = self.create_scaled_flows()
        self.delete_agent_flows()
        return True
Example #7
    def test_system_single_project(self):
        """Basic system test with a single project exercising many features and traffic.
        """
        result = True
        #self.agent_objs = {}
        #self.set_flow_tear_time()
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        topology_class_name = system_test_topo.systest_topo_single_project
        # For testing script, use mini topology
        # topology_class_name =
        # mini_system_test_topo.systest_topo_single_project
        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))

        topo = topology_class_name(compute_node_list=self.inputs.compute_ips)
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data'][0], out['data'][1]
        proj = list(topo.keys())[0]

        # Get the vrouter build version for logging purposes.
        BuildTag = get_OS_Release_BuildVersion(self)

        # Create traffic profile with all details like IP addresses, port
        # numbers and no of flows, from the profile defined in the topology.
        traffic_profiles = self.create_traffic_profiles(
            topo[proj], config_topo)

        self.topo, self.config_topo = topo, config_topo
        for each_profile in traffic_profiles:
            result = self.generate_udp_flows(traffic_profiles[each_profile],
                                             str(BuildTag))
            #verify_system_parameters(self, out)
            self.delete_agent_flows()
            if not result:
                return False

        return True
Example #8
def config_topo_single_proj(class_instance,
                            topology_class_name,
                            create_traffic_profile=True):
    """Initialize and Setup configurations for single project related flow
       system tests.
    """
    #self.agent_objs = {}
    # self.set_flow_tear_time()
    #
    # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this
    # test;
    # else report that a minimum of 2 compute nodes is needed for this test and
    # exit.
    # if len(self.inputs.compute_ips) < 2:
    if len(class_instance.inputs.compute_ips) < 2:
        class_instance.logger.warn(
            "Minimum 2 compute nodes are needed for this test to run")
        class_instance.logger.warn(
            "Exiting since this test can't be run on single compute node")
        return True
    #
    # Get config for test from topology
    #topology_class_name = system_test_topo.SystestTopoSingleProject
    # For testing script, use mini topology
    # topology_class_name =
    # mini_system_test_topo.SystestTopoSingleProject
    class_instance.logger.info("Scenario for the test used is: %s" %
                               (str(topology_class_name)))

    topo = topology_class_name(
        compute_node_list=class_instance.inputs.compute_ips)
    #
    # Test setup: Configure policy, VN, & VM
    # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
    # Returned topo is of following format:
    # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
    # vm_fixture}
    setup_obj = class_instance.useFixture(
        sdnTopoSetupFixture(class_instance.connections, topo))
    out = setup_obj.sdn_topo_setup()
    assertEqual(out['result'], True, out['msg'])
    if out['result']:
        topo, config_topo = out['data'][0], out['data'][1]
    proj = list(topo.keys())[0]

    # Get the vrouter build version for logging purposes.
    class_instance.BuildTag = get_OS_Release_BuildVersion(class_instance)

    # Create traffic profile with all details like IP addresses, port
    # numbers and no of flows, from the profile defined in the topology.
    if create_traffic_profile:
        class_instance.traffic_profiles = create_traffic_profiles(
            topo[proj], config_topo)

    class_instance.topo, class_instance.config_topo = topo, config_topo
    class_instance.config_setup_obj = setup_obj
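
A hypothetical test method that delegates its setup to config_topo_single_proj() might look like the sketch below. The topology class and the generate/delete helpers are the ones used in the flow test above; the test body itself is illustrative and not taken from the repo.

def test_flows_with_shared_setup(self):
    """Illustrative only: reuse config_topo_single_proj() for setup, then drive traffic."""
    config_topo_single_proj(self, flow_test_topo.systest_topo_single_project)
    # The helper leaves topo, config_topo, traffic_profiles and BuildTag on the instance.
    for each_profile in self.traffic_profiles:
        if not self.generate_udp_flows_and_do_verification(
                self.traffic_profiles[each_profile], str(self.BuildTag)):
            return False
        self.delete_agent_flows()
    return True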
Example #9
    def test_flow_scaling_interNode_interVN(self):
        """Basic systest with single project with many features & traffic..
        """
        result = False
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        topology_class_name = flow_scale_topo.FlowScaleTopology
        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))

        try:
            topo = topology_class_name(
                compute_node_list=self.inputs.compute_ips,
                project=self.project.project_name,
                username=self.project.username,
                password=self.project.password)
        except NameError:
            topo = topology_class_name(
                compute_node_list=self.inputs.compute_ips)

        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.topo_setup(VmToNodeMapping=topo.vm_node_map)
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data']

        self.traffic_scenarios = self.build_traffic_scenarios(
            topo, config_topo)

        src_vm_obj = self.traffic_scenarios['1to2'][0]
        vn_fq_name = config_topo['vn']['vnet1'].vn_fq_name
        src_vm_intf_id = src_vm_obj.cs_vmi_obj[vn_fq_name][
            'virtual-machine-interface']['uuid']
        src_vm_obj.provision_static_route(
            prefix='111.1.0.0/16', virtual_machine_interface_id=src_vm_intf_id)

        result = self.create_scaled_flows()
        self.delete_agent_flows()
        return True
Example #10
 def test_repeated_policy_modify(self):
     """ Configure policies based on topology; Replace VN's existing policy [same policy name but with different rule set] multiple times and verify.
     """
     ###
     # Get config for test from topology
     # very simple topo will do, one vn, one vm, multiple policies with n
     # rules
     topology_class_name = sdn_single_vm_multiple_policy_topology.sdn_single_vm_multiple_policy_config
     self.logger.info("Scenario for the test used is: %s" % (topology_class_name))
     # set project name
     try:
         # provided by wrapper module if run in parallel test env
         topo = topology_class_name(
             project=self.project.project_name, username=self.project.username, password=self.project.password
         )
     except NameError:
         topo = topology_class_name()
     ###
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(sdnTopoSetupFixture(self.connections, topo))
     out = setup_obj.topo_setup()
     assertEqual(out["result"], True, out["msg"])
     if out["result"]:
         topo, config_topo = out["data"]
     ###
     # Verify [and assert on fail] after setup
     # Calling system policy verification, pick any policy fixture to
     # access fixture verification
     policy_name = topo.policy_list[0]
     system_vna_verify_policy(self, config_topo["policy"][policy_name], topo, "setup")
     ###
     # Test procedure:
     # Test repeated update of a policy attached to a VM
     test_vm = topo.vmc_list[0]
     test_vn = topo.vn_of_vm[test_vm]
     test_vn_fix = config_topo["vn"][test_vn]
     test_vn_id = test_vn_fix.vn_id
     for policy in topo.policy_list:
         # set new policy for test_vn to policy
         test_policy_fq_names = []
         name = config_topo["policy"][policy].policy_fq_name
         test_policy_fq_names.append(name)
         state = "policy for %s updated to %s" % (test_vn, policy)
         test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
         # wait for tables update before checking after making changes to
         # system
         time.sleep(5)
         self.logger.info("new policy list of vn %s is %s" % (test_vn, policy))
         # update expected topology with this new info for verification
         updated_topo = policy_test_utils.update_topo(topo, test_vn, policy)
         system_vna_verify_policy(self, config_topo["policy"][policy], updated_topo, state)
     return True
Example #11
def config_topo_single_proj(class_instance, topology_class_name):
    """Initialize and set up configurations for single-project flow
       system tests.
    """
    #self.agent_objs = {}
    #self.set_flow_tear_time()
    #
    # Check if there are enough nodes i.e. atleast 2 compute nodes to run this test.
    # else report that minimum 2 compute nodes are needed for this test and
    # exit.
    #if len(self.inputs.compute_ips) < 2:
    if len(class_instance.inputs.compute_ips) < 2:
        class_instance.logger.warn(
            "Minimum 2 compute nodes are needed for this test to run")
        class_instance.logger.warn(
            "Exiting since this test can't be run on single compute node")
        return True
    #
    # Get config for test from topology
    #topology_class_name = system_test_topo.systest_topo_single_project
    # For testing script, use mini topology
    # topology_class_name =
    # mini_system_test_topo.systest_topo_single_project
    class_instance.logger.info(
        "Scenario for the test used is: %s" %
        (str(topology_class_name)))

    topo = topology_class_name(
        compute_node_list=class_instance.inputs.compute_ips)
    #
    # Test setup: Configure policy, VN, & VM
    # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
    # Returned topo is of following format:
    # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
    # vm_fixture}
    setup_obj = class_instance.useFixture(
        sdnTopoSetupFixture(class_instance.connections, topo))
    out = setup_obj.sdn_topo_setup()
    assertEqual(out['result'], True, out['msg'])
    if out['result']:
        topo, config_topo = out['data'][0], out['data'][1]
    proj = list(topo.keys())[0]

    # Get the vrouter build version for logging purposes.
    class_instance.BuildTag = get_OS_Release_BuildVersion(class_instance)

    # Create traffic profile with all details like IP addresses, port
    # numbers and no of flows, from the profile defined in the topology.
    class_instance.traffic_profiles = create_traffic_profiles(
        topo[proj],
        config_topo)

    class_instance.topo, class_instance.config_topo = topo, config_topo
Example #12
    def test_policy(self):
        """ Configure policies based on topology and run policy related verifications.
        """
        result = True
        #
        # Get config for test from topology
        topology_class_name = sdn_basic_topology.sdn_basic_config
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))
        # set project name
        try:
            # provided by wrapper module if run in parallel test env
            topo = topology_class_name(
                project=self.project.project_name,
                username=self.project.project_username,
                password=self.project.project_user_password)
        except NameError:
            topo = topology_class_name()
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup()
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data']
        #
        # Verify [and assert on fail] after setup
        # Calling system policy verification, pick any policy fixture to
        # access fixture verification
        policy_name = topo.policy_list[0]
        system_vna_verify_policy(
            self,
            config_topo['policy'][policy_name],
            topo,
            'setup')

        # Verify ICMP traffic between the two VM's.
        if not config_topo['vm'][topo.vmc_list[0]].ping_with_certainty(
            expectation=True,
            dst_vm_fixture=config_topo['vm'][topo.vmc_list[1]]):
            self.logger.error(
                'Ping from %s to %s failed,expected it to pass' %
                (config_topo['vm'][topo.vmc_list[0]].vm_name,
                 config_topo['vm'][topo.vmc_list[1]].vm_name))
            return False

        return True
Example #14
def inventory_tests(self, node_name=None):
    if node_name is None:
        self.logger.error("ERROR :: Target node name has to be specified to test inventory information.")
        return False
    self.logger.info("------------INVENTORY TEST FOR NODE %s------------" % node_name)
    local("server-manager-client display inventory --server_id %s > working_db.txt" % node_name)
    fd=open('working_db.txt','r')
    lines=fd.readlines()
    fd.close()
    fd=open('working_db.json','w')
    for i in range(1,len(lines)-1):
        fd.write(lines[i])
    fd.close()
    fd=open('working_db.json','r')
    inventory_data=json.load(fd)
    fd.close()

    node_ip=self.smgr_fixture.get_ip_using_server_id(node_name)
    node_pswd=self.smgr_fixture.get_pswd_using_server_id(node_name)

    #Check for cpu details in inventory.
    with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True):
        cpu_cores=run('cat /proc/cpuinfo | grep "cpu cores" | head -n 1 |cut -d ":" -f2')
        clock_speed=run('cat /proc/cpuinfo | grep "cpu MHz" | head -n 1 |cut -d ":" -f2')
        model=run('cat /proc/cpuinfo | grep "model name" | head -n 1 |cut -d ":" -f2')

    assertEqual(int(cpu_cores), inventory_data['ServerInventoryInfo']['cpu_cores_count'],
        "cpu_cores_count mismatch for node %s = inventory_data - %s, proc-cpuinfo data - %s" % (node_name,inventory_data['ServerInventoryInfo']['cpu_cores_count'],cpu_cores))
    assertEqual(float(clock_speed), float(inventory_data['ServerInventoryInfo']['cpu_info_state']['clock_speed_MHz']),
        "clock_speed mismatch for node %s = inventory_data - %s, proc-cpuinfo data - %s"
            % (node_name,float(inventory_data['ServerInventoryInfo']['cpu_info_state']['clock_speed_MHz']),float(clock_speed)))
    assertEqual(model, inventory_data['ServerInventoryInfo']['cpu_info_state']['model'],
        "model mismatch for node %s = inventory_data - %s, proc-cpuinfo data - %s"
            % (node_name,inventory_data['ServerInventoryInfo']['cpu_info_state']['model'],model))

    #Check for interface details in inventory; both physical and virtual interfaces should be listed.
    with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True):
        intf_names=run("ifconfig -a | grep 'Link encap' | awk '{print $1}'")
        intf_list=intf_names.split('\r\n')

    # Filter out interface names containing '-' without deleting from the list while iterating over it.
    track_intf = [intf for intf in intf_list if '-' not in intf]

    for intf_data in inventory_data['ServerInventoryInfo']['interface_infos']:
        if '_' in intf_data['interface_name']:
            continue
        if intf_data['interface_name'] in track_intf:
            if (intf_data['ip_addr'] and intf_data['ip_addr'] != 'N/A'):
                with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True):
                    ip_addr=run("ifconfig " + intf_data['interface_name'] + " | grep inet | awk '{print $2}' | cut -d ':' -f 2")
                assertEqual(ip_addr, intf_data['ip_addr'], "ip address mis-match for interface %s on node %s. inventory data - %s, ifconfig data %s"
                    % (intf_data['interface_name'],node_name,intf_data['ip_addr'],ip_addr))

            if (intf_data['macaddress'] and intf_data['macaddress'] != 'N/A'):
                with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True):
                    mac_addr=run("cat /sys/class/net/" + intf_data['interface_name'] + "/address")
                assertEqual(mac_addr.lower(), intf_data['macaddress'].lower(), "mac address mis-match for interface %s on node %s. inventory data - %s, ifconfig data %s"
                    % (intf_data['interface_name'],node_name,intf_data['macaddress'].lower(),mac_addr.lower()))

            if (intf_data['netmask'] and intf_data['netmask'] != 'N/A'):
                with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True):
                    mask=run("ifconfig " + intf_data['interface_name'] + " | grep Mask | awk '{print $4}' | cut -d ':' -f 2")
                assertEqual(mask, intf_data['netmask'], "netmask mis-match for interface %s on node %s. inventory data - %s, ifconfig data %s"
                    % (intf_data['interface_name'],node_name,intf_data['netmask'],mask))

        else:
            self.logger.error("ERROR :: Interface not found in inventory but there as part of the system info")
            self.logger.error("ERROR :: Inventory interface information %s" % intf_data)
            self.logger.error("ERROR :: System interface information %s" % track_intf)
            return False

    #Check for memory state and number of disks.
    with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True):
        dimm_size_mb=run("dmidecode -t 17 | grep Size | head -n 1 | awk '{print $2}'")
        mem_speed_MHz=run("dmidecode -t 17 | grep Speed | head -n 1 | awk '{print $2}'")
        mem_type=run("dmidecode -t 17 | grep Type | head -n 1 | awk '{print $2}'")
        num_of_dimms=run("dmidecode -t 17 | grep 'Memory Device' | wc -l")
        swap_size_mb=run("vmstat -s -S M | grep 'total swap' | awk '{print $1}'")
        total_mem_mb=run("vmstat -s -S M | grep 'total memory' | awk '{print $1}'")

    assertEqual(int(dimm_size_mb), inventory_data['ServerInventoryInfo']['mem_state']['dimm_size_mb'],
        "dimm_size_mb mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['dimm_size_mb'],int(dimm_size_mb)))
    assertEqual(int(mem_speed_MHz), inventory_data['ServerInventoryInfo']['mem_state']['mem_speed_MHz'],
        "mem_speed_MHz mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['mem_speed_MHz'],int(mem_speed_MHz)))
    assertEqual(mem_type, inventory_data['ServerInventoryInfo']['mem_state']['mem_type'],
        "mem_type mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['mem_type'],mem_type))
    assertEqual(int(num_of_dimms), inventory_data['ServerInventoryInfo']['mem_state']['num_of_dimms'],
        "num_of_dimms mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['num_of_dimms'],int(num_of_dimms)))

    if (float(swap_size_mb)*0.98 <= float(inventory_data['ServerInventoryInfo']['mem_state']['swap_size_mb']) <= float(swap_size_mb)*1.02):
        self.logger.info("swap_size_mb matched inventory data.")
    else:
        self.logger.error("swap_size_mb for node %s = inventory_data - %s, vmstat data - %s --- Not in range 98% to 102%"
            % (node_name,float(inventory_data['ServerInventoryInfo']['mem_state']['swap_size_mb']),float(swap_size_mb)))
        return False

    if (float(total_mem_mb)*0.98 <= float(inventory_data['ServerInventoryInfo']['mem_state']['total_mem_mb']) <= float(total_mem_mb)*1.02):
        self.logger.info("total_mem_mb matched inventory data.")
    else:
        self.logger.error("total_mem_mb for node %s = inventory_data - %s, vmstat data - %s --- Not in range 98% to 102%"
            % (node_name,float(inventory_data['ServerInventoryInfo']['mem_state']['total_mem_mb']),float(total_mem_mb)))
        return False

    #Check for system related inventory information.
    with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True):
        board_manufacturer=run("dmidecode -t 3 | grep 'Manufacturer' | awk '{print $2}'")
        kernel_version=run("uname -r | cut -d '-' -f 1")
        name=run("uname -n")
        hardware_model=run("uname -i")
        node_os=run("uname -v | cut -d '-' -f 2 | awk '{print $1}'")

    assertEqual(board_manufacturer, inventory_data['ServerInventoryInfo']['fru_infos'][0]['board_manufacturer'],
        "board_manufacturer mismatch for node %s = inventory_data - %s, dmidecode data - %s"
            % (node_name,inventory_data['ServerInventoryInfo']['fru_infos'][0]['board_manufacturer'],board_manufacturer))
    assertEqual(kernel_version, inventory_data['ServerInventoryInfo']['kernel_version'],
        "kernel_version mismatch for node %s = inventory_data - %s, uname data - %s" % (node_name,inventory_data['ServerInventoryInfo']['kernel_version'],kernel_version))
    assertEqual(name, inventory_data['ServerInventoryInfo']['name'],
        "name mismatch for node %s = inventory_data - %s, uname data - %s" % (node_name,inventory_data['ServerInventoryInfo']['name'],name))
    assertEqual(hardware_model, inventory_data['ServerInventoryInfo']['hardware_model'],
        "hardware_model mismatch for node %s = inventory_data - %s, uname data - %s" % (node_name,inventory_data['ServerInventoryInfo']['hardware_model'],hardware_model))
    assertEqual(node_os, inventory_data['ServerInventoryInfo']['os'],
        "os mismatch for node %s = inventory_data - %s, uname data - %s" % (node_name,inventory_data['ServerInventoryInfo']['os'],node_os))

    os.remove('working_db.txt')
    os.remove('working_db.json')
    self.logger.info("------------END OF INVENTORY TEST FOR NODE %s------------" % node_name)
    return True
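
The swap and total-memory comparisons above accept a ±2% window around the vmstat value. Below is a standalone sketch of that tolerance check; the helper name is illustrative.

def within_tolerance(measured, reported, pct=2.0):
    """Return True if `reported` lies within ±pct% of `measured`.

    Mirrors the 98%-102% window used for swap_size_mb and total_mem_mb above.
    """
    low = float(measured) * (1.0 - pct / 100.0)
    high = float(measured) * (1.0 + pct / 100.0)
    return low <= float(reported) <= high


# 32076 * 1.02 = 32717.52, so an inventory value of 32768 falls outside the window.
assert within_tolerance(32076, 32100)
assert not within_tolerance(32076, 32768)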
Example #15
    def test_policy_modify_vn_policy(self):
        """ Configure policies based on topology;
        """
        ###
        # Get config for test from topology
        # very simple topo will do, one vn, one vm, one policy, 3 rules
        topology_class_name = sdn_single_vm_policy_topology.sdn_single_vm_policy_config

        self.logger.info("Scenario for the test used is: %s" % (topology_class_name))
        # set project name
        try:
            # provided by wrapper module if run in parallel test env
            topo = topology_class_name(
                project=self.project.project_name, username=self.project.username, password=self.project.password
            )
        except NameError:
            topo = topology_class_name()
        ###
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup()
        assertEqual(out["result"], True, out["msg"])
        if out["result"]:
            topo, config_topo = out["data"]
        ###
        # Verify [and assert on fail] after setup
        # Calling system policy verification, pick any policy fixture to
        # access fixture verification
        policy_name = topo.policy_list[0]
        system_vna_verify_policy(self, config_topo["policy"][policy_name], topo, "setup")
        ###
        # Test procedure:
        # Test adding new policy to VN's existing policy list
        state = "add policy: "
        test_vm = topo.vmc_list[0]
        test_vn = topo.vn_of_vm[test_vm]
        # Init test data, take backup of current topology
        initial_vn_policy_list = copy.copy(topo.vn_policy[test_vn])
        new_policy_to_add = policy_test_utils.get_policy_not_in_vn(initial_vn_policy_list, topo.policy_list)
        if not new_policy_to_add:
            result = False
            msg = "Test cannot be run as the required config is not available in the topology; aborting test"
            self.logger.info(msg)
            assertEqual(result, True, msg)
        initial_policy_vn_list = copy.copy(topo.policy_vn[new_policy_to_add])
        new_vn_policy_list = copy.copy(initial_vn_policy_list)
        new_policy_vn_list = copy.copy(initial_policy_vn_list)
        new_vn_policy_list.append(new_policy_to_add)
        new_policy_vn_list.append(test_vn)
        test_vn_fix = config_topo["vn"][test_vn]
        test_vn_id = test_vn_fix.vn_id
        # configure new policy
        config_topo["policy"][new_policy_to_add] = self.useFixture(
            PolicyFixture(
                policy_name=new_policy_to_add,
                rules_list=topo.rules[new_policy_to_add],
                inputs=self.inputs,
                connections=self.connections,
            )
        )
        # get new policy_set to be pushed for the vn
        test_policy_fq_names = []
        for policy in new_vn_policy_list:
            name = config_topo["policy"][policy].policy_fq_name
            test_policy_fq_names.append(name)
        self.logger.info("adding policy %s to vn %s" % (new_policy_to_add, test_vn))
        test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
        # wait for tables update before checking after making changes to system
        time.sleep(5)
        self.logger.info("New policy list of VN %s is %s" % (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        topo.policy_vn[new_policy_to_add] = new_policy_vn_list
        system_vna_verify_policy(self, config_topo["policy"][new_policy_to_add], topo, state)
        # Test unbinding all policies from VN
        state = "unbinding all policies"
        test_vn_fix.unbind_policies(test_vn_id)
        # wait for tables update before checking after making changes to system
        time.sleep(5)
        current_vn_policy_list = new_vn_policy_list
        new_vn_policy_list = []
        self.logger.info("New policy list of VN %s is %s" % (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        for policy in current_vn_policy_list:
            topo.policy_vn[policy].remove(test_vn)
        system_vna_verify_policy(self, config_topo["policy"][new_policy_to_add], topo, state)
        return True
Example #16
 def test_policy_with_multi_vn_in_vm(self):
     ''' Test to validate policy action in a VM with vNICs in multiple VNs with different policies.
     Test flow: vm1 in vn1 and vn2; vm2 in vn3. Policy allows traffic from vn2 to vn3 and denies it from vn1 to vn3.
     Default route for vm1 is in vn1, which has no reachability to vn3 - verify traffic - should fail.
     Add a specific route to direct vn3 traffic through vn2 - verify traffic - should pass.
     '''
     vm1_name = 'vm_mine1'
     vm2_name = 'vm_mine2'
     vn1_name = 'vn221'
     vn1_subnets = ['11.1.1.0/24']
     vn2_name = 'vn222'
     vn2_subnets = ['22.1.1.0/24']
     vn3_gateway = '22.1.1.254'
     vn3_name = 'vn223'
     vn3_subnets = ['33.1.1.0/24']
     rules1 = [
         {
             'direction': '>', 'simple_action': 'deny',
             'protocol': 'icmp', 'src_ports': 'any',
             'dst_ports': 'any',
             'source_network': 'any',
             'dest_network': 'any',
         },
     ]
     rules2 = [
         {
             'direction': '<>', 'simple_action': 'pass',
             'protocol': 'any', 'src_ports': 'any',
             'dst_ports': 'any',
             'source_network': 'any',
             'dest_network': 'any',
         },
     ]
     policy1_name = 'p1'
     policy2_name = 'p2'
     policy1_fixture = self.useFixture(
         PolicyFixture(
             policy_name=policy1_name,
             rules_list=rules1,
             inputs=self.inputs,
             connections=self.connections))
     policy2_fixture = self.useFixture(
         PolicyFixture(
             policy_name=policy2_name,
             rules_list=rules2,
             inputs=self.inputs,
             connections=self.connections))
     vn1_fixture = self.useFixture(
         VNFixture(
             project_name=self.inputs.project_name,
             connections=self.connections,
             vn_name=vn1_name,
             inputs=self.inputs,
             subnets=vn1_subnets,
             policy_objs=[
                 policy1_fixture.policy_obj]))
     vn2_fixture = self.useFixture(
         VNFixture(
             project_name=self.inputs.project_name,
             connections=self.connections,
             vn_name=vn2_name,
             inputs=self.inputs,
             subnets=vn2_subnets,
             disable_gateway=True,
             policy_objs=[
                 policy2_fixture.policy_obj]))
     vn3_fixture = self.useFixture(
         VNFixture(
             project_name=self.inputs.project_name,
             connections=self.connections,
             vn_name=vn3_name,
             inputs=self.inputs,
             subnets=vn3_subnets,
             policy_objs=[
                 policy2_fixture.policy_obj]))
     assert vn1_fixture.verify_on_setup()
     assert vn2_fixture.verify_on_setup()
     assert vn3_fixture.verify_on_setup()
     assert vn1_fixture.verify_vn_policy_in_api_server()
     assert vn2_fixture.verify_vn_policy_in_api_server()
     assert vn3_fixture.verify_vn_policy_in_api_server()
     vm1_fixture = self.useFixture(
         VMFixture(
             connections=self.connections,
             vn_objs=[
                 vn1_fixture.obj,
                 vn2_fixture.obj],
             vm_name=vm1_name,
             project_name=self.inputs.project_name))
     vm2_fixture = self.useFixture(
         VMFixture(
             connections=self.connections,
             vn_objs=[
                 vn3_fixture.obj],
             vm_name=vm2_name,
             project_name=self.inputs.project_name))
     assert vm1_fixture.verify_on_setup()
     assert vm2_fixture.verify_on_setup()
     self.nova_h.wait_till_vm_is_up(vm1_fixture.vm_obj)
     self.nova_h.wait_till_vm_is_up(vm2_fixture.vm_obj)
     # For multi-vn vm, configure ip address for 2nd interface
     multivn_vm_ip_list = vm1_fixture.vm_ips
     intf_conf_cmd = "ifconfig eth1 %s netmask 255.255.255.0" % multivn_vm_ip_list[
         1]
     vm_cmds = (intf_conf_cmd, 'ifconfig -a')
     for cmd in vm_cmds:
         cmd_to_output = [cmd]
         vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
         output = vm1_fixture.return_output_cmd_dict[cmd]
     for ip in multivn_vm_ip_list:
         if ip not in output:
             self.logger.error(
                 "IP %s not assigned to any eth intf of %s" %
                 (ip, vm1_fixture.vm_name))
             assert False
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw of VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with no allow rule in the VN where default gw is part of, traffic should fail")
     result = vm1_fixture.ping_with_certainty(
         vm2_fixture.vm_ip,
         expectation=False)
     assertEqual(result, True, "ping passed which is not expected")
     # Configure VM to reroute traffic to interface belonging to different
     # VN
     self.logger.info(
         "Direct traffic to gw which is part of VN with allow policy to destination VN, traffic should pass now")
     i = ' route add -net %s netmask 255.255.255.0 gw %s dev eth1' % (
         vn3_subnets[0].split('/')[0], multivn_vm_ip_list[1])
     cmd_to_output = [i]
     vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
     output = vm1_fixture.return_output_cmd_dict[i]
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw for VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with allow rule in the VN where network gw is part of, traffic should pass")
     result = vm1_fixture.ping_with_certainty(
         vm2_fixture.vm_ip,
         expectation=True)
     assertEqual(result, True, "ping failed which is not expected")
     return True
Example #17
 def repeated_policy_update_test_with_ping(self, topo):
     """ Pick 2 VM's for testing, test with ping; modify policy of one VN [in which VM is
     present] and verify the rule functionality with ping.
     """
     result = True
     msg = []
     #
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(
         sdnTopoSetupFixture(self.connections, topo))
     out = setup_obj.topo_setup()
     #out= setup_obj.topo_setup(vm_verify='yes', skip_cleanup='yes')
     self.logger.info("Setup completed with result %s" % (out['result']))
     assertEqual(out['result'], True, out['msg'])
     if out['result']:
         topo, config_topo = out['data']
     # 1. Define Traffic Params
     test_vm1 = topo.vmc_list[0]  # 'vmc0'
     test_vm2 = topo.vmc_list[1]  # 'vmc1'
     test_vm1_fixture = config_topo['vm'][test_vm1]
     test_vm2_fixture = config_topo['vm'][test_vm2]
     test_vn = topo.vn_of_vm[test_vm1]  # 'vnet0'
     test_vn_fix = config_topo['vn'][test_vn]
     test_vn_id = test_vn_fix.vn_id
     test_proto = 'icmp'
     for policy in topo.policy_test_order:
         # 2. set new policy for test_vn to policy
         test_policy_fq_names = []
         name = config_topo['policy'][policy].policy_fq_name
         test_policy_fq_names.append(name)
         state = "policy for " + test_vn + " updated to " + policy
         test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
         # wait for tables update before checking after making changes to
         # system
         time.sleep(5)
         self.logger.info("new policy list of vn %s is %s" %
                          (test_vn, policy))
         # update expected topology with this new info for verification
         updated_topo = policy_test_utils.update_topo(topo, test_vn, policy)
         self.logger.info("Starting Verifications after %s" % (state))
         policy_info = "policy in effect is : %s" % (topo.rules[policy])
         self.logger.info(policy_info)
         # 3. set expectation to verify..
         matching_rule_action = {}
         # Topology guide: There is only one policy assigned to test_vn and there is one rule affecting traffic proto.
         # For ping test, set expected result based on action - pass or deny
         # if action = 'pass', expectedResult= True, else Fail;
         num_rules = len(topo.rules[policy])
         for i in range(num_rules):
             proto = topo.rules[policy][i]['protocol']
             matching_rule_action[proto] = topo.rules[
                 policy][i]['simple_action']
         if num_rules == 0:
             matching_rule_action[test_proto] = 'deny'
         self.logger.info("matching_rule_action: %s" %
                          matching_rule_action)
         # 4. Test with ping
         expectedResult = True if matching_rule_action[
             test_proto] == 'pass' else False
         ret = test_vm1_fixture.ping_with_certainty(
             test_vm2_fixture.vm_ip, expectation=expectedResult,
             dst_vm_fixture=test_vm2_fixture)
         result_msg = "vm ping test result after %s is: %s" % (state, ret)
         self.logger.info(result_msg)
         if not ret:
             result = False
             msg.extend([result_msg, policy_info])
             all_policy_verify(
                 self, config_topo, updated_topo, state, fixture_only='yes')
     assertEqual(result, True, msg)
     test_vn_fix.unbind_policies(test_vn_id)
     return result
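
Step 3 above derives the expected ping result from the rules of the policy that was just bound. Here is a standalone sketch of that derivation, using the same rule-dict shape as topo.rules[policy] in these examples; a missing protocol entry is treated as deny.

def expected_ping_result(rules, test_proto='icmp'):
    """Return True if the rule list is expected to pass `test_proto` traffic.

    Each rule is a dict with 'protocol' and 'simple_action' keys, as in
    topo.rules[policy] above; an empty rule list means implicit deny.
    """
    matching_rule_action = {}
    for rule in rules:
        matching_rule_action[rule['protocol']] = rule['simple_action']
    if not rules:
        matching_rule_action[test_proto] = 'deny'
    return matching_rule_action.get(test_proto) == 'pass'


# A single icmp 'pass' rule -> ping is expected to succeed; no rules -> deny.
assert expected_ping_result([{'protocol': 'icmp', 'simple_action': 'pass'}])
assert not expected_ping_result([])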
Example #18
    def test_policy_rules_scaling_with_ping(self):
        ''' Test to validate scaling of policy and rules.
            Test to validate multiple policy scaling with
            10 rules each. These policies will be attached
            to two VN's and 2 VM's will be spawned in each
            of the VN's to verify exact number of acls are
            created in the agent introspect.
            Expected ace id's = 150 policy * 10 distinct rules
            + 1 valid rule + 2 default rules = 1503 ace id's.
        '''
        result = True
        msg = []
        vn1_name = 'vn1'
        vn2_name = 'vn2'
        vn1_subnets = [get_random_cidr(af='v4')]
        vn2_subnets = ['20.1.1.0/24']
        number_of_policy = 150
        number_of_dummy_rules = 10
        number_of_valid_rules = 1
        number_of_default_rules = 2
        total_number_of_rules = (
            number_of_dummy_rules *
            number_of_policy) + number_of_valid_rules + number_of_default_rules
        no_of_rules_exp = total_number_of_rules
        valid_rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
        ]

        self.logger.info(
            'Creating %d policy and %d rules to test policy scalability' %
            (number_of_policy, number_of_dummy_rules + len(valid_rules)))
        policy_objs_list = policy_test_helper._create_n_policy_n_rules(
            self,
            number_of_policy,
            valid_rules,
            number_of_dummy_rules,
            verify=False)
        time.sleep(5)
        self.logger.info('Create VN and associate %d policy' %
                         (number_of_policy))
        vn1_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_name=vn1_name,
                      inputs=self.inputs,
                      subnets=vn1_subnets,
                      policy_objs=policy_objs_list))
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_name=vn2_name,
                      inputs=self.inputs,
                      subnets=vn2_subnets,
                      policy_objs=policy_objs_list))
        assert vn2_fixture.verify_on_setup()
        vn1_vm1_name = 'vm1'
        vn1_vm2_name = 'vm2'
        vm1_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn1_fixture.obj,
                      vm_name=vn1_vm1_name))
        assert vm1_fixture.verify_on_setup()
        vm2_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn2_fixture.obj,
                      vm_name=vn1_vm2_name))
        assert vm2_fixture.verify_on_setup()
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        self.logger.info("Verify ping to vm %s" % (vn1_vm2_name))
        ret = vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip,
                                              expectation=True,
                                              dst_vm_fixture=vm2_fixture)
        result_msg = "vm ping test result to vm %s is: %s" % (vn1_vm2_name,
                                                              ret)
        self.logger.info(result_msg)
        if not ret:
            result = False
            msg.extend(
                ["ping failure with scaled policy and rules:", result_msg])
        assertEqual(result, True, msg)
        if self.inputs.get_af() == af_test:
            #In the v6 test, a new rule is added for proto 58 corresponding to the v4 icmp rule,
            #so the expected number of rules is incremented by 1
            no_of_rules_exp = total_number_of_rules + 1

        vn1_acl_count = len(
            self.agent_inspect[vm1_fixture._vm_node_ip].get_vna_acl_by_vn(
                vn1_fixture.vn_fq_name)['entries'])
        vn2_acl_count = len(
            self.agent_inspect[vm2_fixture._vm_node_ip].get_vna_acl_by_vn(
                vn2_fixture.vn_fq_name)['entries'])
        self.assertEqual(
            vn1_acl_count, no_of_rules_exp,
            "Mismatch in number of ace ID's and total number of rules in agent introspect \
                for vn %s" % vn1_fixture.vn_fq_name)
        self.assertEqual(
            vn2_acl_count, no_of_rules_exp,
            "Mismatch in number of ace ID's and total number of rules in agent introspect \
                for vn %s" % vn2_fixture.vn_fq_name)
        self.logger.info(
            'Verified ace Id\'s were created for %d rules, to test policy scalability'
            % no_of_rules_exp)
        return True
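
The expected ACE count in the docstring follows directly from the scaling parameters. A tiny standalone check of that arithmetic (the helper name is illustrative):

def expected_ace_count(number_of_policy, dummy_rules_per_policy,
                       valid_rules=1, default_rules=2, v6_extra=0):
    """Expected ACL entries per VN: dummy rules across all policies, plus the
    valid rule(s) and the two default rules, plus one extra rule in the v6
    case as noted in the test above."""
    return (number_of_policy * dummy_rules_per_policy +
            valid_rules + default_rules + v6_extra)


# 150 policies * 10 dummy rules + 1 valid + 2 default = 1503, matching the docstring.
assert expected_ace_count(150, 10) == 1503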
Example #19
def system_vna_verify_policy(self, policy_fixt, topo, state):
    # Verify all policies in all compute nodes..
    self.logger.info("Starting Verifications after %s" % (state))
    ret = policy_fixt.verify_policy_in_vna(topo)
    assertEqual(ret['result'], True, ret['msg'])
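
A note on the bare assertEqual(...) calls used in helpers like this one: they only work if the test module binds such a helper at module scope. A minimal shim that would make the call above valid, shown only as an assumption about the surrounding harness (not the suite's actual import), is:

import unittest

# Hypothetical shim: reuse a bare TestCase instance purely for its assert*
# methods, so free functions such as system_vna_verify_policy can call
# assertEqual(actual, expected, message) directly.
_asserter = unittest.TestCase('__init__')
assertEqual = _asserter.assertEqual

With such a binding in place, assertEqual(ret['result'], True, ret['msg']) raises an AssertionError carrying ret['msg'] whenever the vna verification fails.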
Example No. 20
    def test_policy_rules_scaling_with_ping(self):
        ''' Test to validate scaling of policy and rules
        '''
        result = True
        msg = []
        vn1_name = 'vn1'
        vn2_name = 'vn2'
        vn1_subnets = ['10.1.1.0/24']
        vn2_subnets = ['20.1.1.0/24']
        number_of_policy = 10
        # Workaround: run the test with a smaller number of rules until
        # bugs 1006 and 1184 are fixed
        number_of_dummy_rules = 148
        valid_rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'icmp', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'udp', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
        ]

        self.logger.info(
            'Creating %d policy and %d rules to test policy scalability' %
            (number_of_policy, number_of_dummy_rules + len(valid_rules)))
        # for now we are creating a limited number of policies and rules
        policy_objs_list = policy_test_helper._create_n_policy_n_rules(
            self, number_of_policy, valid_rules, number_of_dummy_rules)
        time.sleep(5)
        self.logger.info('Create VN and associate %d policy' %
                         (number_of_policy))
        vn1_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn1_name,
                inputs=self.inputs,
                subnets=vn1_subnets,
                policy_objs=policy_objs_list))
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn2_name,
                inputs=self.inputs,
                subnets=vn2_subnets,
                policy_objs=policy_objs_list))
        assert vn2_fixture.verify_on_setup()
        vn1_vm1_name = 'vm1'
        vn1_vm2_name = 'vm2'
        vm1_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn1_fixture.obj,
                vm_name=vn1_vm1_name))
        assert vm1_fixture.verify_on_setup()
        vm2_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn2_fixture.obj,
                vm_name=vn1_vm2_name))
        assert vm2_fixture.verify_on_setup()
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        self.logger.info("Verify ping to vm %s" % (vn1_vm2_name))
        ret = vm1_fixture.ping_with_certainty(
            vm2_fixture.vm_ip, expectation=True,
            dst_vm_fixture=vm2_fixture)
        result_msg = "vm ping test result to vm %s is: %s" % (
            vn1_vm2_name, ret)
        self.logger.info(result_msg)
        if not ret:
            result = False
            msg.extend(
                ["ping failure with scaled policy and rules:", result_msg])
        assertEqual(result, True, msg)
        return True
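
The bulk of the rules in this test come from policy_test_helper._create_n_policy_n_rules, whose body is not shown here. Purely as an illustration of the rule format it has to emit (this is an assumed sketch, not the real helper), dummy rules matching the shape of valid_rules above could be generated like this:

def make_dummy_rules(count, base_port=1000):
    """Illustrative only: build `count` distinct pass-rules in the same dict
    format as `valid_rules`, varying the destination port so each rule (and
    hence each ACL entry) stays unique."""
    rules = []
    for i in range(count):
        rules.append({
            'direction': '<>', 'simple_action': 'pass',
            'protocol': 'tcp', 'src_ports': 'any',
            'dst_ports': [base_port + i, base_port + i],
            'source_network': 'any',
            'dest_network': 'any',
        })
    return rules

In the test above, 148 such dummy rules are created alongside the two valid rules for each of the 10 policies.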
Example No. 21
    def test_policy_rules_scaling_with_ping(self):
        """ Test to validate scaling of policy and rules
        """
        result = True
        msg = []
        vn1_name = "vn1"
        vn2_name = "vn2"
        vn1_subnets = ["10.1.1.0/24"]
        vn2_subnets = ["20.1.1.0/24"]
        number_of_policy = 10
        # Workaround: run the test with a smaller number of rules until
        # bugs 1006 and 1184 are fixed
        number_of_dummy_rules = 148
        valid_rules = [
            {
                "direction": "<>",
                "simple_action": "pass",
                "protocol": "icmp",
                "src_ports": "any",
                "dst_ports": "any",
                "source_network": "any",
                "dest_network": "any",
            },
            {
                "direction": "<>",
                "simple_action": "pass",
                "protocol": "udp",
                "src_ports": "any",
                "dst_ports": "any",
                "source_network": "any",
                "dest_network": "any",
            },
        ]

        self.logger.info(
            "Creating %d policy and %d rules to test policy scalability"
            % (number_of_policy, number_of_dummy_rules + len(valid_rules))
        )
        # for now we are creating a limited number of policies and rules
        policy_objs_list = policy_test_helper._create_n_policy_n_rules(
            self, number_of_policy, valid_rules, number_of_dummy_rules
        )
        time.sleep(5)
        self.logger.info("Create VN and associate %d policy" % (number_of_policy))
        vn1_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn1_name,
                inputs=self.inputs,
                subnets=vn1_subnets,
                policy_objs=policy_objs_list,
            )
        )
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn2_name,
                inputs=self.inputs,
                subnets=vn2_subnets,
                policy_objs=policy_objs_list,
            )
        )
        assert vn2_fixture.verify_on_setup()
        vn1_vm1_name = "vm1"
        vn1_vm2_name = "vm2"
        vm1_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn1_fixture.obj,
                vm_name=vn1_vm1_name,
            )
        )
        assert vm1_fixture.verify_on_setup()
        vm2_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn2_fixture.obj,
                vm_name=vn1_vm2_name,
            )
        )
        assert vm2_fixture.verify_on_setup()
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        self.logger.info("Verify ping to vm %s" % (vn1_vm2_name))
        ret = vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip, expectation=True)
        result_msg = "vm ping test result to vm %s is: %s" % (vn1_vm2_name, ret)
        self.logger.info(result_msg)
        if not ret:
            result = False
            msg.extend(["ping failure with scaled policy and rules:", result_msg])
        assertEqual(result, True, msg)
        return True
Example No. 22
    def test_flow_multi_projects(self):
        """Tests related to flow setup rate and flow table stability accross various triggers for verification
           accross VN's and accross multiple projects"""
        result = True
        self.comp_node_fixt = {}
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(
                ComputeNodeFixture(self.connections, cmp_node))
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        msg = []
        topology_class_name = sdn_flow_test_topo_multiple_projects.multi_project_topo

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IP's and pass it to topo if you want to pin
        # a vm to a particular node
        topo = topology_class_name(compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        assertEqual(out['result'], True, out['msg'])
        self.topo, self.config_topo = out['data'][0], out['data'][1]
        self.proj = list(self.topo.keys())[0]
        # 2. Start Traffic
        for profile, details in self.topo[self.proj].traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %
                             (profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = details['src_proj']
            self.dst_proj = details['dst_proj']
            # Not a flow-scaling test, so limit num_flows to a low number.
            num_flows = 15000
            self.generated_flows = 2 * num_flows
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.src_proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.src_proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.dst_proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            self.comp_node_fixt[self.cmp_node].get_config_per_vm_flow_limit()
            self.comp_node_fixt[self.cmp_node].get_config_flow_aging_time()
            self.max_vm_flows = self.comp_node_fixt[self.cmp_node].max_vm_flows
            self.flow_cache_timeout = self.comp_node_fixt[
                self.cmp_node].flow_cache_timeout
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 3. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 4. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 5. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing"
            )
            sleep(self.flow_cache_timeout)
            while True:
                begin_flow_count = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                        self.flow_data)
                self.logger.debug('begin_flow_count: %s' % (begin_flow_count))
                if begin_flow_count['all'] == 0:
                    break
                flow_teardown_time = math.ceil(
                    flow_test_utils.get_max_flow_removal_time(
                        begin_flow_count['all'], self.flow_cache_timeout))
                # flow_teardown_time is not the exact time needed to remove the flows;
                # it is an estimate based on the current flow count and will vary with
                # the agent's poll, which runs at regular intervals.
                self.logger.info('Sleeping for %s secs' % (flow_teardown_time))
                sleep(flow_teardown_time)
                # at the end of the wait, the flow count should be below 50% of the total flows seen before teardown started
                current_flow_count = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                        self.flow_data)
                self.logger.debug('current_flow_count: %s' %
                                  (current_flow_count))
                if current_flow_count['all'] > (0.5 * begin_flow_count['all']):
                    msg = [
                        'Flow removal not happening as expected in node %s' %
                        self.cmp_node
                    ]
                    msg.append(
                        'Flow count before wait: %s, after wait of %s secs, its: %s'
                        % (begin_flow_count['all'], flow_teardown_time,
                           current_flow_count['all']))
                    assert False, msg
                if current_flow_count['all'] < (0.1 * begin_flow_count['all']):
                    break
            # end of while loop
            elapsed_time = time.time() - start_time
            self.logger.info(
                "Flows aged out as expected in configured flow_cache_timeout")
        # end of profile for loop
        return True
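
The ageing check at the end of this test reduces to a generic poll pattern: snapshot the flow count, sleep for an estimated teardown interval, and require at least half of the flows to be gone before polling again. A standalone sketch of that pattern (the two callables are placeholders, not the ComputeNodeFixture or flow_test_utils APIs):

import time

def wait_for_flow_ageing(get_flow_count, estimate_wait,
                         drop_ratio=0.5, done_ratio=0.1):
    """Illustrative poll loop mirroring the test above.

    get_flow_count -- callable returning the current live flow count
    estimate_wait  -- callable(count) returning an upper-bound wait in seconds
                      (the test derives this from get_max_flow_removal_time)
    """
    while True:
        begin = get_flow_count()
        if begin == 0:
            return
        time.sleep(estimate_wait(begin))
        current = get_flow_count()
        # Fail fast if ageing is clearly slower than the estimate allows.
        assert current <= drop_ratio * begin, (
            "flow removal slower than expected: %s -> %s" % (begin, current))
        if current < done_ratio * begin:
            return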
Example No. 23
    def test_policy_modify_vn_policy(self):
        """ Configure policies based on topology;
        """
        ###
        # Get config for test from topology
        # very simple topo will do, one vn, one vm, one policy, 3 rules
        topology_class_name = sdn_single_vm_policy_topology.sdn_single_vm_policy_config

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        # set project name
        try:
            # provided by wrapper module if run in parallel test env
            topo = topology_class_name(project=self.project.project_name,
                                       username=self.project.username,
                                       password=self.project.password)
        except NameError:
            topo = topology_class_name()
        ###
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.topo_setup()
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data']
        ###
        # Verify [and assert on fail] after setup
        # Calling system policy verification, pick any policy fixture to
        # access fixture verification
        policy_name = topo.policy_list[0]
        system_vna_verify_policy(self, config_topo['policy'][policy_name],
                                 topo, 'setup')
        ###
        # Test procedure:
        # Test adding a new policy to the VN's existing policy list
        state = "add policy: "
        test_vm = topo.vmc_list[0]
        test_vn = topo.vn_of_vm[test_vm]
        # Init test data, take backup of current topology
        initial_vn_policy_list = copy.copy(topo.vn_policy[test_vn])
        new_policy_to_add = policy_test_utils.get_policy_not_in_vn(
            initial_vn_policy_list, topo.policy_list)
        if not new_policy_to_add:
            result = False
            msg = "Test cannot be run as the required config is not available in the topology; aborting test"
            self.logger.info(msg)
            assertEqual(result, True, msg)
        initial_policy_vn_list = copy.copy(topo.policy_vn[new_policy_to_add])
        new_vn_policy_list = copy.copy(initial_vn_policy_list)
        new_policy_vn_list = copy.copy(initial_policy_vn_list)
        new_vn_policy_list.append(new_policy_to_add)
        new_policy_vn_list.append(test_vn)
        test_vn_fix = config_topo['vn'][test_vn]
        test_vn_id = test_vn_fix.vn_id
        # configure new policy
        config_topo['policy'][new_policy_to_add] = self.useFixture(
            PolicyFixture(policy_name=new_policy_to_add,
                          rules_list=topo.rules[new_policy_to_add],
                          inputs=self.inputs,
                          connections=self.connections))
        # get new policy_set to be pushed for the vn
        test_policy_fq_names = []
        for policy in new_vn_policy_list:
            name = config_topo['policy'][policy].policy_fq_name
            test_policy_fq_names.append(name)
        self.logger.info("adding policy %s to vn %s" %
                         (new_policy_to_add, test_vn))
        test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
        # wait for the tables to update before checking, after making changes to the system
        time.sleep(5)
        self.logger.info("New policy list of VN %s is %s" %
                         (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        topo.policy_vn[new_policy_to_add] = new_policy_vn_list
        system_vna_verify_policy(self,
                                 config_topo['policy'][new_policy_to_add],
                                 topo, state)
        # Test unbinding all policies from VN
        state = "unbinding all policies"
        test_vn_fix.unbind_policies(test_vn_id)
        # wait for the tables to update before checking, after making changes to the system
        time.sleep(5)
        current_vn_policy_list = new_vn_policy_list
        new_vn_policy_list = []
        self.logger.info("New policy list of VN %s is %s" %
                         (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        for policy in current_vn_policy_list:
            topo.policy_vn[policy].remove(test_vn)
        system_vna_verify_policy(self,
                                 config_topo['policy'][new_policy_to_add],
                                 topo, state)
        return True
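
policy_test_utils.get_policy_not_in_vn only has to pick a policy from the topology that the VN is not already using; a hypothetical equivalent (not the real helper) that matches how the test consumes its return value is:

def get_policy_not_in_vn(current_vn_policy_list, all_policies):
    """Return the first policy name not already attached to the VN, or None
    if every policy in the topology is attached (the caller above treats a
    falsy return as 'required config not available')."""
    for policy in all_policies:
        if policy not in current_vn_policy_list:
            return policy
    return None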
Example No. 24
    def test_agent_flow_settings(self):
        """Basic systest with single project with many features & traffic..
        """
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        # import mini_flow_test_topo
        # topology_class_name = mini_flow_test_topo.systest_topo_single_project
        topology_class_name = flow_test_topo.systest_topo_single_project
        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))

        topo = topology_class_name(compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            config_topo = out['data'][1]
        self.proj = list(config_topo.keys())[0]
        self.topo, self.config_topo = topo, config_topo

        # 2. set agent flow_cache_timeout to 60s
        # set max_vm_flows to 1% of 500k, comes to 5000
        self.comp_node_fixt = {}
        self.flow_cache_timeout = 60
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(
                ComputeNodeFixture(self.connections, cmp_node))
            self.comp_node_fixt[cmp_node].set_flow_aging_time(
                self.flow_cache_timeout)
            self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()

        # 3. Start Traffic
        for profile, details in self.topo.traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %
                             (profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = self.proj
            self.dst_proj = self.proj
            # Set num_flows to a fixed, smaller value that is still > 1% of
            # the system max flows
            num_flows = 5555
            self.generated_flows = 2 * num_flows
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            # 3a. Set max_vm_flows to 1% in TX VM node
            self.max_vm_flows = 1
            self.comp_node_fixt[self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
            flow_test_utils.update_vm_mdata_ip(self.cmp_node, self)
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 4. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 5. Increase max_vm_flows to 50% in TX VM node
            self.max_vm_flows = 50
            self.comp_node_fixt[self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
            # 6. Poll live traffic
            self.verify_node_flow_setup()
            # 7. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 8. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing"
            )
            sleep(self.flow_cache_timeout)
            retries = 0
            retry_wait_time = 10
            flow_teardown_time = math.ceil(
                flow_test_utils.get_max_flow_removal_time(
                    self.generated_flows, self.flow_cache_timeout))
            self.logger.debug("flow tear down time based on calcualtion: %s" %
                              flow_teardown_time)
            max_retries = math.ceil(self.flow_cache_timeout / retry_wait_time)
            while retries < max_retries:
                actual_flows = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                        self.flow_data)
                actual_flows = int(actual_flows['all'])
                if actual_flows > 10:
                    self.logger.info("Waiting for flows to age out")
                    sleep(retry_wait_time)
                    retries += 1
                else:
                    break
            elapsed_time = time.time() - start_time
            if actual_flows > 50:
                msg = "Expected flows to age-out as configured, Seeing flows still active after elapsed time %s in node: %s, actual_flows: %s" % (
                    elapsed_time, self.cmp_node, actual_flows)
                assert False, msg
            else:
                self.logger.info(
                    "Flows aged out as expected in configured flow_cache_timeout"
                )
                self.logger.info(
                    "elapsed_time after stopping traffic is %s, flow_count is %s"
                    % (elapsed_time, actual_flows))
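
The max_vm_flows knob in this test is a percentage of the vrouter flow-table size; going by the "1% of 500k" comment above (the 500k figure is taken from that comment, not queried from the node), the absolute caps behind the chosen numbers work out as in this sketch:

VROUTER_FLOW_TABLE_SIZE = 500 * 1000   # assumed from the comment in the test

def per_vm_flow_cap(max_vm_flows_percent):
    """Absolute per-VM flow cap for a given max_vm_flows percentage."""
    return int(VROUTER_FLOW_TABLE_SIZE * max_vm_flows_percent / 100.0)

# 1% => 5000 flows, so 5555 streams (11110 flows counting both directions)
# are enough to hit the limit; raising the knob to 50% lifts the cap to 250000.
assert per_vm_flow_cap(1) == 5000
assert per_vm_flow_cap(50) == 250000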
Example No. 25
def system_vna_verify_policy(self, policy_fixt, topo, state):
    # Verify all policies in all compute nodes..
    self.logger.debug("Starting Verifications after %s" % (state))
    ret = policy_fixt.verify_policy_in_vna(topo)
    assertEqual(ret['result'], True, ret['msg'])
Example No. 26
 def test_policy_with_multi_vn_in_vm(self):
     ''' Test to validate policy action in VM with vnic's in  multiple VN's with different policies.
     Test flow: vm1 in vn1 and vn2; vm3 in vn3. policy to allow traffic from vn2 to vn3 and deny from vn1 to vn3.
     Default route for vm1 in vn1, which has no reachability to vn3 - verify traffic - should fail.
     Add specific route to direct vn3 traffic through vn2 - verify traffic - should pass.
     '''
     vm1_name = 'vm_mine1'
     vm2_name = 'vm_mine2'
     vn1_name = 'vn221'
     vn1_subnets = ['11.1.1.0/24']
     vn2_name = 'vn222'
     vn2_subnets = ['22.1.1.0/24']
     vn3_gateway = '22.1.1.254'
     vn3_name = 'vn223'
     vn3_subnets = ['33.1.1.0/24']
     rules1 = [
         {
             'direction': '>',
             'simple_action': 'deny',
             'protocol': 'icmp',
             'src_ports': 'any',
             'dst_ports': 'any',
             'source_network': 'any',
             'dest_network': 'any',
         },
     ]
     rules2 = [
         {
             'direction': '<>',
             'simple_action': 'pass',
             'protocol': 'any',
             'src_ports': 'any',
             'dst_ports': 'any',
             'source_network': 'any',
             'dest_network': 'any',
         },
     ]
     policy1_name = 'p1'
     policy2_name = 'p2'
     policy1_fixture = self.useFixture(
         PolicyFixture(policy_name=policy1_name,
                       rules_list=rules1,
                       inputs=self.inputs,
                       connections=self.connections))
     policy2_fixture = self.useFixture(
         PolicyFixture(policy_name=policy2_name,
                       rules_list=rules2,
                       inputs=self.inputs,
                       connections=self.connections))
     vn1_fixture = self.useFixture(
         VNFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_name=vn1_name,
                   inputs=self.inputs,
                   subnets=vn1_subnets,
                   policy_objs=[policy1_fixture.policy_obj]))
     vn2_fixture = self.useFixture(
         VNFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_name=vn2_name,
                   inputs=self.inputs,
                   subnets=vn2_subnets,
                   disable_gateway=True,
                   policy_objs=[policy2_fixture.policy_obj]))
     vn3_fixture = self.useFixture(
         VNFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_name=vn3_name,
                   inputs=self.inputs,
                   subnets=vn3_subnets,
                   policy_objs=[policy2_fixture.policy_obj]))
     assert vn1_fixture.verify_on_setup()
     assert vn2_fixture.verify_on_setup()
     assert vn3_fixture.verify_on_setup()
     assert vn1_fixture.verify_vn_policy_in_api_server()
     assert vn2_fixture.verify_vn_policy_in_api_server()
     assert vn3_fixture.verify_vn_policy_in_api_server()
     vm1_fixture = self.useFixture(
         VMFixture(connections=self.connections,
                   vn_objs=[vn1_fixture.obj, vn2_fixture.obj],
                   vm_name=vm1_name,
                   project_name=self.inputs.project_name))
     vm2_fixture = self.useFixture(
         VMFixture(connections=self.connections,
                   vn_objs=[vn3_fixture.obj],
                   vm_name=vm2_name,
                   project_name=self.inputs.project_name))
     assert vm1_fixture.verify_on_setup()
     assert vm2_fixture.verify_on_setup()
     self.nova_h.wait_till_vm_is_up(vm1_fixture.vm_obj)
     self.nova_h.wait_till_vm_is_up(vm2_fixture.vm_obj)
     # For multi-vn vm, configure ip address for 2nd interface
     multivn_vm_ip_list = vm1_fixture.vm_ips
     intf_conf_cmd = "ifconfig eth1 %s netmask 255.255.255.0" % multivn_vm_ip_list[
         1]
     vm_cmds = (intf_conf_cmd, 'ifconfig -a')
     for cmd in vm_cmds:
         cmd_to_output = [cmd]
         vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
         output = vm1_fixture.return_output_cmd_dict[cmd]
     for ip in multivn_vm_ip_list:
         if ip not in output:
             self.logger.error("IP %s not assigned to any eth intf of %s" %
                               (ip, vm1_fixture.vm_name))
             assert False
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw of VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with no allow rule in the VN where default gw is part of, traffic should fail"
     )
     result = vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip,
                                              expectation=False)
     assertEqual(result, True, "ping passed which is not expected")
     # Configure VM to reroute traffic to interface belonging to different
     # VN
     self.logger.info(
         "Direct traffic to gw which is part of VN with allow policy to destination VN, traffic should pass now"
     )
     i = ' route add -net %s netmask 255.255.255.0 gw %s dev eth1' % (
         vn3_subnets[0].split('/')[0], multivn_vm_ip_list[1])
     cmd_to_output = [i]
     vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
     output = vm1_fixture.return_output_cmd_dict[i]
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw for VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with allow rule in the VN where network gw is part of, traffic should pass"
     )
     result = vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip,
                                              expectation=True)
     assertEqual(result, True, "ping failed which is not expected")
     return True
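
The reroute step in this test steers vn3-bound traffic out of the VM's second interface by adding a static route inside the guest, using the VM's own eth1 address as the gateway. Substituting the example subnet and a hypothetical eth1 address (nothing here is read from the cluster), the command it assembles looks like:

# Illustrative only: mirrors the route command built in the test above.
vn3_prefix = '33.1.1.0'   # network part of vn3_subnets[0]
eth1_ip = '22.1.1.3'      # hypothetical second-interface address in vn2
route_cmd = 'route add -net %s netmask 255.255.255.0 gw %s dev eth1' % (
    vn3_prefix, eth1_ip)
print(route_cmd)
# route add -net 33.1.1.0 netmask 255.255.255.0 gw 22.1.1.3 dev eth1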
Example No. 27
    def test_flow_multi_projects(self):
        """Tests related to flow setup rate and flow table stability accross various triggers for verification
           accross VN's and accross multiple projects"""
        result = True
        self.comp_node_fixt = {}
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
                self.connections, cmp_node))
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        msg = []
        topology_class_name = sdn_flow_test_topo_multiple_projects.multi_project_topo

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IP's and pass it to topo if you want to pin
        # a vm to a particular node
        topo = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        assertEqual(out['result'], True, out['msg'])
        self.topo, self.config_topo = out['data'][0], out['data'][1]
        self.proj = list(self.topo.keys())[0]
        # 2. Start Traffic
        for profile, details in self.topo[self.proj].traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %(profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = details['src_proj']
            self.dst_proj = details['dst_proj']
            # Not a flow-scaling test, so limit num_flows to a low number.
            num_flows = 15000
            self.generated_flows = 2*num_flows
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.src_proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.src_proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.dst_proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            self.comp_node_fixt[self.cmp_node].get_config_per_vm_flow_limit()
            self.comp_node_fixt[self.cmp_node].get_config_flow_aging_time()
            self.max_vm_flows = self.comp_node_fixt[self.cmp_node].max_vm_flows
            self.flow_cache_timeout = self.comp_node_fixt[self.cmp_node].flow_cache_timeout
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 3. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 4. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 5. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing")
            sleep(self.flow_cache_timeout)
            while True:
                begin_flow_count = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                    self.flow_data)
                self.logger.debug('begin_flow_count: %s' %(begin_flow_count))
                if begin_flow_count['all'] == 0:
                    break
                flow_teardown_time = math.ceil(
                    flow_test_utils.get_max_flow_removal_time(
                        begin_flow_count['all'], self.flow_cache_timeout))
                # flow_teardown_time is not the exact time needed to remove the flows;
                # it is an estimate based on the current flow count and will vary with
                # the agent's poll, which runs at regular intervals.
                self.logger.info('Sleeping for %s secs' %(flow_teardown_time))
                sleep(flow_teardown_time)
                # at the end of the wait, the flow count should be below 50% of the total flows seen before teardown started
                current_flow_count = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                    self.flow_data)
                self.logger.debug('current_flow_count: %s' %(current_flow_count))
                if current_flow_count['all'] > (0.5*begin_flow_count['all']):
                    msg = ['Flow removal not happening as expected in node %s' %self.cmp_node]
                    msg.append('Flow count before wait: %s, after wait of %s secs, its: %s' %
                        (begin_flow_count['all'], flow_teardown_time, current_flow_count['all']))
                    assert False, msg
                if current_flow_count['all'] < (0.1*begin_flow_count['all']):
                    break
            # end of while loop
            elapsed_time = time.time() - start_time
            self.logger.info(
                "Flows aged out as expected in configured flow_cache_timeout")
        # end of profile for loop
        return True
Example No. 28
    def test_one_policy_rules_scaling_with_ping(self):
        ''' Test to validate scaling of policy and rules.
            Validates rule scaling on a single policy.
            The policy is attached to two VNs, and one VM
            is spawned in each VN to verify that the exact
            number of ACL entries is created in the agent
            introspect.
            Expected ace IDs = 1 policy * 1498 distinct dummy rules
            + 2 valid rules + 2 default rules = 1502 ace IDs.
        '''
        result = True
        msg = []
        vn1_name = 'vn1'
        vn2_name = 'vn2'
        vn1_subnets = ['10.1.1.0/24']
        vn2_subnets = ['20.1.1.0/24']
        number_of_policy = 1
        number_of_dummy_rules = 1498
        number_of_valid_rules = 2
        number_of_default_rules = 2
        total_number_of_rules = (number_of_dummy_rules + number_of_valid_rules +
                                 number_of_default_rules)
        no_of_rules_exp = total_number_of_rules
        valid_rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'icmp', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'udp', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
        ]

        self.logger.info(
            'Creating %d policy and %d rules to test policy scalability' %
            (number_of_policy, number_of_dummy_rules + len(valid_rules)))
        policy_objs_list = policy_test_helper._create_n_policy_n_rules(
            self, number_of_policy, valid_rules, number_of_dummy_rules)
        time.sleep(5)
        self.logger.info('Create VN and associate %d policy' %
                         (number_of_policy))
        vn1_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn1_name,
                inputs=self.inputs,
                subnets=vn1_subnets,
                policy_objs=policy_objs_list))
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn2_name,
                inputs=self.inputs,
                subnets=vn2_subnets,
                policy_objs=policy_objs_list))
        assert vn2_fixture.verify_on_setup()
        vn1_vm1_name = 'vm1'
        vn1_vm2_name = 'vm2'
        vm1_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn1_fixture.obj,
                vm_name=vn1_vm1_name))
        assert vm1_fixture.verify_on_setup()
        vm2_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn2_fixture.obj,
                vm_name=vn1_vm2_name))
        assert vm2_fixture.verify_on_setup()
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        self.logger.info("Verify ping to vm %s" % (vn1_vm2_name))
        ret = vm1_fixture.ping_with_certainty(
            vm2_fixture.vm_ip, expectation=True,
            dst_vm_fixture=vm2_fixture)
        result_msg = "vm ping test result to vm %s is: %s" % (
            vn1_vm2_name, ret)
        self.logger.info(result_msg)
        if not ret:
            result = False
            msg.extend(
                ["ping failure with scaled policy and rules:", result_msg])
        assertEqual(result, True, msg)
        if self.inputs.get_af() == af_test:
            # In the v6 test, a new rule is added for proto 58 corresponding to the v4 ICMP rule,
            # so the expected number of rules should be incremented by 1
            no_of_rules_exp = total_number_of_rules + 1

        vn1_acl_count = len(
            self.agent_inspect[vm1_fixture._vm_node_ip].get_vna_acl_by_vn(
                vn1_fixture.vn_fq_name)['entries'])
        vn2_acl_count = len(
            self.agent_inspect[vm2_fixture._vm_node_ip].get_vna_acl_by_vn(
                vn2_fixture.vn_fq_name)['entries'])
        self.assertEqual(
            vn1_acl_count, no_of_rules_exp,
            "Mismatch in number of ace ID's and total number of rules in "
            "agent introspect for vn %s" % vn1_fixture.vn_fq_name)
        self.assertEqual(
            vn2_acl_count, no_of_rules_exp,
            "Mismatch in number of ace ID's and total number of rules in "
            "agent introspect for vn %s" % vn2_fixture.vn_fq_name)
        self.logger.info(
            'Verified ace Id\'s were created for %d rules, to test policy scalability' %
            no_of_rules_exp)
        return True
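
The expected ACL-entry count in this test is plain arithmetic over the configured rules, with one extra entry in the v6 run for the auto-added proto-58 (ICMPv6) rule; a compact restatement of that bookkeeping:

def expected_ace_count(dummy_rules, valid_rules, default_rules=2, is_v6=False):
    """Expected ACL entries per VN: every configured rule plus the implicit
    default rules, plus one proto-58 entry in the v6 variant of the test."""
    total = dummy_rules + valid_rules + default_rules
    return total + 1 if is_v6 else total

assert expected_ace_count(1498, 2) == 1502
assert expected_ace_count(1498, 2, is_v6=True) == 1503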
Example No. 29
    def test_agent_flow_settings(self):
        """Basic systest with single project with many features & traffic..
        """
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        # import mini_flow_test_topo
        # topology_class_name = mini_flow_test_topo.systest_topo_single_project
        topology_class_name = flow_test_topo.systest_topo_single_project
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))

        topo = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            config_topo = out['data'][1]
        self.proj = list(config_topo.keys())[0]
        self.topo, self.config_topo = topo, config_topo

        # 2. set agent flow_cache_timeout to 60s
        # set max_vm_flows to 1% of 500k, comes to 5000
        self.comp_node_fixt = {}
        self.flow_cache_timeout = 60
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
                self.connections, cmp_node))
            self.comp_node_fixt[cmp_node].set_flow_aging_time(
                self.flow_cache_timeout)
            self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()

        # 3. Start Traffic
        for profile, details in self.topo.traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %(profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = self.proj
            self.dst_proj = self.proj
            # Set num_flows to a fixed, smaller value that is still > 1% of
            # the system max flows
            num_flows = 5555
            self.generated_flows = 2*num_flows
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            # 3a. Set max_vm_flows to 1% in TX VM node
            self.max_vm_flows = 1
            self.comp_node_fixt[
                self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
            flow_test_utils.update_vm_mdata_ip(self.cmp_node, self)
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 4. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 5. Increase max_vm_flows to 50% in TX VM node
            self.max_vm_flows = 50
            self.comp_node_fixt[
                self.cmp_node].set_per_vm_flow_limit(
                self.max_vm_flows)
            self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart()
            self.logger.info(
                "Wait for 2s for flow setup to start after service restart")
            sleep(2)
            # 6. Poll live traffic
            self.verify_node_flow_setup()
            # 7. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 8. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing")
            sleep(self.flow_cache_timeout)
            retries = 0
            retry_wait_time = 10
            flow_teardown_time = math.ceil(flow_test_utils.get_max_flow_removal_time(self.generated_flows, self.flow_cache_timeout))
            self.logger.debug("flow tear down time based on calcualtion: %s" %flow_teardown_time)
            max_retries = math.ceil(self.flow_cache_timeout / retry_wait_time)
            while retries < max_retries:
                actual_flows = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                    self.flow_data)
                actual_flows = int(actual_flows['all'])
                if actual_flows > 10:
                    self.logger.info("Waiting for flows to age out")
                    sleep(retry_wait_time)
                    retries += 1
                else:
                    break
            elapsed_time = time.time() - start_time
            if actual_flows > 50:
                msg = "Expected flows to age-out as configured, Seeing flows still active after elapsed time %s in node: %s, actual_flows: %s" % (
                    elapsed_time, self.cmp_node, actual_flows)
                assert False, msg
            else:
                self.logger.info(
                    "Flows aged out as expected in configured flow_cache_timeout")
                self.logger.info(
                    "elapsed_time after stopping traffic is %s, flow_count is %s" %
                    (elapsed_time, actual_flows))
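
Unlike the open-ended ageing loop in the multi-project flow test, this variant polls with a fixed retry budget: with flow_cache_timeout = 60s and a 10s poll interval it makes at most ceil(60/10) = 6 checks before giving up. A standalone sketch of that pattern (the flow-count callable is a placeholder, not the fixture API):

import math
import time

def wait_for_flows_to_age(get_flow_count, flow_cache_timeout,
                          retry_wait_time=10, residual_ok=10):
    """Poll until the live flow count drops to `residual_ok` or fewer, giving
    up after roughly one flow_cache_timeout worth of polls; returns the last
    observed count so the caller can assert on it."""
    max_retries = math.ceil(flow_cache_timeout / retry_wait_time)
    flows = get_flow_count()
    retries = 0
    while flows > residual_ok and retries < max_retries:
        time.sleep(retry_wait_time)
        flows = get_flow_count()
        retries += 1
    return flows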