def _testSanity(self, mock_meter_get_action_inst, mock_tc_get_action_inst, mock_meter_cls, mock_traffic_cls): ''' This test verifies that qos configuration gets programmed correctly for addition and deletion of a single subscriber and a single rule, We additionally verify if the clean_restart wipes out everything ''' # mock unclean state in qos prior_qids = { self.ul_intf: [ 2, ], self.dl_intf: [ 3, ] } if self.config["qos"]["impl"] == QosImplType.LINUX_TC: mock_traffic_cls.read_all_classes.side_effect = ( lambda intf: prior_qids[intf]) qos_mgr = QosManager(MagicMock, asyncio.new_event_loop(), self.config) qos_mgr._qos_store = {} qos_mgr._setupInternal() imsi, rule_num, d, qos_info = "imsi1234", 0, 0, QosInfo(100000, 100000) # add new subscriber qos queue qos_mgr.add_subscriber_qos(imsi, rule_num, d, qos_info) k = get_json(get_subscriber_key(imsi, rule_num, d)) exp_id = qos_mgr.qos_impl._start_idx self.assertTrue(qos_mgr._qos_store[k] == exp_id) self.assertTrue(qos_mgr._subscriber_map[imsi][rule_num] == (exp_id, d)) # add the same subscriber and ensure that we didn't create another # qos config for the subscriber qos_mgr.add_subscriber_qos(imsi, rule_num, d, qos_info) self.assertTrue(qos_mgr._subscriber_map[imsi][rule_num] == (exp_id, d)) # verify if traffic class was invoked properly if self.config["qos"]["impl"] == QosImplType.OVS_METER: self.verifyMeterAddQos(mock_meter_get_action_inst, mock_meter_cls, d, exp_id, qos_info) self.verifyMeterCleanRestart(mock_meter_cls) else: self.verifyTcAddQos(mock_tc_get_action_inst, mock_traffic_cls, d, exp_id, qos_info) self.verifyTcCleanRestart(prior_qids, mock_traffic_cls) # remove the subscriber qos and verify things are cleaned up qos_mgr.remove_subscriber_qos(imsi, rule_num) self.assertTrue(len(qos_mgr._qos_store) == 0) self.assertTrue(imsi not in qos_mgr._subscriber_map) if self.config["qos"]["impl"] == QosImplType.OVS_METER: self.verifyMeterRemoveQos(mock_meter_cls, d, exp_id) else: 
self.verifyTcRemoveQos(mock_traffic_cls, d, exp_id)
def testApnAmbrSanity(
    self,
    mock_tc_get_action_inst,
    mock_traffic_cls,
):
    """
    This test verifies that qos configuration gets programmed correctly
    for addition and deletion of a single subscriber and a single rule,
    We additionally verify if the clean_restart wipes out everything

    APN-AMBR variant: each direction gets an AMBR root qid, an AMBR leaf
    qid and the rule's own qid (hence the +2/+3/+5 offsets below).
    Forced to LINUX_TC since AMBR is only verified against tc here.
    """
    self.config["qos"]["impl"] = QosImplType.LINUX_TC
    # mock unclean state in qos — (qid, parent_qid) tuples pre-existing
    # in the system before setup
    prior_qids = {self.ul_intf: [(2, 0)], self.dl_intf: [(3, 0)]}
    mock_traffic_cls.read_all_classes.side_effect = lambda intf: prior_qids[intf]

    qos_mgr = QosManager(MagicMock, asyncio.new_event_loop(), self.config)
    qos_mgr._redis_store = {}
    qos_mgr._setupInternal()
    ambr_ul, ambr_dl = 250000, 500000
    imsi, ip_addr, rule_num, qos_info = ("1234", '1.1.1.1', 1, QosInfo(50000, 100000))

    # add new subscriber qos queue, one per direction
    qos_mgr.add_subscriber_qos(imsi, ip_addr, ambr_ul, rule_num, FlowMatch.UPLINK, qos_info)
    qos_mgr.add_subscriber_qos(imsi, ip_addr, ambr_dl, rule_num, FlowMatch.DOWNLINK, qos_info)

    k1 = get_key_json(get_subscriber_key(imsi, ip_addr, rule_num, FlowMatch.UPLINK))
    k2 = get_key_json(get_subscriber_key(imsi, ip_addr, rule_num, FlowMatch.DOWNLINK))

    # expected qid layout: [ul ambr, ul ambr leaf, ul rule,
    #                       dl ambr, dl ambr leaf, dl rule]
    ambr_ul_exp_id = qos_mgr.impl._start_idx
    ul_exp_id = qos_mgr.impl._start_idx + 2
    ambr_dl_exp_id = qos_mgr.impl._start_idx + 3
    dl_exp_id = qos_mgr.impl._start_idx + 5

    # subscriber data is (qid, ambr_qid, leaf_qid)
    qid_info_ul = get_subscriber_data(ul_exp_id, ul_exp_id - 2, ul_exp_id - 1)
    qid_info_dl = get_subscriber_data(dl_exp_id, dl_exp_id - 2, dl_exp_id - 1)
    self.assertEqual(get_data(qos_mgr._redis_store[k1]), qid_info_ul)
    self.assertEqual(get_data(qos_mgr._redis_store[k2]), qid_info_dl)
    self.assertTrue(qos_mgr._subscriber_state[imsi].rules[rule_num][0] == (0, get_data_json(qid_info_ul)))
    self.assertTrue(qos_mgr._subscriber_state[imsi].rules[rule_num][1] == (1, get_data_json(qid_info_dl)))
    # a single session tracks both the AMBR roots and their leaves
    self.assertEqual(len(qos_mgr._subscriber_state[imsi].sessions), 1)
    self.assertEqual(qos_mgr._subscriber_state[imsi].sessions[ip_addr].ambr_dl, ambr_dl_exp_id)
    self.assertEqual(qos_mgr._subscriber_state[imsi].sessions[ip_addr].ambr_dl_leaf, ambr_dl_exp_id + 1)
    self.assertEqual(qos_mgr._subscriber_state[imsi].sessions[ip_addr].ambr_ul_leaf, ambr_ul_exp_id + 1)

    # add the same subscriber and ensure that we didn't create another
    # qos config for the subscriber
    qos_mgr.add_subscriber_qos(imsi, ip_addr, 0, rule_num, FlowMatch.UPLINK, qos_info)
    qos_mgr.add_subscriber_qos(imsi, ip_addr, 0, rule_num, FlowMatch.DOWNLINK, qos_info)
    self.assertTrue(qos_mgr._subscriber_state[imsi].rules[rule_num][0] == (0, get_data_json(qid_info_ul)))
    self.assertTrue(qos_mgr._subscriber_state[imsi].rules[rule_num][1] == (1, get_data_json(qid_info_dl)))

    # verify if traffic class was invoked properly, with the AMBR qid
    # as the parent of each rule qid
    self.verifyTcAddQos(
        mock_tc_get_action_inst,
        mock_traffic_cls,
        FlowMatch.UPLINK,
        ul_exp_id,
        qos_info,
        parent_qid=ambr_ul_exp_id,
    )
    self.verifyTcAddQos(
        mock_tc_get_action_inst,
        mock_traffic_cls,
        FlowMatch.DOWNLINK,
        dl_exp_id,
        qos_info,
        parent_qid=ambr_dl_exp_id,
    )
    self.verifyTcCleanRestart(prior_qids, mock_traffic_cls)

    # remove the subscriber qos and verify things are cleaned up,
    # including the AMBR roots (skip_filter=True for those)
    qos_mgr.remove_subscriber_qos(imsi, rule_num)
    self.assertTrue(len(qos_mgr._redis_store) == 0)
    self.assertTrue(imsi not in qos_mgr._subscriber_state)
    self.verifyTcRemoveQos(mock_traffic_cls, FlowMatch.UPLINK, ambr_ul_exp_id, True)
    self.verifyTcRemoveQos(mock_traffic_cls, FlowMatch.UPLINK, ul_exp_id)
    self.verifyTcRemoveQos(mock_traffic_cls, FlowMatch.DOWNLINK, ambr_dl_exp_id, True)
    self.verifyTcRemoveQos(mock_traffic_cls, FlowMatch.DOWNLINK, dl_exp_id)
def _testUncleanRestartWithApnAMBR(self, mock_meter_cls, mock_traffic_cls):
    """This test verifies all tests cases from _testUncleanRestart
    for APN AMBR configs.

    Three scenarios are exercised against a restarting QosManager:
    case 1: redis and system state partially overlap — stale entries are
            reconciled on both sides;
    case 2: system qos state is empty — redis map gets purged, nothing
            is deleted from the (already empty) system;
    case 3: redis map is empty — every system qos config gets purged.
    """
    loop = asyncio.new_event_loop()
    qos_mgr = QosManager(MagicMock, loop, self.config)
    qos_mgr._redis_store = {}

    def populate_db(qid_list, old_ambr_list, old_leaf_list, rule_list):
        # reset the fake redis store to a known pre-restart state
        qos_mgr._redis_store.clear()
        for i, t in enumerate(rule_list):
            k = get_key_json(get_subscriber_key(*t))
            v = get_data_json(
                get_subscriber_data(
                    qid_list[i],
                    old_ambr_list[i],
                    old_leaf_list[i],
                ),
            )
            qos_mgr._redis_store[k] = v

    MockSt = namedtuple("MockSt", "meter_id")
    dummy_meter_ev_body = [MockSt(11), MockSt(13), MockSt(2), MockSt(15)]

    def tc_read(intf):
        # fake system tc state: (qid, parent_qid) pairs; 65534 marks the
        # AMBR root classes parented at the interface root
        if intf == self.ul_intf:
            return [
                (30, 3000), (15, 1500), (300, 3000),
                (150, 1500), (3000, 65534), (1500, 65534),
            ]
        else:
            return [
                (13, 1300), (11, 1100), (130, 1300),
                (110, 1100), (1300, 65534), (1100, 65534),
            ]

    # prepopulate qos_store
    old_qid_list = [2, 11, 13, 30]
    old_leaf_list = [20, 110, 130, 300]
    old_ambr_list = [200, 1100, 1300, 3000]
    old_rule_list = [
        ("1", '1.1.1.1', 0, 0),
        ("1", '1.1.1.2', 1, 0),
        ("1", '1.1.1.3', 2, 1),
        ("2", '1.1.1.4', 0, 0),
    ]
    populate_db(old_qid_list, old_ambr_list, old_leaf_list, old_rule_list)

    # mock future state
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        qos_mgr.impl.handle_meter_config_stats(dummy_meter_ev_body)
    else:
        mock_traffic_cls.read_all_classes.side_effect = tc_read

    qos_mgr._initialized = False
    qos_mgr._setupInternal()

    # verify that qos_handle 20 not found in system is purged from map
    qid_list = []
    for _, v in qos_mgr._redis_store.items():
        _, qid, _, _ = get_data(v)
        qid_list.append(qid)
    logging.debug("qid_list %s", qid_list)
    self.assertNotIn(20, qid_list)

    # verify that unreferenced qos configs are purged from the system
    # (15 and its ambr/leaf 150/1500 have no redis entry)
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_called_with(MagicMock, 15)
    else:
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 15, False)
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 150, False)
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 1500, True)

    # add a new rule to the qos_mgr and check if it is assigned right id
    imsi, rule_num, d, qos_info = "3", 0, 0, QosInfo(100000, 100000)
    qos_mgr.impl.get_action_instruction = MagicMock
    qos_mgr.add_subscriber_qos(imsi, '', 10, rule_num, d, qos_info)
    k = get_key_json(get_subscriber_key(imsi, '', rule_num, d))

    exp_id = 3  # since start_idx 2 is already used
    d3 = get_data_json(get_subscriber_data(exp_id + 1, exp_id - 1, exp_id))
    self.assertEqual(qos_mgr._redis_store[k], d3)
    self.assertEqual(qos_mgr._subscriber_state[imsi].rules[rule_num][0], (d, d3))

    # delete the restored rule - ensure that it gets cleaned properly
    purge_imsi = "1"
    purge_rule_num = 1
    purge_qos_handle = 2
    qos_mgr.remove_subscriber_qos(purge_imsi, purge_rule_num)
    self.assertTrue(purge_rule_num not in qos_mgr._subscriber_state[purge_imsi].rules)
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_called_with(MagicMock, purge_qos_handle)
    else:
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 11, False)
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 110, False)
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 1100, True)

    # case 2 - check with empty qos configs, qos_map gets purged
    mock_meter_cls.reset_mock()
    mock_traffic_cls.reset_mock()
    populate_db(old_qid_list, old_ambr_list, old_leaf_list, old_rule_list)

    # mock future state
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        MockSt = namedtuple("MockSt", "meter_id")
        qos_mgr.impl._fut = loop.create_future()
        qos_mgr.impl.handle_meter_config_stats([])
    else:
        mock_traffic_cls.read_all_classes.side_effect = lambda _: []

    qos_mgr._initialized = False
    qos_mgr._setupInternal()

    self.assertTrue(not qos_mgr._redis_store)

    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_not_called()
    else:
        mock_traffic_cls.delete_class.assert_not_called()

    # case 3 - check with empty qos_map, all qos configs get purged
    mock_meter_cls.reset_mock()
    mock_traffic_cls.reset_mock()
    qos_mgr._redis_store.clear()

    # mock future state
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        qos_mgr.impl._fut = loop.create_future()
        qos_mgr.impl.handle_meter_config_stats(dummy_meter_ev_body)
    else:
        mock_traffic_cls.read_all_classes.side_effect = tc_read

    logging.debug("case three")
    qos_mgr._initialized = False
    qos_mgr._setupInternal()
    self.assertTrue(not qos_mgr._redis_store)

    # verify that unreferenced qos configs are purged from the system
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 2)
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 15)
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 13)
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 11)
    else:
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 15, False)
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 150, False)
        mock_traffic_cls.delete_class.assert_any_call(self.dl_intf, 13, False)
        mock_traffic_cls.delete_class.assert_any_call(self.dl_intf, 130, False)
        mock_traffic_cls.delete_class.assert_any_call(self.dl_intf, 11, False)
        mock_traffic_cls.delete_class.assert_any_call(self.dl_intf, 110, False)
def _testMultipleSubscribers(
    self,
    mock_meter_get_action_inst,
    mock_tc_get_action_inst,
    mock_meter_cls,
    mock_traffic_cls,
):
    """
    This test verifies that qos configuration gets programmed correctly
    for addition and deletion of a multiple subscribers and rules.
    we additionally run through different scenarios involving
    - deactivating a rule
    - deactivating a subscriber
    - creating gaps in deletion and verifying that new qos configs get
    programmed properly with appropriate qids
    - additionally we also verify the idempotency of deletion calls and
    ensure that code doesn't behave incorrectly when same items are
    deleted multiple times
    - Finally we delete everything and verify if that behavior is right"""
    if self.config["qos"]["impl"] == QosImplType.LINUX_TC:
        mock_traffic_cls.read_all_classes.side_effect = lambda intf: []
        mock_traffic_cls.delete_class.side_effect = lambda *args: 0

    qos_mgr = QosManager(MagicMock, asyncio.new_event_loop(), self.config)
    qos_mgr._redis_store = {}
    qos_mgr._setupInternal()
    # tuples are (imsi, rule_num, direction)
    rule_list1 = [
        ("1", 0, 0),
        ("1", 1, 0),
        ("1", 2, 1),
        ("2", 0, 0),
    ]
    rule_list2 = [
        ("2", 1, 0),
        ("3", 0, 0),
        ("4", 0, 0),
        ("5", 0, 0),
        ("5", 1, 1),
        ("6", 0, 0),
    ]

    # qids are handed out sequentially from start_idx 2
    start_idx, end_idx = 2, 2 + len(rule_list1)
    id_list = list(range(start_idx, end_idx))
    qos_info = QosInfo(100000, 100000)
    exp_id_dict = {}
    # add new subscriber qos queues
    for i, (imsi, rule_num, d) in enumerate(rule_list1):
        qos_mgr.add_subscriber_qos(imsi, '', 0, rule_num, d, qos_info)

        exp_id = id_list[i]
        k = get_key_json(get_subscriber_key(imsi, '', rule_num, d))
        exp_id_dict[k] = exp_id
        # self.assertTrue(qos_mgr._redis_store[k] == exp_id)
        qid_info = get_data_json(get_subscriber_data(exp_id, 0, 0))
        self.assertEqual(qos_mgr._subscriber_state[imsi].rules[rule_num][0], (d, qid_info))

        if self.config["qos"]["impl"] == QosImplType.OVS_METER:
            self.verifyMeterAddQos(
                mock_meter_get_action_inst, mock_meter_cls,
                d, exp_id, qos_info,
            )
        else:
            self.verifyTcAddQos(
                mock_tc_get_action_inst, mock_traffic_cls,
                d, exp_id, qos_info,
            )

    # deactivate one rule
    # verify for imsi1 if rule num 0 gets cleaned up
    imsi, rule_num, d = rule_list1[0]
    k = get_key_json(get_subscriber_key(imsi, '', rule_num, d))
    exp_id = exp_id_dict[k]

    qos_mgr.remove_subscriber_qos(imsi, rule_num)
    self.assertTrue(k not in qos_mgr._redis_store)
    self.assertTrue(not qos_mgr._subscriber_state[imsi].find_rule(rule_num))
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        self.verifyMeterRemoveQos(mock_meter_cls, d, exp_id)
    else:
        self.verifyTcRemoveQos(mock_traffic_cls, d, exp_id)

    # deactivate same rule and check if we log properly
    with self.assertLogs("pipelined.qos.common", level="DEBUG") as cm:
        qos_mgr.remove_subscriber_qos(imsi, rule_num)
    error_msg = "unable to find rule_num 0 for imsi 1"
    self.assertTrue(cm.output[1].endswith(error_msg))

    # deactivate imsi
    # verify for imsi1 if rule num 1 and 2 gets cleaned up
    qos_mgr.remove_subscriber_qos(imsi)

    remove_qos_args = []
    for imsi, rule_num, d in rule_list1[1:]:
        if imsi != "1":
            continue

        k = get_key_json(get_subscriber_key(imsi, '', rule_num, d))
        exp_id = exp_id_dict[k]
        self.assertTrue(k not in qos_mgr._redis_store)
        remove_qos_args.append((d, exp_id))

    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        self.verifyMeterRemoveQosBulk(mock_meter_cls, remove_qos_args)
    else:
        self.verifyTcRemoveQosBulk(mock_traffic_cls, remove_qos_args)

    self.assertTrue("1" not in qos_mgr._subscriber_state)

    # deactivate same imsi again and ensure nothing bad happens
    logging.debug("removing qos")
    with self.assertLogs("pipelined.qos.common", level="DEBUG") as cm:
        qos_mgr.remove_subscriber_qos("1")
    logging.debug("removing qos: done")
    error_msg = "imsi 1 not found"
    self.assertTrue(error_msg in cm.output[-1])

    # now only imsi2 should remain
    assert(len(qos_mgr._subscriber_state) == 1)
    assert(len(qos_mgr._subscriber_state['2'].rules) == 1)
    assert(len(qos_mgr._subscriber_state['2'].rules[0]) == 1)
    existing_qid = qos_mgr._subscriber_state['2'].rules[0][0][1]
    _, existing_qid, _, _ = get_data(existing_qid)

    # add second rule list and delete and verify if things work
    qos_info = QosInfo(100000, 200000)
    # additional 1 is for accomodating the existing imsi2RuleList1Qid
    start_idx, end_idx = 2, 2 + len(rule_list2) + 1
    # freed qids are reused, but the still-allocated qid is skipped
    id_list = [i for i in range(start_idx, end_idx) if i != existing_qid]

    # add new subscriber qos queues
    for i, (imsi, rule_num, d) in enumerate(rule_list2):
        qos_mgr.add_subscriber_qos(imsi, '', 0, rule_num, d, qos_info)
        exp_id = id_list[i]
        qid_info = get_data_json(get_subscriber_data(exp_id, 0, 0))

        k = get_key_json(get_subscriber_key(imsi, '', rule_num, d))
        self.assertEqual(qos_mgr._redis_store[k], qid_info)
        self.assertEqual(qos_mgr._subscriber_state[imsi].rules[rule_num][0], (d, qid_info))
        if self.config["qos"]["impl"] == QosImplType.OVS_METER:
            self.verifyMeterAddQos(
                mock_meter_get_action_inst, mock_meter_cls,
                d, exp_id, qos_info,
            )
        else:
            self.verifyTcAddQos(
                mock_tc_get_action_inst, mock_traffic_cls,
                d, exp_id, qos_info,
            )

    # delete the subscriber qos queues
    for i, (imsi, rule_num, d) in enumerate(rule_list2):
        qos_mgr.remove_subscriber_qos(imsi, rule_num)
        if self.config["qos"]["impl"] == QosImplType.OVS_METER:
            self.verifyMeterRemoveQos(mock_meter_cls, d, id_list[i])
        else:
            self.verifyTcRemoveQos(mock_traffic_cls, d, id_list[i])

    self.assertTrue(len(qos_mgr._subscriber_state) == 1)
    self.assertTrue('2' in qos_mgr._subscriber_state)

    # delete everything
    qos_mgr.remove_subscriber_qos(imsi='2')

    # imsi2 from rule_list1 alone wasn't removed
    imsi, rule_num, d = rule_list1[3]
    k = get_key_json(get_subscriber_key(imsi, '', rule_num, d))
    self.assertTrue(not qos_mgr._redis_store)
    self.assertTrue(not qos_mgr._subscriber_state)
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        self.verifyMeterRemoveQos(mock_meter_cls, d, existing_qid)
    else:
        self.verifyTcRemoveQos(mock_traffic_cls, d, existing_qid)
def _testSanity( self, mock_meter_get_action_inst, mock_tc_get_action_inst, mock_meter_cls, mock_traffic_cls, ): """ This test verifies that qos configuration gets programmed correctly for addition and deletion of a single subscriber and a single rule, We additionally verify if the clean_restart wipes out everything """ # mock unclean state in qos prior_qids = {self.ul_intf: [(2, 0)], self.dl_intf: [(3, 0)]} if self.config["qos"]["impl"] == QosImplType.LINUX_TC: mock_traffic_cls.read_all_classes.side_effect = lambda intf: prior_qids[intf] qos_mgr = QosManager(MagicMock, asyncio.new_event_loop(), self.config) qos_mgr._redis_store = {} qos_mgr._setupInternal() imsi, ip_addr, rule_num, qos_info = "1234", '1.1.1.1', 0, QosInfo(100000, 100000) # add new subscriber qos queue qos_mgr.add_subscriber_qos(imsi, ip_addr, 0, rule_num, FlowMatch.UPLINK, qos_info) qos_mgr.add_subscriber_qos(imsi, ip_addr, 0, rule_num, FlowMatch.DOWNLINK, qos_info) k1 = get_key_json(get_subscriber_key(imsi, ip_addr, rule_num, FlowMatch.UPLINK)) k2 = get_key_json(get_subscriber_key(imsi, ip_addr, rule_num, FlowMatch.DOWNLINK)) ul_exp_id = qos_mgr.impl._start_idx dl_exp_id = qos_mgr.impl._start_idx + 1 ul_qid_info = get_data_json(get_subscriber_data(ul_exp_id, 0, 0)) dl_qid_info = get_data_json(get_subscriber_data(dl_exp_id, 0, 0)) self.assertTrue(qos_mgr._redis_store[k1] == ul_qid_info) self.assertTrue(qos_mgr._redis_store[k2] == dl_qid_info) self.assertTrue(qos_mgr._subscriber_state[imsi].rules[rule_num][0] == (0, ul_qid_info)) self.assertTrue(qos_mgr._subscriber_state[imsi].rules[rule_num][1] == (1, dl_qid_info)) # add the same subscriber and ensure that we didn't create another # qos config for the subscriber qos_mgr.add_subscriber_qos(imsi, ip_addr, 0, rule_num, FlowMatch.UPLINK, qos_info) qos_mgr.add_subscriber_qos(imsi, ip_addr, 0, rule_num, FlowMatch.DOWNLINK, qos_info) self.assertTrue(qos_mgr._subscriber_state[imsi].rules[rule_num][0] == (0, ul_qid_info)) 
self.assertTrue(qos_mgr._subscriber_state[imsi].rules[rule_num][1] == (1, dl_qid_info)) # verify if traffic class was invoked properly if self.config["qos"]["impl"] == QosImplType.OVS_METER: self.verifyMeterAddQos( mock_meter_get_action_inst, mock_meter_cls, FlowMatch.UPLINK, ul_exp_id, qos_info, ) self.verifyMeterAddQos( mock_meter_get_action_inst, mock_meter_cls, FlowMatch.DOWNLINK, dl_exp_id, qos_info, ) self.verifyMeterCleanRestart(mock_meter_cls) else: self.verifyTcAddQos( mock_tc_get_action_inst, mock_traffic_cls, FlowMatch.UPLINK, ul_exp_id, qos_info, ) self.verifyTcAddQos( mock_tc_get_action_inst, mock_traffic_cls, FlowMatch.DOWNLINK, dl_exp_id, qos_info, ) self.verifyTcCleanRestart(prior_qids, mock_traffic_cls) # remove the subscriber qos and verify things are cleaned up qos_mgr.remove_subscriber_qos(imsi, rule_num) self.assertTrue(len(qos_mgr._redis_store) == 0) self.assertTrue(imsi not in qos_mgr._subscriber_state) if self.config["qos"]["impl"] == QosImplType.OVS_METER: self.verifyMeterRemoveQos(mock_meter_cls, FlowMatch.UPLINK, ul_exp_id) self.verifyMeterRemoveQos(mock_meter_cls, FlowMatch.DOWNLINK, dl_exp_id) else: self.verifyTcRemoveQos(mock_traffic_cls, FlowMatch.UPLINK, ul_exp_id) self.verifyTcRemoveQos(mock_traffic_cls, FlowMatch.DOWNLINK, dl_exp_id)
class EnforcementController(PolicyMixin, MagmaController):
    """
    EnforcementController

    The enforcement controller installs flows for policy enforcement and
    classification. Each flow installed matches on a rule and an IMSI and then
    classifies the packet with the rule. The flow also redirects and drops
    the packet as specified in the policy.

    NOTE: Enforcement currently relies on the fact that policies do not
    overlap. In this implementation, there is the idea of a 'default rule'
    which is the catch-all. This rule is treated specially and tagged with a
    specific priority.
    """

    APP_NAME = "enforcement"
    APP_TYPE = ControllerType.LOGICAL
    # lowest-priority drop flow, just above table minimum
    ENFORCE_DROP_PRIORITY = flows.MINIMUM_PRIORITY + 1
    # For allowing unclassified flows for app/service type rules.
    UNCLASSIFIED_ALLOW_PRIORITY = ENFORCE_DROP_PRIORITY + 1
    # Should not overlap with the drop flow as drop matches all packets.
    MIN_ENFORCE_PROGRAMMED_FLOW = UNCLASSIFIED_ALLOW_PRIORITY + 1
    MAX_ENFORCE_PRIORITY = flows.MAXIMUM_PRIORITY
    # Effectively range is 3 -> 65535
    ENFORCE_PRIORITY_RANGE = MAX_ENFORCE_PRIORITY - MIN_ENFORCE_PROGRAMMED_FLOW

    def __init__(self, *args, **kwargs):
        super(EnforcementController, self).__init__(*args, **kwargs)
        self._config = kwargs['config']
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_main_table = self._service_manager.get_next_table_num(
            self.APP_NAME)
        # stats are gathered in a scratch table owned by the stats app
        self._enforcement_stats_scratch = self._service_manager.get_table_num(
            EnforcementStatsController.APP_NAME)
        self.loop = kwargs['loop']
        self._msg_hub = MessageHub(self.logger)
        self._redirect_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
        self._bridge_ip_address = kwargs['config']['bridge_ip_address']
        # populated on datapath connect (initialize_on_connect)
        self._redirect_manager = None
        self._qos_mgr = None
        self._clean_restart = kwargs['config']['clean_restart']

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath
        self._qos_mgr = QosManager(datapath, self.loop, self._config)
        self._qos_mgr.setup()
        self._redirect_manager = RedirectionManager(
            self._bridge_ip_address,
            self.logger,
            self.tbl_num,
            self._enforcement_stats_scratch,
            self._redirect_scratch,
            self._session_rule_version_mapper)

    def cleanup_on_disconnect(self, datapath):
        """
        Cleanup flows on datapath disconnect event.

        Args:
            datapath: ryu datapath struct
        """
        if self._clean_restart:
            self.delete_all_flows(datapath)

    def delete_all_flows(self, datapath):
        # wipe both the main enforcement table and the redirect scratch
        flows.delete_all_flows_from_table(datapath, self.tbl_num)
        flows.delete_all_flows_from_table(datapath, self._redirect_scratch)

    def cleanup_state(self):
        # no in-memory state needs resetting for this controller
        pass

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        # delegate OF barrier replies to the message hub
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        # delegate OF error messages to the message hub
        self._msg_hub.handle_error(ev)

    @set_ev_cls(ofp_event.EventOFPMeterConfigStatsReply, MAIN_DISPATCHER)
    def meter_config_stats_reply_handler(self, ev):
        # forward meter config stats only when the meter-based qos impl
        # is active; no-op before initialize_on_connect has run
        if not self._qos_mgr:
            return
        qos_impl = self._qos_mgr.qos_impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_config_stats(ev.msg.body)

    @set_ev_cls(ofp_event.EventOFPMeterFeaturesStatsReply, MAIN_DISPATCHER)
    def meter_features_stats_reply_handler(self, ev):
        # forward meter feature stats only when the meter-based qos impl
        # is active; no-op before initialize_on_connect has run
        if not self._qos_mgr:
            return
        qos_impl = self._qos_mgr.qos_impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_feature_stats(ev.msg.body)

    def _install_default_flows_if_not_installed(
            self, datapath,
            existing_flows: List[OFPFlowStats]) -> List[OFPFlowStats]:
        """
        For each direction set the default flows to just forward to next app.

        The enforcement flows for each subscriber would be added when the
        IP session is created, by reaching out to the controller/PCRF.
        If default flows are already installed, do nothing.

        Args:
            datapath: ryu datapath struct
        Returns:
            The list of flows that remain after inserting default flows
        """
        inbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                   direction=Direction.IN)
        outbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                    direction=Direction.OUT)
        inbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath, self.tbl_num, inbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)
        outbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath, self.tbl_num, outbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        # only send the default flows that aren't already present
        msgs, remaining_flows = self._msg_hub \
            .filter_msgs_if_not_in_flow_list([inbound_msg, outbound_msg],
                                             existing_flows)
        if msgs:
            chan = self._msg_hub.send(msgs, datapath)
            self._wait_for_responses(chan, len(msgs))

        return remaining_flows

    def get_of_priority(self, precedence):
        """
        Lower the precedence higher the importance of the flow in 3GPP.
        Higher the priority higher the importance of the flow in openflow.
        Convert precedence to priority:
        1 - Flows with precedence > 65534 will have min priority which is
        the min priority for a programmed flow = (default drop + 1)
        2 - Flows in the precedence range 0-65534 will have priority
        65535 - Precedence
        :param precedence:
        :return:
        """
        if precedence >= self.ENFORCE_PRIORITY_RANGE:
            self.logger.warning(
                "Flow precedence is higher than OF range using min priority %d",
                self.MIN_ENFORCE_PROGRAMMED_FLOW)
            return self.MIN_ENFORCE_PROGRAMMED_FLOW
        return self.MAX_ENFORCE_PRIORITY - precedence

    def _get_rule_match_flow_msgs(self, imsi, rule):
        """
        Get flow msgs to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto

        Raises:
            FlowMatchError: if any flow in the rule has an invalid match
        """
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = self.get_of_priority(rule.priority)

        flow_adds = []
        for flow in rule.flow_list:
            try:
                flow_adds.extend(
                    self._get_classify_rule_flow_msgs(
                        imsi, flow, rule_num, priority, rule.qos,
                        rule.hard_timeout, rule.id, rule.app_name,
                        rule.app_service_type))
            except FlowMatchError as err:  # invalid match
                self.logger.error(
                    "Failed to get flow msg '%s' for subscriber %s: %s",
                    rule.id, imsi, err)
                raise err
        return flow_adds

    def _install_flow_for_rule(self, imsi, ip_addr, rule):
        """
        Install a flow to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """
        # redirect rules are handled entirely by the redirect manager
        if rule.redirect.support == rule.redirect.ENABLED:
            return self._install_redirect_flow(imsi, ip_addr, rule)

        if not rule.flow_list:
            self.logger.error(
                'The flow list for imsi %s, rule.id - %s'
                'is empty, this shoudn\'t happen', imsi, rule.id)
            return RuleModResult.FAILURE

        flow_adds = []
        try:
            flow_adds = self._get_rule_match_flow_msgs(imsi, rule)
        except FlowMatchError:
            return RuleModResult.FAILURE

        chan = self._msg_hub.send(flow_adds, self._datapath)
        return self._wait_for_rule_responses(imsi, rule, chan)

    def _wait_for_rule_responses(self, imsi, rule, chan):
        """Block on one response per flow in the rule; roll back on error."""
        def fail(err):
            self.logger.error(
                "Failed to install rule %s for subscriber %s: %s",
                rule.id, imsi, err)
            # undo any flows that did get installed for this rule
            self._deactivate_flow_for_rule(imsi, rule.id)
            return RuleModResult.FAILURE

        for _ in range(len(rule.flow_list)):
            try:
                result = chan.get()
            except MsgChannel.Timeout:
                return fail("No response from OVS")
            if not result.ok():
                return fail(result.exception())
        return RuleModResult.SUCCESS

    def _get_classify_rule_flow_msgs(self, imsi, flow, rule_num, priority,
                                     qos, hard_timeout, rule_id, app_name,
                                     app_service_type):
        """
        Install a flow from a rule. If the flow action is DENY, then the flow
        will drop the packet. Otherwise, the flow classifies the packet with
        its matched rule and injects the rule num into the packet's register.
        """
        flow_match = flow_match_to_magma_match(flow.match)
        flow_match.imsi = encode_imsi(imsi)
        flow_match_actions, instructions = self._get_classify_rule_of_actions(
            flow, rule_num, imsi, qos, rule_id)
        msgs = []
        if app_name:
            # We have to allow initial traffic to pass through, before it gets
            # classified by DPI, flow match set app_id to unclassified
            flow_match.app_id = UNCLASSIFIED_PROTO_ID
            # Set
            parser = self._datapath.ofproto_parser
            # tag passthrough packets so stats are not collected for them
            passthrough_actions = flow_match_actions + \
                [parser.NXActionRegLoad2(dst=SCRATCH_REGS[1],
                                         value=IGNORE_STATS)]
            msgs.append(
                flows.get_add_resubmit_current_service_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    flow_match,
                    passthrough_actions,
                    hard_timeout=hard_timeout,
                    priority=self.UNCLASSIFIED_ALLOW_PRIORITY,
                    cookie=rule_num,
                    resubmit_table=self._enforcement_stats_scratch))
            # then match on the DPI-classified app id for the real rule flow
            flow_match.app_id = get_app_id(
                PolicyRule.AppName.Name(app_name),
                PolicyRule.AppServiceType.Name(app_service_type),
            )

        if flow.action == flow.DENY:
            msgs.append(
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    flow_match,
                    flow_match_actions,
                    hard_timeout=hard_timeout,
                    priority=priority,
                    cookie=rule_num))
        else:
            msgs.append(
                flows.get_add_resubmit_current_service_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    flow_match,
                    flow_match_actions,
                    instructions=instructions,
                    hard_timeout=hard_timeout,
                    priority=priority,
                    cookie=rule_num,
                    resubmit_table=self._enforcement_stats_scratch))
        return msgs

    def _install_redirect_flow(self, imsi, ip_addr, rule):
        """Hand a redirect-enabled rule over to the redirection manager."""
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = self.get_of_priority(rule.priority)
        redirect_request = RedirectionManager.RedirectRequest(
            imsi=imsi,
            ip_addr=ip_addr,
            rule=rule,
            rule_num=rule_num,
            priority=priority)
        try:
            self._redirect_manager.handle_redirection(
                self._datapath, self.loop, redirect_request)
            return RuleModResult.SUCCESS
        except RedirectException as err:
            self.logger.error(
                'Redirect Exception for imsi %s, rule.id - %s : %s',
                imsi, rule.id, err)
            return RuleModResult.FAILURE

    def _get_classify_rule_of_actions(self, flow, rule_num, imsi, qos,
                                      rule_id):
        """Build the OF actions/instructions for a classify-rule flow."""
        parser = self._datapath.ofproto_parser
        instructions = []
        # encode the rule id in hex
        of_note = parser.NXActionNote(list(rule_id.encode()))
        actions = [of_note]
        # DENY flows need no qos/register actions — the drop flow handles them
        if flow.action == flow.DENY:
            return actions, instructions

        ul_qos = qos.max_req_bw_ul
        dl_qos = qos.max_req_bw_dl

        # only attach qos for the direction the flow actually matches
        qos_info = None
        d = flow.match.direction
        if ul_qos != 0 and d == flow.match.UPLINK:
            qos_info = QosInfo(gbr=None, mbr=ul_qos)
        elif dl_qos != 0 and d == flow.match.DOWNLINK:
            qos_info = QosInfo(gbr=None, mbr=dl_qos)

        if qos_info:
            # NOTE(review): 4-arg legacy add_subscriber_qos signature here —
            # the newer manager API takes (imsi, ip_addr, apn_ambr, rule_num,
            # direction, qos_info); confirm against the QosManager in use.
            action, inst = self._qos_mgr.add_subscriber_qos(
                imsi, rule_num, d, qos_info)

            self.logger.debug("adding Actions %s instruction %s ", action,
                              inst)
            if action:
                actions.append(action)
            if inst:
                instructions.append(inst)

        # stamp rule number and rule version into packet registers
        version = self._session_rule_version_mapper.get_version(imsi, rule_id)
        actions.extend([
            parser.NXActionRegLoad2(dst='reg2', value=rule_num),
            parser.NXActionRegLoad2(dst=RULE_VERSION_REG, value=version)
        ])
        return actions, instructions

    def _get_default_flow_msg_for_subscriber(self, imsi):
        # low-priority per-subscriber drop flow message (not sent here)
        match = MagmaMatch(imsi=encode_imsi(imsi))
        actions = []
        return flows.get_add_drop_flow_msg(
            self._datapath, self.tbl_num, match, actions,
            priority=self.ENFORCE_DROP_PRIORITY)

    def _install_default_flow_for_subscriber(self, imsi):
        """
        Add a low priority flow to drop a subscriber's traffic in the event
        that all rules have been deactivated.

        Args:
            imsi (string): subscriber id
        """
        match = MagmaMatch(imsi=encode_imsi(imsi))
        actions = []  # empty options == drop
        flows.add_drop_flow(self._datapath, self.tbl_num, match, actions,
                            priority=self.ENFORCE_DROP_PRIORITY)

    def _deactivate_flow_for_rule(self, imsi, rule_id):
        """
        Deactivate a specific rule using the flow cookie for a subscriber
        """
        try:
            num = self._rule_mapper.get_rule_num(rule_id)
        except KeyError:
            self.logger.error('Could not find rule id %s', rule_id)
            return
        cookie, mask = (num, flows.OVS_COOKIE_MATCH_ALL)
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath, self.tbl_num, match,
                          cookie=cookie, cookie_mask=mask)
        self._redirect_manager.deactivate_flow_for_rule(
            self._datapath, imsi, num)
        self._qos_mgr.remove_subscriber_qos(imsi, num)

    def _deactivate_flows_for_subscriber(self, imsi):
        """
        Deactivate all rules for a subscriber, ending any enforcement
        """
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath, self.tbl_num, match)
        self._redirect_manager.deactivate_flows_for_subscriber(
            self._datapath, imsi)
        self._qos_mgr.remove_subscriber_qos(imsi)

    def deactivate_rules(self, imsi, rule_ids):
        """
        Deactivate flows for a subscriber. If only imsi is present, delete all
        rule flows for a subscriber (i.e. end its session). If rule_ids are
        present, delete the rule flows for that subscriber.

        Args:
            imsi (string): subscriber id
            rule_ids (list of strings): policy rule ids
        """
        if not self.init_finished:
            self.logger.error('Pipelined is not initialized')
            return RuleModResult.FAILURE

        if self._datapath is None:
            self.logger.error('Datapath not initialized')
            return

        if not imsi:
            self.logger.error('No subscriber specified')
            return

        if not rule_ids:
            self._deactivate_flows_for_subscriber(imsi)
        else:
            for rule_id in rule_ids:
                self._deactivate_flow_for_rule(imsi, rule_id)
def _testUncleanRestart(self, mock_meter_cls, mock_traffic_cls):
    """This test verifies the case when we recover the state upon restart.

    We verify the base case of reconciling differences between system qos
    configs and qos_store configs (real code uses redis hash, we simply use
    dict). Additionally we test cases when system qos configs were wiped
    out and qos store state was wiped out and ensure that eventually the
    system and qos store state remains consistent.
    """
    loop = asyncio.new_event_loop()
    qos_mgr = QosManager(loop, self.config, fakeredis.FakeStrictRedis())
    qos_mgr.init_impl(MagicMock)
    # Replace the redis-backed store with a plain dict for easy inspection.
    qos_mgr._redis_store = {}

    def populate_db(qid_list, rule_list):
        # Seed the store with one (subscriber key -> qos data) entry per rule.
        qos_mgr._redis_store.clear()
        for i, t in enumerate(rule_list):
            k = get_key_json(get_subscriber_key(*t))
            v = get_data_json(get_subscriber_data(qid_list[i], 0, 0))
            qos_mgr._redis_store[k] = v

    MockSt = namedtuple("MockSt", "meter_id")
    dummy_meter_ev_body = [MockSt(11), MockSt(13), MockSt(2), MockSt(15)]

    def tc_read(intf):
        # Simulated system state: (qid, parent) tuples present per interface.
        if intf == self.ul_intf:
            return [(2, 65534), (15, 65534)]
        else:
            return [(13, 65534), (11, 65534)]

    # prepopulate qos_store
    old_qid_list = [2, 11, 13, 20]
    old_rule_list = [
        ("1", '1.1.1.1', 0, 0),
        ("1", '1.1.1.2', 1, 0),
        ("1", '1.1.1.3', 2, 1),
        ("2", '1.1.1.4', 0, 0),
    ]
    populate_db(old_qid_list, old_rule_list)

    # mock future state
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        qos_mgr.impl.handle_meter_config_stats(dummy_meter_ev_body)
    else:
        mock_traffic_cls.read_all_classes.side_effect = tc_read

    qos_mgr._initialized = False
    qos_mgr._setupInternal()

    # verify that qos_handle 20 not found in system is purged from map
    qid_list = []
    for _, v in qos_mgr._redis_store.items():
        _, qid, _, _ = get_data(v)
        qid_list.append(qid)
    logging.debug("qid_list %s", qid_list)
    self.assertNotIn(20, qid_list)

    # verify that unreferenced qos configs are purged from the system
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_called_with(MagicMock, 15)
    else:
        mock_traffic_cls.delete_class.assert_called_with(
            self.ul_intf, 15, False)

    # add a new rule to the qos_mgr and check if it is assigned right id
    imsi, rule_num, d, qos_info = "3", 0, 0, QosInfo(100000, 100000)
    qos_mgr.impl.get_action_instruction = MagicMock
    qos_mgr.add_subscriber_qos(imsi, '', 0, rule_num, d, qos_info)
    k = get_key_json(get_subscriber_key(imsi, '', rule_num, d))

    exp_id = 3  # since start_idx 2 is already used
    d3 = get_data_json(get_subscriber_data(exp_id, 0, 0))
    self.assertEqual(qos_mgr._redis_store[k], d3)
    self.assertEqual(
        qos_mgr._subscriber_state[imsi].rules[rule_num][0], (d, d3))

    # delete the restored rule - ensure that it gets cleaned properly
    purge_imsi = "1"
    purge_rule_num = 0
    purge_qos_handle = 2
    qos_mgr.remove_subscriber_qos(purge_imsi, purge_rule_num)
    self.assertTrue(
        purge_rule_num not in qos_mgr._subscriber_state[purge_imsi].rules)
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_called_with(
            MagicMock, purge_qos_handle)
    else:
        mock_traffic_cls.delete_class.assert_called_with(
            self.ul_intf, purge_qos_handle, False,
        )

    # case 2 - check with empty qos configs, qos_map gets purged
    mock_meter_cls.reset_mock()
    mock_traffic_cls.reset_mock()
    populate_db(old_qid_list, old_rule_list)

    # mock future state
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        MockSt = namedtuple("MockSt", "meter_id")
        qos_mgr.impl._fut = loop.create_future()
        qos_mgr.impl.handle_meter_config_stats([])
    else:
        mock_traffic_cls.read_all_classes.side_effect = lambda _: []

    qos_mgr._initialized = False
    qos_mgr._setupInternal()

    self.assertTrue(not qos_mgr._redis_store)
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_not_called()
    else:
        mock_traffic_cls.delete_class.assert_not_called()

    # case 3 - check with empty qos_map, all qos configs get purged
    mock_meter_cls.reset_mock()
    mock_traffic_cls.reset_mock()
    qos_mgr._redis_store.clear()

    # mock future state
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        qos_mgr.impl._fut = loop.create_future()
        qos_mgr.impl.handle_meter_config_stats(dummy_meter_ev_body)
    else:
        mock_traffic_cls.read_all_classes.side_effect = tc_read

    qos_mgr._initialized = False
    qos_mgr._setupInternal()

    self.assertTrue(not qos_mgr._redis_store)

    # verify that unreferenced qos configs are purged from the system
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 2)
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 15)
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 13)
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 11)
    else:
        mock_traffic_cls.delete_class.assert_any_call(
            self.ul_intf, 2, False)
        mock_traffic_cls.delete_class.assert_any_call(
            self.ul_intf, 15, False)
        mock_traffic_cls.delete_class.assert_any_call(
            self.dl_intf, 13, False)
        mock_traffic_cls.delete_class.assert_any_call(
            self.dl_intf, 11, False)
def _testUncleanRestart(self, mock_meter_cls, mock_traffic_cls):
    """This test verifies the case when we recover the state upon restart.

    We verify the base case of reconciling differences between system qos
    configs and qos_store configs (real code uses redis hash, we simply use
    dict). Additionally we test cases when system qos configs were wiped
    out and qos store state was wiped out and ensure that eventually the
    system and qos store state remains consistent.
    """
    # NOTE(review): a method with this same name also appears earlier in
    # the file; if both live in the same test class this definition
    # silently shadows the other -- confirm they belong to different
    # classes (or different revisions accidentally merged).
    loop = asyncio.new_event_loop()
    qos_mgr = QosManager(MagicMock, loop, self.config)
    # Replace the store with a plain dict for easy inspection.
    qos_mgr._qos_store = {}

    def populate_db(qid_list, rule_list):
        # Seed the store with one (subscriber key -> qid) entry per rule.
        qos_mgr._qos_store.clear()
        for i, t in enumerate(rule_list):
            k = get_json(get_subscriber_key(*t))
            qos_mgr._qos_store[k] = qid_list[i]

    MockSt = namedtuple("MockSt", "meter_id")
    dummy_meter_ev_body = [MockSt(11), MockSt(13), MockSt(2), MockSt(15)]

    def tc_read(intf):
        # Simulated system state: qids present per interface.
        if intf == self.ul_intf:
            return [2, 15]
        else:
            return [13, 11]

    # prepopulate qos_store
    old_qid_list = [2, 11, 13, 20]
    old_rule_list = [("1", 0, 0), ("1", 1, 0), ("1", 2, 1), ("2", 0, 0)]
    populate_db(old_qid_list, old_rule_list)

    # mock future state
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        qos_mgr.qos_impl.handle_meter_config_stats(dummy_meter_ev_body)
    else:
        mock_traffic_cls.read_all_classes.side_effect = tc_read

    qos_mgr._setupInternal()

    # run async loop once to ensure ready items are cleared
    loop._run_once()

    # verify that qos_handle 20 not found in system is purged from map
    self.assertFalse([v for _, v in qos_mgr._qos_store.items() if v == 20])

    # verify that unreferenced qos configs are purged from the system
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_called_with(MagicMock, 15)
    else:
        mock_traffic_cls.delete_class.assert_called_with(self.ul_intf, 15)

    # add a new rule to the qos_mgr and check if it is assigned right id
    imsi, rule_num, d, qos_info = "3", 0, 0, QosInfo(100000, 100000)
    qos_mgr.qos_impl.get_action_instruction = MagicMock
    qos_mgr.add_subscriber_qos(imsi, rule_num, d, qos_info)
    k = get_json(get_subscriber_key(imsi, rule_num, d))

    exp_id = 3  # since start_idx 2 is already used
    self.assertTrue(qos_mgr._qos_store[k] == exp_id)
    self.assertTrue(
        qos_mgr._subscriber_map[imsi][rule_num][0] == (exp_id, d))

    # case 2 - check with empty qos configs, qos_map gets purged
    mock_meter_cls.reset_mock()
    mock_traffic_cls.reset_mock()
    populate_db(old_qid_list, old_rule_list)

    # mock future state
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        MockSt = namedtuple("MockSt", "meter_id")
        qos_mgr.qos_impl._fut = loop.create_future()
        qos_mgr.qos_impl.handle_meter_config_stats([])
    else:
        mock_traffic_cls.read_all_classes.side_effect = lambda _: []

    qos_mgr._setupInternal()

    # run async loop once to ensure ready items are cleared
    loop._run_once()

    self.assertTrue(not qos_mgr._qos_store)
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_not_called()
    else:
        mock_traffic_cls.delete_class.assert_not_called()

    # case 3 - check with empty qos_map, all qos configs get purged
    mock_meter_cls.reset_mock()
    mock_traffic_cls.reset_mock()
    qos_mgr._qos_store.clear()

    # mock future state
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        qos_mgr.qos_impl._fut = loop.create_future()
        qos_mgr.qos_impl.handle_meter_config_stats(dummy_meter_ev_body)
    else:
        mock_traffic_cls.read_all_classes.side_effect = tc_read

    qos_mgr._setupInternal()

    # run async loop once to ensure ready items are cleared
    loop._run_once()

    self.assertTrue(not qos_mgr._qos_store)

    # verify that unreferenced qos configs are purged from the system
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 2)
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 15)
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 13)
        mock_meter_cls.del_meter.assert_any_call(MagicMock, 11)
    else:
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 2)
        mock_traffic_cls.delete_class.assert_any_call(self.ul_intf, 15)
        mock_traffic_cls.delete_class.assert_any_call(self.dl_intf, 13)
        mock_traffic_cls.delete_class.assert_any_call(self.dl_intf, 11)
def _testMultipleSubscribers(self, mock_meter_get_action_inst,
                             mock_tc_get_action_inst, mock_meter_cls,
                             mock_traffic_cls):
    '''
    This test verifies that qos configuration gets programmed correctly
    for addition and deletion of a multiple subscribers and rules.
    We additionally run through different scenarios involving:
    - deactivating a rule
    - deactivating a subscriber
    - creating gaps in deletion and verifying that new qos configs get
      programmed properly with appropriate qids
    - additionally we also verify the idempotency of deletion calls and
      ensure that code doesn't behave incorrectly when same items are
      deleted multiple times
    - Finally we delete everything and verify if that behavior is right
    '''
    qos_mgr = QosManager(MagicMock, asyncio.new_event_loop(), self.config)
    qos_mgr._qos_store = {}
    qos_mgr._setupInternal()

    # (imsi, rule_num, direction) triples; rule_list2 is added after
    # partial deletions to exercise qid reuse around gaps.
    rule_list1 = [("imsi1", 0, 0), ("imsi1", 1, 0), ("imsi1", 2, 1),
                  ("imsi2", 0, 0)]
    rule_list2 = [("imsi2", 1, 0), ("imsi3", 0, 0), ("imsi4", 0, 0),
                  ("imsi5", 0, 0), ("imsi5", 1, 1), ("imsi6", 0, 0)]

    start_idx, end_idx = 2, 2 + len(rule_list1)
    id_list = list(range(start_idx, end_idx))
    qos_info = QosInfo(100000, 100000)
    exp_id_dict = {}

    # add new subscriber qos queues
    for i, (imsi, rule_num, d) in enumerate(rule_list1):
        qos_mgr.add_subscriber_qos(imsi, rule_num, d, qos_info)

        exp_id = id_list[i]
        k = get_json(get_subscriber_key(imsi, rule_num, d))
        exp_id_dict[k] = exp_id
        self.assertTrue(qos_mgr._qos_store[k] == exp_id)
        self.assertTrue(
            qos_mgr._subscriber_map[imsi][rule_num] == (exp_id, d))

        if self.config["qos"]["impl"] == QosImplType.OVS_METER:
            self.verifyMeterAddQos(mock_meter_get_action_inst,
                                   mock_meter_cls, d, exp_id, qos_info)
        else:
            self.verifyTcAddQos(mock_tc_get_action_inst, mock_traffic_cls,
                                d, exp_id, qos_info)

    # deactivate one rule
    # verify for imsi1 if rule num 0 gets cleaned up
    imsi, rule_num, d = rule_list1[0]
    k = get_json(get_subscriber_key(imsi, rule_num, d))
    exp_id = exp_id_dict[k]

    qos_mgr.remove_subscriber_qos(imsi, rule_num)
    self.assertTrue(k not in qos_mgr._qos_store)
    self.assertTrue(rule_num not in qos_mgr._subscriber_map[imsi])
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        self.verifyMeterRemoveQos(mock_meter_cls, d, exp_id)
    else:
        self.verifyTcRemoveQos(mock_traffic_cls, d, exp_id)

    # deactivate same rule and check if we log properly
    with self.assertLogs('pipelined.qos.common', level='ERROR') as cm:
        qos_mgr.remove_subscriber_qos(imsi, rule_num)
    error_msg = "unable to find rule_num 0 for imsi imsi1"
    self.assertTrue(cm.output[0].endswith(error_msg))

    # deactivate imsi
    # verify for imsi1 if rule num 1 and 2 gets cleaned up
    qos_mgr.remove_subscriber_qos(imsi)

    remove_qos_args = []
    for imsi, rule_num, d in rule_list1[1:]:
        # only imsi1's remaining rules should have been torn down
        if imsi != "imsi1":
            continue
        k = get_json(get_subscriber_key(imsi, rule_num, d))
        exp_id = exp_id_dict[k]
        self.assertTrue(k not in qos_mgr._qos_store)
        remove_qos_args.append((d, exp_id))

    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        self.verifyMeterRemoveQosBulk(mock_meter_cls, remove_qos_args)
    else:
        self.verifyTcRemoveQosBulk(mock_traffic_cls, remove_qos_args)

    self.assertTrue("imsi1" not in qos_mgr._subscriber_map)

    # deactivate same imsi again and ensure nothing bad happens
    with self.assertLogs('pipelined.qos.common', level='DEBUG') as cm:
        qos_mgr.remove_subscriber_qos("imsi1")
    error_msg = "unable to find imsi imsi1"
    self.assertTrue(error_msg in cm.output[-1])

    # now only imsi2 should remain
    self.assertTrue(len(qos_mgr._qos_store) == 1)
    existing_qid = list(qos_mgr._qos_store.values())[0]

    # add second rule list and delete and verify if things work
    qos_info = QosInfo(100000, 200000)

    # additional 1 is for accomodating the existing imsi2RuleList1Qid
    start_idx, end_idx = 2, 2 + len(rule_list2) + 1
    # skip the qid still held by imsi2's surviving rule
    id_list = [i for i in range(start_idx, end_idx) if i != existing_qid]

    # add new subscriber qos queues
    for i, (imsi, rule_num, d) in enumerate(rule_list2):
        qos_mgr.add_subscriber_qos(imsi, rule_num, d, qos_info)

        exp_id = id_list[i]
        k = get_json(get_subscriber_key(imsi, rule_num, d))
        self.assertTrue(qos_mgr._qos_store[k] == exp_id)
        self.assertTrue(
            qos_mgr._subscriber_map[imsi][rule_num] == (exp_id, d))

        if self.config["qos"]["impl"] == QosImplType.OVS_METER:
            self.verifyMeterAddQos(mock_meter_get_action_inst,
                                   mock_meter_cls, d, exp_id, qos_info)
        else:
            self.verifyTcAddQos(mock_tc_get_action_inst, mock_traffic_cls,
                                d, exp_id, qos_info)

    # delete the subscriber qos queues
    for i, (imsi, rule_num, d) in enumerate(rule_list2):
        qos_mgr.remove_subscriber_qos(imsi, rule_num)

        k = get_json(get_subscriber_key(imsi, rule_num, d))
        self.assertTrue(k not in qos_mgr._qos_store)
        self.assertTrue(rule_num not in qos_mgr._subscriber_map[imsi])
        if self.config["qos"]["impl"] == QosImplType.OVS_METER:
            self.verifyMeterRemoveQos(mock_meter_cls, d, id_list[i])
        else:
            self.verifyTcRemoveQos(mock_traffic_cls, d, id_list[i])

    # delete everything
    qos_mgr.remove_subscriber_qos()

    # imsi2 from rule_list1 alone wasn't removed
    imsi, rule_num, d = rule_list1[3]
    k = get_json(get_subscriber_key(imsi, rule_num, d))
    self.assertTrue(not qos_mgr._qos_store)
    self.assertTrue(not qos_mgr._subscriber_map)
    if self.config["qos"]["impl"] == QosImplType.OVS_METER:
        self.verifyMeterRemoveQos(mock_meter_cls, d, existing_qid)
    else:
        self.verifyTcRemoveQos(mock_traffic_cls, d, existing_qid)