Example #1
    def __init__(self, *args, **kwargs):
        super(EnforcementController, self).__init__(*args, **kwargs)
        self._config = kwargs['config']
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_main_table = self._service_manager.get_next_table_num(
            EnforcementStatsController.APP_NAME)
        self._enforcement_stats_tbl = self._service_manager.get_table_num(
            EnforcementStatsController.APP_NAME)
        self.loop = kwargs['loop']

        self._msg_hub = MessageHub(self.logger)
        self._redirect_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
        self._bridge_ip_address = kwargs['config']['bridge_ip_address']
        self._redirect_manager = None
        self._qos_mgr = None
        self._clean_restart = kwargs['config']['clean_restart']
        self._redirect_manager = RedirectionManager(
            self._bridge_ip_address,
            self.logger,
            self.tbl_num,
            self._enforcement_stats_tbl,
            self.next_main_table,
            self._redirect_scratch,
            self._session_rule_version_mapper)
Example #2
    def test_ue_flows_del(self):
        """
        Verify that proxy flows are set up and then removed.
        """
        cls = self.__class__
        self._msg_hub = MessageHub(HeTableTest.he_controller.logger)
        dp = HeTableTest.he_controller._datapath
        ue_ip1 = '1.1.1.200'
        tun_id1 = 1

        dest_server1 = '2.2.2.4'
        rule1 = 123
        flow_msg = cls.he_controller.get_subscriber_flows(ue_ip1, tun_id1, dest_server1, rule1,
                                                          ['abc.com'], 'IMSI01', b'1')

        ue_ip2 = '10.10.10.20'
        tun_id2 = 2
        dest_server2 = '20.20.20.40'
        rule2 = 1230
        flow_msg2 = cls.he_controller.get_subscriber_flows(ue_ip2, tun_id2, dest_server2, rule2,
                                                          ['abc.com'], 'IMSI01', b'1')
        flow_msg.extend(flow_msg2)
        chan = self._msg_hub.send(flow_msg, dp)
        self._wait_for_responses(chan, len(flow_msg), HeTableTest.he_controller.logger)

        cls.he_controller.remove_subscriber_flow(ue_ip2, rule2)

        snapshot_verifier = SnapshotVerifier(self,
                                             self.BRIDGE,
                                             self.service_manager,
                                             max_sleep_time=20,
                                             datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass
Example #3
    def __init__(self, *args, **kwargs):
        super(InOutController, self).__init__(*args, **kwargs)
        self.config = self._get_config(kwargs['config'])
        self._li_port = None
        # TODO Alex do we want this to be configurable from swagger?
        if self.config.mtr_ip:
            self._mtr_service_enabled = True
        else:
            self._mtr_service_enabled = False

        if (self._service_manager.is_app_enabled(LIMirrorController.APP_NAME)
                and self.config.li_port_name):
            self._li_port = BridgeTools.get_ofport(self.config.li_port_name)
            self._li_table = self._service_manager.get_table_num(
                LIMirrorController.APP_NAME)
        self._ingress_tbl_num = self._service_manager.get_table_num(INGRESS)
        self._midle_tbl_num = \
            self._service_manager.get_table_num(PHYSICAL_TO_LOGICAL)
        self._egress_tbl_num = self._service_manager.get_table_num(EGRESS)
        # The following fields are only used in non-NAT config
        self._tbls = [
            self._ingress_tbl_num, self._midle_tbl_num, self._egress_tbl_num
        ]
        self._gw_mac_monitor = None
        self._current_upstream_mac_map = {}  # maps vlan to upstream gw mac
        self._clean_restart = kwargs['config']['clean_restart']
        self._msg_hub = MessageHub(self.logger)
        self._datapath = None
Example #4
 def __init__(self, *args, **kwargs):
     super(EnforcementStatsController, self).__init__(*args, **kwargs)
     # No need to report usage if relay mode is not enabled.
     self._relay_enabled = kwargs['mconfig'].relay_enabled
     if not self._relay_enabled:
         self.logger.info('Relay mode is not enabled. '
                          'enforcement_stats will not report usage.')
         return
     self.tbl_num = \
         self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
     self.next_table = \
         self._service_manager.get_next_table_num(self.APP_NAME)
     self.dpset = kwargs['dpset']
     self.loop = kwargs['loop']
     # Spawn a thread to poll for flow stats
     poll_interval = kwargs['config']['enforcement']['poll_interval']
     self.flow_stats_thread = hub.spawn(self._monitor, poll_interval)
     # Create an RPC channel to sessiond
     self.sessiond = kwargs['rpc_stubs']['sessiond']
     self._msg_hub = MessageHub(self.logger)
     self.unhandled_stats_msgs = []  # Store multi-part responses from ovs
     self.total_usage = {}  # Store total usage
     # Store last usage excluding deleted flows for calculating deltas
     self.last_usage_for_delta = {}
     self.failed_usage = {}  # Store failed usage to retry rpc to sessiond
Example #5
 def __init__(self, *args, **kwargs):
     super(EnforcementStatsController, self).__init__(*args, **kwargs)
     self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
     self.next_table = \
         self._service_manager.get_next_table_num(self.APP_NAME)
     self.dpset = kwargs['dpset']
     self.loop = kwargs['loop']
     # Spawn a thread to poll for flow stats
     poll_interval = kwargs['config']['enforcement']['poll_interval']
     # Create an RPC channel to sessiond
     self.sessiond = kwargs['rpc_stubs']['sessiond']
     self._msg_hub = MessageHub(self.logger)
     self.unhandled_stats_msgs = []  # Store multi-part responses from ovs
     self.total_usage = {}  # Store total usage
     self._clean_restart = kwargs['config']['clean_restart']
     self._redis_enabled = kwargs['config'].get('redis_enabled', False)
     self._unmatched_bytes = 0  # Store bytes matched by default rule if any
     self._default_drop_flow_name = \
         kwargs['config']['enforcement']['default_drop_flow_name']
     self.flow_stats_thread = hub.spawn(self._monitor, poll_interval)
     self._print_grpc_payload = os.environ.get('MAGMA_PRINT_GRPC_PAYLOAD')
     self._last_poll_time = datetime.now()
     self._last_report_timestamp = datetime.now()
     self._bridge_name = kwargs['config']['bridge_name']
     self._periodic_stats_reporting = kwargs['config']['enforcement'].get(
         'periodic_stats_reporting', True)
     if self._print_grpc_payload is None:
         self._print_grpc_payload = \
             kwargs['config'].get('magma_print_grpc_payload', False)
     self._restart_info_store = kwargs['restart_info_store']
     self._ovs_restarted = self._was_ovs_restarted()
     self.ng_config = self._get_ng_config(kwargs['config'],
                                          kwargs['rpc_stubs'])
     self._prefix_mapper = kwargs['interface_to_prefix_mapper']
Example #6
    def test_ue_flows_add_direction_in(self):
        """
        Verify that proxy flows are set up for the IN direction.
        """
        cls = self.__class__
        self._msg_hub = MessageHub(HeTableTest.he_controller.logger)

        ue_ip = '1.1.1.1'
        tun_id = 1
        dest_server = '2.2.2.2'
        flow_msg = cls.he_controller.get_subscriber_he_flows(
            "rule1", Direction.IN, ue_ip, tun_id, dest_server, 123,
            ['abc.com'], 'IMSI01', b'1')
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip), 0)
        chan = self._msg_hub.send(
            flow_msg,
            HeTableTest.he_controller._datapath,
        )
        self._wait_for_responses(chan, len(flow_msg),
                                 HeTableTest.he_controller.logger)

        snapshot_verifier = SnapshotVerifier(
            self,
            self.BRIDGE,
            self.service_manager,
            max_sleep_time=20,
            datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass
Example #7
File: gy.py Project: go-magma/magma
 def __init__(self, *args, **kwargs):
     super(GYController, self).__init__(*args, **kwargs)
     self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
     self.next_main_table = self._service_manager.get_next_table_num(
         self.APP_NAME)
     self.loop = kwargs['loop']
     self._msg_hub = MessageHub(self.logger)
     self._internal_ip_allocator = kwargs['internal_ip_allocator']
     tbls = \
         self._service_manager.allocate_scratch_tables(self.APP_NAME, 2)
     self._redirect_scratch = tbls[0]
     self._mac_rewr = \
         self._service_manager.INTERNAL_MAC_IP_REWRITE_TBL_NUM
     self._bridge_ip_address = kwargs['config']['bridge_ip_address']
     self._clean_restart = kwargs['config']['clean_restart']
     self._redirect_manager = \
         RedirectionManager(
             self._bridge_ip_address,
             self.logger,
             self.tbl_num,
             self._service_manager.get_table_num(EGRESS),
             self._redirect_scratch,
             self._session_rule_version_mapper
         ).set_cwf_args(
             internal_ip_allocator=kwargs['internal_ip_allocator'],
             arp=kwargs['app_futures']['arpd'],
             mac_rewrite=self._mac_rewr,
             bridge_name=kwargs['config']['bridge_name'],
             egress_table=self._service_manager.get_table_num(EGRESS)
         )
Example #8
 def __init__(self, *args, **kwargs):
     super(EnforcementStatsController, self).__init__(*args, **kwargs)
     self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
     self.next_table = \
         self._service_manager.get_next_table_num(self.APP_NAME)
     self.dpset = kwargs['dpset']
     self.loop = kwargs['loop']
     # Spawn a thread to poll for flow stats
     poll_interval = kwargs['config']['enforcement']['poll_interval']
     # Create an RPC channel to sessiond
     self.sessiond = kwargs['rpc_stubs']['sessiond']
     self._msg_hub = MessageHub(self.logger)
     self.unhandled_stats_msgs = []  # Store multi-part responses from ovs
     self.total_usage = {}  # Store total usage
     self._clean_restart = kwargs['config']['clean_restart']
     self._redis_enabled = kwargs['config'].get('redis_enabled', False)
     # Store last usage excluding deleted flows for calculating deltas
     if self._redis_enabled:
         self.last_usage_for_delta = UsageDeltaDict()
     else:
         self.last_usage_for_delta = {}
     self.failed_usage = {}  # Store failed usage to retry rpc to sessiond
     self._unmatched_bytes = 0  # Store bytes matched by default rule if any
     self._default_drop_flow_name = \
         kwargs['config']['enforcement']['default_drop_flow_name']
     self.flow_stats_thread = hub.spawn(self._monitor, poll_interval)
     self._print_grpc_payload = os.environ.get('MAGMA_PRINT_GRPC_PAYLOAD')
     if self._print_grpc_payload is None:
         self._print_grpc_payload = \
             kwargs['config'].get('magma_print_grpc_payload', False)
Example #9
    def test_ue_flows_del2(self):
        """
        Verify that proxy flows are set up and then removed.
        """
        cls = self.__class__
        self._msg_hub = MessageHub(HeTableTest.he_controller.logger)
        dp = HeTableTest.he_controller._datapath
        ue_ip1 = '1.1.1.200'
        tun_id1 = 1
        dest_server1 = '2.2.2.4'
        rule1 = 123
        flow_msg = cls.he_controller.get_subscriber_he_flows(
            'rule1', Direction.OUT, ue_ip1, tun_id1, dest_server1, rule1,
            ['abc.com'], 'IMSI01', b'1')

        ue_ip2 = '10.10.10.20'
        tun_id2 = 2
        dest_server2 = '20.20.20.40'
        rule2 = 1230
        flow_msg.extend(
            cls.he_controller.get_subscriber_he_flows('rule2', Direction.OUT,
                                                      ue_ip2, tun_id2,
                                                      dest_server2, rule2,
                                                      ['abc.com'], 'IMSI01',
                                                      b'1'))
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip1), 1)
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip2), 1)

        ue_ip2 = '10.10.10.20'
        dest_server2 = '20.20.40.40'
        rule2 = 1230
        flow_msg.extend(
            cls.he_controller.get_subscriber_he_flows('rule2', Direction.OUT,
                                                      ue_ip2, tun_id2,
                                                      dest_server2, rule2,
                                                      ['abc.com'], 'IMSI01',
                                                      None))

        chan = self._msg_hub.send(flow_msg, dp)
        self._wait_for_responses(chan, len(flow_msg),
                                 HeTableTest.he_controller.logger)

        cls.he_controller.remove_subscriber_he_flows(
            convert_ip_str_to_ip_proto(ue_ip2))

        snapshot_verifier = SnapshotVerifier(
            self,
            self.BRIDGE,
            self.service_manager,
            max_sleep_time=20,
            datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass
        # verify multiple remove works.
        cls.he_controller.remove_subscriber_he_flows(
            convert_ip_str_to_ip_proto(ue_ip2))
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip2), 0)
Example #10
 def __init__(self, *args, **kwargs):
     super(EgressController, self).__init__(*args, **kwargs)
     self.config = self._get_config(kwargs['config'])
     self.logger.info("egress config: %s", self.config)
     self._egress_tbl_num = self._service_manager.get_table_num(EGRESS)
     # The following fields are only used in non-NAT config
     self._clean_restart = kwargs['config']['clean_restart']
     self._msg_hub = MessageHub(self.logger)
     self._datapath = None
     self.tbl_num = self._egress_tbl_num
     self._gw_mac_monitor = None
     self._current_upstream_mac_map = {}  # maps vlan to upstream gw mac
     self._gw_mac_monitor_on = False
Example #11
    def __init__(self, *args, **kwargs):
        super(EnforcementController, self).__init__(*args, **kwargs)
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_main_table = self._service_manager.get_next_table_num(
            self.APP_NAME)
        self.loop = kwargs['loop']
        self._qos_map = QosQueueMap(kwargs['config']['nat_iface'],
                                    kwargs['config']['enodeb_iface'],
                                    kwargs['config']['enable_queue_pgm'])
        self._msg_hub = MessageHub(self.logger)
        self._redirect_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
        self._bridge_ip_address = kwargs['config']['bridge_ip_address']

        self._redirect_manager = None
Example #12
    def __init__(self, *args, **kwargs):
        super(EnforcementController, self).__init__(*args, **kwargs)
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_table = self._service_manager.get_next_table_num(
            self.APP_NAME)
        self._datapath = None
        self._rule_mapper = kwargs['rule_id_mapper']
        self.loop = kwargs['loop']
        self._policy_dict = PolicyRuleDict()
        self._qos_map = QosQueueMap(kwargs['config']['nat_iface'],
                                    kwargs['config']['enodeb_iface'],
                                    kwargs['config']['enable_queue_pgm'])
        self._msg_hub = MessageHub(self.logger)

        self._redirect_manager = RedirectionManager(
            kwargs['config']['bridge_ip_address'], self.logger, self.tbl_num,
            self.next_table)
Example #13
    def __init__(self, *args, **kwargs):
        super(MiddleController, self).__init__(*args, **kwargs)
        self.config = self._get_config(kwargs['config'])
        self.logger.info("middle config: %s", self.config)

        # TODO Alex do we want this to be configurable from swagger?
        if self.config.mtr_ip:
            self._mtr_service_enabled = True
        else:
            self._mtr_service_enabled = False

        self._midle_tbl_num = self._service_manager.get_table_num(
            PHYSICAL_TO_LOGICAL)
        self._egress_tbl_num = self._service_manager.get_table_num(EGRESS)
        # The following fields are only used in non-NAT config
        self._clean_restart = kwargs['config']['clean_restart']
        self._msg_hub = MessageHub(self.logger)
        self._datapath = None
        self.tbl_num = self._midle_tbl_num
Example #14
 def __init__(self, *args, **kwargs):
     super(EnforcementController, self).__init__(*args, **kwargs)
     self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
     self.next_main_table = self._service_manager.get_next_table_num(
         self.APP_NAME)
     self.loop = kwargs['loop']
     self._qos_map = QosQueueMap(kwargs['config']['nat_iface'],
                                 kwargs['config']['enodeb_iface'],
                                 kwargs['config']['enable_queue_pgm'])
     self._msg_hub = MessageHub(self.logger)
     self._redirect_scratch = \
         self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
     self._bridge_ip_address = kwargs['config']['bridge_ip_address']
     self._redirect_manager = None
     self._clean_restart = kwargs['config']['clean_restart']
     self._relay_enabled = kwargs['mconfig'].relay_enabled
     if not self._relay_enabled:
         self.logger.info('Relay mode is not enabled, enforcement will not'
                          ' wait for sessiond to push flows.')
Example #15
 def __init__(self, *args, **kwargs):
     super(EnforcementStatsController, self).__init__(*args, **kwargs)
     self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
     self.next_table = \
         self._service_manager.get_next_table_num(self.APP_NAME)
     self.dpset = kwargs['dpset']
     self.loop = kwargs['loop']
     # Spawn a thread to poll for flow stats
     poll_interval = kwargs['config']['enforcement']['poll_interval']
     # Create an RPC channel to sessiond
     self.sessiond = kwargs['rpc_stubs']['sessiond']
     self._msg_hub = MessageHub(self.logger)
     self.unhandled_stats_msgs = []  # Store multi-part responses from ovs
     self.total_usage = {}  # Store total usage
     # Store last usage excluding deleted flows for calculating deltas
     self.last_usage_for_delta = {}
     self.failed_usage = {}  # Store failed usage to retry rpc to sessiond
     self._unmatched_bytes = 0  # Store bytes matched by default rule if any
     self._clean_restart = kwargs['config']['clean_restart']
     self.flow_stats_thread = hub.spawn(self._monitor, poll_interval)
Example #16
    def __init__(self, *args, **kwargs):
        super(IngressController, self).__init__(*args, **kwargs)
        self.config = self._get_config(kwargs['config'])
        self.logger.info("ingress config: %s", self.config)

        self._li_port = None
        # TODO Alex do we want this to be configurable from swagger?
        if self.config.mtr_ip:
            self._mtr_service_enabled = True
        else:
            self._mtr_service_enabled = False

        if (self._service_manager.is_app_enabled(LIMirrorController.APP_NAME)
                and self.config.li_port_name):
            self._li_port = BridgeTools.get_ofport(self.config.li_port_name)
            self._li_table = self._service_manager.get_table_num(
                LIMirrorController.APP_NAME)
        self._ingress_tbl_num = self._service_manager.get_table_num(INGRESS)
        # The following fields are only used in non-NAT config
        self._clean_restart = kwargs['config']['clean_restart']
        self._msg_hub = MessageHub(self.logger)
        self._datapath = None
        self.tbl_num = self._ingress_tbl_num
Example #17
class EnforcementController(PolicyMixin, MagmaController):
    """
    EnforcementController

    The enforcement controller installs flows for policy enforcement and
    classification. Each flow installed matches on a rule and an IMSI and then
    classifies the packet with the rule. The flow also redirects and drops
    the packet as specified in the policy.

    NOTE: Enforcement currently relies on the fact that policies do not
    overlap. In this implementation, there is the idea of a 'default rule'
    which is the catch-all. This rule is treated specially and tagged with a
    specific priority.
    """

    APP_NAME = "enforcement"
    APP_TYPE = ControllerType.LOGICAL
    ENFORCE_DROP_PRIORITY = flows.MINIMUM_PRIORITY + 1
    # Should not overlap with the drop flow as drop matches all packets.
    MIN_ENFORCE_PROGRAMMED_FLOW = ENFORCE_DROP_PRIORITY + 1
    MAX_ENFORCE_PRIORITY = flows.MAXIMUM_PRIORITY
    # Effectively range is 2 -> 65535
    ENFORCE_PRIORITY_RANGE = MAX_ENFORCE_PRIORITY - MIN_ENFORCE_PROGRAMMED_FLOW

    def __init__(self, *args, **kwargs):
        super(EnforcementController, self).__init__(*args, **kwargs)
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_main_table = self._service_manager.get_next_table_num(
            self.APP_NAME)
        self._enforcement_stats_scratch = self._service_manager.get_table_num(
            EnforcementStatsController.APP_NAME)
        self.loop = kwargs['loop']
        self._relay_enabled = kwargs['mconfig'].relay_enabled
        self._qos_map = QosQueueMap(kwargs['config']['nat_iface'],
                                    kwargs['config']['enodeb_iface'],
                                    kwargs['config']['enable_queue_pgm'])
        self._msg_hub = MessageHub(self.logger)
        self._redirect_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
        self._bridge_ip_address = kwargs['config']['bridge_ip_address']
        self._redirect_manager = None
        self._clean_restart = kwargs['config']['clean_restart']
        if not self._relay_enabled:
            self.logger.info('Relay mode is not enabled, enforcement will not'
                             ' wait for sessiond to push flows.')

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath

        if not self._relay_enabled:
            self._install_default_flows_if_not_installed(datapath, [])

        self._redirect_manager = RedirectionManager(
            self._bridge_ip_address, self.logger, self.tbl_num,
            self._enforcement_stats_scratch, self._redirect_scratch,
            self._session_rule_version_mapper)

    def cleanup_on_disconnect(self, datapath):
        """
        Cleanup flows on datapath disconnect event.

        Args:
            datapath: ryu datapath struct
        """
        if self._clean_restart:
            self.delete_all_flows(datapath)

    def delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self.tbl_num)
        flows.delete_all_flows_from_table(datapath, self._redirect_scratch)

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)

    def _install_default_flows_if_not_installed(
            self, datapath,
            existing_flows: List[OFPFlowStats]) -> List[OFPFlowStats]:
        """
        For each direction set the default flows to just forward to next app.
        The enforcement flows for each subscriber would be added when the
        IP session is created, by reaching out to the controller/PCRF.
        If default flows are already installed, do nothing.

        Args:
            datapath: ryu datapath struct
        Returns:
            The list of flows that remain after inserting default flows
        """
        inbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                   direction=Direction.IN)
        outbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                    direction=Direction.OUT)

        inbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath,
            self.tbl_num,
            inbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        outbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath,
            self.tbl_num,
            outbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        msgs, remaining_flows = self._msg_hub \
            .filter_msgs_if_not_in_flow_list([inbound_msg, outbound_msg],
                                             existing_flows)
        if msgs:
            chan = self._msg_hub.send(msgs, datapath)
            self._wait_for_responses(chan, len(msgs))

        return remaining_flows

    def get_of_priority(self, precedence):
        """
        In 3GPP, the lower the precedence, the more important the flow.
        In OpenFlow, the higher the priority, the more important the flow.
        Convert precedence to priority as follows:
        1 - Flows with precedence > 65534 get the minimum priority for a
        programmed flow (default drop priority + 1).
        2 - Flows with precedence in the range 0-65534 get priority
        65535 - precedence.
        :param precedence: 3GPP precedence of the flow
        :return: OpenFlow priority
        """
        if precedence >= self.ENFORCE_PRIORITY_RANGE:
            self.logger.warning(
                "Flow precedence is higher than OF range using min priority %d",
                self.MIN_ENFORCE_PROGRAMMED_FLOW)
            return self.MIN_ENFORCE_PROGRAMMED_FLOW
        return self.MAX_ENFORCE_PRIORITY - precedence
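
    # A minimal worked example of the conversion above (a sketch, assuming
    # flows.MINIMUM_PRIORITY == 0 and flows.MAXIMUM_PRIORITY == 65535, so
    # ENFORCE_DROP_PRIORITY == 1 and MIN_ENFORCE_PROGRAMMED_FLOW == 2):
    #   get_of_priority(100)    -> 65435   # 65535 - 100
    #   get_of_priority(65534)  -> 2       # clamped to MIN_ENFORCE_PROGRAMMED_FLOW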

    def _get_rule_match_flow_msgs(self, imsi, rule):
        """
        Get a flow msg to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = self.get_of_priority(rule.priority)
        ul_qos = rule.qos.max_req_bw_ul
        dl_qos = rule.qos.max_req_bw_dl

        flow_adds = []
        for flow in rule.flow_list:
            try:
                flow_adds.append(
                    self._get_classify_rule_flow_msg(imsi, flow, rule_num,
                                                     priority, ul_qos, dl_qos,
                                                     rule.hard_timeout,
                                                     rule.id))

            except FlowMatchError as err:  # invalid match
                self.logger.error(
                    "Failed to get flow msg '%s' for subscriber %s: %s",
                    rule.id, imsi, err)
                raise err
        return flow_adds

    def _install_flow_for_rule(self, imsi, ip_addr, rule):
        """
        Install a flow to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """

        if rule.redirect.support == rule.redirect.ENABLED:
            return self._install_redirect_flow(imsi, ip_addr, rule)

        if not rule.flow_list:
            self.logger.error(
                'The flow list for imsi %s, rule.id - %s '
                'is empty, this shouldn\'t happen', imsi, rule.id)
            return RuleModResult.FAILURE

        flow_adds = []
        try:
            flow_adds = self._get_rule_match_flow_msgs(imsi, rule)
        except FlowMatchError:
            return RuleModResult.FAILURE

        chan = self._msg_hub.send(flow_adds, self._datapath)

        return self._wait_for_rule_responses(imsi, rule, chan)

    def _wait_for_rule_responses(self, imsi, rule, chan):
        def fail(err):
            self.logger.error(
                "Failed to install rule %s for subscriber %s: %s", rule.id,
                imsi, err)
            self._deactivate_flow_for_rule(imsi, rule.id)
            return RuleModResult.FAILURE

        for _ in range(len(rule.flow_list)):
            try:
                result = chan.get()
            except MsgChannel.Timeout:
                return fail("No response from OVS")
            if not result.ok():
                return fail(result.exception())
        return RuleModResult.SUCCESS

    def _get_classify_rule_flow_msg(self, imsi, flow, rule_num, priority,
                                    ul_qos, dl_qos, hard_timeout, rule_id):
        """
        Install a flow from a rule. If the flow action is DENY, then the flow
        will drop the packet. Otherwise, the flow classifies the packet with
        its matched rule and injects the rule num into the packet's register.
        """
        flow_match = flow_match_to_magma_match(flow.match)
        flow_match.imsi = encode_imsi(imsi)
        flow_match_actions = self._get_classify_rule_of_actions(
            flow, rule_num, imsi, ul_qos, dl_qos, rule_id)
        if flow.action == flow.DENY:
            return flows.get_add_drop_flow_msg(self._datapath,
                                               self.tbl_num,
                                               flow_match,
                                               flow_match_actions,
                                               hard_timeout=hard_timeout,
                                               priority=priority,
                                               cookie=rule_num)

        if self._enforcement_stats_scratch:
            return flows.get_add_resubmit_current_service_flow_msg(
                self._datapath,
                self.tbl_num,
                flow_match,
                flow_match_actions,
                hard_timeout=hard_timeout,
                priority=priority,
                cookie=rule_num,
                resubmit_table=self._enforcement_stats_scratch)

        # If enforcement stats has not claimed a scratch table, resubmit
        # directly to the next app.
        return flows.get_add_resubmit_next_service_flow_msg(
            self._datapath,
            self.tbl_num,
            flow_match,
            flow_match_actions,
            hard_timeout=hard_timeout,
            priority=priority,
            cookie=rule_num,
            resubmit_table=self.next_main_table)

    def _install_redirect_flow(self, imsi, ip_addr, rule):
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = self.get_of_priority(rule.priority)
        redirect_request = RedirectionManager.RedirectRequest(
            imsi=imsi,
            ip_addr=ip_addr,
            rule=rule,
            rule_num=rule_num,
            priority=priority)
        try:
            self._redirect_manager.handle_redirection(self._datapath,
                                                      self.loop,
                                                      redirect_request)
            return RuleModResult.SUCCESS
        except RedirectException as err:
            self.logger.error(
                'Redirect Exception for imsi %s, rule.id - %s : %s', imsi,
                rule.id, err)
            return RuleModResult.FAILURE

    def _get_classify_rule_of_actions(self, flow, rule_num, imsi, ul_qos,
                                      dl_qos, rule_id):
        parser = self._datapath.ofproto_parser
        # encode the rule id in hex
        of_note = parser.NXActionNote(list(rule_id.encode()))
        actions = [of_note]
        if flow.action == flow.DENY:
            return actions

        # QoS rate-limiting: map the flow to a queue based on its direction
        qid = 0
        if ul_qos != 0 and flow.match.direction == flow.match.UPLINK:
            qid = self._qos_map.map_flow_to_queue(imsi, rule_num, ul_qos, True)
        elif dl_qos != 0 and flow.match.direction == flow.match.DOWNLINK:
            qid = self._qos_map.map_flow_to_queue(imsi, rule_num, dl_qos,
                                                  False)

        if qid != 0:
            actions.append(parser.OFPActionSetField(pkt_mark=qid))

        version = self._session_rule_version_mapper.get_version(imsi, rule_id)
        actions.extend([
            parser.NXActionRegLoad2(dst='reg2', value=rule_num),
            parser.NXActionRegLoad2(dst=RULE_VERSION_REG, value=version)
        ])

        return actions

    def _get_default_flow_msg_for_subscriber(self, imsi):
        match = MagmaMatch(imsi=encode_imsi(imsi))
        actions = []
        return flows.get_add_drop_flow_msg(self._datapath,
                                           self.tbl_num,
                                           match,
                                           actions,
                                           priority=self.ENFORCE_DROP_PRIORITY)

    def _install_default_flow_for_subscriber(self, imsi):
        """
        Add a low priority flow to drop a subscriber's traffic in the event
        that all rules have been deactivated.

        Args:
            imsi (string): subscriber id
        """
        match = MagmaMatch(imsi=encode_imsi(imsi))
        actions = []  # empty actions == drop
        flows.add_drop_flow(self._datapath,
                            self.tbl_num,
                            match,
                            actions,
                            priority=self.ENFORCE_DROP_PRIORITY)

    def _deactivate_flow_for_rule(self, imsi, rule_id):
        """
        Deactivate a specific rule using the flow cookie for a subscriber
        """
        try:
            num = self._rule_mapper.get_rule_num(rule_id)
        except KeyError:
            self.logger.error('Could not find rule id %s', rule_id)
            return
        cookie, mask = (num, flows.OVS_COOKIE_MATCH_ALL)
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath,
                          self.tbl_num,
                          match,
                          cookie=cookie,
                          cookie_mask=mask)
        self._redirect_manager.deactivate_flow_for_rule(
            self._datapath, imsi, num)
        self._qos_map.del_queue_for_flow(imsi, num)

    def _deactivate_flows_for_subscriber(self, imsi):
        """ Deactivate all rules for a subscriber, ending any enforcement """
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath, self.tbl_num, match)
        self._redirect_manager.deactivate_flows_for_subscriber(
            self._datapath, imsi)
        self._qos_map.del_subscriber_queues(imsi)

    def deactivate_rules(self, imsi, rule_ids):
        """
        Deactivate flows for a subscriber. If only imsi is present, delete all
        rule flows for a subscriber (i.e. end its session). If rule_ids are
        present, delete the rule flows for that subscriber.

        Args:
            imsi (string): subscriber id
            rule_ids (list of strings): policy rule ids
        """
        if not self.init_finished:
            self.logger.error('Pipelined is not initialized')
            return RuleModResult.FAILURE

        if self._datapath is None:
            self.logger.error('Datapath not initialized')
            return

        if not imsi:
            self.logger.error('No subscriber specified')
            return

        if not rule_ids:
            self._deactivate_flows_for_subscriber(imsi)
        else:
            for rule_id in rule_ids:
                self._deactivate_flow_for_rule(imsi, rule_id)
Example #18
class EnforcementStatsController(PolicyMixin, RestartMixin, MagmaController):
    """
    This openflow controller installs flows for aggregating policy usage
    statistics, which are sent to sessiond for tracking.

    It periodically polls OVS for flow stats on its table and reports the
    usage records to session manager via RPC. Flows are deleted when their
    version (reg4 match) is different from the current version of the rule for
    the subscriber maintained by the rule version mapper.
    """

    APP_NAME = 'enforcement_stats'
    APP_TYPE = ControllerType.LOGICAL
    SESSIOND_RPC_TIMEOUT = 10
    # 0xffffffffffffffff is reserved in openflow
    DEFAULT_FLOW_COOKIE = 0xfffffffffffffffe
    INIT_SLEEP_TIME = 3
    MAX_DELAY_INTERVALS = 20

    ng_config = namedtuple(
        'ng_config',
        ['ng_service_enabled', 'sessiond_setinterface'],
    )
    _CONTEXTS = {
        'dpset': dpset.DPSet,
    }

    def __init__(self, *args, **kwargs):
        super(EnforcementStatsController, self).__init__(*args, **kwargs)
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_table = \
            self._service_manager.get_next_table_num(self.APP_NAME)
        self.dpset = kwargs['dpset']
        self.loop = kwargs['loop']
        # Spawn a thread to poll for flow stats
        poll_interval = kwargs['config']['enforcement']['poll_interval']
        # Create an RPC channel to sessiond
        self.sessiond = kwargs['rpc_stubs']['sessiond']
        self._msg_hub = MessageHub(self.logger)
        self.unhandled_stats_msgs = []  # Store multi-part responses from ovs
        self.total_usage = {}  # Store total usage
        self._clean_restart = kwargs['config']['clean_restart']
        self._redis_enabled = kwargs['config'].get('redis_enabled', False)
        self._unmatched_bytes = 0  # Store bytes matched by default rule if any
        self._default_drop_flow_name = \
            kwargs['config']['enforcement']['default_drop_flow_name']
        self.flow_stats_thread = hub.spawn(self._monitor, poll_interval)
        self._print_grpc_payload = os.environ.get('MAGMA_PRINT_GRPC_PAYLOAD')
        self._last_poll_time = datetime.now()
        self._last_report_timestamp = datetime.now()
        self._bridge_name = kwargs['config']['bridge_name']
        self._periodic_stats_reporting = kwargs['config']['enforcement'].get(
            'periodic_stats_reporting', True)
        if self._print_grpc_payload is None:
            self._print_grpc_payload = \
                kwargs['config'].get('magma_print_grpc_payload', False)
        self._restart_info_store = kwargs['restart_info_store']
        self._ovs_restarted = self._was_ovs_restarted()
        self.ng_config = self._get_ng_config(kwargs['config'],
                                             kwargs['rpc_stubs'])
        self._prefix_mapper = kwargs['interface_to_prefix_mapper']

    def _get_ng_config(self, config_dict, rpc_stub_dict):
        ng_service_enabled = config_dict.get('enable5g_features', None)

        sessiond_setinterface = rpc_stub_dict.get('sessiond_setinterface')

        return self.ng_config(ng_service_enabled=ng_service_enabled,
                              sessiond_setinterface=sessiond_setinterface)

    def delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self.tbl_num)

    def cleanup_state(self):
        """
        When we remove/reinsert flows we need to remove old usage maps as new
        flows will have reset stat counters
        """
        self.unhandled_stats_msgs = []
        self.total_usage = {}
        self._unmatched_bytes = 0

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath

    def _get_default_flow_msgs(self, datapath) -> DefaultMsgsMap:
        """
        Gets the default flow msg that drops traffic

        Args:
            datapath: ryu datapath struct
        Returns:
            The list of default msgs to add
        """
        match = MagmaMatch()
        msg = flows.get_add_drop_flow_msg(
            datapath,
            self.tbl_num,
            match,
            priority=flows.MINIMUM_PRIORITY,
            cookie=self.DEFAULT_FLOW_COOKIE,
        )

        return {self.tbl_num: [msg]}

    def cleanup_on_disconnect(self, datapath):
        """
        Cleanup flows on datapath disconnect event.

        Args:
            datapath: ryu datapath struct
        """
        if self._clean_restart:
            self.delete_all_flows(datapath)

    def _install_flow_for_rule(
        self,
        imsi,
        msisdn: bytes,
        uplink_tunnel: int,
        ip_addr,
        apn_ambr,
        rule,
        version,
        shard_id,
        local_f_teid_ng: int,
    ):
        """
        Install a flow to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            msisdn (bytes): subscriber MSISDN
            uplink_tunnel (int): tunnel ID of the subscriber.
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """
        def fail(err):
            self.logger.error(
                "Failed to install rule %s for subscriber %s: %s",
                rule.id,
                imsi,
                err,
            )
            return RuleModResult.FAILURE

        msgs = self._get_rule_match_flow_msgs(
            imsi,
            msisdn,
            uplink_tunnel,
            ip_addr,
            apn_ambr,
            rule,
            version,
            shard_id,
            local_f_teid_ng,
        )
        try:
            chan = self._msg_hub.send(msgs, self._datapath)
        except MagmaDPDisconnectedError:
            self.logger.error(
                "Datapath disconnected, failed to install rule %s"
                "for imsi %s",
                rule,
                imsi,
            )
            return RuleModResult.FAILURE
        for _ in range(len(msgs)):
            try:
                result = chan.get()
            except MsgChannel.Timeout:
                return fail("No response from OVS")
            if not result.ok():
                return fail(result.exception())

        return RuleModResult.SUCCESS

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)

    # pylint: disable=protected-access,unused-argument
    def _get_rule_match_flow_msgs(self, imsi, _, __, ip_addr, ambr, rule,
                                  version, shard_id, local_f_teid_ng):
        """
        Returns flow add messages used for rule matching.
        """
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        self.logger.debug(
            'Installing flow for %s with rule num %s (version %s)',
            imsi,
            rule_num,
            version,
        )
        inbound_rule_match = _generate_rule_match(
            imsi,
            ip_addr,
            rule_num,
            version,
            Direction.IN,
            local_f_teid_ng,
        )
        outbound_rule_match = _generate_rule_match(
            imsi,
            ip_addr,
            rule_num,
            version,
            Direction.OUT,
            local_f_teid_ng,
        )

        flow_actions = [flow.action for flow in rule.flow_list]
        msgs = []
        if FlowDescription.PERMIT in flow_actions:
            inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = PROCESS_STATS
            outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = PROCESS_STATS
            msgs.extend([
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    inbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=shard_id,
                ),
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    outbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=shard_id,
                ),
            ])
        else:
            inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = DROP_FLOW_STATS
            outbound_rule_match._match_kwargs[
                SCRATCH_REGS[1]] = DROP_FLOW_STATS
            msgs.extend([
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    inbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=shard_id,
                ),
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    outbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=shard_id,
                ),
            ])

        if rule.app_name:
            inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = IGNORE_STATS
            outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = IGNORE_STATS
            msgs.extend([
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    inbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=shard_id,
                ),
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    outbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=shard_id,
                ),
            ])
        return msgs

    def _get_default_flow_msgs_for_subscriber(self, imsi, ip_addr,
                                              local_f_teid_ng):
        match_in = _generate_rule_match(
            imsi,
            ip_addr,
            0,
            0,
            Direction.IN,
            local_f_teid_ng,
        )
        match_out = _generate_rule_match(
            imsi,
            ip_addr,
            0,
            0,
            Direction.OUT,
            local_f_teid_ng,
        )

        return [
            flows.get_add_drop_flow_msg(
                self._datapath,
                self.tbl_num,
                match_in,
                priority=Utils.DROP_PRIORITY,
            ),
            flows.get_add_drop_flow_msg(
                self._datapath,
                self.tbl_num,
                match_out,
                priority=Utils.DROP_PRIORITY,
            ),
        ]

    def _install_redirect_flow(self, imsi, ip_addr, rule, version):
        pass

    def _install_default_flow_for_subscriber(self, imsi, ip_addr,
                                             local_f_teid_ng):
        """
        Add a low priority flow to drop a subscriber's traffic.

        Args:
            imsi (string): subscriber id
            ip_addr (string): subscriber ip_addr
        """
        msgs = self._get_default_flow_msgs_for_subscriber(
            imsi, ip_addr, local_f_teid_ng)
        if msgs:
            chan = self._msg_hub.send(msgs, self._datapath)
            self._wait_for_responses(chan, len(msgs))

    def get_policy_usage(self, fut):
        record_table = RuleRecordTable(
            records=self.total_usage.values(),
            epoch=global_epoch,
        )
        fut.set_result(record_table)

    def _monitor(self, poll_interval):
        """
        Main thread that sends a stats request at the configured interval in
        seconds.
        """
        while not self.init_finished:
            # Still send an empty report -> for pipelined setup
            self._report_usage({})
            hub.sleep(self.INIT_SLEEP_TIME)
        if not self._periodic_stats_reporting:
            return
        while True:
            hub.sleep(poll_interval)
            now = datetime.now()
            delta = get_adjusted_delta(self._last_report_timestamp, now)
            if delta > poll_interval * self.MAX_DELAY_INTERVALS:
                self.logger.info(
                    'Previous update missing, current time %s, last '
                    'report timestamp %s, last poll timestamp %s',
                    now.strftime("%H:%M:%S"),
                    self._last_report_timestamp.strftime("%H:%M:%S"),
                    self._last_poll_time.strftime("%H:%M:%S"),
                )
                self._last_report_timestamp = now
                hub.sleep(poll_interval / 2)
                continue
            if delta < poll_interval:
                continue
            self._last_poll_time = now
            self.logger.debug(
                'Started polling: %s',
                now.strftime("%H:%M:%S"),
            )
            self._poll_stats(self._datapath)

    def _poll_stats(self, datapath, cookie: int = 0, cookie_mask: int = 0):
        """
        Send a FlowStatsRequest message to the datapath
        Raises:
        MagmaOFError: if we can't poll datapath stats
        """
        try:
            flows.send_stats_request(
                datapath,
                self.tbl_num,
                cookie,
                cookie_mask,
            )
        except MagmaOFError as e:
            self.logger.warning("Couldn't poll datapath stats: %s", e)
        except Exception as e:  # pylint: disable=broad-except
            self.logger.warning("Couldn't poll datapath stats: %s", e)

    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def _flow_stats_reply_handler(self, ev):
        """
        Schedule the flow stats handling in the main event loop, so as to
        unblock the ryu event loop
        """
        if not self.init_finished:
            self.logger.debug('Setup not finished, skipping stats reply')
            return

        if self._datapath_id != ev.msg.datapath.id:
            self.logger.debug('Ignoring stats from different bridge')
            return

        self.unhandled_stats_msgs.append(ev.msg.body)
        if ev.msg.flags == OFPMPF_REPLY_MORE:
            # Wait for the remaining multi-part responses for this single
            # stats request.
            return
        self.loop.call_soon_threadsafe(
            self._handle_flow_stats,
            self.unhandled_stats_msgs,
        )
        self.unhandled_stats_msgs = []

    def _handle_flow_stats(self, stats_msgs):
        """
        Aggregate flow stats by rule, and report to session manager
        """
        stat_count = sum(len(flow_stats) for flow_stats in stats_msgs)
        if stat_count == 0:
            return

        self.logger.debug("Processing %s stats responses", len(stats_msgs))
        # Aggregate flows into rule records
        aggregated_msgs = []
        for flow_stats in stats_msgs:
            aggregated_msgs += flow_stats

        self.logger.debug("Processing stats of %d flows", len(aggregated_msgs))
        try:
            current_usage = self._get_usage_from_flow_stat(aggregated_msgs)
        except ConnectionError:
            self.logger.error('Failed processing stats, redis unavailable')
            self.unhandled_stats_msgs.append(stats_msgs)
            return
        # Send report even if usage is empty. Sessiond uses empty reports to
        # recognize when flows have ended
        self._report_usage(current_usage)

        # This is done primarily for CWF integration tests, TODO rm
        self.total_usage = current_usage
        # Report only if there is no change in version
        if self.ng_config.ng_service_enabled == True:
            self._prepare_session_config_report(stats_msgs)

    def deactivate_default_flow(self, imsi, ip_addr, local_f_teid_ng=0):
        if self._datapath is None:
            self.logger.error('Datapath not initialized')
            return

        match_in = _generate_rule_match(
            imsi,
            ip_addr,
            0,
            0,
            Direction.IN,
            local_f_teid_ng,
        )
        match_out = _generate_rule_match(
            imsi,
            ip_addr,
            0,
            0,
            Direction.OUT,
            local_f_teid_ng,
        )

        flows.delete_flow(self._datapath, self.tbl_num, match_in)
        flows.delete_flow(self._datapath, self.tbl_num, match_out)

    def _report_usage(self, usage):
        """
        Report usage to sessiond using rpc
        """
        record_table = RuleRecordTable(
            records=usage.values(),
            epoch=global_epoch,
            update_rule_versions=self._ovs_restarted,
        )
        if self._print_grpc_payload:
            record_msg = 'Sending RPC payload: {0}{{\n{1}}}'.format(
                record_table.DESCRIPTOR.name,
                str(record_table),
            )
            self.logger.info(record_msg)
        future = self.sessiond.ReportRuleStats.future(
            record_table,
            self.SESSIOND_RPC_TIMEOUT,
        )
        future.add_done_callback(
            lambda future: self.loop.call_soon_threadsafe(
                self._report_usage_done,
                future,
                usage.values(),
            ), )

    def _report_usage_done(self, future, records):
        """
        Callback after sessiond RPC completion
        """
        self._last_report_timestamp = datetime.now()
        self.logger.debug(
            'Finished reporting: %s',
            self._last_report_timestamp.strftime("%H:%M:%S"),
        )
        err = future.exception()
        if err:
            self.logger.error('Couldn\'t send flow records to sessiond: %s', err)
            return
        try:
            self._delete_old_flows(records)
        except ConnectionError:
            self.logger.error('Failed to remove old flows, redis unavailable')
            return

    def _get_usage_from_flow_stat(self, flow_stats):
        """
        Update the rule record map with the flow stat and return the
        updated map.
        """
        current_usage = defaultdict(RuleRecord)
        for flow_stat in flow_stats:
            if flow_stat.table_id != self.tbl_num:
                # this update is not intended for policy
                continue
            rule_id = self._get_rule_id(flow_stat)
            # Rule not found, must be default flow
            if rule_id == "":
                default_flow_matched = \
                    flow_stat.cookie == self.DEFAULT_FLOW_COOKIE
                if default_flow_matched:
                    if flow_stat.byte_count != 0 and \
                       self._unmatched_bytes != flow_stat.byte_count:
                        self.logger.debug(
                            '%s bytes total not reported.',
                            flow_stat.byte_count,
                        )
                        self._unmatched_bytes = flow_stat.byte_count
                    continue
                else:
                    # This must be the default drop flow
                    rule_id = self._default_drop_flow_name
            # If this is a pass through app name flow ignore stats
            if _get_policy_type(flow_stat.match) == IGNORE_STATS:
                continue
            sid = _get_sid(flow_stat)
            if not sid:
                continue
            ipv4_addr = _get_ipv4(flow_stat)
            ipv6_addr = self._get_ipv6(flow_stat)

            local_f_teid_ng = _get_ng_local_f_id(flow_stat)

            # use a compound key to separate flows for the same rule but for
            # different subscribers
            key = sid + "|" + rule_id

            if ipv4_addr:
                key += "|" + ipv4_addr
            elif ipv6_addr:
                key += "|" + ipv6_addr

            rule_version = _get_version(flow_stat)
            if not rule_version:
                rule_version = 0

            key += "|" + str(rule_version)

            current_usage[key].rule_id = rule_id
            current_usage[key].sid = sid

            current_usage[key].rule_version = rule_version

            if ipv4_addr:
                current_usage[key].ue_ipv4 = ipv4_addr
            elif ipv6_addr:
                current_usage[key].ue_ipv6 = ipv6_addr
            if local_f_teid_ng:
                current_usage[key].teid = local_f_teid_ng

            bytes_rx = 0
            bytes_tx = 0
            if flow_stat.match[DIRECTION_REG] == Direction.IN:
                # HACK decrement byte count for downlink packets by the length
                # of an ethernet frame. Only IP and below should be counted towards
                # a user's data. Uplink does this already because the GTP port is
                # an L3 port.
                bytes_rx = _get_downlink_byte_count(flow_stat)
            else:
                bytes_tx = flow_stat.byte_count

            if _get_policy_type(flow_stat.match) == PROCESS_STATS:
                current_usage[key].bytes_rx += bytes_rx
                current_usage[key].bytes_tx += bytes_tx
            else:
                current_usage[key].dropped_rx += bytes_rx
                current_usage[key].dropped_tx += bytes_tx
        return current_usage
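
    # Illustrative note (assumed example values, not from the original source):
    # the compound key built above keeps usage separate per subscriber, rule,
    # session IP and rule version. With sid='IMSI001010000000013',
    # rule_id='allow_all', ue_ipv4='192.168.128.12' and rule_version=2, the key
    # becomes 'IMSI001010000000013|allow_all|192.168.128.12|2', so two sessions
    # of one subscriber, or two versions of one rule, never share a RuleRecord.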

    def _delete_old_flows(self, records):
        """
        Check if the version of any record is older than the current version.
        If so, delete the flow.
        """
        for record in records:
            ip_addr = None
            if record.ue_ipv4:
                ip_addr = convert_ipv4_str_to_ip_proto(record.ue_ipv4)
            elif record.ue_ipv6:
                ip_addr = convert_ipv6_str_to_ip_proto(record.ue_ipv6)

            current_ver = self._session_rule_version_mapper.get_version(
                record.sid,
                ip_addr,
                record.rule_id,
            )
            local_f_teid_ng = 0
            if record.teid:
                local_f_teid_ng = record.teid

            if current_ver == record.rule_version:
                continue

            try:
                self._delete_flow(
                    record.sid,
                    ip_addr,
                    record.rule_id,
                    record.rule_version,
                    local_f_teid_ng,
                )
            except MagmaOFError as e:
                self.logger.error(
                    'Failed to delete rule %s for subscriber %s ('
                    'version: %s): %s',
                    record.rule_id,
                    record.sid,
                    record.rule_version,
                    e,
                )
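
    # Illustrative note (not from the original source): a record is considered
    # stale when the version stamped into its flows no longer matches the
    # mapper. Assuming the mapper now returns version 3 for
    # (record.sid, ip_addr, record.rule_id) while record.rule_version is 2,
    # the equality check above fails and both directions of the old flow pair
    # are deleted via _delete_flow().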

    def _delete_flow(self,
                     imsi,
                     ip_addr,
                     rule_id,
                     rule_version,
                     local_f_teid_ng=0):
        rule_num = self._rule_mapper.get_or_create_rule_num(rule_id)
        match_in = _generate_rule_match(
            imsi,
            ip_addr,
            rule_num,
            rule_version,
            Direction.IN,
            local_f_teid_ng,
        )
        match_out = _generate_rule_match(
            imsi,
            ip_addr,
            rule_num,
            rule_version,
            Direction.OUT,
            local_f_teid_ng,
        )
        flows.delete_flow(
            self._datapath,
            self.tbl_num,
            match_in,
        )
        flows.delete_flow(
            self._datapath,
            self.tbl_num,
            match_out,
        )

    def _was_ovs_restarted(self):
        try:
            ovs_pid = int(check_output(["pidof", "ovs-vswitchd"]).decode())
        except Exception as e:  # pylint: disable=broad-except
            self.logger.warning("Couldn't get ovs pid: %s", e)
            ovs_pid = 0
        stored_ovs_pid = self._restart_info_store["ovs-vswitchd"]
        self._restart_info_store["ovs-vswitchd"] = ovs_pid
        self.logger.info(
            "Stored ovs_pid %d, new ovs pid %d",
            stored_ovs_pid,
            ovs_pid,
        )
        return ovs_pid != stored_ovs_pid
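
    # Illustrative note (not from the original source): a different
    # ovs-vswitchd PID between polls is taken as evidence that OVS restarted,
    # i.e. flow byte counters were reset and installed flows may be gone. The
    # result feeds update_rule_versions in the RuleRecordTable sent to
    # sessiond above, presumably so rule versions can be refreshed.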

    def _get_rule_id(self, flow):
        """
        Return the rule id from the rule cookie
        """
        # the default rule will have a cookie of 0
        rule_num = flow.match.get(RULE_NUM_REG, 0)
        if rule_num == 0 or rule_num == self.DEFAULT_FLOW_COOKIE:
            return ""
        try:
            return self._rule_mapper.get_rule_id(rule_num)
        except KeyError as e:
            self.logger.error(
                'Could not find rule id for num %d: %s',
                rule_num,
                e,
            )
            return ""

    def get_stats(self, cookie: int = 0, cookie_mask: int = 0):
        """
        Use the Ryu API to send a flow stats request filtered by cookie and
        cookie mask, convert the response to a RuleRecordTable, and schedule
        deletion of old flows.
        """
        if not self._datapath:
            self.logger.error(
                "Datapath not initialized, cannot retrieve stats")
            return RuleRecordTable()
        parser = self._datapath.ofproto_parser
        message = parser.OFPFlowStatsRequest(
            datapath=self._datapath,
            table_id=self.tbl_num,
            cookie=cookie,
            cookie_mask=cookie_mask,
        )
        try:
            response = ofctl_api.send_msg(
                self,
                message,
                reply_cls=parser.OFPFlowStatsReply,
                reply_multi=True,
            )
            if not response:
                self.logger.error(
                    "No rule records match the specified cookie and cookie mask"
                )
                return RuleRecordTable()

            aggregated_msgs = []
            for r in response:
                aggregated_msgs += r.body

            usage = self._get_usage_from_flow_stat(aggregated_msgs)
            self.loop.call_soon_threadsafe(self._delete_old_flows,
                                           usage.values())
            record_table = RuleRecordTable(
                records=usage.values(),
                epoch=global_epoch,
            )
            return record_table
        except (InvalidDatapath, OFError, UnexpectedMultiReply):
            self.logger.error(
                "Could not obtain rule records due to either InvalidDatapath, OFError or UnexpectedMultiReply"
            )
            return RuleRecordTable()
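
    # Usage sketch (hypothetical caller, not from the original source):
    #   stats_all = controller.get_stats()  # cookie=0, cookie_mask=0 -> all flows
    #   stats_one = controller.get_stats(cookie=0x10,
    #                                    cookie_mask=0xFFFFFFFFFFFFFFFF)
    # With a full cookie mask only flows whose cookie (the rule num) equals
    # 0x10 are returned; with a zero mask the cookie is ignored entirely.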

    def _prepare_session_config_report(self, stats_msgs):
        session_config_dict = {}

        for flow_stats in stats_msgs:
            for stat in flow_stats:
                if stat.table_id != self.tbl_num:
                    continue

                local_f_teid_ng = _get_ng_local_f_id(stat)
                if not local_f_teid_ng or local_f_teid_ng == REG_ZERO_VAL:
                    continue
                # Already present
                if local_f_teid_ng in session_config_dict:
                    if local_f_teid_ng != session_config_dict[
                            local_f_teid_ng].local_f_teid:
                        self.logger.error(
                            "Mismatched local TEID value, needs investigation")

                    continue

                sid = _get_sid(stat)
                if not sid:
                    continue

                rule_version = _get_version(stat)
                if rule_version == 0:
                    continue

                session_config_dict[local_f_teid_ng] = UPFSessionState(
                    subscriber_id=sid,
                    session_version=rule_version,
                    local_f_teid=local_f_teid_ng,
                )

        SessionStateManager.report_session_config_state(
            session_config_dict,
            self.ng_config.sessiond_setinterface,
        )

    def _get_ipv6(self, flow):
        if DIRECTION_REG not in flow.match:
            return None
        if flow.match[DIRECTION_REG] == Direction.OUT:
            ip_register = 'ipv6_src'
        else:
            ip_register = 'ipv6_dst'
        if ip_register not in flow.match:
            return None
        ipv6 = flow.match[ip_register]
        # a masked match value is returned as a (value, mask) tuple
        if isinstance(ipv6, tuple):
            ipv6_addr = ipv6[0]
        else:
            ipv6_addr = ipv6

        prefix = get_ipv6_prefix(ipv6_addr)
        interface = self._prefix_mapper.get_interface(prefix)
        if interface is None:
            return ipv6_addr
        # Rebuild UE IPv6 address from prefix map
        subnet = ipaddress.ip_address(prefix)
        host_id = ipaddress.ip_address(interface)
        ue_ip = ipaddress.ip_address(int(subnet) | int(host_id))

        self.logger.debug("recalc ue_ip: %s sub: %s host: %s", ue_ip, prefix,
                          host_id)
        return str(ue_ip)
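
    # Illustrative note (assumed example values, not from the original source):
    # the UE address is rebuilt by OR-ing the prefix with the stored interface
    # id. With prefix 2001:db8:0:1:: and mapped interface ::200:5eff:fe00:1,
    # the recalculated ue_ip is 2001:db8:0:1:200:5eff:fe00:1.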
Example #19
0
class EnforcementController(PolicyMixin, MagmaController):
    """
    EnforcementController

    The enforcement controller installs flows for policy enforcement and
    classification. Each flow installed matches on a rule and an IMSI and then
    classifies the packet with the rule. The flow also redirects and drops
    the packet as specified in the policy.

    NOTE: Enforcement currently relies on the fact that policies do not
    overlap. In this implementation, there is the idea of a 'default rule'
    which is the catch-all. This rule is treated specially and tagged with a
    specific priority.
    """

    APP_NAME = "enforcement"
    APP_TYPE = ControllerType.LOGICAL

    def __init__(self, *args, **kwargs):
        super(EnforcementController, self).__init__(*args, **kwargs)
        self._config = kwargs['config']
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_main_table = self._service_manager.get_next_table_num(
            self.APP_NAME)
        self._enforcement_stats_tbl = self._service_manager.get_table_num(
            EnforcementStatsController.APP_NAME)
        self.loop = kwargs['loop']

        self._msg_hub = MessageHub(self.logger)
        self._redirect_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
        self._bridge_ip_address = kwargs['config']['bridge_ip_address']
        self._redirect_manager = None
        self._qos_mgr = None
        self._clean_restart = kwargs['config']['clean_restart']
        self._redirect_manager = RedirectionManager(
            self._bridge_ip_address,
            self.logger,
            self.tbl_num,
            self._enforcement_stats_tbl,
            self._redirect_scratch,
            self._session_rule_version_mapper)

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath
        self._qos_mgr = QosManager(datapath, self.loop, self._config)
        self._qos_mgr.setup()

    def cleanup_on_disconnect(self, datapath):
        """
        Cleanup flows on datapath disconnect event.

        Args:
            datapath: ryu datapath struct
        """
        if self._clean_restart:
            self.delete_all_flows(datapath)

    def delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self.tbl_num)
        flows.delete_all_flows_from_table(datapath, self._redirect_scratch)

    def cleanup_state(self):
        pass

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)

    @set_ev_cls(ofp_event.EventOFPMeterConfigStatsReply, MAIN_DISPATCHER)
    def meter_config_stats_reply_handler(self, ev):
        if not self._qos_mgr:
            return

        qos_impl = self._qos_mgr.impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_config_stats(ev.msg.body)

    @set_ev_cls(ofp_event.EventOFPMeterFeaturesStatsReply, MAIN_DISPATCHER)
    def meter_features_stats_reply_handler(self, ev):
        if not self._qos_mgr:
            return

        qos_impl = self._qos_mgr.impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_feature_stats(ev.msg.body)

    def _install_default_flows_if_not_installed(self, datapath,
            existing_flows: List[OFPFlowStats]) -> List[OFPFlowStats]:
        """
        For each direction set the default flows to just forward to next app.
        The enforcement flows for each subscriber would be added when the
        IP session is created, by reaching out to the controller/PCRF.
        If default flows are already installed, do nothing.

        Args:
            datapath: ryu datapath struct
        Returns:
            The list of flows that remain after inserting default flows
        """
        inbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                   direction=Direction.IN)
        outbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                    direction=Direction.OUT)

        inbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath, self.tbl_num, inbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        outbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath, self.tbl_num, outbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        msgs, remaining_flows = self._msg_hub \
            .filter_msgs_if_not_in_flow_list([inbound_msg, outbound_msg],
                                             existing_flows)
        if msgs:
            chan = self._msg_hub.send(msgs, datapath)
            self._wait_for_responses(chan, len(msgs))

        return remaining_flows

    def _get_rule_match_flow_msgs(self, imsi, ip_addr, apn_ambr, rule):
        """
        Get flow msgs to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = Utils.get_of_priority(rule.priority)

        flow_adds = []
        for flow in rule.flow_list:
            try:
                version = self._session_rule_version_mapper.get_version(imsi, ip_addr,
                                                                        rule.id)
                flow_adds.extend(self._get_classify_rule_flow_msgs(
                    imsi, ip_addr, apn_ambr, flow, rule_num, priority,
                    rule.qos, rule.hard_timeout, rule.id, rule.app_name,
                    rule.app_service_type, self._enforcement_stats_tbl,
                    version, self._qos_mgr))

            except FlowMatchError as err:  # invalid match
                self.logger.error(
                    "Failed to get flow msg '%s' for subscriber %s: %s",
                    rule.id, imsi, err)
                raise err
        return flow_adds

    def _install_flow_for_rule(self, imsi, ip_addr, apn_ambr, rule):
        """
        Install a flow to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """
        if rule.redirect.support == rule.redirect.ENABLED:
            return self._install_redirect_flow(imsi, ip_addr, rule)

        if not rule.flow_list:
            self.logger.error('The flow list for imsi %s, rule.id - %s '
                              'is empty, this shouldn\'t happen', imsi, rule.id)
            return RuleModResult.FAILURE

        flow_adds = []
        try:
            flow_adds = self._get_rule_match_flow_msgs(imsi, ip_addr, apn_ambr, rule)
        except FlowMatchError:
            return RuleModResult.FAILURE

        chan = self._msg_hub.send(flow_adds, self._datapath)
        return self._wait_for_rule_responses(imsi, rule, chan)

    def _install_redirect_flow(self, imsi, ip_addr, rule):
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        rule_version = self._session_rule_version_mapper.get_version(imsi,
                                                                     ip_addr,
                                                                     rule.id)
        priority = Utils.get_of_priority(rule.priority)
        redirect_request = RedirectionManager.RedirectRequest(
            imsi=imsi,
            ip_addr=ip_addr.address.decode('utf-8'),
            rule=rule,
            rule_num=rule_num,
            rule_version=rule_version,
            priority=priority)
        try:
            self._redirect_manager.setup_lte_redirect(
                self._datapath, self.loop, redirect_request)
            return RuleModResult.SUCCESS
        except RedirectException as err:
            self.logger.error(
                'Redirect Exception for imsi %s, rule.id - %s : %s',
                imsi, rule.id, err
            )
            return RuleModResult.FAILURE

    def _get_default_flow_msgs_for_subscriber(self, imsi, ip_addr):
        ip_match_in = get_ue_ip_match_args(ip_addr, Direction.IN)
        match_in = MagmaMatch(eth_type=get_eth_type(ip_addr),
                              imsi=encode_imsi(imsi), **ip_match_in)
        ip_match_out = get_ue_ip_match_args(ip_addr, Direction.OUT)
        match_out = MagmaMatch(eth_type=get_eth_type(ip_addr),
                               imsi=encode_imsi(imsi), **ip_match_out)

        actions = []
        return [
            flows.get_add_drop_flow_msg(
                self._datapath, self.tbl_num,  match_in, actions,
                priority=Utils.DROP_PRIORITY),
            flows.get_add_drop_flow_msg(
                self._datapath, self.tbl_num,  match_out, actions,
                priority=Utils.DROP_PRIORITY)]

    def _install_default_flow_for_subscriber(self, imsi, ip_addr):
        """
        Add a low priority flow to drop a subscriber's traffic in the event
        that all rules have been deactivated.

        Args:
            imsi (string): subscriber id
            ip_addr: subscriber session IP address
        """
        msgs = self._get_default_flow_msgs_for_subscriber(imsi, ip_addr)
        if msgs:
            chan = self._msg_hub.send(msgs, self._datapath)
            self._wait_for_responses(chan, len(msgs))

    def _deactivate_flow_for_rule(self, imsi, ip_addr, rule_id):
        """
        Deactivate a specific rule using the flow cookie for a subscriber
        """
        try:
            num = self._rule_mapper.get_rule_num(rule_id)
        except KeyError:
            self.logger.error('Could not find rule id %s', rule_id)
            return
        cookie, mask = (num, flows.OVS_COOKIE_MATCH_ALL)

        ip_match_in = get_ue_ip_match_args(ip_addr, Direction.IN)
        match = MagmaMatch(eth_type=get_eth_type(ip_addr),
                           imsi=encode_imsi(imsi), **ip_match_in)
        flows.delete_flow(self._datapath, self.tbl_num, match,
                          cookie=cookie, cookie_mask=mask)
        ip_match_out = get_ue_ip_match_args(ip_addr, Direction.OUT)
        match = MagmaMatch(eth_type=get_eth_type(ip_addr),
                           imsi=encode_imsi(imsi), **ip_match_out)
        flows.delete_flow(self._datapath, self.tbl_num, match,
                          cookie=cookie, cookie_mask=mask)
        self._redirect_manager.deactivate_flow_for_rule(self._datapath, imsi,
                                                        num)
        self._qos_mgr.remove_subscriber_qos(imsi, num)

    def _deactivate_flows_for_subscriber(self, imsi, ip_addr):
        """ Deactivate all rules for specified subscriber session """
        ip_match_in = get_ue_ip_match_args(ip_addr, Direction.IN)
        match = MagmaMatch(eth_type=get_eth_type(ip_addr),
                           imsi=encode_imsi(imsi), **ip_match_in)
        flows.delete_flow(self._datapath, self.tbl_num, match)
        ip_match_out = get_ue_ip_match_args(ip_addr, Direction.OUT)
        match = MagmaMatch(eth_type=get_eth_type(ip_addr),
                           imsi=encode_imsi(imsi), **ip_match_out)
        flows.delete_flow(self._datapath, self.tbl_num, match)

        self._redirect_manager.deactivate_flows_for_subscriber(self._datapath,
                                                               imsi)
        self._qos_mgr.remove_subscriber_qos(imsi)

    def deactivate_rules(self, imsi, ip_addr, rule_ids):
        """
        Deactivate flows for a subscriber.
            Only imsi -> remove all rules for imsi
            imsi+ipv4 -> remove all rules for imsi session
            imsi+rule_ids -> remove specific rules for imsi (for all sessions)
            imsi+ipv4+rule_ids -> remove rules for specific imsi session

        Args:
            imsi (string): subscriber id
            ip_addr (string): subscriber ip address
            rule_ids (list of strings): policy rule ids
        """
        if not self.init_finished:
            self.logger.error('Pipelined is not initialized')
            return RuleModResult.FAILURE

        if self._datapath is None:
            self.logger.error('Datapath not initialized')
            return

        if not imsi:
            self.logger.error('No subscriber specified')
            return

        if not rule_ids:
            self._deactivate_flows_for_subscriber(imsi, ip_addr)
        else:
            for rule_id in rule_ids:
                self._deactivate_flow_for_rule(imsi, ip_addr, rule_id)
Example #20
0
class GYController(PolicyMixin, MagmaController):
    """
    GYController

    The GY controller installs flows for enforcement of GY final actions; this
    includes redirection and QoS (currently not supported)
    """
    APP_NAME = "gy"
    APP_TYPE = ControllerType.LOGICAL

    def __init__(self, *args, **kwargs):
        super(GYController, self).__init__(*args, **kwargs)
        self._config = kwargs['config']
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_main_table = self._service_manager.get_next_table_num(
            self.APP_NAME)
        self.next_service_table = self._service_manager.get_next_table_num(
            EnforcementStatsController.APP_NAME)
        self._enforcement_stats_tbl = self._service_manager.get_table_num(
            EnforcementStatsController.APP_NAME)
        self.loop = kwargs['loop']
        self._msg_hub = MessageHub(self.logger)
        self._internal_ip_allocator = kwargs['internal_ip_allocator']
        self._redirect_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 2)[0]
        self._mac_rewr = \
            self._service_manager.INTERNAL_MAC_IP_REWRITE_TBL_NUM
        self._bridge_ip_address = kwargs['config']['bridge_ip_address']
        self._clean_restart = kwargs['config']['clean_restart']
        self._qos_mgr = None
        self._setup_type = self._config['setup_type']
        self._redirect_manager = \
            RedirectionManager(
                self._bridge_ip_address,
                self.logger,
                self.tbl_num,
                self._service_manager.get_table_num(EGRESS),
                self._redirect_scratch,
                self._session_rule_version_mapper
            )
        if self._setup_type == 'CWF':
            self._redirect_manager.set_cwf_args(
                internal_ip_allocator=kwargs['internal_ip_allocator'],
                arp=kwargs['app_futures']['arpd'],
                mac_rewrite=self._mac_rewr,
                bridge_name=kwargs['config']['bridge_name'],
                egress_table=self._service_manager.get_table_num(EGRESS)
            )

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath
        self._qos_mgr = QosManager(datapath, self.loop, self._config)
        self._qos_mgr.setup()

        self._delete_all_flows(datapath)
        self._install_default_flows(datapath)

    def deactivate_rules(self, imsi, ip_addr, rule_ids):
        """
        Deactivate flows for a subscriber. If only imsi is present, delete all
        rule flows for a subscriber (i.e. end its session). If rule_ids are
        present, delete the rule flows for that subscriber.

        Args:
            imsi (string): subscriber id
            rule_ids (list of strings): policy rule ids
        """
        if not self.init_finished:
            self.logger.error('Pipelined is not initialized')
            return RuleModResult.FAILURE

        if self._datapath is None:
            self.logger.error('Datapath not initialized')
            return

        if not imsi:
            self.logger.error('No subscriber specified')
            return

        if not rule_ids:
            self._deactivate_flows_for_subscriber(imsi, ip_addr)
        else:
            for rule_id in rule_ids:
                self._deactivate_flow_for_rule(imsi, ip_addr, rule_id)

    def cleanup_state(self):
        pass

    def _deactivate_flows_for_subscriber(self, imsi, ip_addr):
        """
        Deactivate all rules for a subscriber, ending any enforcement

        Args:
            imsi (string): subscriber id
            ip_addr (IPAddress): session IP address
        """
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath, self.tbl_num, match)
        self._redirect_manager.deactivate_flows_for_subscriber(self._datapath,
                                                               imsi)
        self._qos_mgr.remove_subscriber_qos(imsi)
        self._remove_he_flows(ip_addr, None)

    def _deactivate_flow_for_rule(self, imsi, ip_addr, rule_id):
        """
        Deactivate a specific rule using the flow cookie for a subscriber

        Args:
            imsi (string): subscriber id
            rule_id (string): policy rule id
        """
        try:
            num = self._rule_mapper.get_rule_num(rule_id)
        except KeyError:
            self.logger.error('Could not find rule id %s', rule_id)
            return
        cookie, mask = (num, flows.OVS_COOKIE_MATCH_ALL)
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath, self.tbl_num, match,
                          cookie=cookie, cookie_mask=mask)
        self._redirect_manager.deactivate_flow_for_rule(self._datapath, imsi,
                                                        num)
        self._qos_mgr.remove_subscriber_qos(imsi, num)
        self._remove_he_flows(ip_addr, rule_id)

    def _install_flow_for_rule(self, imsi, msisdn: bytes, uplink_tunnel: int, ip_addr, apn_ambr, rule):
        """
        Install a flow to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            apn_ambr (integer): maximum bandwidth for non-GBR EPS bearers
            rule (PolicyRule): policy rule proto
        """
        if rule.redirect.support == rule.redirect.ENABLED:
            self._install_redirect_flow(imsi, ip_addr, rule)
            return RuleModResult.SUCCESS

        if not rule.flow_list:
            self.logger.error('The flow list for imsi %s, rule.id - %s '
                              'is empty, this shouldn\'t happen', imsi, rule.id)
            return RuleModResult.FAILURE

        flow_adds = []
        try:
            flow_adds = self._get_rule_match_flow_msgs(imsi, msisdn, uplink_tunnel, ip_addr, apn_ambr, rule)
        except FlowMatchError:
            return RuleModResult.FAILURE

        chan = self._msg_hub.send(flow_adds, self._datapath)
        return self._wait_for_rule_responses(imsi, ip_addr, rule, chan)

    def _get_default_flow_msgs_for_subscriber(self, *_):
        return None

    def _install_default_flow_for_subscriber(self, *_):
        pass

    def _delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self.tbl_num)
        flows.delete_all_flows_from_table(datapath, self._redirect_scratch)
        flows.delete_all_flows_from_table(datapath, self._mac_rewr)

    def _install_default_flows(self, datapath):
        """
        For each direction set the default flows to just forward to next app.
        The enforcement flows for each subscriber would be added when the
        IP session is created, by reaching out to the controller/PCRF.

        Args:
            datapath: ryu datapath struct
        """
        match = MagmaMatch()
        flows.add_resubmit_next_service_flow(
            datapath, self.tbl_num, match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

    def _install_redirect_flow(self, imsi, ip_addr, rule):
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        rule_version = self._session_rule_version_mapper.get_version(imsi,
                                                                     ip_addr,
                                                                     rule.id)
        priority = rule.priority
        # TODO currently if redirection is enabled we ignore other flows
        # from rule.flow_list, confirm that this is the expected behaviour
        redirect_request = RedirectionManager.RedirectRequest(
            imsi=imsi,
            ip_addr=ip_addr.address.decode('utf-8'),
            rule=rule,
            rule_num=rule_num,
            rule_version=rule_version,
            priority=priority)
        try:
            if self._setup_type == 'CWF':
                self._redirect_manager.setup_cwf_redirect(
                    self._datapath, self.loop, redirect_request)
            else:
                self._redirect_manager.setup_lte_redirect(
                    self._datapath, self.loop, redirect_request)
            return RuleModResult.SUCCESS
        except RedirectException as err:
            self.logger.error(
                'Redirect Exception for imsi %s, rule.id - %s : %s',
                imsi, rule.id, err
            )
            return RuleModResult.FAILURE

    def _install_default_flows_if_not_installed(self, datapath,
            existing_flows: List[OFPFlowStats]) -> List[OFPFlowStats]:
        """
        For each direction set the default flows to just forward to next app.
        The enforcement flows for each subscriber would be added when the
        IP session is created, by reaching out to the controller/PCRF.
        If default flows are already installed, do nothing.

        Args:
            datapath: ryu datapath struct
        Returns:
            The list of flows that remain after inserting default flows
        """
        match = MagmaMatch()

        msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath, self.tbl_num, match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        msgs, remaining_flows = self._msg_hub \
            .filter_msgs_if_not_in_flow_list([msg], existing_flows)
        if msgs:
            chan = self._msg_hub.send(msgs, datapath)
            self._wait_for_responses(chan, len(msgs))

        return remaining_flows

    def _get_rule_match_flow_msgs(self, imsi, msisdn: bytes, uplink_tunnel: int, ip_addr, apn_ambr, rule):
        """
        Get flow msgs to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            msisdn (bytes): subscriber MSISDN
            ip_addr (string): subscriber session ipv4 address
            apn_ambr (integer): maximum bandwidth for non-GBR EPS bearers
            rule (PolicyRule): policy rule proto
        """
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = Utils.get_of_priority(rule.priority)

        flow_adds = []
        for flow in rule.flow_list:
            try:
                version = self._session_rule_version_mapper.get_version(imsi, ip_addr,
                                                                        rule.id)
                flow_adds.extend(self._get_classify_rule_flow_msgs(
                    imsi, msisdn, uplink_tunnel, ip_addr, apn_ambr, flow, rule_num, priority,
                    rule.qos, rule.hard_timeout, rule.id, rule.app_name,
                    rule.app_service_type, self.next_service_table,
                    version, self._qos_mgr, self._enforcement_stats_tbl))

            except FlowMatchError as err:  # invalid match
                self.logger.error(
                    "Failed to get flow msg '%s' for subscriber %s: %s",
                    rule.id, imsi, err)
                raise err
        return flow_adds

    @set_ev_cls(ofp_event.EventOFPMeterConfigStatsReply, MAIN_DISPATCHER)
    def meter_config_stats_reply_handler(self, ev):
        if not self._qos_mgr:
            return

        qos_impl = self._qos_mgr.impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_config_stats(ev.msg.body)

    @set_ev_cls(ofp_event.EventOFPMeterFeaturesStatsReply, MAIN_DISPATCHER)
    def meter_features_stats_reply_handler(self, ev):
        if not self._qos_mgr:
            return

        qos_impl = self._qos_mgr.impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_feature_stats(ev.msg.body)

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)
Example #21
0
class EnforcementController(PolicyMixin, MagmaController):
    """
    EnforcementController

    The enforcement controller installs flows for policy enforcement and
    classification. Each flow installed matches on a rule and an IMSI and then
    classifies the packet with the rule. The flow also redirects and drops
    the packet as specified in the policy.

    NOTE: Enforcement currently relies on the fact that policies do not
    overlap. In this implementation, there is the idea of a 'default rule'
    which is the catch-all. This rule is treated specially and tagged with a
    specific priority.
    """

    APP_NAME = "enforcement"
    APP_TYPE = ControllerType.LOGICAL
    ENFORCE_DROP_PRIORITY = flows.MINIMUM_PRIORITY + 1
    # For allowing unclassified flows for app/service type rules.
    UNCLASSIFIED_ALLOW_PRIORITY = ENFORCE_DROP_PRIORITY + 1
    # Should not overlap with the drop flow as drop matches all packets.
    MIN_ENFORCE_PROGRAMMED_FLOW = UNCLASSIFIED_ALLOW_PRIORITY + 1
    MAX_ENFORCE_PRIORITY = flows.MAXIMUM_PRIORITY
    # Effectively range is 3 -> 65535
    ENFORCE_PRIORITY_RANGE = MAX_ENFORCE_PRIORITY - MIN_ENFORCE_PROGRAMMED_FLOW

    def __init__(self, *args, **kwargs):
        super(EnforcementController, self).__init__(*args, **kwargs)
        self._config = kwargs['config']
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_main_table = self._service_manager.get_next_table_num(
            self.APP_NAME)
        self._enforcement_stats_scratch = self._service_manager.get_table_num(
            EnforcementStatsController.APP_NAME)
        self.loop = kwargs['loop']

        self._msg_hub = MessageHub(self.logger)
        self._redirect_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
        self._bridge_ip_address = kwargs['config']['bridge_ip_address']
        self._redirect_manager = None
        self._qos_mgr = None
        self._clean_restart = kwargs['config']['clean_restart']

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath
        self._qos_mgr = QosManager(datapath, self.loop, self._config)
        self._qos_mgr.setup()

        self._redirect_manager = RedirectionManager(
            self._bridge_ip_address,
            self.logger,
            self.tbl_num,
            self._enforcement_stats_scratch,
            self._redirect_scratch,
            self._session_rule_version_mapper)

    def cleanup_on_disconnect(self, datapath):
        """
        Cleanup flows on datapath disconnect event.

        Args:
            datapath: ryu datapath struct
        """
        if self._clean_restart:
            self.delete_all_flows(datapath)

    def delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self.tbl_num)
        flows.delete_all_flows_from_table(datapath, self._redirect_scratch)

    def cleanup_state(self):
        pass

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)

    @set_ev_cls(ofp_event.EventOFPMeterConfigStatsReply, MAIN_DISPATCHER)
    def meter_config_stats_reply_handler(self, ev):
        if not self._qos_mgr:
            return

        qos_impl = self._qos_mgr.impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_config_stats(ev.msg.body)

    @set_ev_cls(ofp_event.EventOFPMeterFeaturesStatsReply, MAIN_DISPATCHER)
    def meter_features_stats_reply_handler(self, ev):
        if not self._qos_mgr:
            return

        qos_impl = self._qos_mgr.impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_feature_stats(ev.msg.body)

    def _install_default_flows_if_not_installed(self, datapath,
            existing_flows: List[OFPFlowStats]) -> List[OFPFlowStats]:
        """
        For each direction set the default flows to just forward to next app.
        The enforcement flows for each subscriber would be added when the
        IP session is created, by reaching out to the controller/PCRF.
        If default flows are already installed, do nothing.

        Args:
            datapath: ryu datapath struct
        Returns:
            The list of flows that remain after inserting default flows
        """
        inbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                   direction=Direction.IN)
        outbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                    direction=Direction.OUT)

        inbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath, self.tbl_num, inbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        outbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath, self.tbl_num, outbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        msgs, remaining_flows = self._msg_hub \
            .filter_msgs_if_not_in_flow_list([inbound_msg, outbound_msg],
                                             existing_flows)
        if msgs:
            chan = self._msg_hub.send(msgs, datapath)
            self._wait_for_responses(chan, len(msgs))

        return remaining_flows

    def get_of_priority(self, precedence):
        """
        In 3GPP, the lower the precedence, the more important the flow.
        In OpenFlow, the higher the priority, the more important the flow.
        Convert precedence to priority:
        1 - Flows with precedence outside the programmable range get the
        minimum priority for a programmed flow (MIN_ENFORCE_PROGRAMMED_FLOW)
        2 - Flows with precedence inside the range get priority
        MAX_ENFORCE_PRIORITY - precedence
        :param precedence: 3GPP rule precedence
        :return: OpenFlow priority
        """
        if precedence >= self.ENFORCE_PRIORITY_RANGE:
            self.logger.warning(
                "Flow precedence exceeds the OF priority range, using min priority %d",
                self.MIN_ENFORCE_PROGRAMMED_FLOW)
            return self.MIN_ENFORCE_PROGRAMMED_FLOW
        return self.MAX_ENFORCE_PRIORITY - precedence
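
    # Illustrative mapping (not from the original source; assumes
    # flows.MINIMUM_PRIORITY == 0, so MIN_ENFORCE_PROGRAMMED_FLOW == 3 and
    # ENFORCE_PRIORITY_RANGE == 65532):
    #   precedence 0     -> priority 65535 (most important flow)
    #   precedence 100   -> priority 65435
    #   precedence 65532 -> clamped to priority 3 (MIN_ENFORCE_PROGRAMMED_FLOW)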

    def _get_rule_match_flow_msgs(self, imsi, ip_addr, apn_ambr, rule):
        """
        Get flow msgs to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = self.get_of_priority(rule.priority)

        flow_adds = []
        for flow in rule.flow_list:
            try:
                flow_adds.extend(self._get_classify_rule_flow_msgs(
                    imsi, ip_addr, apn_ambr, flow, rule_num, priority,
                    rule.qos, rule.hard_timeout, rule.id, rule.app_name,
                    rule.app_service_type))

            except FlowMatchError as err:  # invalid match
                self.logger.error(
                    "Failed to get flow msg '%s' for subscriber %s: %s",
                    rule.id, imsi, err)
                raise err
        return flow_adds

    def _install_flow_for_rule(self, imsi, ip_addr, apn_ambr, rule):
        """
        Install a flow to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """

        if rule.redirect.support == rule.redirect.ENABLED:
            return self._install_redirect_flow(imsi, ip_addr, rule)

        if not rule.flow_list:
            self.logger.error('The flow list for imsi %s, rule.id - %s '
                              'is empty, this shouldn\'t happen', imsi, rule.id)
            return RuleModResult.FAILURE

        flow_adds = []
        try:
            flow_adds = self._get_rule_match_flow_msgs(imsi, ip_addr, apn_ambr, rule)
        except FlowMatchError:
            return RuleModResult.FAILURE

        chan = self._msg_hub.send(flow_adds, self._datapath)

        return self._wait_for_rule_responses(imsi, rule, chan)

    def _wait_for_rule_responses(self, imsi, rule, chan):
        def fail(err):
            self.logger.error(
                "Failed to install rule %s for subscriber %s: %s",
                rule.id, imsi, err)
            self._deactivate_flow_for_rule(imsi, rule.id)
            return RuleModResult.FAILURE

        for _ in range(len(rule.flow_list)):
            try:
                result = chan.get()
            except MsgChannel.Timeout:
                return fail("No response from OVS")
            if not result.ok():
                return fail(result.exception())
        return RuleModResult.SUCCESS

    def _get_classify_rule_flow_msgs(self, imsi, ip_addr, apn_ambr, flow, rule_num,
                                     priority, qos, hard_timeout, rule_id,
                                     app_name, app_service_type):
        """
        Install a flow from a rule. If the flow action is DENY, then the flow
        will drop the packet. Otherwise, the flow classifies the packet with
        its matched rule and injects the rule num into the packet's register.
        """
        flow_match = flow_match_to_magma_match(flow.match)
        flow_match.imsi = encode_imsi(imsi)
        flow_match_actions, instructions = self._get_classify_rule_of_actions(
            flow, rule_num, imsi, ip_addr, apn_ambr, qos, rule_id)
        msgs = []
        if app_name:
            # We have to allow initial traffic to pass through before it gets
            # classified by DPI, so match on the unclassified app_id here
            flow_match.app_id = UNCLASSIFIED_PROTO_ID
            # Tag the passthrough flow so the stats table ignores it
            parser = self._datapath.ofproto_parser
            passthrough_actions = flow_match_actions + \
                [parser.NXActionRegLoad2(dst=SCRATCH_REGS[1],
                                         value=IGNORE_STATS)]
            msgs.append(
                flows.get_add_resubmit_current_service_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    flow_match,
                    passthrough_actions,
                    hard_timeout=hard_timeout,
                    priority=self.UNCLASSIFIED_ALLOW_PRIORITY,
                    cookie=rule_num,
                    resubmit_table=self._enforcement_stats_scratch
                )
            )
            flow_match.app_id = get_app_id(
                PolicyRule.AppName.Name(app_name),
                PolicyRule.AppServiceType.Name(app_service_type),
            )

        if flow.action == flow.DENY:
            msgs.append(flows.get_add_drop_flow_msg(
                self._datapath,
                self.tbl_num,
                flow_match,
                flow_match_actions,
                hard_timeout=hard_timeout,
                priority=priority,
                cookie=rule_num)
            )
        else:
            msgs.append(flows.get_add_resubmit_current_service_flow_msg(
                self._datapath,
                self.tbl_num,
                flow_match,
                flow_match_actions,
                instructions=instructions,
                hard_timeout=hard_timeout,
                priority=priority,
                cookie=rule_num,
                resubmit_table=self._enforcement_stats_scratch)
            )
        return msgs
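
    # Illustrative note (not from the original source): when rule.app_name is
    # set, the method above emits two flows per flow description: a
    # higher-priority passthrough (UNCLASSIFIED_ALLOW_PRIORITY) tagged with
    # IGNORE_STATS, so not-yet-classified traffic is forwarded but not counted,
    # plus the real classify flow matching the DPI-assigned app_id.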

    def _install_redirect_flow(self, imsi, ip_addr, rule):
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = self.get_of_priority(rule.priority)
        redirect_request = RedirectionManager.RedirectRequest(
            imsi=imsi,
            ip_addr=ip_addr,
            rule=rule,
            rule_num=rule_num,
            priority=priority)
        try:
            self._redirect_manager.handle_redirection(
                self._datapath, self.loop, redirect_request)
            return RuleModResult.SUCCESS
        except RedirectException as err:
            self.logger.error(
                'Redirect Exception for imsi %s, rule.id - %s : %s',
                imsi, rule.id, err
            )
            return RuleModResult.FAILURE

    def _get_classify_rule_of_actions(self, flow, rule_num, imsi, ip_addr,
                                      apn_ambr, qos, rule_id):
        parser = self._datapath.ofproto_parser
        instructions = []

        # embed the rule id bytes in an OpenFlow note action
        of_note = parser.NXActionNote(list(rule_id.encode()))
        actions = [of_note]
        if flow.action == flow.DENY:
            return actions, instructions

        mbr_ul = qos.max_req_bw_ul
        mbr_dl = qos.max_req_bw_dl
        qos_info = None
        ambr = None
        d = flow.match.direction
        if d == flow.match.UPLINK:
            if apn_ambr:
                ambr = apn_ambr.max_bandwidth_ul
            if mbr_ul != 0:
                qos_info = QosInfo(gbr=qos.gbr_ul, mbr=mbr_ul)

        if d == flow.match.DOWNLINK:
            if apn_ambr:
                ambr = apn_ambr.max_bandwidth_dl
            if mbr_dl != 0:
                qos_info = QosInfo(gbr=qos.gbr_dl, mbr=mbr_dl)

        if qos_info or ambr:
            action, inst = self._qos_mgr.add_subscriber_qos(
                imsi, ip_addr, ambr, rule_num, d, qos_info)

            self.logger.debug("adding Actions %s instruction %s ", action, inst)
            if action:
                actions.append(action)

            if inst:
                instructions.append(inst)

        version = self._session_rule_version_mapper.get_version(imsi, rule_id)
        actions.extend(
            [parser.NXActionRegLoad2(dst='reg2', value=rule_num),
             parser.NXActionRegLoad2(dst=RULE_VERSION_REG, value=version)
             ])

        return actions, instructions
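
    # Illustrative note (not from the original source): the two register loads
    # above tag each classified packet with its rule number (reg2) and rule
    # version (RULE_VERSION_REG) before resubmitting, which is what lets the
    # enforcement stats table attribute byte counts to a specific
    # (subscriber, rule, version) and later expire stale versions.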

    def _get_default_flow_msg_for_subscriber(self, imsi):
        match = MagmaMatch(imsi=encode_imsi(imsi))
        actions = []
        return flows.get_add_drop_flow_msg(self._datapath, self.tbl_num,
            match, actions, priority=self.ENFORCE_DROP_PRIORITY)

    def _install_default_flow_for_subscriber(self, imsi):
        """
        Add a low priority flow to drop a subscriber's traffic in the event
        that all rules have been deactivated.

        Args:
            imsi (string): subscriber id
        """
        match = MagmaMatch(imsi=encode_imsi(imsi))
        actions = []  # empty actions == drop
        flows.add_drop_flow(self._datapath, self.tbl_num, match, actions,
                            priority=self.ENFORCE_DROP_PRIORITY)

    def _deactivate_flow_for_rule(self, imsi, rule_id):
        """
        Deactivate a specific rule using the flow cookie for a subscriber
        """
        try:
            num = self._rule_mapper.get_rule_num(rule_id)
        except KeyError:
            self.logger.error('Could not find rule id %s', rule_id)
            return
        cookie, mask = (num, flows.OVS_COOKIE_MATCH_ALL)
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath, self.tbl_num, match,
                          cookie=cookie, cookie_mask=mask)
        self._redirect_manager.deactivate_flow_for_rule(self._datapath, imsi,
                                                        num)
        self._qos_mgr.remove_subscriber_qos(imsi, num)

    def _deactivate_flows_for_subscriber(self, imsi):
        """ Deactivate all rules for a subscriber, ending any enforcement """
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath, self.tbl_num, match)
        self._redirect_manager.deactivate_flows_for_subscriber(self._datapath,
                                                               imsi)
        self._qos_mgr.remove_subscriber_qos(imsi)

    def deactivate_rules(self, imsi, rule_ids):
        """
        Deactivate flows for a subscriber. If only imsi is present, delete all
        rule flows for a subscriber (i.e. end its session). If rule_ids are
        present, delete the rule flows for that subscriber.

        Args:
            imsi (string): subscriber id
            rule_ids (list of strings): policy rule ids
        """
        if not self.init_finished:
            self.logger.error('Pipelined is not initialized')
            return RuleModResult.FAILURE

        if self._datapath is None:
            self.logger.error('Datapath not initialized')
            return

        if not imsi:
            self.logger.error('No subscriber specified')
            return

        if not rule_ids:
            self._deactivate_flows_for_subscriber(imsi)
        else:
            for rule_id in rule_ids:
                self._deactivate_flow_for_rule(imsi, rule_id)
Example #22
0
class EnforcementStatsController(PolicyMixin, RestartMixin, MagmaController):
    """
    This openflow controller installs flows for aggregating policy usage
    statistics, which are sent to sessiond for tracking.

    It periodically polls OVS for flow stats on its table and reports the
    usage records to session manager via RPC. Flows are deleted when their
    version (reg4 match) is different from the current version of the rule for
    the subscriber maintained by the rule version mapper.
    """

    APP_NAME = 'enforcement_stats'
    APP_TYPE = ControllerType.LOGICAL
    SESSIOND_RPC_TIMEOUT = 10
    # 0xffffffffffffffff is reserved in openflow
    DEFAULT_FLOW_COOKIE = 0xfffffffffffffffe

    _CONTEXTS = {
        'dpset': dpset.DPSet,
    }

    def __init__(self, *args, **kwargs):
        super(EnforcementStatsController, self).__init__(*args, **kwargs)
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_table = \
            self._service_manager.get_next_table_num(self.APP_NAME)
        self.dpset = kwargs['dpset']
        self.loop = kwargs['loop']
        # Spawn a thread to poll for flow stats
        poll_interval = kwargs['config']['enforcement']['poll_interval']
        # Create a rpc channel to sessiond
        self.sessiond = kwargs['rpc_stubs']['sessiond']
        self._msg_hub = MessageHub(self.logger)
        self.unhandled_stats_msgs = []  # Store multi-part responses from ovs
        self.total_usage = {}  # Store total usage
        self._clean_restart = kwargs['config']['clean_restart']
        self._redis_enabled = kwargs['config'].get('redis_enabled', False)
        # Store last usage excluding deleted flows for calculating deltas
        if self._redis_enabled:
            self.last_usage_for_delta = UsageDeltaDict()
        else:
            self.last_usage_for_delta = {}
        self.failed_usage = {}  # Store failed usage to retry rpc to sessiond
        self._unmatched_bytes = 0  # Store bytes matched by default rule if any
        self._default_drop_flow_name = \
            kwargs['config']['enforcement']['default_drop_flow_name']
        self.flow_stats_thread = hub.spawn(self._monitor, poll_interval)
        self._print_grpc_payload = os.environ.get('MAGMA_PRINT_GRPC_PAYLOAD')
        if self._print_grpc_payload is None:
            self._print_grpc_payload = \
                kwargs['config'].get('magma_print_grpc_payload', False)

    def delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self.tbl_num)

    def cleanup_state(self):
        """
        When we remove/reinsert flows we need to remove old usage maps as new
        flows will have reset stat counters
        """
        self.unhandled_stats_msgs = []
        self.total_usage = {}
        self.failed_usage = {}
        self._unmatched_bytes = 0

        if self._redis_enabled:
            keys = self.last_usage_for_delta.keys()
            for key in keys:
                self.last_usage_for_delta[key] = None
        else:
            self.last_usage_for_delta = {}

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath

    def _install_default_flows_if_not_installed(self, datapath,
            existing_flows: List[OFPFlowStats]) -> List[OFPFlowStats]:
        """
        Install default flows (if not already installed) to forward the
        traffic if no other flows are matched.

        Returns:
            The list of flows that remain after inserting default flows
        """
        match = MagmaMatch()
        msg = flows.get_add_drop_flow_msg(
            datapath, self.tbl_num, match,
            priority=flows.MINIMUM_PRIORITY,
            cookie=self.DEFAULT_FLOW_COOKIE)

        msg, remaining_flows = self._msg_hub \
            .filter_msgs_if_not_in_flow_list(self._datapath, [msg],
                                             existing_flows[self.tbl_num])
        if msg:
            chan = self._msg_hub.send(msg, datapath)
            self._wait_for_responses(chan, 1)

        return {self.tbl_num: remaining_flows}

    def cleanup_on_disconnect(self, datapath):
        """
        Cleanup flows on datapath disconnect event.

        Args:
            datapath: ryu datapath struct
        """
        if self._clean_restart:
            self.delete_all_flows(datapath)

    def _install_flow_for_rule(self, imsi, msisdn: bytes, uplink_tunnel: int, ip_addr, apn_ambr, rule):
        """
        Install a flow to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            msisdn (bytes): subscriber MSISDN
            uplink_tunnel (int): tunnel ID of the subscriber.
            ip_addr (string): subscriber session ipv4 address
            apn_ambr: APN aggregate maximum bit rate (AMBR) for the subscriber
            rule (PolicyRule): policy rule proto
        """
        def fail(err):
            self.logger.error(
                "Failed to install rule %s for subscriber %s: %s",
                rule.id, imsi, err)
            return RuleModResult.FAILURE

        msgs = self._get_rule_match_flow_msgs(imsi, msisdn, uplink_tunnel, ip_addr, apn_ambr, rule)

        try:
            chan = self._msg_hub.send(msgs, self._datapath)
        except MagmaDPDisconnectedError:
            self.logger.error("Datapath disconnected, failed to install rule %s"
                              "for imsi %s", rule, imsi)
            return RuleModResult.FAILURE
        for _ in range(len(msgs)):
            try:
                result = chan.get()
            except MsgChannel.Timeout:
                return fail("No response from OVS")
            if not result.ok():
                return fail(result.exception())

        return RuleModResult.SUCCESS

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)

    # pylint: disable=protected-access,unused-argument
    def _get_rule_match_flow_msgs(self, imsi, _, __, ip_addr, ambr, rule):
        """
        Returns flow add messages used for rule matching.
        """
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        version = self._session_rule_version_mapper.get_version(imsi, ip_addr,
                                                                rule.id)
        self.logger.debug(
            'Installing flow for %s with rule num %s (version %s)', imsi,
            rule_num, version)
        inbound_rule_match = _generate_rule_match(imsi, ip_addr, rule_num,
                                                  version, Direction.IN)
        outbound_rule_match = _generate_rule_match(imsi, ip_addr, rule_num,
                                                   version, Direction.OUT)

        flow_actions = [flow.action for flow in rule.flow_list]
        msgs = []
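        # SCRATCH_REGS[1] tags each stats flow with how its counters are
        # treated when polled: PROCESS_STATS (counted as usage),
        # DROP_FLOW_STATS (counted as dropped) or IGNORE_STATS (skipped).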
        if FlowDescription.PERMIT in flow_actions:
            inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = PROCESS_STATS
            outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = PROCESS_STATS
            msgs.extend([
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    inbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=rule_num),
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    outbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=rule_num),
            ])
        else:
            inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = DROP_FLOW_STATS
            outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = DROP_FLOW_STATS
            msgs.extend([
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    inbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=rule_num),
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    outbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=rule_num),
            ])

        if rule.app_name:
            inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = IGNORE_STATS
            outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = IGNORE_STATS
            msgs.extend([
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    inbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=rule_num),
                flows.get_add_drop_flow_msg(
                    self._datapath,
                    self.tbl_num,
                    outbound_rule_match,
                    priority=flows.DEFAULT_PRIORITY,
                    cookie=rule_num),
            ])
        return msgs

    def _get_default_flow_msgs_for_subscriber(self, imsi, ip_addr):
        match_in = _generate_rule_match(imsi, ip_addr, 0, 0, Direction.IN)
        match_out = _generate_rule_match(imsi, ip_addr, 0, 0,
                                              Direction.OUT)

        return [
            flows.get_add_drop_flow_msg(self._datapath, self.tbl_num, match_in,
                                        priority=Utils.DROP_PRIORITY),
            flows.get_add_drop_flow_msg(self._datapath, self.tbl_num, match_out,
                                        priority=Utils.DROP_PRIORITY)]

    def _install_redirect_flow(self, imsi, ip_addr, rule):
        pass

    def _install_default_flow_for_subscriber(self, imsi, ip_addr):
        """
        Add a low priority flow to drop a subscriber's traffic.

        Args:
            imsi (string): subscriber id
            ip_addr (string): subscriber ip_addr
        """
        msgs = self._get_default_flow_msgs_for_subscriber(imsi, ip_addr)
        if msgs:
            chan = self._msg_hub.send(msgs, self._datapath)
            self._wait_for_responses(chan, len(msgs))

    def get_policy_usage(self, fut):
        record_table = RuleRecordTable(
            records=self.total_usage.values(),
            epoch=global_epoch)
        fut.set_result(record_table)

    def _monitor(self, poll_interval):
        """
        Main thread that sends a stats request at the configured interval in
        seconds.
        """
        while True:
            for _, datapath in self.dpset.get_all():
                if self.init_finished:
                    self._poll_stats(datapath)
                else:
                    # Still send an empty report -> needed for pipelined setup
                    self._report_usage({})
            hub.sleep(poll_interval)

    def _poll_stats(self, datapath):
        """
        Send a FlowStatsRequest message to the datapath
        """
        ofproto, parser = datapath.ofproto, datapath.ofproto_parser
        req = parser.OFPFlowStatsRequest(
            datapath,
            table_id=self.tbl_num,
            out_group=ofproto.OFPG_ANY,
            out_port=ofproto.OFPP_ANY,
        )
        try:
            messages.send_msg(datapath, req)
        except MagmaOFError as e:
            self.logger.warning("Couldn't poll datapath stats: %s", e)

    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def _flow_stats_reply_handler(self, ev):
        """
        Schedule the flow stats handling in the main event loop, so as to
        unblock the ryu event loop
        """
        if not self.init_finished:
            self.logger.debug('Setup not finished, skipping stats reply')
            return

        if self._datapath_id != ev.msg.datapath.id:
            self.logger.debug('Ignoring stats from different bridge')
            return

        self.unhandled_stats_msgs.append(ev.msg.body)
        if ev.msg.flags == OFPMPF_REPLY_MORE:
            # Wait for the remaining multi-part responses that belong to
            # this single stats request.
            return
        self.loop.call_soon_threadsafe(
            self._handle_flow_stats, self.unhandled_stats_msgs)
        self.unhandled_stats_msgs = []

    def _handle_flow_stats(self, stats_msgs):
        """
        Aggregate flow stats by rule, and report to session manager
        """
        stat_count = sum(len(flow_stats) for flow_stats in stats_msgs)
        if stat_count == 0:
            return

        self.logger.debug("Processing %s stats responses", len(stats_msgs))
        # Aggregate flows into rule records
        current_usage = defaultdict(RuleRecord)
        for flow_stats in stats_msgs:
            self.logger.debug("Processing stats of %d flows", len(flow_stats))
            for stat in flow_stats:
                if stat.table_id != self.tbl_num:
                    # this update is not intended for policy
                    return
                current_usage = self._update_usage_from_flow_stat(
                    current_usage, stat)

        # Calculate the delta values from last stat update
        delta_usage = self._delta_usage_maps(current_usage,
                                             self.last_usage_for_delta)
        self.total_usage = current_usage

        # Append any records which we couldn't send to session manager earlier
        delta_usage = _merge_usage_maps(delta_usage, self.failed_usage)
        self.failed_usage = {}

        # Send report even if usage is empty. Sessiond uses empty reports to
        # recognize when flows have ended
        self._report_usage(delta_usage)

        self._delete_old_flows(stats_msgs)

    def deactivate_default_flow(self, imsi, ip_addr):
        match_in = _generate_rule_match(imsi, ip_addr, 0, 0, Direction.IN)
        match_out = _generate_rule_match(imsi, ip_addr, 0, 0, Direction.OUT)

        flows.delete_flow(self._datapath, self.tbl_num, match_in)
        flows.delete_flow(self._datapath, self.tbl_num, match_out)

    def _report_usage(self, delta_usage):
        """
        Report usage to sessiond using rpc
        """
        record_table = RuleRecordTable(records=delta_usage.values(),
                                       epoch=global_epoch)
        if self._print_grpc_payload:
            record_msg = 'Sending RPC payload: {0}{{\n{1}}}'.format(
                record_table.DESCRIPTOR.name, str(record_table))
            self.logger.info(record_msg)
        future = self.sessiond.ReportRuleStats.future(
            record_table, self.SESSIOND_RPC_TIMEOUT)
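        # the done callback runs on a gRPC worker thread, so hand control
        # back to the main event loop before touching controller state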
        future.add_done_callback(
            lambda future: self.loop.call_soon_threadsafe(
                self._report_usage_done, future, delta_usage))

    def _report_usage_done(self, future, delta_usage):
        """
        Callback after sessiond RPC completion
        """
        err = future.exception()
        if err:
            self.logger.error("Couldn't send flow records to sessiond: %s", err)
            self.failed_usage = _merge_usage_maps(
                delta_usage, self.failed_usage)

    def _update_usage_from_flow_stat(self, current_usage, flow_stat):
        """
        Update the rule record map with the flow stat and return the
        updated map.
        """
        rule_id = self._get_rule_id(flow_stat)
        # Rule not found, must be default flow
        if rule_id == "":
            default_flow_matched = \
                flow_stat.cookie == self.DEFAULT_FLOW_COOKIE
            if default_flow_matched:
                if flow_stat.byte_count != 0 and \
                   self._unmatched_bytes != flow_stat.byte_count:
                    self.logger.error('%s bytes total not reported.',
                                      flow_stat.byte_count)
                    self._unmatched_bytes = flow_stat.byte_count
                return current_usage
            else:
                # This must be the default drop flow
                rule_id = self._default_drop_flow_name
        # If this is a pass through app name flow ignore stats
        if _get_policy_type(flow_stat.match) == IGNORE_STATS:
            return current_usage
        sid = _get_sid(flow_stat)
        if not sid:
            return current_usage
        ipv4_addr = _get_ipv4(flow_stat)
        ipv6_addr = _get_ipv6(flow_stat)

        # use a compound key to separate flows for the same rule but for
        # different subscribers
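        # e.g. (hypothetical values) "IMSI001010000000013|rule_1|192.168.128.12"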
        key = sid + "|" + rule_id
        if ipv4_addr:
            key += "|" + ipv4_addr
        elif ipv6_addr:
            key += "|" + ipv6_addr
        record = current_usage[key]
        record.rule_id = rule_id
        record.sid = sid

        if ipv4_addr:
            record.ue_ipv4 = ipv4_addr
        elif ipv6_addr:
            record.ue_ipv6 = ipv6_addr
        bytes_rx = 0
        bytes_tx = 0
        if flow_stat.match[DIRECTION_REG] == Direction.IN:
            # HACK decrement byte count for downlink packets by the length
            # of an ethernet frame. Only IP and below should be counted towards
            # a user's data. Uplink does this already because the GTP port is
            # an L3 port.
            bytes_rx = _get_downlink_byte_count(flow_stat)
        else:
            bytes_tx = flow_stat.byte_count

        if _get_policy_type(flow_stat.match) == PROCESS_STATS:
            record.bytes_rx += bytes_rx
            record.bytes_tx += bytes_tx
        else:
            record.dropped_rx += bytes_rx
            record.dropped_tx += bytes_tx
        current_usage[key] = record
        return current_usage

    def _delete_old_flows(self, stats_msgs):
        """
        Check if the version of any flow is older than the current version. If
        so, delete the flow and update last_usage_for_delta so we calculate the
        correct usage delta for the next poll.
        """
        deleted_flow_usage = defaultdict(RuleRecord)
        for deletable_stat in self._old_flow_stats(stats_msgs):
            stat_rule_id = self._get_rule_id(deletable_stat)
            stat_sid = _get_sid(deletable_stat)
            ipv4_addr_str = _get_ipv4(deletable_stat)
            ipv6_addr_str = _get_ipv6(deletable_stat)
            ip_addr = None
            if ipv4_addr_str:
                ip_addr = IPAddress(version=IPAddress.IPV4,
                                    address=ipv4_addr_str.encode('utf-8'))
            elif ipv6_addr_str:
                ip_addr = IPAddress(version=IPAddress.IPV6,
                                    address=ipv6_addr_str.encode('utf-8'))
            rule_version = _get_version(deletable_stat)

            try:
                self._delete_flow(deletable_stat, stat_sid, ip_addr,
                                  rule_version)
                # Only remove the usage of the deleted flow if deletion
                # is successful.
                self._update_usage_from_flow_stat(deleted_flow_usage,
                                                  deletable_stat)
            except MagmaOFError as e:
                self.logger.error(
                    'Failed to delete rule %s for subscriber %s '
                    '(version: %s): %s', stat_rule_id,
                    stat_sid, rule_version, e)

        new_last_usage = self._delta_usage_maps(self.total_usage,
                                                deleted_flow_usage)
        # Save the new usage and purge stale entries from redis
        old = self.last_usage_for_delta.keys()
        new = new_last_usage.keys()
        for key, value in new_last_usage.items():
            self.last_usage_for_delta[key] = value
        for key in [k for k in old if k not in new]:
            self.last_usage_for_delta[key] = None

    def _old_flow_stats(self, stats_msgs):
        """
        Generator function to filter the flow stats that should be deleted from
        the stats messages.
        """
        for flow_stats in stats_msgs:
            for stat in flow_stats:
                if stat.table_id != self.tbl_num:
                    # this update is not intended for policy
                    return

                rule_id = self._get_rule_id(stat)
                sid = _get_sid(stat)
                ipv4_addr_str = _get_ipv4(stat)
                ipv4_addr = None
                if ipv4_addr_str:
                    ipv4_addr = IPAddress(version=IPAddress.IPV4,
                                          address=ipv4_addr_str.encode('utf-8'))
                rule_version = _get_version(stat)
                if rule_id == "":
                    continue

                current_ver = \
                    self._session_rule_version_mapper.get_version(sid,
                                                                  ipv4_addr,
                                                                  rule_id)
                if current_ver != rule_version:
                    yield stat

    def _delete_flow(self, flow_stat, sid, ip_addr, version):
        cookie, mask = (
            flow_stat.cookie, flows.OVS_COOKIE_MATCH_ALL)
        match = _generate_rule_match(
            sid, ip_addr, flow_stat.cookie, version,
            Direction(flow_stat.match[DIRECTION_REG]))
        flows.delete_flow(self._datapath,
                          self.tbl_num,
                          match,
                          cookie=cookie,
                          cookie_mask=mask)

    def _get_rule_id(self, flow):
        """
        Return the rule id from the rule cookie
        """
        # the default rule will have a cookie of 0
        rule_num = flow.cookie
        if rule_num == 0 or rule_num == self.DEFAULT_FLOW_COOKIE:
            return ""
        try:
            return self._rule_mapper.get_rule_id(rule_num)
        except KeyError as e:
            self.logger.error('Could not find rule id for num %d: %s',
                              rule_num, e)
            return ""

    def _delta_usage_maps(self, current_usage, last_usage):
        """
        Calculate the delta between the two usage maps and return a new
        usage map.
        """
        if len(last_usage) == 0:
            return current_usage
        new_usage = {}
        for key, current in current_usage.items():
            last = last_usage.get(key, None)
            if last is not None:
                rec = RuleRecord()
                rec.MergeFrom(current)  # copy metadata
                if current.bytes_rx < last.bytes_rx or \
                        current.bytes_tx < last.bytes_tx:
                    self.logger.error(
                        'Resetting usage for rule %s, for subscriber %s, '
                        'current usage(rx/tx) %d/%d, last usage %d/%d',
                        rec.rule_id, rec.sid, current.bytes_rx,
                        current.bytes_tx, last.bytes_rx, last.bytes_tx)
                    rec.bytes_rx = last.bytes_rx
                    rec.bytes_tx = last.bytes_tx
                else:
                    rec.bytes_rx = current.bytes_rx - last.bytes_rx
                    rec.bytes_tx = current.bytes_tx - last.bytes_tx
                new_usage[key] = rec
            else:
                new_usage[key] = current
        return new_usage
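
A minimal sketch of what _delta_usage_maps() computes, using a plain
namedtuple with hypothetical byte counts in place of the RuleRecord proto
(field names bytes_rx/bytes_tx taken from the code above): keys present in
both maps report the difference, keys only in the current map report their
full counters. (The real method also guards against counters that go
backwards; that path is omitted here.)

# Illustration only; Usage is a hypothetical stand-in for RuleRecord.
from collections import namedtuple

Usage = namedtuple('Usage', ['bytes_rx', 'bytes_tx'])

last = {'IMSI001|rule_1': Usage(bytes_rx=100, bytes_tx=50)}
current = {
    'IMSI001|rule_1': Usage(bytes_rx=300, bytes_tx=80),
    'IMSI001|rule_2': Usage(bytes_rx=10, bytes_tx=5),
}

delta = {}
for key, cur in current.items():
    prev = last.get(key)
    if prev is None:
        delta[key] = cur  # new flow: report its full counters
    else:
        delta[key] = Usage(cur.bytes_rx - prev.bytes_rx,
                           cur.bytes_tx - prev.bytes_tx)

assert delta['IMSI001|rule_1'] == Usage(200, 30)
assert delta['IMSI001|rule_2'] == Usage(10, 5)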
Example #23
0
class InOutController(RestartMixin, MagmaController):
    """
    A controller that sets up an openflow pipeline for Magma.

    The EPC controls table 0 which is the first table every packet touches.
    This controller owns the ingress and output portions of the pipeline, the
    first table a packet hits after the EPC controller's table 0 and the last
    table a packet hits before exiting the pipeline.
    """

    APP_NAME = "inout"

    InOutConfig = namedtuple(
        'InOutConfig',
        [
            'gtp_port', 'uplink_port', 'mtr_ip', 'mtr_port', 'li_port_name',
            'enable_nat', 'non_nat_gw_probe_frequency',
            'non_nat_arp_egress_port', 'setup_type', 'uplink_gw_mac',
            'he_proxy_port', 'he_proxy_eth_mac'
        ],
    )
    ARP_PROBE_FREQUENCY = 300
    NON_NAT_ARP_EGRESS_PORT = 'dhcp0'
    UPLINK_OVS_BRIDGE_NAME = 'uplink_br0'

    def __init__(self, *args, **kwargs):
        super(InOutController, self).__init__(*args, **kwargs)
        self.config = self._get_config(kwargs['config'])
        self._li_port = None
        # TODO Alex do we want this to be configurable from swagger?
        if self.config.mtr_ip:
            self._mtr_service_enabled = True
        else:
            self._mtr_service_enabled = False

        if (self._service_manager.is_app_enabled(LIMirrorController.APP_NAME)
                and self.config.li_port_name):
            self._li_port = BridgeTools.get_ofport(self.config.li_port_name)
            self._li_table = self._service_manager.get_table_num(
                LIMirrorController.APP_NAME)
        self._ingress_tbl_num = self._service_manager.get_table_num(INGRESS)
        self._midle_tbl_num = \
            self._service_manager.get_table_num(PHYSICAL_TO_LOGICAL)
        self._egress_tbl_num = self._service_manager.get_table_num(EGRESS)
        # following fields are only used in Non Nat config
        self._tbls = [
            self._ingress_tbl_num, self._midle_tbl_num, self._egress_tbl_num
        ]
        self._gw_mac_monitor = None
        self._current_upstream_mac_map = {}  # maps vlan to upstream gw mac
        self._clean_restart = kwargs['config']['clean_restart']
        self._msg_hub = MessageHub(self.logger)
        self._datapath = None

    def _get_config(self, config_dict):
        mtr_ip = None
        mtr_port = None
        li_port_name = None
        port_no = config_dict.get('uplink_port', None)
        setup_type = config_dict.get('setup_type', None)

        he_proxy_port = 0
        he_proxy_eth_mac = ''
        try:
            if 'proxy_port_name' in config_dict:
                he_proxy_port = BridgeTools.get_ofport(
                    config_dict.get('proxy_port_name'))
                he_proxy_eth_mac = config_dict.get('he_proxy_eth_mac',
                                                   PROXY_PORT_MAC)
        except DatapathLookupError:
            # ignore it
            self.logger.debug("could not parse proxy port config")

        if 'mtr_ip' in config_dict:
            self._mtr_service_enabled = True
            mtr_ip = config_dict['mtr_ip']
            mtr_port = config_dict['ovs_mtr_port_number']
        if 'li_local_iface' in config_dict:
            li_port_name = config_dict['li_local_iface']

        enable_nat = config_dict.get('enable_nat', True)
        non_nat_gw_probe_freq = config_dict.get('non_nat_gw_probe_frequency',
                                                self.ARP_PROBE_FREQUENCY)
        # In case of vlan tag on uplink_bridge, use separate port.
        sgi_vlan = config_dict.get('sgi_management_iface_vlan', "")
        if not sgi_vlan:
            non_nat_arp_egress_port = config_dict.get(
                'non_nat_arp_egress_port', self.UPLINK_OVS_BRIDGE_NAME)
        else:
            non_nat_arp_egress_port = config_dict.get(
                'non_nat_arp_egress_port', self.NON_NAT_ARP_EGRESS_PORT)
        uplink_gw_mac = config_dict.get('uplink_gw_mac', "ff:ff:ff:ff:ff:ff")
        return self.InOutConfig(
            gtp_port=config_dict['ovs_gtp_port_number'],
            uplink_port=port_no,
            mtr_ip=mtr_ip,
            mtr_port=mtr_port,
            li_port_name=li_port_name,
            enable_nat=enable_nat,
            non_nat_gw_probe_frequency=non_nat_gw_probe_freq,
            non_nat_arp_egress_port=non_nat_arp_egress_port,
            setup_type=setup_type,
            uplink_gw_mac=uplink_gw_mac,
            he_proxy_port=he_proxy_port,
            he_proxy_eth_mac=he_proxy_eth_mac)

    def initialize_on_connect(self, datapath):
        self._datapath = datapath
        self._setup_non_nat_monitoring()
        # TODO possibly investigate stateless XWF(no sessiond)
        if self.config.setup_type == 'XWF':
            self.delete_all_flows(datapath)
            self._install_default_flows(datapath)

    def _get_default_flow_msgs(self, datapath) -> DefaultMsgsMap:
        """
        Gets the default flow msgs for pkt routing

        Args:
            datapath: ryu datapath struct
        Returns:
            A map of table number to the default msgs to add
        """
        return {
            self._ingress_tbl_num:
            self._get_default_ingress_flow_msgs(datapath),
            self._midle_tbl_num: self._get_default_middle_flow_msgs(datapath),
            self._egress_tbl_num: self._get_default_egress_flow_msgs(datapath),
        }

    def _install_default_flows(self, datapath):
        default_msg_map = self._get_default_flow_msgs(datapath)
        default_msgs = []

        for _, msgs in default_msg_map.items():
            default_msgs.extend(msgs)
        chan = self._msg_hub.send(default_msgs, datapath)
        self._wait_for_responses(chan, len(default_msgs))

    def cleanup_on_disconnect(self, datapath):
        if self._clean_restart:
            self.delete_all_flows(datapath)

    def delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self._ingress_tbl_num)
        flows.delete_all_flows_from_table(datapath, self._midle_tbl_num)
        flows.delete_all_flows_from_table(datapath, self._egress_tbl_num)

    def _get_default_middle_flow_msgs(self, dp):
        """
        Physical-to-logical (middle) table: resubmit passthrough packets
        straight to the egress table and all other traffic to the next table
        in the pipeline.

        Raises:
            MagmaOFError if any of the default flows fail to install.
        """
        msgs = []
        next_tbl = self._service_manager.get_next_table_num(
            PHYSICAL_TO_LOGICAL)

        # Allow passthrough pkts(skip enforcement and send to egress table)
        ps_match = MagmaMatch(passthrough=PASSTHROUGH_REG_VAL)
        msgs.append(
            flows.get_add_resubmit_next_service_flow_msg(
                dp,
                self._midle_tbl_num,
                ps_match,
                actions=[],
                priority=flows.PASSTHROUGH_PRIORITY,
                resubmit_table=self._egress_tbl_num))

        match = MagmaMatch()
        msgs.append(
            flows.get_add_resubmit_next_service_flow_msg(
                dp,
                self._midle_tbl_num,
                match,
                actions=[],
                priority=flows.DEFAULT_PRIORITY,
                resubmit_table=next_tbl))

        if self._mtr_service_enabled:
            msgs.extend(
                _get_vlan_egress_flow_msgs(dp,
                                           self._midle_tbl_num,
                                           self.config.mtr_ip,
                                           self.config.mtr_port,
                                           priority=flows.UE_FLOW_PRIORITY,
                                           direction=Direction.OUT))
        return msgs

    def _get_default_egress_flow_msgs(self,
                                      dp,
                                      mac_addr: str = "",
                                      vlan: str = ""):
        """
        Egress table is the last table that a packet touches in the pipeline.
        Output downlink traffic to gtp port, uplink traffic to LOCAL
        Args:
            mac_addr: In Non NAT mode, this is upstream internet GW mac address
            vlan: in multi APN this is vlan_id of the upstream network.

        Raises:
            MagmaOFError if any of the default flows fail to install.
        """
        msgs = []

        if self.config.setup_type == 'LTE':
            msgs.extend(
                _get_vlan_egress_flow_msgs(dp, self._egress_tbl_num,
                                           "0.0.0.0/0"))
            msgs.extend(self._get_proxy_flow_msgs(dp))
        else:
            # Use regular match for Non LTE setup.
            downlink_match = MagmaMatch(direction=Direction.IN)
            msgs.append(
                flows.get_add_output_flow_msg(
                    dp,
                    self._egress_tbl_num,
                    downlink_match, [],
                    output_port=self.config.gtp_port))

        if vlan.isdigit():
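            # 0x1000 is the OFPVID_PRESENT bit, so this matches only packets
            # that actually carry the given VLAN tag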
            vid = 0x1000 | int(vlan)
            uplink_match = MagmaMatch(direction=Direction.OUT, vlan_vid=(vid))
        else:
            uplink_match = MagmaMatch(direction=Direction.OUT)

        actions = []
        # avoid resetting mac address on switch connect event.
        if mac_addr == "":
            mac_addr = self._current_upstream_mac_map.get(vlan, "")
        if mac_addr == "" and self.config.enable_nat is False and \
            self.config.setup_type == 'LTE':
            mac_addr = self.config.uplink_gw_mac

        if mac_addr != "":
            parser = dp.ofproto_parser
            actions.append(
                parser.NXActionRegLoad2(dst='eth_dst', value=mac_addr))
            if self._current_upstream_mac_map.get(vlan, "") != mac_addr:
                self.logger.info("Using GW: mac: %s match %s actions: %s",
                                 mac_addr, str(uplink_match.ryu_match),
                                 str(actions))

                self._current_upstream_mac_map[vlan] = mac_addr

        if vlan.isdigit():
            priority = flows.UE_FLOW_PRIORITY
        elif mac_addr != "":
            priority = flows.DEFAULT_PRIORITY
        else:
            priority = flows.MINIMUM_PRIORITY

        msgs.append(
            flows.get_add_output_flow_msg(dp,
                                          self._egress_tbl_num,
                                          uplink_match,
                                          priority=priority,
                                          actions=actions,
                                          output_port=self.config.uplink_port))
        return msgs

    def _get_default_ingress_flow_msgs(self, dp):
        """
        Sets up the ingress table, the first step in the packet processing
        pipeline.

        This sets up flow rules to annotate packets with a metadata bit
        indicating the direction. Incoming packets are defined as packets
        originating from the LOCAL port, outgoing packets are defined as
        packets originating from the gtp port.

        All other packets bypass the pipeline.

        Note that the ingress rules do *not* install any flows that cause
        PacketIns (i.e., sends packets to the controller).

        Raises:
            MagmaOFError if any of the default flows fail to install.
        """
        parser = dp.ofproto_parser
        next_table = self._service_manager.get_next_table_num(INGRESS)
        msgs = []

        # set traffic direction bits

        # set a direction bit for incoming (internet -> UE) traffic.
        match = MagmaMatch(in_port=OFPP_LOCAL)
        actions = [load_direction(parser, Direction.IN)]
        msgs.append(
            flows.get_add_resubmit_next_service_flow_msg(
                dp,
                self._ingress_tbl_num,
                match,
                actions=actions,
                priority=flows.DEFAULT_PRIORITY,
                resubmit_table=next_table))

        # set a direction bit for incoming (internet -> UE) traffic.
        match = MagmaMatch(in_port=self.config.uplink_port)
        actions = [load_direction(parser, Direction.IN)]
        msgs.append(
            flows.get_add_resubmit_next_service_flow_msg(
                dp,
                self._ingress_tbl_num,
                match,
                actions=actions,
                priority=flows.DEFAULT_PRIORITY,
                resubmit_table=next_table))

        # Send RADIUS requests directly to li table
        if self._li_port:
            match = MagmaMatch(in_port=self._li_port)
            actions = [load_direction(parser, Direction.IN)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    match,
                    actions=actions,
                    priority=flows.DEFAULT_PRIORITY,
                    resubmit_table=self._li_table))

        # set a direction bit for incoming (mtr -> UE) traffic.
        if self._mtr_service_enabled:
            match = MagmaMatch(in_port=self.config.mtr_port)
            actions = [load_direction(parser, Direction.IN)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    match,
                    actions=actions,
                    priority=flows.DEFAULT_PRIORITY,
                    resubmit_table=next_table))

        if self.config.he_proxy_port != 0:
            match = MagmaMatch(in_port=self.config.he_proxy_port)
            actions = [load_direction(parser, Direction.IN)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    match,
                    actions=actions,
                    priority=flows.DEFAULT_PRIORITY,
                    resubmit_table=next_table))

        if self.config.setup_type == 'CWF':
            # set a direction bit for outgoing (pn -> inet) traffic for remaining traffic
            ps_match_out = MagmaMatch()
            actions = [load_direction(parser, Direction.OUT)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    ps_match_out,
                    actions=actions,
                    priority=flows.MINIMUM_PRIORITY,
                    resubmit_table=next_table))
        else:
            # set a direction bit for outgoing (pn -> inet) traffic for remaining traffic
            # Passthrough is zero for packets from eNodeB GTP tunnels
            ps_match_out = MagmaMatch(passthrough=REG_ZERO_VAL)
            actions = [load_direction(parser, Direction.OUT)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    ps_match_out,
                    actions=actions,
                    priority=flows.MINIMUM_PRIORITY,
                    resubmit_table=next_table))

            # Passthrough is one for packets from remote PGW GTP tunnels, set direction
            # flag to IN for such packets.
            ps_match_in = MagmaMatch(passthrough=PASSTHROUGH_REG_VAL)
            actions = [load_direction(parser, Direction.IN)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    ps_match_in,
                    actions=actions,
                    priority=flows.MINIMUM_PRIORITY,
                    resubmit_table=next_table))

        return msgs

    def _get_gw_mac_address(self, ip: IPAddress, vlan: str = "") -> str:
        try:
            gw_ip = ipaddress.ip_address(ip.address)
            self.logger.debug("sending arp via egress: %s",
                              self.config.non_nat_arp_egress_port)
            eth_mac_src = get_if_hwaddr(self.config.non_nat_arp_egress_port)
            psrc = "0.0.0.0"
            egress_port_ip = get_if_addr(self.config.non_nat_arp_egress_port)
            if egress_port_ip:
                psrc = egress_port_ip

            pkt = Ether(dst=ETHER_BROADCAST, src=eth_mac_src)
            if vlan.isdigit():
                pkt /= Dot1Q(vlan=int(vlan))
            pkt /= ARP(op="who-has", pdst=gw_ip, hwsrc=eth_mac_src, psrc=psrc)
            self.logger.debug("ARP Req pkt %s", pkt.show(dump=True))

            res = srp1(pkt,
                       type=ETH_P_ALL,
                       iface=self.config.non_nat_arp_egress_port,
                       timeout=1,
                       verbose=0,
                       nofilter=1,
                       promisc=0)

            if res is not None:
                self.logger.debug("ARP Res pkt %s", res.show(dump=True))
                if str(res[ARP].psrc) != str(gw_ip):
                    self.logger.warning(
                        "Unexpected IP in ARP response. expected: %s pkt: %s",
                        str(gw_ip), res.show(dump=True))
                    return ""
                if vlan.isdigit():
                    if Dot1Q in res and str(res[Dot1Q].vlan) == vlan:
                        mac = res[ARP].hwsrc
                    else:
                        self.logger.warning(
                            "Unexpected vlan in ARP response. expected: %s pkt: %s",
                            vlan, res.show(dump=True))
                        return ""
                else:
                    mac = res[ARP].hwsrc
                return mac
            else:
                self.logger.debug("Got Null response")
                return ""

        except Scapy_Exception as ex:
            self.logger.warning("Error in probing Mac address: err %s", ex)
            return ""
        except ValueError:
            self.logger.warning("Invalid GW Ip address: [%s] or vlan %s",
                                str(ip), vlan)
            return ""

    def _monitor_and_update(self):
        while True:
            gw_info_list = get_mobilityd_gw_info()
            for gw_info in gw_info_list:
                if gw_info and gw_info.ip:
                    latest_mac_addr = self._get_gw_mac_address(
                        gw_info.ip, gw_info.vlan)
                    self.logger.debug("mac [%s] for vlan %s", latest_mac_addr,
                                      gw_info.vlan)
                    if latest_mac_addr == "":
                        latest_mac_addr = gw_info.mac

                    msgs = self._get_default_egress_flow_msgs(
                        self._datapath, latest_mac_addr, gw_info.vlan)
                    chan = self._msg_hub.send(msgs, self._datapath)
                    self._wait_for_responses(chan, len(msgs))

                    if latest_mac_addr != "":
                        set_mobilityd_gw_info(gw_info.ip, latest_mac_addr,
                                              gw_info.vlan)
                else:
                    self.logger.warning("No default GW found.")

            hub.sleep(self.config.non_nat_gw_probe_frequency)

    def _setup_non_nat_monitoring(self):
        """
        Set up the egress flow to forward traffic to the internet GW and
        start a thread that periodically resolves the uplink GW MAC address.
        """
        if self._gw_mac_monitor is not None:
            # No need for multiple probes here.
            return
        if self.config.enable_nat is True:
            self.logger.info("Nat is on")
            return
        elif self.config.setup_type != 'LTE':
            self.logger.info("No GW MAC probe for %s", self.config.setup_type)
            return
        else:
            self.logger.info("Non nat conf: egress port: %s, uplink: %s",
                             self.config.non_nat_arp_egress_port,
                             self.config.uplink_port)

        self._gw_mac_monitor = hub.spawn(self._monitor_and_update)

        threading.Event().wait(1)

    def _get_proxy_flow_msgs(self, dp):
        """
        Build egress flow msgs that forward proxy-tagged traffic to the
        HE proxy port.

        Args:
            dp: ryu datapath struct
        """
        if self.config.he_proxy_port <= 0:
            return []

        parser = dp.ofproto_parser
        match = MagmaMatch(proxy_tag=PROXY_TAG_TO_PROXY)
        actions = [
            parser.NXActionRegLoad2(dst='eth_dst',
                                    value=self.config.he_proxy_eth_mac)
        ]
        return [
            flows.get_add_output_flow_msg(
                dp,
                self._egress_tbl_num,
                match,
                priority=flows.UE_FLOW_PRIORITY,
                actions=actions,
                output_port=self.config.he_proxy_port)
        ]

    def _wait_for_responses(self, chan, response_count):
        def fail(err):
            self.logger.error("Failed to install rule with error: %s", err)

        for _ in range(response_count):
            try:
                result = chan.get()
            except MsgChannel.Timeout:
                return fail("No response from OVS msg channel")
            if not result.ok():
                return fail(result.exception())

    def _get_ue_specific_flow_msgs(self, _):
        return {}

    def finish_init(self, _):
        pass

    def cleanup_state(self):
        pass

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)
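
For reference, a minimal sketch (hypothetical values; the key names are the
ones read by _get_config() and __init__ above) of the kind of config dict
InOutController expects:

# Illustrative pipelined config fragment only; all values are made up.
example_inout_config = {
    'ovs_gtp_port_number': 32768,
    'uplink_port': 2,
    'setup_type': 'LTE',
    'enable_nat': False,
    'uplink_gw_mac': '11:22:33:44:55:66',
    'non_nat_gw_probe_frequency': 60,
    'non_nat_arp_egress_port': 'uplink_br0',
    'sgi_management_iface_vlan': '',
    'mtr_ip': '10.0.2.10',
    'ovs_mtr_port_number': 15577,
    'li_local_iface': 'li_port',
    'proxy_port_name': 'proxy_port',
    'he_proxy_eth_mac': '0e:00:00:00:00:01',
    'clean_restart': True,
}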
Example #24
0
class EnforcementController(MagmaController):
    """
    EnforcementController

    The enforcement controller installs flows for tracking subscriber usage
    per rule and enforcing the usage. Each flow installed matches on a rule
    and an IMSI and then the statistics are sent to sessiond for tracking.

    NOTE: Enforcement currently relies on the fact that policies do not
    overlap. In this implementation, there is the idea of a 'default rule'
    which is the catch-all. This rule is treated specially and tagged with a
    specific priority.
    """

    APP_NAME = "enforcement"
    ENFORCE_DROP_PRIORITY = flows.MINIMUM_PRIORITY + 1
    # Should not overlap with the drop flow as drop matches all packets.
    MIN_ENFORCE_PROGRAMMED_FLOW = ENFORCE_DROP_PRIORITY + 1
    MAX_ENFORCE_PRIORITY = flows.MAXIMUM_PRIORITY
    # Effectively range is 2 -> 65535
    ENFORCE_PRIORITY_RANGE = MAX_ENFORCE_PRIORITY - MIN_ENFORCE_PROGRAMMED_FLOW

    def __init__(self, *args, **kwargs):
        super(EnforcementController, self).__init__(*args, **kwargs)
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_table = self._service_manager.get_next_table_num(
            self.APP_NAME)
        self._datapath = None
        self._rule_mapper = kwargs['rule_id_mapper']
        self.loop = kwargs['loop']
        self._policy_dict = PolicyRuleDict()
        self._qos_map = QosQueueMap(kwargs['config']['nat_iface'],
                                    kwargs['config']['enodeb_iface'],
                                    kwargs['config']['enable_queue_pgm'])
        self._msg_hub = MessageHub(self.logger)

        self._redirect_manager = RedirectionManager(
            kwargs['config']['bridge_ip_address'], self.logger, self.tbl_num,
            self.next_table)

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        flows.delete_all_flows_from_table(datapath, self.tbl_num)
        self._install_default_flows(datapath)
        self._datapath = datapath

    def cleanup_on_disconnect(self, datapath):
        """
        Cleanup flows on datapath disconnect event.

        Args:
            datapath: ryu datapath struct
        """
        flows.delete_all_flows_from_table(datapath, self.tbl_num)

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)

    def _install_default_flows(self, datapath):
        """
        For each direction, set the default flows to just forward to the next
        table. The enforcement flows for each subscriber are added when the
        IP session is created, by reaching out to the controller/PCRF.

        Args:
            datapath: ryu datapath struct
        """
        inbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                   direction=Direction.IN)
        outbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                    direction=Direction.OUT)
        flows.add_flow(datapath,
                       self.tbl_num,
                       inbound_match, [],
                       priority=flows.MINIMUM_PRIORITY,
                       resubmit_table=self.next_table)
        flows.add_flow(datapath,
                       self.tbl_num,
                       outbound_match, [],
                       priority=flows.MINIMUM_PRIORITY,
                       resubmit_table=self.next_table)

    def _install_flow_for_static_rule(self, imsi, ip_addr, rule_id):
        """
        Install a flow to get stats for a particular static rule id. The rule
        will be loaded from Redis and installed

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule_id (string): policy rule id
        """
        rule = self._policy_dict[rule_id]
        if rule is None:
            self.logger.error("Could not find rule for rule_id: %s", rule_id)
            return RuleModResult.FAILURE
        return self._install_flow_for_rule(imsi, ip_addr, rule)

    def get_of_priority(self, precedence):
        """
        The lower the precedence, the more important the flow in 3GPP.
        The higher the priority, the more important the flow in OpenFlow.
        Convert precedence to priority:
        1 - Flows with precedence > 65534 will have min priority which is the
        min priority for a programmed flow = (default drop + 1)
        2 - Flows in the precedence range 0-65534 will have priority 65535 -
        Precedence
        :param precedence:
        :return:
        """
        if precedence >= self.ENFORCE_PRIORITY_RANGE:
            self.logger.warning(
                "Flow precedence is higher than OF range using "
                "min priority %d", self.MIN_ENFORCE_PROGRAMMED_FLOW)
            return self.MIN_ENFORCE_PROGRAMMED_FLOW
        return self.MAX_ENFORCE_PRIORITY - precedence
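        # Worked example using the constants above (range 2 -> 65535):
        # precedence 10 maps to OpenFlow priority 65535 - 10 = 65525, while
        # any precedence >= ENFORCE_PRIORITY_RANGE falls back to
        # MIN_ENFORCE_PROGRAMMED_FLOW (2).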

    def _install_flow_for_rule(self, imsi, ip_addr, rule):
        """
        Install a flow to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = self.get_of_priority(rule.priority)
        ul_qos = rule.qos.max_req_bw_ul

        if rule.redirect.support == rule.redirect.ENABLED:
            # TODO currently if redirection is enabled we ignore other flows
            # from rule.flow_list, confirm that this is the expected behaviour
            redirect_request = RedirectionManager.RedirectRequest(
                imsi=imsi,
                ip_addr=ip_addr,
                rule=rule,
                rule_num=rule_num,
                priority=priority)
            try:
                self._redirect_manager.handle_redirection(
                    self._datapath, self.loop, redirect_request)
                return RuleModResult.SUCCESS
            except RedirectException as err:
                self.logger.error(
                    'Redirect Exception for imsi %s, rule.id - %s : %s', imsi,
                    rule.id, err)
                return RuleModResult.FAILURE

        flow_adds = []
        for flow in rule.flow_list:
            try:
                flow_adds.append(
                    self._get_add_flow_msg(imsi, flow, rule_num, priority,
                                           ul_qos, rule.hard_timeout, rule.id))
            except FlowMatchError as err:  # invalid match
                self.logger.error(
                    "Failed to install rule %s for subscriber %s: %s", rule.id,
                    imsi, err)
                return RuleModResult.FAILURE
        chan = self._msg_hub.send(flow_adds, self._datapath)

        return self._wait_for_responses(imsi, rule, chan)

    def _wait_for_responses(self, imsi, rule, chan):
        def fail(err):
            self.logger.error(
                "Failed to install rule %s for subscriber %s: %s", rule.id,
                imsi, err)
            self._deactivate_flow_for_rule(imsi, rule.id)
            return RuleModResult.FAILURE

        for _ in range(len(rule.flow_list)):
            try:
                result = chan.get()
            except MsgChannel.Timeout:
                return fail("No response from OVS")
            if not result.ok():
                return fail(result.exception())

        return RuleModResult.SUCCESS

    def _get_add_flow_msg(self, imsi, flow, rule_num, priority, ul_qos,
                          hard_timeout, rule_id):
        match = flow.match
        ryu_match = flow_match_to_magma_match(match)
        ryu_match.imsi = encode_imsi(imsi)

        actions = self._get_of_actions_for_flow(flow, rule_num, imsi, ul_qos,
                                                rule_id)
        resubmit_table = self.next_table if flow.action != flow.DENY else None

        return flows.get_add_flow_msg(self._datapath,
                                      self.tbl_num,
                                      ryu_match,
                                      actions,
                                      hard_timeout=hard_timeout,
                                      priority=priority,
                                      cookie=rule_num,
                                      resubmit_table=resubmit_table)

    def _get_of_actions_for_flow(self, flow, rule_num, imsi, ul_qos, rule_id):
        parser = self._datapath.ofproto_parser
        # attach the rule id bytes to the flow as an OpenFlow note action
        of_note = parser.NXActionNote(list(rule_id.encode()))
        if flow.action == flow.DENY:
            return [of_note]

        # QoS Rate-Limiting is currently supported for uplink traffic
        if ul_qos != 0 and flow.match.direction == flow.match.UPLINK:
            qid = self._qos_map.map_flow_to_queue(imsi, rule_num, ul_qos, True)
            if qid != 0:
                return [
                    parser.OFPActionSetField(pkt_mark=qid),
                    parser.NXActionRegLoad2(dst='reg2', value=rule_num),
                    of_note
                ]

        return [parser.NXActionRegLoad2(dst='reg2', value=rule_num), of_note]

    def _install_drop_flow(self, imsi):
        """
        Add a low priority flow to drop a subscriber's traffic in the event
        that all rules have been deactivated.

        Args:
            imsi (string): subscriber id
        """
        match = MagmaMatch(imsi=encode_imsi(imsi))
        actions = []  # empty actions == drop
        flows.add_flow(self._datapath,
                       self.tbl_num,
                       match,
                       actions,
                       priority=self.ENFORCE_DROP_PRIORITY)

    def activate_flows(self, imsi, ip_addr, static_rule_ids, dynamic_rules,
                       fut):
        """
        Activate the flows for a subscriber based on the rules stored in Redis.
        During activation, another low priority flow is installed for the
        subscriber in the event that all rules are out of credit.

        Args:
            imsi (string): subscriber id
            ip_addr (string): subscriber session ipv4 address
            static_rule_ids (string []): list of static rules to activate
            dynamic_rules (PolicyRule []): list of dynamic rules to activate
            fut (Future): future to wait on the results of flow activations
        """
        if self._datapath is None:
            self.logger.error('Datapath not initialized for adding flows')
            fut.set_result(
                ActivateFlowsResult(
                    static_rule_results=[
                        RuleModResult(
                            rule_id=rule_id,
                            result=RuleModResult.FAILURE,
                        ) for rule_id in static_rule_ids
                    ],
                    dynamic_rule_results=[
                        RuleModResult(
                            rule_id=rule.id,
                            result=RuleModResult.FAILURE,
                        ) for rule in dynamic_rules
                    ],
                ))
            return
        static_results = []
        for rule_id in static_rule_ids:
            res = self._install_flow_for_static_rule(imsi, ip_addr, rule_id)
            static_results.append(RuleModResult(rule_id=rule_id, result=res))
        dyn_results = []
        for rule in dynamic_rules:
            res = self._install_flow_for_rule(imsi, ip_addr, rule)
            dyn_results.append(RuleModResult(rule_id=rule.id, result=res))

        # No matter what, install base flow to drop packets when all other
        # flows have been deactivated
        self._install_drop_flow(imsi)
        fut.set_result(
            ActivateFlowsResult(
                static_rule_results=static_results,
                dynamic_rule_results=dyn_results,
            ))

    def _deactivate_flow_for_rule(self, imsi, rule_id):
        """
        Deactivate a specific rule using the flow cookie for a subscriber
        """
        try:
            num = self._rule_mapper.get_rule_num(rule_id)
        except KeyError:
            self.logger.error('Could not find rule id %s', rule_id)
            return
        cookie, mask = (num, flows.OVS_COOKIE_MATCH_ALL)
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath,
                          self.tbl_num,
                          match,
                          cookie=cookie,
                          cookie_mask=mask)
        self._redirect_manager.deactivate_flow_for_rule(
            self._datapath, imsi, num)
        self._qos_map.del_queue_for_flow(imsi, num)

    def _deactivate_flows_for_subscriber(self, imsi):
        """ Deactivate all rules for a subscriber, ending any enforcement """
        flows.delete_flow(self._datapath, self.tbl_num,
                          MagmaMatch(imsi=encode_imsi(imsi)))
        self._redirect_manager.deactivate_flows_for_subscriber(
            self._datapath, imsi)
        self._qos_map.del_subscriber_queues(imsi)

    def deactivate_flows(self, imsi, rule_ids):
        """
        Deactivate flows for a subscriber. If only imsi is present, delete all
        rule flows for a subscriber (i.e. end its session). If rule_ids are
        present, delete the rule flows for that subscriber.

        Args:
            imsi (string): subscriber id
            rule_ids (list of strings): policy rule ids
        """
        if self._datapath is None:
            self.logger.error('Datapath not initialized')
            return

        if not imsi:
            self.logger.error('No subscriber specified')
            return

        if not rule_ids:
            self._deactivate_flows_for_subscriber(imsi)
        else:
            for rule_id in rule_ids:
                self._deactivate_flow_for_rule(imsi, rule_id)
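
A minimal sketch (hypothetical controller instance and rule) of how a caller
might drive activate_flows()/deactivate_flows() above; the controller only
calls fut.set_result(), so a concurrent.futures.Future is used here purely
for illustration.

from concurrent.futures import Future

def activate_and_wait(enforcement_controller, imsi, ip_addr, policy_rule):
    """Install one dynamic rule and return its per-rule results."""
    fut = Future()
    enforcement_controller.activate_flows(
        imsi, ip_addr,
        static_rule_ids=[],           # no static (Redis-stored) rules
        dynamic_rules=[policy_rule],  # one dynamic PolicyRule proto
        fut=fut)
    result = fut.result(timeout=5)    # ActivateFlowsResult proto
    return result.dynamic_rule_results

# Tear down a single rule, or the whole session when rule_ids is empty:
# enforcement_controller.deactivate_flows('IMSI001010000000013', ['rule_1'])
# enforcement_controller.deactivate_flows('IMSI001010000000013', [])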
Example #25
0
 def setUp(self):
     self._msg_sender = MessageHub(logging)
     self._mock_datapath = MockDatapath("1234")
     self._mock_datapath
Example #26
0
class IngressController(RestartMixin, MagmaController):
    APP_NAME = "ingress"

    IngressConfig = namedtuple(
        'IngressConfig',
        [
            'uplink_port',
            'mtr_ip',
            'mtr_port',
            'li_port_name',
            'setup_type',
            'he_proxy_port',
        ],
    )

    def __init__(self, *args, **kwargs):
        super(IngressController, self).__init__(*args, **kwargs)
        self.config = self._get_config(kwargs['config'])
        self.logger.info("ingress config: %s", self.config)

        self._li_port = None
        # TODO Alex do we want this to be configurable from swagger?
        if self.config.mtr_ip:
            self._mtr_service_enabled = True
        else:
            self._mtr_service_enabled = False

        if (self._service_manager.is_app_enabled(LIMirrorController.APP_NAME)
                and self.config.li_port_name):
            self._li_port = BridgeTools.get_ofport(self.config.li_port_name)
            self._li_table = self._service_manager.get_table_num(
                LIMirrorController.APP_NAME, )
        self._ingress_tbl_num = self._service_manager.get_table_num(INGRESS)
        # following fields are only used in Non Nat config
        self._clean_restart = kwargs['config']['clean_restart']
        self._msg_hub = MessageHub(self.logger)
        self._datapath = None
        self.tbl_num = self._ingress_tbl_num

    def _get_config(self, config_dict):
        mtr_ip = None
        mtr_port = None
        port_no = config_dict.get('uplink_port', None)

        he_proxy_port = 0
        try:
            if 'proxy_port_name' in config_dict:
                he_proxy_port = BridgeTools.get_ofport(
                    config_dict.get('proxy_port_name'))
        except DatapathLookupError:
            # ignore it
            self.logger.debug("could not parse proxy port config")

        if 'mtr_ip' in config_dict and 'mtr_interface' in config_dict and 'ovs_mtr_port_number' in config_dict:
            self._mtr_service_enabled = True
            mtr_ip = config_dict['mtr_ip']
            mtr_port = config_dict['ovs_mtr_port_number']
        else:
            mtr_ip = None
            mtr_port = None

        li_port_name = None
        if 'li_local_iface' in config_dict:
            li_port_name = config_dict['li_local_iface']

        return self.IngressConfig(
            uplink_port=port_no,
            mtr_ip=mtr_ip,
            mtr_port=mtr_port,
            li_port_name=li_port_name,
            setup_type=config_dict.get('setup_type', None),
            he_proxy_port=he_proxy_port,
        )

    def _get_default_flow_msgs(self, datapath) -> DefaultMsgsMap:
        return {
            self._ingress_tbl_num:
            self._get_default_ingress_flow_msgs(datapath),
        }

    def _get_default_ingress_flow_msgs(self, dp):
        """
        Sets up the ingress table, the first step in the packet processing
        pipeline.

        This sets up flow rules that annotate packets with a metadata bit
        indicating their direction. Incoming packets are defined as packets
        arriving on the LOCAL, uplink or mtr port; outgoing packets are
        defined as packets arriving from the GTP port.

        All other packets bypass the pipeline.

        Note that the ingress rules do *not* install any flows that cause
        PacketIns (i.e., send packets to the controller).

        Raises:
            MagmaOFError if any of the default flows fail to install.
        """
        parser = dp.ofproto_parser
        next_table = self._service_manager.get_next_table_num(INGRESS)
        msgs = []

        # set traffic direction bits

        # set a direction bit for incoming (internet -> UE) traffic from the LOCAL port.
        match = MagmaMatch(in_port=OFPP_LOCAL)
        actions = [load_direction(parser, Direction.IN)]
        msgs.append(
            flows.get_add_resubmit_next_service_flow_msg(
                dp,
                self._ingress_tbl_num,
                match,
                actions=actions,
                priority=flows.DEFAULT_PRIORITY,
                resubmit_table=next_table,
            ), )

        # set a direction bit for incoming (internet -> UE) traffic from the uplink port.
        match = MagmaMatch(in_port=self.config.uplink_port)
        actions = [load_direction(parser, Direction.IN)]
        msgs.append(
            flows.get_add_resubmit_next_service_flow_msg(
                dp,
                self._ingress_tbl_num,
                match,
                actions=actions,
                priority=flows.DEFAULT_PRIORITY,
                resubmit_table=next_table,
            ), )

        # Send RADIUS requests directly to li table
        if self._li_port:
            match = MagmaMatch(in_port=self._li_port)
            actions = [load_direction(parser, Direction.IN)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    match,
                    actions=actions,
                    priority=flows.DEFAULT_PRIORITY,
                    resubmit_table=self._li_table,
                ), )

        # set a direction bit for incoming (mtr -> UE) traffic.
        if self._mtr_service_enabled:
            match = MagmaMatch(in_port=self.config.mtr_port)
            actions = [load_direction(parser, Direction.IN)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    match,
                    actions=actions,
                    priority=flows.DEFAULT_PRIORITY,
                    resubmit_table=next_table,
                ), )

        if self.config.he_proxy_port != 0:
            match = MagmaMatch(in_port=self.config.he_proxy_port)
            actions = [load_direction(parser, Direction.IN)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    match,
                    actions=actions,
                    priority=flows.DEFAULT_PRIORITY,
                    resubmit_table=next_table,
                ), )

        if self.config.setup_type == 'CWF':
            # set the direction bit to OUT (pn -> inet) for all remaining traffic
            ps_match_out = MagmaMatch()
            actions = [load_direction(parser, Direction.OUT)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    ps_match_out,
                    actions=actions,
                    priority=flows.MINIMUM_PRIORITY,
                    resubmit_table=next_table,
                ), )
        else:
            # set the direction bit to OUT (pn -> inet) for all remaining traffic
            # Passthrough is zero for packets from eNodeB GTP tunnels
            ps_match_out = MagmaMatch(passthrough=REG_ZERO_VAL)
            actions = [load_direction(parser, Direction.OUT)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    ps_match_out,
                    actions=actions,
                    priority=flows.MINIMUM_PRIORITY,
                    resubmit_table=next_table,
                ), )

            # Passthrough is one for packets from remote PGW GTP tunnels, set direction
            # flag to IN for such packets.
            ps_match_in = MagmaMatch(passthrough=PASSTHROUGH_REG_VAL)
            actions = [load_direction(parser, Direction.IN)]
            msgs.append(
                flows.get_add_resubmit_next_service_flow_msg(
                    dp,
                    self._ingress_tbl_num,
                    ps_match_in,
                    actions=actions,
                    priority=flows.MINIMUM_PRIORITY,
                    resubmit_table=next_table,
                ), )

        return msgs

    def _install_default_flows(self, datapath):
        default_msg_map = self._get_default_flow_msgs(datapath)
        default_msgs = []

        for _, msgs in default_msg_map.items():
            default_msgs.extend(msgs)
        chan = self._msg_hub.send(default_msgs, datapath)
        self._wait_for_responses(chan, len(default_msgs))

    def _wait_for_responses(self, chan, response_count):
        def fail(err):
            self.logger.error("Failed to install rule with error: %s", err)

        for _ in range(response_count):
            try:
                result = chan.get()
            except MsgChannel.Timeout:
                return fail("No response from OVS msg channel")
            if not result.ok():
                return fail(result.exception())

    def cleanup_on_disconnect(self, datapath):
        if self._clean_restart:
            self.delete_all_flows(datapath)

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)

    def cleanup_state(self):
        pass

    def _get_ue_specific_flow_msgs(self, _):
        return {}

    def finish_init(self, _):
        pass

    def initialize_on_connect(self, datapath):
        self._datapath = datapath
        # TODO possibly investigate stateless XWF (no sessiond)
        if self.config.setup_type == 'XWF':
            self.delete_all_flows(datapath)
            self._install_default_flows(datapath)

    def delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self.tbl_num)
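
A short sketch of how _get_config above turns a pipelined service config dict into the IngressConfig tuple; the values below are hypothetical and only illustrate which keys the parser looks at.

# Hypothetical config, not taken from a real gateway.
example_config = {
    'uplink_port': 2,               # kept as IngressConfig.uplink_port
    'mtr_ip': '10.1.1.1',           # all three mtr keys present -> mtr flows enabled
    'mtr_interface': 'mtr0',
    'ovs_mtr_port_number': 15,      # becomes IngressConfig.mtr_port
    'li_local_iface': 'li_port',    # becomes IngressConfig.li_port_name
    'setup_type': 'LTE',
    # 'proxy_port_name' omitted -> he_proxy_port stays 0, no proxy ingress flow
}
# ingress_config = ingress_controller._get_config(example_config)
# ingress_config.mtr_port == 15 and ingress_config.setup_type == 'LTE'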
Example #27
0
class HeTableTest(unittest.TestCase):
    BRIDGE = 'testing_br'
    IFACE = 'testing_br'
    MAC_DEST = "5e:cc:cc:b1:49:4b"
    BRIDGE_IP = '192.168.128.1'
    UE_BLOCK = '192.168.128.0/24'
    UE_MAC = '5e:cc:cc:b1:49:4b'
    UE_IP = '192.168.128.22'
    OTHER_MAC = '0a:00:27:00:00:02'
    OTHER_IP = '1.2.3.4'
    VETH = 'tveth'
    VETH_NS = 'tveth_ns'
    PROXY_PORT = '15'

    @classmethod
    @unittest.mock.patch('netifaces.ifaddresses',
                         return_value=[[{
                             'addr': '00:11:22:33:44:55'
                         }]])
    @unittest.mock.patch('netifaces.AF_LINK', 0)
    def setUpClass(cls, *_):
        """
        Starts the thread which launches the ryu apps.

        Create a testing bridge, add a port, and set up the port interfaces.
        Then launch the ryu apps for testing pipelined. Get references to the
        launched apps using futures.
        """
        he.activate_he_urls_for_ue = mocked_activate_he_urls_for_ue
        he.deactivate_he_urls_for_ue = mocked_deactivate_he_urls_for_ue

        super(HeTableTest, cls).setUpClass()
        warnings.simplefilter('ignore')
        cls.service_manager = create_service_manager([], ['proxy'])
        cls._tbl_num = cls.service_manager.get_table_num(
            HeaderEnrichmentController.APP_NAME)

        BridgeTools.create_veth_pair(cls.VETH, cls.VETH_NS)
        BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
        BridgeTools.add_ovs_port(cls.BRIDGE, cls.VETH, cls.PROXY_PORT)

        he_controller_reference = Future()
        testing_controller_reference = Future()

        test_setup = TestSetup(
            apps=[
                PipelinedController.HeaderEnrichment,
                PipelinedController.Testing, PipelinedController.StartupFlows
            ],
            references={
                PipelinedController.HeaderEnrichment: he_controller_reference,
                PipelinedController.Testing: testing_controller_reference,
                PipelinedController.StartupFlows: Future(),
            },
            config={
                'setup_type': 'LTE',
                'bridge_name': cls.BRIDGE,
                'bridge_ip_address': cls.BRIDGE_IP,
                'uplink_port': 20,
                'proxy_port_name': cls.VETH,
                'clean_restart': True,
                'enable_nat': True,
                'ovs_gtp_port_number': 10,
            },
            mconfig=PipelineD(ue_ip_block=cls.UE_BLOCK, ),
            loop=None,
            service_manager=cls.service_manager,
            integ_test=False,
        )

        cls.thread = start_ryu_app_thread(test_setup)
        cls.he_controller = he_controller_reference.result()
        cls.testing_controller = testing_controller_reference.result()

    def _wait_for_responses(self, chan, response_count, logger):
        def fail(err):
            logger.error("Failed to install rule for subscriber: %s", err)

        for _ in range(response_count):
            try:
                result = chan.get()

            except MsgChannel.Timeout:
                return fail("No response from OVS policy mixin")
            if not result.ok():
                return fail(result.exception())

    @classmethod
    def tearDownClass(cls):
        stop_ryu_app_thread(cls.thread)
        BridgeTools.destroy_bridge(cls.BRIDGE)

    def tearDown(self):
        cls = self.__class__
        dp = HeTableTest.he_controller._datapath
        cls.he_controller.delete_all_flows(dp)

    def test_default_flows(self):
        """
        Verify that the default proxy flows are set up
        """

        snapshot_verifier = SnapshotVerifier(
            self,
            self.BRIDGE,
            self.service_manager,
            max_sleep_time=20,
            datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass

    def test_ue_flows_add(self):
        """
        Verify that subscriber proxy flows are set up
        """
        cls = self.__class__
        self._msg_hub = MessageHub(HeTableTest.he_controller.logger)

        ue_ip = '1.1.1.1'
        tun_id = 1
        dest_server = '2.2.2.2'
        flow_msg = cls.he_controller.get_subscriber_he_flows(
            "rule1", Direction.OUT, ue_ip, tun_id, dest_server, 123,
            ['abc.com'], 'IMSI01', b'1')
        chan = self._msg_hub.send(
            flow_msg,
            HeTableTest.he_controller._datapath,
        )
        self._wait_for_responses(chan, len(flow_msg),
                                 HeTableTest.he_controller.logger)

        snapshot_verifier = SnapshotVerifier(
            self,
            self.BRIDGE,
            self.service_manager,
            max_sleep_time=20,
            datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass

    def test_ue_flows_add_direction_in(self):
        """
        Verify that subscriber proxy flows are set up for the IN direction
        """
        cls = self.__class__
        self._msg_hub = MessageHub(HeTableTest.he_controller.logger)

        ue_ip = '1.1.1.1'
        tun_id = 1
        dest_server = '2.2.2.2'
        flow_msg = cls.he_controller.get_subscriber_he_flows(
            "rule1", Direction.IN, ue_ip, tun_id, dest_server, 123,
            ['abc.com'], 'IMSI01', b'1')
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip), 0)
        chan = self._msg_hub.send(
            flow_msg,
            HeTableTest.he_controller._datapath,
        )
        self._wait_for_responses(chan, len(flow_msg),
                                 HeTableTest.he_controller.logger)

        snapshot_verifier = SnapshotVerifier(
            self,
            self.BRIDGE,
            self.service_manager,
            max_sleep_time=20,
            datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass

    def test_ue_flows_add2(self):
        """
        Verify that proxy flows are set up for two subscribers
        """
        cls = self.__class__
        self._msg_hub = MessageHub(HeTableTest.he_controller.logger)
        dp = HeTableTest.he_controller._datapath
        ue_ip1 = '1.1.1.200'
        tun_id1 = 1
        dest_server1 = '2.2.2.4'
        rule1 = 123
        flow_msg = cls.he_controller.get_subscriber_he_flows(
            "rule1", Direction.OUT, ue_ip1, tun_id1, dest_server1, rule1,
            ['abc.com'], 'IMSI01', b'1')

        ue_ip2 = '10.10.10.20'
        tun_id2 = 2
        dest_server2 = '20.20.20.40'
        rule2 = 1230
        flow_msg.extend(
            cls.he_controller.get_subscriber_he_flows("rule2", Direction.OUT,
                                                      ue_ip2, tun_id2,
                                                      dest_server2, rule2,
                                                      ['abc.com'], 'IMSI01',
                                                      b'1'))
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip1), 1)
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip2), 1)

        chan = self._msg_hub.send(flow_msg, dp)
        self._wait_for_responses(chan, len(flow_msg),
                                 HeTableTest.he_controller.logger)

        snapshot_verifier = SnapshotVerifier(
            self,
            self.BRIDGE,
            self.service_manager,
            max_sleep_time=20,
            datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass

    def test_ue_flows_del(self):
        """
        Verify that proxy flows for a single rule of a subscriber are removed
        """
        cls = self.__class__
        self._msg_hub = MessageHub(HeTableTest.he_controller.logger)
        dp = HeTableTest.he_controller._datapath
        ue_ip1 = '1.1.1.200'
        tun_id1 = 1

        dest_server1 = '2.2.2.4'
        rule1 = 123
        flow_msg = cls.he_controller.get_subscriber_he_flows(
            'rule1', Direction.OUT, ue_ip1, tun_id1, dest_server1, rule1,
            ['abc.com'], 'IMSI01', b'1')

        ue_ip2 = '10.10.10.20'
        tun_id2 = 2
        dest_server2 = '20.20.20.40'
        rule2 = 1230
        flow_msg2 = cls.he_controller.get_subscriber_he_flows(
            'rule2', Direction.OUT, ue_ip2, tun_id2, dest_server2, rule2,
            ['abc.com'], 'IMSI01', b'1')
        flow_msg.extend(flow_msg2)
        chan = self._msg_hub.send(flow_msg, dp)
        self._wait_for_responses(chan, len(flow_msg),
                                 HeTableTest.he_controller.logger)

        cls.he_controller.remove_subscriber_he_flows(
            convert_ip_str_to_ip_proto(ue_ip2), 'rule2', rule2)

        cls.he_controller.remove_subscriber_he_flows(
            convert_ip_str_to_ip_proto(ue_ip2), 'rule_random', 3223)

        snapshot_verifier = SnapshotVerifier(
            self,
            self.BRIDGE,
            self.service_manager,
            max_sleep_time=20,
            datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass

    def test_ue_flows_del2(self):
        """
        Verify that all proxy flows for a subscriber are removed
        """
        cls = self.__class__
        self._msg_hub = MessageHub(HeTableTest.he_controller.logger)
        dp = HeTableTest.he_controller._datapath
        ue_ip1 = '1.1.1.200'
        tun_id1 = 1
        dest_server1 = '2.2.2.4'
        rule1 = 123
        flow_msg = cls.he_controller.get_subscriber_he_flows(
            'rule1', Direction.OUT, ue_ip1, tun_id1, dest_server1, rule1,
            ['abc.com'], 'IMSI01', b'1')

        ue_ip2 = '10.10.10.20'
        tun_id2 = 2
        dest_server2 = '20.20.20.40'
        rule2 = 1230
        flow_msg.extend(
            cls.he_controller.get_subscriber_he_flows('rule2', Direction.OUT,
                                                      ue_ip2, tun_id2,
                                                      dest_server2, rule2,
                                                      ['abc.com'], 'IMSI01',
                                                      b'1'))
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip1), 1)
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip2), 1)

        ue_ip2 = '10.10.10.20'
        dest_server2 = '20.20.40.40'
        rule2 = 1230
        flow_msg.extend(
            cls.he_controller.get_subscriber_he_flows('rule2', Direction.OUT,
                                                      ue_ip2, tun_id2,
                                                      dest_server2, rule2,
                                                      ['abc.com'], 'IMSI01',
                                                      None))

        chan = self._msg_hub.send(flow_msg, dp)
        self._wait_for_responses(chan, len(flow_msg),
                                 HeTableTest.he_controller.logger)

        cls.he_controller.remove_subscriber_he_flows(
            convert_ip_str_to_ip_proto(ue_ip2))

        snapshot_verifier = SnapshotVerifier(
            self,
            self.BRIDGE,
            self.service_manager,
            max_sleep_time=20,
            datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass
        # verify that repeated removal works.
        cls.he_controller.remove_subscriber_he_flows(
            convert_ip_str_to_ip_proto(ue_ip2))
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip2), 0)

    def test_ue_flows_multi_rule(self):
        """
        Verify proxy flows for multiple rules of the same subscriber
        """
        cls = self.__class__
        self._msg_hub = MessageHub(HeTableTest.he_controller.logger)
        dp = HeTableTest.he_controller._datapath
        ue_ip1 = '1.1.1.200'
        tun_id1 = 1
        dest_server1 = '2.2.2.4'
        rule1 = 123
        flow_msg = cls.he_controller.get_subscriber_he_flows(
            'rule1', Direction.OUT, ue_ip1, tun_id1, dest_server1, rule1,
            ['abc.com'], 'IMSI01', b'1')

        tun_id2 = 2
        dest_server2 = '20.20.20.40'
        rule2 = 1230
        flow_msg.extend(
            cls.he_controller.get_subscriber_he_flows('rule2', Direction.OUT,
                                                      ue_ip1, tun_id2,
                                                      dest_server2, rule2,
                                                      ['abc1.com'], 'IMSI01',
                                                      b'1'))
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip1), 2)

        dest_server2 = '20.20.40.40'
        rule3 = 1230
        flow_msg.extend(
            cls.he_controller.get_subscriber_he_flows('rule3', Direction.OUT,
                                                      ue_ip1, tun_id2,
                                                      dest_server2, rule3,
                                                      ['abc2.com'], 'IMSI01',
                                                      None))

        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip1), 3)

        dest_server2 = '20.20.50.50'
        rule4 = 22
        flow_msg.extend(
            cls.he_controller.get_subscriber_he_flows('rule4', Direction.OUT,
                                                      ue_ip1, tun_id2,
                                                      dest_server2, rule4,
                                                      ['abc2.com'], 'IMSI01',
                                                      None))

        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip1), 4)

        chan = self._msg_hub.send(flow_msg, dp)
        self._wait_for_responses(chan, len(flow_msg),
                                 HeTableTest.he_controller.logger)

        cls.he_controller.remove_subscriber_he_flows(
            convert_ip_str_to_ip_proto(ue_ip1), "rule1", rule1)

        snapshot_verifier = SnapshotVerifier(
            self,
            self.BRIDGE,
            self.service_manager,
            max_sleep_time=20,
            datapath=HeTableTest.he_controller._datapath)

        with snapshot_verifier:
            pass
        # verify that repeated removal works.
        cls.he_controller.remove_subscriber_he_flows(
            convert_ip_str_to_ip_proto(ue_ip1), "rule2", rule2)
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip1), 2)
        cls.he_controller.remove_subscriber_he_flows(
            convert_ip_str_to_ip_proto(ue_ip1))
        self.assertEqual(cls.he_controller._ue_rule_counter.get(ue_ip1), 0)
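
The assertions above rely on the controller's per-UE rule counter. A toy counter with the same observable behaviour (a sketch only, not the controller's actual implementation) could look like this:

from collections import defaultdict

class UeRuleCounter:
    """Counts active header-enrichment rules per UE IP address."""

    def __init__(self):
        self._counts = defaultdict(int)

    def inc(self, ue_ip):
        self._counts[ue_ip] += 1

    def dec(self, ue_ip):
        if self._counts[ue_ip] > 0:
            self._counts[ue_ip] -= 1

    def delete(self, ue_ip):
        # Removing all flows for a UE resets its count to zero.
        self._counts[ue_ip] = 0

    def get(self, ue_ip):
        return self._counts[ue_ip]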
Example #28
0
class EnforcementController(PolicyMixin, RestartMixin, MagmaController):
    """
    EnforcementController

    The enforcement controller installs flows for policy enforcement and
    classification. Each flow installed matches on a rule and an IMSI and then
    classifies the packet with the rule. The flow also redirects and drops
    the packet as specified in the policy.

    NOTE: Enforcement currently relies on the fact that policies do not
    overlap. In this implementation, there is the idea of a 'default rule'
    which is the catch-all. This rule is treated specially and tagged with a
    specific priority.
    """

    APP_NAME = "enforcement"
    APP_TYPE = ControllerType.LOGICAL
    DEFAULT_FLOW_COOKIE = 0xfffffffffffffffe

    def __init__(self, *args, **kwargs):
        super(EnforcementController, self).__init__(*args, **kwargs)
        self._config = kwargs['config']
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_main_table = self._service_manager.get_next_table_num(
            EnforcementStatsController.APP_NAME)
        self._enforcement_stats_tbl = self._service_manager.get_table_num(
            EnforcementStatsController.APP_NAME)
        self.loop = kwargs['loop']

        self._msg_hub = MessageHub(self.logger)
        self._redirect_scratch = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
        self._bridge_ip_address = kwargs['config']['bridge_ip_address']
        self._redirect_manager = None
        self._qos_mgr = None
        self._clean_restart = kwargs['config']['clean_restart']
        self._redirect_manager = RedirectionManager(
            self._bridge_ip_address, self.logger, self.tbl_num,
            self._enforcement_stats_tbl, self.next_main_table,
            self._redirect_scratch, self._session_rule_version_mapper)

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath
        self._qos_mgr = QosManager.get_qos_manager(datapath, self.loop,
                                                   self._config)

    def cleanup_on_disconnect(self, datapath):
        """
        Cleanup flows on datapath disconnect event.

        Args:
            datapath: ryu datapath struct
        """
        if self._clean_restart:
            self.delete_all_flows(datapath)

    def delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self.tbl_num)
        flows.delete_all_flows_from_table(datapath, self._redirect_scratch)

    def cleanup_state(self):
        pass

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)

    @set_ev_cls(ofp_event.EventOFPMeterConfigStatsReply, MAIN_DISPATCHER)
    def meter_config_stats_reply_handler(self, ev):
        if not self._qos_mgr:
            return

        qos_impl = self._qos_mgr.impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_config_stats(ev.msg.body)

    @set_ev_cls(ofp_event.EventOFPMeterFeaturesStatsReply, MAIN_DISPATCHER)
    def meter_features_stats_reply_handler(self, ev):
        if not self._qos_mgr:
            return

        qos_impl = self._qos_mgr.impl
        if qos_impl and isinstance(qos_impl, MeterManager):
            qos_impl.handle_meter_feature_stats(ev.msg.body)

    def _get_default_flow_msgs(self, datapath) -> DefaultMsgsMap:
        """
        Gets the default flow msg that forwards to the stats table (traffic
        will be dropped because the stats table doesn't forward anything)

        Args:
            datapath: ryu datapath struct
        Returns:
            The list of default msgs to add
        """
        match = MagmaMatch()
        msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath,
            self.tbl_num,
            match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self._enforcement_stats_tbl,
            cookie=self.DEFAULT_FLOW_COOKIE)

        return {self.tbl_num: [msg]}

    def _get_rule_match_flow_msgs(self, imsi, msisdn: bytes,
                                  uplink_tunnel: int, ip_addr, apn_ambr, rule):
        """
        Get flow msgs to enforce a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            msisdn (bytes): subscriber MSISDN
            uplink_tunnel (int): tunnel ID of the subscriber.
            ip_addr (string): subscriber session ipv4 address
            apn_ambr: aggregate maximum bit rate for the subscriber APN
            rule (PolicyRule): policy rule proto
        """
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = Utils.get_of_priority(rule.priority)

        flow_adds = []
        for flow in rule.flow_list:
            try:
                version = self._session_rule_version_mapper.get_version(
                    imsi, ip_addr, rule.id)
                flow_adds.extend(
                    self._get_classify_rule_flow_msgs(
                        imsi, msisdn, uplink_tunnel, ip_addr, apn_ambr, flow,
                        rule_num, priority, rule.qos, rule.hard_timeout,
                        rule.id, rule.app_name, rule.app_service_type,
                        self.next_main_table, version, self._qos_mgr,
                        self._enforcement_stats_tbl, rule.he.urls))

            except FlowMatchError as err:  # invalid match
                self.logger.error(
                    "Failed to get flow msg '%s' for subscriber %s: %s",
                    rule.id, imsi, err)
                raise err
        return flow_adds

    def _install_flow_for_rule(self, imsi, msisdn: bytes, uplink_tunnel: int,
                               ip_addr, apn_ambr, rule):
        """
        Install flows to enforce a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            msisdn (bytes): subscriber MSISDN
            uplink_tunnel (int): tunnel ID of the subscriber.
            ip_addr (string): subscriber session ipv4 address
            apn_ambr: aggregate maximum bit rate for the subscriber APN
            rule (PolicyRule): policy rule proto
        """
        if rule.redirect.support == rule.redirect.ENABLED:
            return self._install_redirect_flow(imsi, ip_addr, rule)

        if not rule.flow_list:
            self.logger.error(
                'The flow list for imsi %s, rule.id - %s '
                'is empty, this shouldn\'t happen', imsi, rule.id)
            return RuleModResult.FAILURE

        flow_adds = []
        try:
            flow_adds = self._get_rule_match_flow_msgs(imsi, msisdn,
                                                       uplink_tunnel, ip_addr,
                                                       apn_ambr, rule)
        except FlowMatchError:
            return RuleModResult.FAILURE

        try:
            chan = self._msg_hub.send(flow_adds, self._datapath)
        except MagmaDPDisconnectedError:
            self.logger.error(
                "Datapath disconnected, failed to install rule %s"
                "for imsi %s", rule, imsi)
            return RuleModResult.FAILURE
        return self._wait_for_rule_responses(imsi, ip_addr, rule, chan)

    def _install_redirect_flow(self, imsi, ip_addr, rule):
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        rule_version = self._session_rule_version_mapper.get_version(
            imsi, ip_addr, rule.id)
        priority = Utils.get_of_priority(rule.priority)
        redirect_request = RedirectionManager.RedirectRequest(
            imsi=imsi,
            ip_addr=ip_addr.address.decode('utf-8'),
            rule=rule,
            rule_num=rule_num,
            rule_version=rule_version,
            priority=priority)
        try:
            self._redirect_manager.setup_lte_redirect(self._datapath,
                                                      self.loop,
                                                      redirect_request)
            return RuleModResult.SUCCESS
        except RedirectException as err:
            self.logger.error(
                'Redirect Exception for imsi %s, rule.id - %s : %s', imsi,
                rule.id, err)
            return RuleModResult.FAILURE

    def _get_default_flow_msgs_for_subscriber(self, *_):
        pass

    def _install_default_flow_for_subscriber(self, *_):
        pass

    def _deactivate_flow_for_rule(self, imsi, ip_addr, rule_id):
        """
        Deactivate a specific rule using the flow cookie for a subscriber
        """
        try:
            num = self._rule_mapper.get_rule_num(rule_id)
        except KeyError:
            self.logger.error('Could not find rule id %s', rule_id)
            return
        if num is None:
            self.logger.error('Rule num is None for rule %s', rule_id)
            return
        cookie, mask = (num, flows.OVS_COOKIE_MATCH_ALL)

        ip_match_in = get_ue_ip_match_args(ip_addr, Direction.IN)
        match = MagmaMatch(eth_type=get_eth_type(ip_addr),
                           imsi=encode_imsi(imsi),
                           **ip_match_in)
        flows.delete_flow(self._datapath,
                          self.tbl_num,
                          match,
                          cookie=cookie,
                          cookie_mask=mask)
        ip_match_out = get_ue_ip_match_args(ip_addr, Direction.OUT)
        match = MagmaMatch(eth_type=get_eth_type(ip_addr),
                           imsi=encode_imsi(imsi),
                           **ip_match_out)
        flows.delete_flow(self._datapath,
                          self.tbl_num,
                          match,
                          cookie=cookie,
                          cookie_mask=mask)
        self._redirect_manager.deactivate_flow_for_rule(
            self._datapath, imsi, num)
        self._qos_mgr.remove_subscriber_qos(imsi, num)
        self._remove_he_flows(ip_addr, rule_id, num)

    def _deactivate_flows_for_subscriber(self, imsi, ip_addr):
        """ Deactivate all rules for specified subscriber session """
        ip_match_in = get_ue_ip_match_args(ip_addr, Direction.IN)
        match = MagmaMatch(eth_type=get_eth_type(ip_addr),
                           imsi=encode_imsi(imsi),
                           **ip_match_in)
        flows.delete_flow(self._datapath, self.tbl_num, match)
        ip_match_out = get_ue_ip_match_args(ip_addr, Direction.OUT)
        match = MagmaMatch(eth_type=get_eth_type(ip_addr),
                           imsi=encode_imsi(imsi),
                           **ip_match_out)
        flows.delete_flow(self._datapath, self.tbl_num, match)

        self._redirect_manager.deactivate_flows_for_subscriber(
            self._datapath, imsi)
        self._qos_mgr.remove_subscriber_qos(imsi)
        self._remove_he_flows(ip_addr)

    def deactivate_rules(self, imsi, ip_addr, rule_ids):
        """
        Deactivate flows for a subscriber.
            Only imsi -> remove all rules for imsi
            imsi+ipv4 -> remove all rules for imsi session
            imsi+rule_ids -> remove specific rules for imsi (for all sessions)
            imsi+ipv4+rule_ids -> remove rules for specific imsi session

        Args:
            imsi (string): subscriber id
            ip_addr (string): subscriber ip address
            rule_ids (list of strings): policy rule ids
        """
        if not self.init_finished:
            self.logger.error('Pipelined is not initialized')
            return RuleModResult.FAILURE

        if self._datapath is None:
            self.logger.error('Datapath not initialized')
            return

        if not imsi:
            self.logger.error('No subscriber specified')
            return

        if not rule_ids:
            self._deactivate_flows_for_subscriber(imsi, ip_addr)
        else:
            for rule_id in rule_ids:
                self._deactivate_flow_for_rule(imsi, ip_addr, rule_id)

    def recover_state(self, _):
        pass
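
A minimal usage sketch for deactivate_rules above, covering the combinations listed in its docstring; enf, the IMSI, IP address and rule ids are placeholders, not values from the source.

def example_deactivate_rules(enf, ip_addr):
    imsi = 'IMSI001010000000013'
    # imsi + ip_addr, no rule_ids: remove every rule for this subscriber session.
    enf.deactivate_rules(imsi, ip_addr, rule_ids=[])
    # imsi + ip_addr + rule_ids: remove only the named rules for this session.
    enf.deactivate_rules(imsi, ip_addr, rule_ids=['allow_dns', 'redirect_captive'])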
Example #29
0
class EnforcementStatsController(PolicyMixin, MagmaController):
    """
    This openflow controller installs flows for aggregating policy usage
    statistics, which are sent to sessiond for tracking.

    It periodically polls OVS for flow stats on its table and reports the
    usage records to session manager via RPC. Flows are deleted when their
    version (reg4 match) is different from the current version of the rule for
    the subscriber maintained by the rule version mapper.
    """

    APP_NAME = 'enforcement_stats'
    SESSIOND_RPC_TIMEOUT = 10
    # 0xffffffffffffffff is reserved in openflow
    DEFAULT_FLOW_COOKIE = 0xfffffffffffffffe

    _CONTEXTS = {
        'dpset': dpset.DPSet,
    }

    def __init__(self, *args, **kwargs):
        super(EnforcementStatsController, self).__init__(*args, **kwargs)
        # No need to report usage if relay mode is not enabled.
        self._relay_enabled = kwargs['mconfig'].relay_enabled
        if not self._relay_enabled:
            self.logger.info('Relay mode is not enabled. '
                             'enforcement_stats will not report usage.')
            return
        self.tbl_num = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 1)[0]
        self.next_table = \
            self._service_manager.get_next_table_num(self.APP_NAME)
        self.dpset = kwargs['dpset']
        self.loop = kwargs['loop']
        # Spawn a thread to poll for flow stats
        poll_interval = kwargs['config']['enforcement']['poll_interval']
        self.flow_stats_thread = hub.spawn(self._monitor, poll_interval)
        # Create an RPC channel to sessiond
        self.sessiond = kwargs['rpc_stubs']['sessiond']
        self._msg_hub = MessageHub(self.logger)
        self.unhandled_stats_msgs = []  # Store multi-part responses from ovs
        self.total_usage = {}  # Store total usage
        # Store last usage excluding deleted flows for calculating deltas
        self.last_usage_for_delta = {}
        self.failed_usage = {}  # Store failed usage to retry rpc to sessiond

    def _check_relay(func):  # pylint: disable=no-self-argument
        def wrapped(self, *args, **kwargs):
            if self._relay_enabled:  # pylint: disable=protected-access
                func(self, *args, **kwargs)  # pylint: disable=not-callable

        return wrapped

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath
        if self._relay_enabled:
            flows.delete_all_flows_from_table(datapath, self.tbl_num)
            self._install_default_flows(datapath)

    def _install_default_flows(self, datapath):
        """
        If no flows are matched, simply forward the traffic.
        """
        match = MagmaMatch()
        flows.add_resubmit_next_service_flow(datapath,
                                             self.tbl_num,
                                             match, [],
                                             priority=flows.MINIMUM_PRIORITY,
                                             resubmit_table=self.next_table,
                                             cookie=self.DEFAULT_FLOW_COOKIE)

    @_check_relay
    def cleanup_on_disconnect(self, datapath):
        """
        Cleanup flows on datapath disconnect event.

        Args:
            datapath: ryu datapath struct
        """
        flows.delete_all_flows_from_table(datapath, self.tbl_num)

    def _install_flow_for_rule(self, imsi, ip_addr, rule):
        """
        Install a flow to get stats for a particular rule. Flows will match on
        IMSI, cookie (the rule num), in/out direction

        Args:
            imsi (string): subscriber to install rule for
            ip_addr (string): subscriber session ipv4 address
            rule (PolicyRule): policy rule proto
        """
        # Do not install anything if relay is disabled
        if not self._relay_enabled:
            return RuleModResult.SUCCESS

        def fail(err):
            self.logger.error(
                "Failed to install rule %s for subscriber %s: %s", rule.id,
                imsi, err)
            return RuleModResult.FAILURE

        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        msgs = self._get_rule_match_flow_msgs(imsi, rule_num, rule.id)

        chan = self._msg_hub.send(msgs, self._datapath)
        for _ in range(len(msgs)):
            try:
                result = chan.get()
            except MsgChannel.Timeout:
                return fail("No response from OVS")
            if not result.ok():
                return fail(result.exception())

        return RuleModResult.SUCCESS

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    @_check_relay
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    @_check_relay
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)

    def _get_rule_match_flow_msgs(self, imsi, rule_num, rule_id):
        """
        Returns flow add messages used for rule matching.
        """
        version = self._session_rule_version_mapper.get_version(imsi, rule_id)
        self.logger.debug(
            'Installing flow for %s with rule num %s (version %s)', imsi,
            rule_num, version)
        inbound_rule_match = _generate_rule_match(imsi, rule_num, version,
                                                  Direction.IN)
        outbound_rule_match = _generate_rule_match(imsi, rule_num, version,
                                                   Direction.OUT)

        return [
            flows.get_add_resubmit_next_service_flow_msg(
                self._datapath,
                self.tbl_num,
                inbound_rule_match, [],
                priority=flows.DEFAULT_PRIORITY,
                cookie=rule_num,
                resubmit_table=self.next_table),
            flows.get_add_resubmit_next_service_flow_msg(
                self._datapath,
                self.tbl_num,
                outbound_rule_match, [],
                priority=flows.DEFAULT_PRIORITY,
                cookie=rule_num,
                resubmit_table=self.next_table),
        ]

    def _install_default_flow_for_subscriber(self, imsi):
        pass

    def get_policy_usage(self, fut):
        if not self._relay_enabled:
            fut.set_exception(RelayDisabledException())
            return

        record_table = RuleRecordTable(records=self.total_usage.values())
        fut.set_result(record_table)

    def _monitor(self, poll_interval):
        """
        Main thread that sends a stats request at the configured interval in
        seconds.
        """
        while True:
            for _, datapath in self.dpset.get_all():
                self._poll_stats(datapath)
            hub.sleep(poll_interval)

    def _poll_stats(self, datapath):
        """
        Send a FlowStatsRequest message to the datapath
        """
        ofproto, parser = datapath.ofproto, datapath.ofproto_parser
        req = parser.OFPFlowStatsRequest(
            datapath,
            table_id=self.tbl_num,
            out_group=ofproto.OFPG_ANY,
            out_port=ofproto.OFPP_ANY,
        )
        try:
            messages.send_msg(datapath, req)
        except MagmaOFError as e:
            self.logger.warning("Couldn't poll datapath stats: %s", e)

    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    @_check_relay
    def _flow_stats_reply_handler(self, ev):
        """
        Schedule the flow stats handling in the main event loop, so as to
        unblock the ryu event loop
        """
        self.unhandled_stats_msgs.append(ev.msg.body)
        if ev.msg.flags == OFPMPF_REPLY_MORE:
            # Wait for the remaining multi-part responses to this single
            # stats request.
            return
        self.loop.call_soon_threadsafe(self._handle_flow_stats,
                                       self.unhandled_stats_msgs)
        self.unhandled_stats_msgs = []

    def _handle_flow_stats(self, stats_msgs):
        """
        Aggregate flow stats by rule, and report to session manager
        """
        stat_count = sum(len(flow_stats) for flow_stats in stats_msgs)
        if stat_count == 0:
            return

        self.logger.debug("Processing %s stats responses", len(stats_msgs))
        # Aggregate flows into rule records
        current_usage = defaultdict(RuleRecord)
        for flow_stats in stats_msgs:
            self.logger.debug("Processing stats of %d flows", len(flow_stats))
            for stat in flow_stats:
                if stat.table_id != self.tbl_num:
                    # this update is not intended for policy
                    return
                current_usage = self._update_usage_from_flow_stat(
                    current_usage, stat)

        # Calculate the delta values from last stat update
        delta_usage = _delta_usage_maps(current_usage,
                                        self.last_usage_for_delta)
        self.total_usage = current_usage

        # Append any records which we couldn't send to session manager earlier
        delta_usage = _merge_usage_maps(delta_usage, self.failed_usage)
        self.failed_usage = {}

        # Send report even if usage is empty. Sessiond uses empty reports to
        # recognize when flows have ended
        self._report_usage(delta_usage)

        self._delete_old_flows(stats_msgs)

    def _report_usage(self, delta_usage):
        """
        Report usage to sessiond using rpc
        """
        record_table = RuleRecordTable(records=delta_usage.values())
        future = self.sessiond.ReportRuleStats.future(
            record_table, self.SESSIOND_RPC_TIMEOUT)
        future.add_done_callback(lambda future: self.loop.call_soon_threadsafe(
            self._report_usage_done, future, delta_usage))

    def _report_usage_done(self, future, delta_usage):
        """
        Callback after sessiond RPC completion
        """
        err = future.exception()
        if err:
            self.logger.error("Couldn't send flow records to sessiond: %s", err)
            self.failed_usage = _merge_usage_maps(delta_usage,
                                                  self.failed_usage)

    def _update_usage_from_flow_stat(self, current_usage, flow_stat):
        """
        Update the rule record map with the flow stat and return the
        updated map.
        """
        rule_id = self._get_rule_id(flow_stat)
        # Rule not found, must be default flow
        if rule_id == "":
            default_flow_matched = \
                flow_stat.cookie == self.DEFAULT_FLOW_COOKIE and \
                flow_stat.byte_count != 0
            if default_flow_matched:
                self.logger.error('%s bytes total not reported.',
                                  flow_stat.byte_count)
            return current_usage
        sid = _get_sid(flow_stat)

        # use a compound key to separate flows for the same rule but for
        # different subscribers
        key = sid + "|" + rule_id
        record = current_usage[key]
        record.rule_id = rule_id
        record.sid = sid
        if flow_stat.match[DIRECTION_REG] == Direction.IN:
            # HACK decrement byte count for downlink packets by the length
            # of an ethernet frame. Only IP and below should be counted towards
            # a user's data. Uplink does this already because the GTP port is
            # an L3 port.
            record.bytes_rx += _get_downlink_byte_count(flow_stat)
        else:
            record.bytes_tx += flow_stat.byte_count
        current_usage[key] = record
        return current_usage

    def _delete_old_flows(self, stats_msgs):
        """
        Check if the version of any flow is older than the current version. If
        so, delete the flow and update last_usage_for_delta so we calculate the
        correct usage delta for the next poll.
        """
        deleted_flow_usage = defaultdict(RuleRecord)
        for deletable_stat in self._old_flow_stats(stats_msgs):
            stat_rule_id = self._get_rule_id(deletable_stat)
            stat_sid = _get_sid(deletable_stat)
            rule_version = _get_version(deletable_stat)

            try:
                self._delete_flow(deletable_stat, stat_sid, rule_version)
                # Only remove the usage of the deleted flow if deletion
                # is successful.
                self._update_usage_from_flow_stat(deleted_flow_usage,
                                                  deletable_stat)
            except MagmaOFError as e:
                self.logger.error(
                    'Failed to delete rule %s for subscriber %s '
                    '(version: %s): %s', stat_rule_id, stat_sid, rule_version,
                    e)

        self.last_usage_for_delta = _delta_usage_maps(self.total_usage,
                                                      deleted_flow_usage)

    def _old_flow_stats(self, stats_msgs):
        """
        Generator function to filter the flow stats that should be deleted from
        the stats messages.
        """
        for flow_stats in stats_msgs:
            for stat in flow_stats:
                if stat.table_id != self.tbl_num:
                    # this update is not intended for policy
                    return

                rule_id = self._get_rule_id(stat)
                sid = _get_sid(stat)
                rule_version = _get_version(stat)
                if rule_id == "":
                    continue

                current_ver = \
                    self._session_rule_version_mapper.get_version(sid, rule_id)
                if current_ver != rule_version:
                    yield stat

    def _delete_flow(self, flow_stat, sid, version):
        cookie, mask = (flow_stat.cookie, flows.OVS_COOKIE_MATCH_ALL)
        match = _generate_rule_match(sid, flow_stat.cookie, version,
                                     Direction(flow_stat.match[DIRECTION_REG]))
        flows.delete_flow(self._datapath,
                          self.tbl_num,
                          match,
                          cookie=cookie,
                          cookie_mask=mask)

    def _get_rule_id(self, flow):
        """
        Return the rule id from the rule cookie
        """
        # the default rule will have a cookie of 0
        rule_num = flow.cookie
        if rule_num == 0 or rule_num == self.DEFAULT_FLOW_COOKIE:
            return ""
        try:
            return self._rule_mapper.get_rule_id(rule_num)
        except KeyError as e:
            self.logger.error('Could not find rule id for num %d: %s',
                              rule_num, e)
            return ""
Example #30
0
File: gy.py Project: go-magma/magma
class GYController(PolicyMixin, MagmaController):
    """
    GYController

    The GY controller installs flows for enforcement of GY final actions; this
    includes redirection and QoS (currently not supported).
    """

    APP_NAME = "gy"
    APP_TYPE = ControllerType.LOGICAL

    def __init__(self, *args, **kwargs):
        super(GYController, self).__init__(*args, **kwargs)
        self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
        self.next_main_table = self._service_manager.get_next_table_num(
            self.APP_NAME)
        self.loop = kwargs['loop']
        self._msg_hub = MessageHub(self.logger)
        self._internal_ip_allocator = kwargs['internal_ip_allocator']
        tbls = \
            self._service_manager.allocate_scratch_tables(self.APP_NAME, 2)
        self._redirect_scratch = tbls[0]
        self._mac_rewr = \
            self._service_manager.INTERNAL_MAC_IP_REWRITE_TBL_NUM
        self._bridge_ip_address = kwargs['config']['bridge_ip_address']
        self._clean_restart = kwargs['config']['clean_restart']
        self._redirect_manager = \
            RedirectionManager(
                self._bridge_ip_address,
                self.logger,
                self.tbl_num,
                self._service_manager.get_table_num(EGRESS),
                self._redirect_scratch,
                self._session_rule_version_mapper
            ).set_cwf_args(
                internal_ip_allocator=kwargs['internal_ip_allocator'],
                arp=kwargs['app_futures']['arpd'],
                mac_rewrite=self._mac_rewr,
                bridge_name=kwargs['config']['bridge_name'],
                egress_table=self._service_manager.get_table_num(EGRESS)
            )

    def initialize_on_connect(self, datapath):
        """
        Install the default flows on datapath connect event.

        Args:
            datapath: ryu datapath struct
        """
        self._datapath = datapath
        self._delete_all_flows(datapath)
        self._install_default_flows(datapath)

    def deactivate_rules(self, imsi, rule_ids):
        """
        Deactivate flows for a subscriber. If only imsi is present, delete all
        rule flows for that subscriber (i.e. end its session). If rule_ids are
        present, delete only the flows for those rules.

        Args:
            imsi (string): subscriber id
            rule_ids (list of strings): policy rule ids
        """
        if not self.init_finished:
            self.logger.error('Pipelined is not initialized')
            return RuleModResult.FAILURE

        if self._datapath is None:
            self.logger.error('Datapath not initialized')
            return

        if not imsi:
            self.logger.error('No subscriber specified')
            return

        if not rule_ids:
            self._deactivate_flows_for_subscriber(imsi)
        else:
            for rule_id in rule_ids:
                self._deactivate_flow_for_rule(imsi, rule_id)

    def cleanup_state(self):
        pass

    def _deactivate_flows_for_subscriber(self, imsi):
        """ Deactivate all rules for a subscriber, ending any enforcement """
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath, self.tbl_num, match)
        self._redirect_manager.deactivate_flows_for_subscriber(
            self._datapath, imsi)

    def _deactivate_flow_for_rule(self, imsi, rule_id):
        """
        Deactivate a specific rule using the flow cookie for a subscriber
        """
        try:
            num = self._rule_mapper.get_rule_num(rule_id)
        except KeyError:
            self.logger.error('Could not find rule id %s', rule_id)
            return
        cookie, mask = (num, flows.OVS_COOKIE_MATCH_ALL)
        match = MagmaMatch(imsi=encode_imsi(imsi))
        flows.delete_flow(self._datapath,
                          self.tbl_num,
                          match,
                          cookie=cookie,
                          cookie_mask=mask)
        self._redirect_manager.deactivate_flow_for_rule(
            self._datapath, imsi, num)

    def _install_flow_for_rule(self, imsi, ip_addr, rule):
        if rule.redirect.support == rule.redirect.ENABLED:
            self._install_redirect_flow(imsi, ip_addr, rule)
            return RuleModResult.SUCCESS
        else:
            # TODO: Add support once sessiond implements restrict access QOS
            self.logger.error('GY only supports FINAL action redirect, other '
                              'final actions are not supported')
            return RuleModResult.FAILURE

    def _install_default_flow_for_subscriber(self, imsi):
        pass

    def _delete_all_flows(self, datapath):
        flows.delete_all_flows_from_table(datapath, self.tbl_num)
        flows.delete_all_flows_from_table(datapath, self._redirect_scratch)
        flows.delete_all_flows_from_table(datapath, self._mac_rewr)

    def _install_default_flows(self, datapath):
        """
        For each direction set the default flows to just forward to next app.
        The enforcement flows for each subscriber would be added when the
        IP session is created, by reaching out to the controller/PCRF.

        Args:
            datapath: ryu datapath struct
        """
        inbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                   direction=Direction.IN)
        outbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                    direction=Direction.OUT)
        flows.add_resubmit_next_service_flow(
            datapath,
            self.tbl_num,
            inbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)
        flows.add_resubmit_next_service_flow(
            datapath,
            self.tbl_num,
            outbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

    def _install_redirect_flow(self, imsi, ip_addr, rule):
        rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
        priority = rule.priority
        # TODO currently if redirection is enabled we ignore other flows
        # from rule.flow_list, confirm that this is the expected behaviour
        redirect_request = RedirectionManager.RedirectRequest(
            imsi=imsi,
            ip_addr=ip_addr,
            rule=rule,
            rule_num=rule_num,
            priority=priority)
        try:
            self._redirect_manager.setup_cwf_redirect(self._datapath,
                                                      self.loop,
                                                      redirect_request)
            return RuleModResult.SUCCESS
        except RedirectException as err:
            self.logger.error(
                'Redirect Exception for imsi %s, rule.id - %s : %s', imsi,
                rule.id, err)
            return RuleModResult.FAILURE

    def _install_default_flows_if_not_installed(
            self, datapath,
            existing_flows: List[OFPFlowStats]) -> List[OFPFlowStats]:
        inbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                   direction=Direction.IN)
        outbound_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
                                    direction=Direction.OUT)

        inbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath,
            self.tbl_num,
            inbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        outbound_msg = flows.get_add_resubmit_next_service_flow_msg(
            datapath,
            self.tbl_num,
            outbound_match, [],
            priority=flows.MINIMUM_PRIORITY,
            resubmit_table=self.next_main_table)

        msgs, remaining_flows = self._msg_hub \
            .filter_msgs_if_not_in_flow_list([inbound_msg, outbound_msg],
                                             existing_flows)
        if msgs:
            chan = self._msg_hub.send(msgs, datapath)
            self._wait_for_responses(chan, len(msgs))

        return remaining_flows

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _handle_barrier(self, ev):
        self._msg_hub.handle_barrier(ev)

    @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
    def _handle_error(self, ev):
        self._msg_hub.handle_error(ev)
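
Most controllers in these examples share the same send-and-wait pattern: build the OpenFlow messages, push them through MessageHub, then drain the response channel and check each result. A condensed sketch of that pattern; msg_hub, datapath and msgs are assumed to be set up as in the listings above, and MsgChannel is the same class used there.

def send_and_wait(msg_hub, datapath, msgs, logger):
    """Send flow-mod messages and report whether every one was acknowledged."""
    chan = msg_hub.send(msgs, datapath)
    for _ in range(len(msgs)):
        try:
            result = chan.get()
        except MsgChannel.Timeout:
            logger.error("No response from OVS msg channel")
            return False
        if not result.ok():
            logger.error("Failed to install flow: %s", result.exception())
            return False
    return True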