Example #1
    def process_policy_changes(self, change_info):
        if not self.cfg.isSupersetsMode():
            self.logger.warn('Dynamic policy updates only supported in SuperSet mode')
            return

        # First step towards a less brute force approach: Handle removals without having to remove everything
        if 'removal_cookies' in change_info:
            cookies = change_info['removal_cookies']
            removed_in_cookies = self.remove_policies_by_cookies(cookies, 'inbound')
            self.queue_flow_removals(removed_in_cookies, 'inbound')
            removed_out_cookies = self.remove_policies_by_cookies(cookies, 'outbound')
            self.queue_flow_removals(removed_out_cookies, 'outbound')
            if 'new_policies' not in change_info:
                self.push_dp()
                return

        # Remainder of this method is the brute-force approach: wipe everything and redo it.
        # This should be replaced by a more fine-grained approach.
        self.logger.debug("Wiping outbound rules.")
        wipe_msgs = msg_clear_all_outbound(self.policies, self.port0_mac)
        self.dp_queued.extend(wipe_msgs)

        self.logger.debug("pre-updated policies: " + json.dumps(self.policies))
        if 'removal_cookies' in change_info:
            cookies = change_info['removal_cookies']
            self.remove_policies_by_cookies(cookies, 'inbound')
            self.remove_policies_by_cookies(cookies, 'outbound')

        if 'new_policies' in change_info:
            new_policies = change_info['new_policies']
            self.sanitize_policies(new_policies)
            self.update_policies(new_policies, 'inbound')
            self.update_policies(new_policies, 'outbound')

        self.logger.debug("updated policies: " + json.dumps(self.policies))
        self.logger.debug("pre-recomputed supersets: " + json.dumps(self.supersets.supersets))

        self.initialize_dataplane()
        self.push_dp()

        # Send gratuitous ARP responses for all VNHs
        garp_required_vnhs = [fec['vnh'] for fec in self.prefix_2_FEC.values()]
        #garp_required_vnhs = self.VNH_2_prefix.keys()
        for vnh in garp_required_vnhs:
            self.process_arp_request(None, vnh)
            
        return
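A minimal usage sketch for the method above, assuming the `change_info` layout documented in the commented-out block of the later examples on this page ('removal_cookies' is a list of policy cookies; 'new_policies' follows the policy file format). The participant object, the cookie values, and the inbound/outbound keys of the policy payload are hypothetical stand-ins.

# Hypothetical driver code; 'participant' and the cookie values are stand-ins.

# Removal-only update: takes the fast path (queue flow removals, push_dp, return early).
participant.process_policy_changes({'removal_cookies': [0x10, 0x11]})

# Removals plus new policies: falls through to the brute-force wipe-and-recompute path.
participant.process_policy_changes({
    'removal_cookies': [0x12],
    'new_policies': {'inbound': [], 'outbound': []},  # assumed policy-file shape
})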
Example #2
    def process_bgp_route(self, route):
        "Process each incoming BGP advertisement"
        tstart = time.time()

        # Map to update for each prefix in the route advertisement.
        updates = self.bgp_instance.update(route)
        #self.logger.debug("process_bgp_route:: "+str(updates))
        # TODO: This step should be parallelized
        # TODO: The decision process for these prefixes is going to be the same;
        # we should think about getting rid of such redundant computations.
        for update in updates:
            self.bgp_instance.decision_process_local(update)
            self.vnh_assignment(update)

        if TIMING:
            elapsed = time.time() - tstart
            self.logger.debug("Time taken for decision process: " +
                              str(elapsed))
            tstart = time.time()

        # Ensure this is defined even outside supersets mode; it is used after the branch.
        garp_required_vnhs = []
        if self.cfg.isSupersetsMode():
            ################## SUPERSET RESPONSE TO BGP ##################
            # update supersets
            "Map the set of BGP updates to a list of superset expansions."
            ss_changes, ss_changed_prefs = self.supersets.update_supersets(
                self, updates)

            if TIMING:
                elapsed = time.time() - tstart
                self.logger.debug("Time taken to update supersets: " +
                                  str(elapsed))
                tstart = time.time()

            # ss_changed_prefs are prefixes for which the VMAC bits have changed
            # these prefixes must have gratuitous arps sent
            garp_required_vnhs = [
                self.prefix_2_VNH[prefix] for prefix in ss_changed_prefs
            ]

            "If a recomputation event was needed, wipe out the flow rules."
            if ss_changes["type"] == "new":
                self.logger.debug("Wiping outbound rules.")
                wipe_msgs = msg_clear_all_outbound(self.policies,
                                                   self.port0_mac)
                self.dp_queued.extend(wipe_msgs)

                #if a recomputation was needed, all VMACs must be reARPed
                # TODO: confirm reARPed is a word
                garp_required_vnhs = self.VNH_2_prefix.keys()

            if len(ss_changes['changes']) > 0:

                self.logger.debug("Supersets have changed: " + str(ss_changes))

                "Map the superset changes to a list of new flow rules."
                flow_msgs = update_outbound_rules(ss_changes, self.policies,
                                                  self.supersets,
                                                  self.port0_mac)

                self.logger.debug("Flow msgs: " + str(flow_msgs))
                "Dump the new rules into the dataplane queue."
                self.dp_queued.extend(flow_msgs)

            if TIMING:
                elapsed = time.time() - tstart
                self.logger.debug("Time taken to deal with ss_changes: " +
                                  str(elapsed))
                tstart = time.time()

        ################## END SUPERSET RESPONSE ##################

        else:
            # TODO: similar logic for MDS
            self.logger.debug("Creating ctrlr messages for MDS scheme")

        self.push_dp()

        if TIMING:
            elapsed = time.time() - tstart
            self.logger.debug("Time taken to push dp msgs: " + str(elapsed))
            tstart = time.time()

        changed_vnhs, announcements = self.bgp_instance.bgp_update_peers(
            updates, self.prefix_2_VNH, self.cfg.ports)
        """ Combine the VNHs which have changed BGP default routes with the
            VNHs which have changed supersets.
        """

        changed_vnhs = set(changed_vnhs)
        changed_vnhs.update(garp_required_vnhs)

        # Send gratuitous ARP responses for all of them
        for vnh in changed_vnhs:
            self.process_arp_request(None, vnh)

        # Tell Route Server that it needs to announce these routes
        for announcement in announcements:
            # TODO: Complete the logic for this function
            self.send_announcement(announcement)

        if TIMING:
            elapsed = time.time() - tstart
            self.logger.debug("Time taken to send garps/announcements: " +
                              str(elapsed))
            tstart = time.time()
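The `if TIMING:` blocks above repeat the same measure, log, and reset steps around each stage. A context-manager helper along the lines sketched below could express that pattern once; `timed_step` is not part of this codebase, and the usage shown in the trailing comment is only illustrative.

import time
from contextlib import contextmanager

@contextmanager
def timed_step(logger, label, enabled=True):
    # Measure the wrapped block and log its duration, mirroring the TIMING pattern above.
    start = time.time()
    try:
        yield
    finally:
        if enabled:
            logger.debug("Time taken for %s: %s", label, time.time() - start)

# Illustrative use inside process_bgp_route:
#     with timed_step(self.logger, "decision process", TIMING):
#         for update in updates:
#             self.bgp_instance.decision_process_local(update)
#             self.vnh_assignment(update)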
Example #3
    def process_policy_changes(self, change_info):
        if not self.cfg.isSupersetsMode():
            self.logger.warn(
                'Dynamic policy updates only supported in SuperSet mode')
            return

        # First step towards a less brute force approach: Handle removals without having to remove everything
        if 'removal_cookies' in change_info:
            cookies = change_info['removal_cookies']
            removed_in_cookies = self.remove_policies_by_cookies(
                cookies, 'inbound')
            self.queue_flow_removals(removed_in_cookies, 'inbound')
            removed_out_cookies = self.remove_policies_by_cookies(
                cookies, 'outbound')
            self.queue_flow_removals(removed_out_cookies, 'outbound')
            if 'new_policies' not in change_info:
                self.push_dp()
                return

        # Remainder of this method is the brute-force approach: wipe everything and redo it.
        # This should be replaced by a more fine-grained approach.
        self.logger.debug("Wiping outbound rules.")
        wipe_msgs = msg_clear_all_outbound(self.policies, self.port0_mac)
        self.dp_queued.extend(wipe_msgs)

        self.logger.debug("pre-updated policies: " + json.dumps(self.policies))
        if 'removal_cookies' in change_info:
            cookies = change_info['removal_cookies']
            self.remove_policies_by_cookies(cookies, 'inbound')
            self.remove_policies_by_cookies(cookies, 'outbound')

        if 'new_policies' in change_info:
            new_policies = change_info['new_policies']
            self.sanitize_policies(new_policies)
            self.update_policies(new_policies, 'inbound')
            self.update_policies(new_policies, 'outbound')

        self.logger.debug("updated policies: " + json.dumps(self.policies))
        self.logger.debug("pre-recomputed supersets: " +
                          json.dumps(self.supersets.supersets))

        self.initialize_dataplane()
        self.push_dp()

        # Send gratuitous ARP responses for all VNHs
        garp_required_vnhs = self.VNH_2_prefix.keys()
        for vnh in garp_required_vnhs:
            self.process_arp_request(None, vnh)

        return

        # Original code below...

        "Process the changes in participants' policies"
        # TODO: Implement the logic of dynamically changing participants' outbound and inbound policy
        '''
            change_info =
            {
                'removal_cookies' : [cookie1, ...], # Cookies of deleted policies
                'new_policies' :
                {
                    <policy file format>
                }

            }
        '''
        # remove flow rules for the old policies
        removal_msgs = []
        '''
        for cookie in change_info['removal_cookies']:
            mod =  {"rule_type":"outbound", "priority":0,
                    "match":match_args , "action":{},
                    "cookie":cookie, "mod_type":"remove"}
            removal_msgs.append(mod)
        '''

        self.dp_queued.extend(removal_msgs)

        # add flow rules for the new policies
        if self.cfg.isSupersetsMode():
            dp_msgs = ss_process_policy_change(self.supersets, add_policies,
                                               remove_policies, policies,
                                               self.port_count, self.port0_mac)
        else:
            dp_msgs = []

        self.dp_queued.extend(dp_msgs)

        self.push_dp()

        return 0
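`remove_policies_by_cookies` is not shown in this listing; the sketch below is only a guess at its behavior, inferred from how its return value feeds `queue_flow_removals` above. The assumption that `self.policies` is keyed by direction and that each policy dict carries a 'cookie' field is mine, not the source's.

# Hedged sketch, not the real helper: drop policies whose cookie is in `cookies`
# and return the cookies that were actually removed.
def remove_policies_by_cookies(self, cookies, direction):
    current = self.policies.get(direction, [])
    removed = [p['cookie'] for p in current if p.get('cookie') in cookies]
    self.policies[direction] = [p for p in current if p.get('cookie') not in cookies]
    return removed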
Example #4
    def process_bgp_route(self, route):
        "Process each incoming BGP advertisement"
        tstart = time.time()

        prefixes = get_prefixes_from_announcements(route)
        with self.getlock(prefixes):
            reply = ''
            # Map to update for each prefix in the route advertisement.
            updates = self.bgp_instance.update(route)
            #self.logger.debug("process_bgp_route:: "+str(updates))
            # TODO: This step should be parallelized
            # TODO: The decision process for these prefixes is going to be the same;
            # we should think about getting rid of such redundant computations.
            for update in updates:
                self.bgp_instance.decision_process_local(update)
                self.vnh_assignment(update)

            if TIMING:
                elapsed = time.time() - tstart
                self.logger.debug("Time taken for decision process: "+str(elapsed))
                tstart = time.time()

            # Ensure this is defined even outside supersets mode; it is used after the branch.
            garp_required_vnhs = []
            if self.cfg.isSupersetsMode():
            ################## SUPERSET RESPONSE TO BGP ##################
                # update supersets
                "Map the set of BGP updates to a list of superset expansions."
                ss_changes, ss_changed_prefs = self.supersets.update_supersets(self, updates)

                if TIMING:
                    elapsed = time.time() - tstart
                    self.logger.debug("Time taken to update supersets: "+str(elapsed))
                    tstart = time.time()

                # ss_changed_prefs are prefixes for which the VMAC bits have changed
                # these prefixes must have gratuitous arps sent
                garp_required_vnhs = [self.prefix_2_VNH[prefix] for prefix in ss_changed_prefs]

                "If a recomputation event was needed, wipe out the flow rules."
                if ss_changes["type"] == "new":
                    self.logger.debug("Wiping outbound rules.")
                    wipe_msgs = msg_clear_all_outbound(self.policies, self.port0_mac)
                    self.dp_queued.extend(wipe_msgs)

                    #if a recomputation was needed, all VMACs must be reARPed
                    # TODO: confirm reARPed is a word
                    garp_required_vnhs = self.VNH_2_prefix.keys()

                if len(ss_changes['changes']) > 0:

                    self.logger.debug("Supersets have changed: "+str(ss_changes))

                    "Map the superset changes to a list of new flow rules."
                    flow_msgs = update_outbound_rules(ss_changes, self.policies,
                            self.supersets, self.port0_mac)

                    self.logger.debug("Flow msgs: "+str(flow_msgs))
                    "Dump the new rules into the dataplane queue."
                    self.dp_queued.extend(flow_msgs)

                if TIMING:
                    elapsed = time.time() - tstart
                    self.logger.debug("Time taken to deal with ss_changes: "+str(elapsed))
                    tstart = time.time()

            ################## END SUPERSET RESPONSE ##################

            else:
                # TODO: similar logic for MDS
                self.logger.debug("Creating ctrlr messages for MDS scheme")

            self.push_dp()

            if TIMING:
                elapsed = time.time() - tstart
                self.logger.debug("Time taken to push dp msgs: "+str(elapsed))
                tstart = time.time()

            changed_vnhs, announcements = self.bgp_instance.bgp_update_peers(updates,
                    self.prefix_2_VNH, self.cfg.ports)

            """ Combine the VNHs which have changed BGP default routes with the
                VNHs which have changed supersets.
            """

            changed_vnhs = set(changed_vnhs)
            changed_vnhs.update(garp_required_vnhs)

            # Send gratuitous ARP responses for all of them
            for vnh in changed_vnhs:
                self.process_arp_request(None, vnh)

            # Tell Route Server that it needs to announce these routes
            for announcement in announcements:
                # TODO: Complete the logic for this function
                self.send_announcement(announcement)

            if TIMING:
                elapsed = time.time() - tstart
                self.logger.debug("Time taken to send garps/announcements: "+str(elapsed))
                tstart = time.time()

            return reply
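This variant serializes updates that touch the same prefixes via `self.getlock(prefixes)`, whose implementation is not shown here. A possible shape for such a helper, sketched under the assumption of one `threading.Lock` per prefix acquired in sorted order to avoid deadlock:

import threading
from contextlib import contextmanager

class PrefixLocks(object):
    # Hedged sketch only; the real getlock() in this codebase may work differently.
    def __init__(self):
        self._guard = threading.Lock()   # protects the lock table itself
        self._locks = {}                 # prefix -> threading.Lock

    @contextmanager
    def getlock(self, prefixes):
        # Acquire per-prefix locks in a fixed (sorted) order so two updates that
        # share prefixes serialize without risking deadlock.
        with self._guard:
            locks = [self._locks.setdefault(p, threading.Lock()) for p in sorted(prefixes)]
        for lock in locks:
            lock.acquire()
        try:
            yield
        finally:
            for lock in reversed(locks):
                lock.release()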
Example #5
    def process_bgp_route(self, update):

        "Process each incoming BGP advertisement"
        tstart = time.time()

        # Check for local failure; push fast-reroute rules if one is detected
        #self.deal_with_local_failure(routes)

        # Map to update for each prefix in the route advertisement.
        self.bgp_instance.update(update)
        self.logger.info("process_bgp_route:: " + str(update))
        self.logger.debug("process_bgp_route:: "+str(update))
        # TODO: This step should be parallelized
        # TODO: The decision process for these prefixes is going to be the same;
        # we should think about getting rid of such redundant computations.
        self.bgp_instance.decision_process(update)
        #assign FEC to prefix
        self.FEC.assignment(update)
        #assign BEC to prefix
        self.BEC.assignment(update)
        #assign VNH to FEC, BEC pair of prefix
        self.vnh_assignment(update)

        if TIMING:
            elapsed = time.time() - tstart
            #self.logger.info("Time taken for decision process: " + str(elapsed))
            self.logger.debug("Time taken for decision process: " + str(elapsed))
            tstart = time.time()


        # Ensure this is defined even outside supersets mode; it is used after the branch.
        garp_required_VNHs = []
        if self.cfg.isSupersetsMode():
            ################## SUPERSET RESPONSE TO BGP ##################
            # update supersets
            "Map the set of BGP updates to a list of superset expansions."
            ss_changes, ss_changed_prefs = self.supersets.update_supersets(self, update)

            if TIMING:
                elapsed = time.time() - tstart
                self.logger.debug("Time taken to update supersets: "+str(elapsed))
                tstart = time.time()

            # ss_changed_prefs are prefixes for which the VMAC bits have changed
            # these prefixes must have gratuitous arps sent
            garp_required_VNHs = []
            for prefix in ss_changed_prefs:
                BEC_id = self.prefix_2_BEC[prefix]['id']
                FEC_id = self.prefix_2_FEC[prefix]['id']
                garp_required_VNHs.append(self.BECid_FECid_2_VNH[(BEC_id,FEC_id)])

            "If a recomputation event was needed, wipe out the flow rules."
            if ss_changes["type"] == "new":
                self.logger.debug("Wiping outbound rules.")
                wipe_msgs = msg_clear_all_outbound(self.policies, self.port0_mac)
                self.dp_queued.extend(wipe_msgs)

                #if a recomputation was needed, all VMACs must be reARPed
                # TODO: confirm reARPed is a word
                #garp_required_vnhs = self.VNH_2_prefix.keys()
                garp_required_VNHs = list(self.BECid_FECid_2_VNH.values())
                #garp_required_vnhs = self.prefix_2_FEC.values()['vnh']

            if len(ss_changes['changes']) > 0:

                self.logger.debug("Supersets have changed: "+str(ss_changes))

                "Map the superset changes to a list of new flow rules."
                flow_msgs = update_outbound_rules(ss_changes, self.policies,
                        self.supersets, self.port0_mac)

                self.logger.debug("Flow msgs: "+str(flow_msgs))
                "Dump the new rules into the dataplane queue."
                self.dp_queued.extend(flow_msgs)

            if TIMING:
                elapsed = time.time() - tstart
                self.logger.debug("Time taken to deal with ss_changes: "+str(elapsed))
                tstart = time.time()

        ################## END SUPERSET RESPONSE ##################

        else:
            # TODO: similar logic for MDS
            self.logger.debug("Creating ctrlr messages for MDS scheme")

        if len(self.dp_queued) != 0:
            self.push_dp()

        if TIMING:
            elapsed = time.time() - tstart
            self.logger.debug("Time taken to push dp msgs: "+str(elapsed))
            tstart = time.time()

        new_VNHs, announcements = self.bgp_instance.bgp_update_peer(
            update, self.prefix_2_VNH_nrfp, self.prefix_2_FEC, self.prefix_2_BEC,
            self.BECid_FECid_2_VNH, self.VNH_2_vmac, self.cfg.ports)

        """ Combine the VNHs which have changed BGP default routes with the
            VNHs which have changed supersets.
        """

        #new_FECs = set(new_FECs)

        new_VNHs = new_VNHs + garp_required_VNHs

        #remove duplicates
        new_VNHs = list(set(new_VNHs))

        # Send gratuitous ARP responses for all of them
        for VNH in new_VNHs:
            self.process_arp_request(None, VNH)

        # Tell Route Server that it needs to announce these routes
        for announcement in announcements:
            # TODO: Complete the logic for this function
            self.send_announcement(announcement)

        if TIMING:
            elapsed = time.time() - tstart
            self.logger.info("Time taken to send garps/announcements: "+str(elapsed))
            self.logger.debug("Time taken to send garps/announcements: " + str(elapsed))
            tstart = time.time()
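In this variant a prefix resolves to its VNH through its BEC and FEC ids. The helper below merely restates that lookup chain from the code above in one place; the helper itself is not in the source.

# Restatement of the lookup used above (not a function in the source):
# prefix -> (BEC id, FEC id) -> VNH.
def vnh_for_prefix(self, prefix):
    bec_id = self.prefix_2_BEC[prefix]['id']
    fec_id = self.prefix_2_FEC[prefix]['id']
    return self.BECid_FECid_2_VNH[(bec_id, fec_id)]

# which would let the gratuitous-ARP collection read:
#     garp_required_VNHs = [self.vnh_for_prefix(p) for p in ss_changed_prefs]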
Example #6
    def process_policy_changes(self, change_info):
        if not self.cfg.isSupersetsMode():
            self.logger.warn('Dynamic policy updates only supported in SuperSet mode')
            return

        # First step towards a less brute force approach: Handle removals without having to remove everything
        if 'removal_cookies' in change_info:
            cookies = change_info['removal_cookies']
            removed_in_cookies = self.remove_policies_by_cookies(cookies, 'inbound')
            self.queue_flow_removals(removed_in_cookies, 'inbound')
            removed_out_cookies = self.remove_policies_by_cookies(cookies, 'outbound')
            self.queue_flow_removals(removed_out_cookies, 'outbound')
            if 'new_policies' not in change_info:
                self.push_dp()
                return

        # Remainder of this method is the brute-force approach: wipe everything and redo it.
        # This should be replaced by a more fine-grained approach.
        self.logger.debug("Wiping outbound rules.")
        wipe_msgs = msg_clear_all_outbound(self.policies, self.port0_mac)
        self.dp_queued.extend(wipe_msgs)

        self.logger.debug("pre-updated policies: " + json.dumps(self.policies))
        if 'removal_cookies' in change_info:
            cookies = change_info['removal_cookies']
            self.remove_policies_by_cookies(cookies, 'inbound')
            self.remove_policies_by_cookies(cookies, 'outbound')

        if 'new_policies' in change_info:
            new_policies = change_info['new_policies']
            self.sanitize_policies(new_policies)
            self.update_policies(new_policies, 'inbound')
            self.update_policies(new_policies, 'outbound')

        self.logger.debug("updated policies: " + json.dumps(self.policies))
        self.logger.debug("pre-recomputed supersets: " + json.dumps(self.supersets.supersets))

        self.initialize_dataplane()
        self.push_dp()

        # Send gratuitous ARP responses for all VNHs
        garp_required_vnhs = self.VNH_2_prefix.keys()
        for vnh in garp_required_vnhs:
            self.process_arp_request(None, vnh)
            
        return

        # Original code below...
        
        "Process the changes in participants' policies"
        # TODO: Implement the logic of dynamically changing participants' outbound and inbound policy
        '''
            change_info =
            {
                'removal_cookies' : [cookie1, ...], # Cookies of deleted policies
                'new_policies' :
                {
                    <policy file format>
                }

            }
        '''
        # remove flow rules for the old policies
        removal_msgs = []

        '''
        for cookie in change_info['removal_cookies']:
            mod =  {"rule_type":"outbound", "priority":0,
                    "match":match_args , "action":{},
                    "cookie":cookie, "mod_type":"remove"}
            removal_msgs.append(mod)
        '''

        self.dp_queued.extend(removal_msgs)


        # add flow rules for the new policies
        if self.cfg.isSupersetsMode():
            dp_msgs = ss_process_policy_change(self.supersets, add_policies, remove_policies, policies,
                                                self.port_count, self.port0_mac)
        else:
            dp_msgs = []

        self.dp_queued.extend(dp_msgs)

        self.push_dp()

        return 0
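`queue_flow_removals` is not shown in this listing either. The sketch below assumes it emits one 'remove' flow mod per cookie, reusing the mod-message shape from the commented-out block above; the empty match is a placeholder, not the real match fields.

# Hedged sketch based only on the mod-message shape shown in the dead code above.
def queue_flow_removals(self, cookies, direction):
    for cookie in cookies:
        self.dp_queued.append({
            "rule_type": direction,
            "priority": 0,
            "match": {},      # placeholder; the real match fields are not shown here
            "action": {},
            "cookie": cookie,
            "mod_type": "remove",
        })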