Example #1
    def recompute_all_supersets(self, pctrl):

        self.logger.debug("~Recomputing all Supersets...")

        self.rulecounts = self.recompute_rulecounts(pctrl)
        # get all sets of participants advertising the same prefix
        peer_sets = get_prefix2part_sets(pctrl)
        peer_sets = clear_inactive_parts(peer_sets, self.rulecounts.keys())
        peer_sets = removeSubsets(peer_sets)

        self.supersets = minimize_ss_rules_greedy(peer_sets, self.rulecounts, self.max_initial_bits)

        # impose an ordering on each superset by converting sets to lists
        for i in range(len(self.supersets)):
            self.supersets[i] = list(self.supersets[i])

        # fix the mask size after a recomputation event
        self.mask_size = self.max_bits - 1
        self.id_size = 1

        # if there is more than one superset, set the field sizes appropriately
        if len(self.supersets) > 1:
            self.id_size = int(math.ceil(math.log(len(self.supersets), 2)))
            self.mask_size -= self.id_size

        self.logger.debug("done.~")
        self.logger.debug("Supersets: >> "+str(self.supersets))
Example #2
    def recompute_all_supersets(self, pctrl):

        self.logger.debug("~Recomputing all Supersets...")

        self.rulecounts = self.recompute_rulecounts(pctrl)
        # get all sets of participants advertising the same prefix
        peer_sets = get_prefix2part_sets(pctrl)
        peer_sets = clear_inactive_parts(peer_sets, self.rulecounts.keys())
        peer_sets = removeSubsets(peer_sets)

        self.supersets = minimize_ss_rules_greedy(peer_sets, self.rulecounts, self.max_initial_bits)

        # impose an ordering on each superset by converting sets to lists
        for i in range(len(self.supersets)):
            self.supersets[i] = list(self.supersets[i])

        # if there is more than one superset, set the id size appropriately
        self.id_size = 1
        if len(self.supersets) > 1:
            self.id_size = int(math.ceil(math.log(len(self.supersets), 2)))
            
        # fix the mask size based on the id size
        self.mask_size = self.max_bits - self.id_size

        # in the unlikely case that there are more participants for a prefix than can fit in
        # the mask, truncate the list of participants (this may still be very broken)
        for superset in self.supersets:
            if len(superset) > self.mask_size:
                self.logger.warn('Superset too big!  Dropping participants.')
                del(superset[self.mask_size:])

        self.logger.debug("done.~")
        self.logger.debug("Supersets: >> "+str(self.supersets))
Example #3
    def recompute_all_supersets(self, pctrl):

        self.logger.debug("~Recomputing all Supersets...")

        self.rulecounts = self.recompute_rulecounts(pctrl)
        # get all sets of participants advertising the same prefix
        peer_sets = get_prefix2part_sets(pctrl)
        peer_sets = clear_inactive_parts(peer_sets, self.rulecounts.keys())
        peer_sets = removeSubsets(peer_sets)

        self.supersets = minimize_ss_rules_greedy(peer_sets, self.rulecounts,
                                                  self.max_initial_bits)

        # impose an ordering on each superset by converting sets to lists
        for i in range(len(self.supersets)):
            self.supersets[i] = list(self.supersets[i])

        # fix the mask size after a recomputation event
        self.mask_size = self.max_bits - 1
        self.id_size = 1

        # if there is more than one superset, set the field sizes appropriately
        if len(self.supersets) > 1:
            self.id_size = int(math.ceil(math.log(len(self.supersets), 2)))
            self.mask_size -= self.id_size

        self.logger.debug("done.~")
        self.logger.debug("Supersets: >> " + str(self.supersets))