Code Example #1
def _main(autostart=False):
    graph = _PaperGraph()
    net = IPNet(topo=graph, use_v6=False)
    net.start()
    db = TopologyDB(net=net)
    for e in net.topo.egresses:
        db._network[e]['is_egress'] = True
    db.save(TOPO_DB)
    sink = net['sink']
    source = net['source']
    sink_addr = sink.IP()
    src_addr = source.IP()
    MNLOG.debug('Source is at ', src_addr, 'sink is at ', sink_addr, '\n')
    with open(REQ_FILE, 'w') as f:
        f.write('MIRROR {sink} ON [A B C D]\n'
                'CONFINE {sink} ON [A B L C D]\n'
                'USING {cnt} M DURING 500ms'.format(sink=sink_addr,
                                                    cnt=FLOW_COUNT))
    MNLOG.info('Starting sink')
    sink.filter_source(src_addr)
    MNLOG.info('Starting source')
    source.start_src(sink_addr, src_addr, FLOW_COUNT)
    time.sleep(5)
    if autostart:
        MNLOG.info('Starting collector')
        net[COLLECTOR_ID].start_collector()
    _CLI(net)
    net.stop()
    if os.path.exists(REQ_FILE):
        os.unlink(REQ_FILE)
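For reference, a hedged sketch of what the requirement file written by _main() ends up containing; the sink address and FLOW_COUNT below are hypothetical placeholders.

# Hedged sketch: renders the same format string as _main() above,
# with hypothetical values for sink_addr and FLOW_COUNT.
sink_addr, cnt = '10.0.0.2', 500
print('MIRROR {sink} ON [A B C D]\n'
      'CONFINE {sink} ON [A B L C D]\n'
      'USING {cnt} M DURING 500ms'.format(sink=sink_addr, cnt=cnt))
# MIRROR 10.0.0.2 ON [A B C D]
# CONFINE 10.0.0.2 ON [A B L C D]
# USING 500 M DURING 500ms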
Code Example #2
    def updateNeigh(self, neigh_id, neigh_ip, lasttime, netmask, helloint):
        """
        Handling Incoming HELLO Packets

        This section explains the detailed processing of a received Hello packet.
        The generic input processing of PWOSPF packets will have checked the
        validity of the IP header and the PWOSPF packet header.  Next, the values of
        the Network Mask and HelloInt fields in the received Hello packet must be
        checked against the values configured for the receiving interface.  Any
        mismatch causes processing to stop and the packet to be dropped.  In other
        words, the above fields are really describing the attached network's
        configuration.

        At this point, an attempt is made to match the source of the Hello Packet to
        one of the receiving interface's neighbors.  If the receiving interface is
        a multi-access network (either broadcast or non-broadcast) the source is
        identified by the IP source address found in the Hello's IP header.  The
        interface's current neighbor(s) are contained in the interface's data
        structure.  If the interface does not have a neighbor, a neighbor is created.
        If the interface already has neighbor(s) but none match the IP of the
        incoming packet, a new neighbor is added. Finally, if the HELLO packet matches
        a current neighbor, the neighbor's "last hello packet received" timer is
        updated.
        """
        if netmask != self.Netmask() or helloint != self.helloint:
            lg.debug('hello packet mismatch')
            return
        self.neighbors[(neigh_id, neigh_ip)] = (lasttime, helloint)
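A hedged call sketch of the flow described in the docstring; 'iface' stands for the receiving-interface object that owns updateNeigh(), and all field values are hypothetical.

from datetime import datetime

# Hypothetical values extracted from a received Hello packet.
iface.updateNeigh(neigh_id='1.0.0.1',       # router ID from the PWOSPF header
                  neigh_ip='10.0.1.2',      # IP source address of the Hello
                  lasttime=datetime.now(),  # "last hello packet received" timestamp
                  netmask='255.255.255.0',  # must equal iface.Netmask()
                  helloint=10)              # must equal iface.helloint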
Code Example #3
File: base.py  Project: ElNiak/ipmininet
    def render(self, cfg, **kwargs) -> Dict[str, str]:
        """Render the configuration content for each config file of this daemon

        :param cfg: The global config for the node
        :param kwargs: Additional keywords args. will be passed directly
                       to the template"""
        self.files.extend(self.cfg_filenames)
        cfg_content = {}
        for i, filename in enumerate(self.cfg_filenames):
            log.debug('Generating %s\n' % filename)
            try:
                cfg.current_filename = filename
                kwargs["node"] = cfg
                kwargs["ip_statement"] = ip_statement
                kwargs["family_translate"] = family_translate
                template = self.template_lookup.get_template(
                    self.template_filenames[i])
                cfg_content[filename] = template.render(**kwargs)
            except Exception:
                # Display template errors in a less cryptic way
                log.error("Couldn't render a config file (",
                          self.template_filenames[i], ')')
                log.error(mako.exceptions.text_error_template().render())
                raise ValueError('Cannot render a configuration [%s: %s]' % (
                    self._node.name, self.NAME))
        return cfg_content
Code Example #4
File: link.py  Project: Maxmawt/ipmininet
 def _add_tunnel(if_local, if_remote, name, address, ttl=255):
     log.debug('Creating GRE tunnel named', name, ', for subnet',
               str(address), 'from', if_local, '[', if_local.ip, '] to',
               if_remote, '[', if_remote.ip, ']')
     cmd = if_local.node.cmd
     cmd('ip', 'tunnel', 'add', name, 'mode', 'gre', 'remote', if_remote.ip,
         'local', if_local.ip, 'ttl', str(ttl))
     cmd('ip', 'link', 'set', name, 'up')
     cmd('ip', 'address', 'add', 'dev', name, address)
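A hedged usage sketch (the interface objects and the subnet are hypothetical); the call boils down to the three ip commands issued through if_local.node.cmd.

# Hypothetical call; if_a and if_b are the local and remote interface objects.
_add_tunnel(if_a, if_b, name='gre0', address='10.10.0.1/30')
# Roughly equivalent to running, on if_a's node:
#   ip tunnel add gre0 mode gre remote <if_b.ip> local <if_a.ip> ttl 255
#   ip link set gre0 up
#   ip address add dev gre0 10.10.0.1/30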
Code Example #5
File: link.py  Project: oliviertilmans/ipmininet
 def _add_tunnel(if_local, if_remote, name, address, ttl=255):
     log.debug('Creating GRE tunnel named', name, ', for subnet',
               str(address), 'from', if_local, '[', if_local.ip, '] to',
               if_remote, '[', if_remote.ip, ']')
     cmd = if_local.node.cmd
     cmd('ip', 'tunnel', 'add', name, 'mode', 'gre', 'remote', if_remote.ip,
         'local', if_local.ip, 'ttl', str(ttl))
     cmd('ip', 'link', 'set', name, 'up')
     cmd('ip', 'address', 'add', 'dev', name, address)
Code Example #6
File: __router.py  Project: akemery/ipmininet
    def start(self):
        """Start the node: Configure the daemons, set the relevant sysctls,
        and fire up all needed processes"""
        # Build the config
        self.nconfig.build()
        # Check them
        err_code = False
        for d in self.nconfig.daemons:
            if self.create_logdirs and d.logdir:
                self._mklogdirs(d.logdir)
            out, err, code = self._processes.pexec(shlex.split(d.dry_run))
            err_code = err_code or code
            if code:
                lg.error(d.NAME, 'configuration check failed ['
                         'rcode:', code, ']\n'
                         'stdout:', out, '\n'
                         'stderr:', err)
        if err_code:
            lg.error('Config checks failed, aborting!')
            mininet.clean.cleanup()
            sys.exit(1)
        # Set relevant sysctls
        for opt, val in self.nconfig.sysctl:
            self._old_sysctl[opt] = self._set_sysctl(opt, val)

        # Wait until NDP has finished checking each IPv6 address assigned
        # to the interfaces of the node.
        # The command lists the IPv6 addresses still marked "tentative",
        # i.e. still undergoing duplicate address detection; if there are
        # any, we wait until all addresses have been checked.
        lg.debug(self._processes.node.name,
                 'Checking for any "tentative" addresses')
        tentative_cmd = "ip -6 addr show tentative"
        tentative_chk = self._processes.call(tentative_cmd)
        while tentative_chk is not None and tentative_chk != '':
            if tentative_chk.find("dadfailed") != -1:
                lg.error('At least two nodes have the same IPv6 address!\n')
                mininet.clean.cleanup()
                sys.exit(1)
            time.sleep(.5)
            tentative_chk = self._processes.call(tentative_cmd)
        lg.debug(
            self._processes.node.name,
            'All IPv6 addresses have passed the duplicate address detection mechanism'
        )

        # Fire up all daemons
        for d in self.nconfig.daemons:
            self._processes.popen(shlex.split(d.startup_line))
            # Busy-wait if the daemon needs some time before being started
            while not d.has_started(self._processes):
                time.sleep(.001)
            print(d.startup_line)
Code Example #7
  def addHosts(self, switches, nodes, ports_per_switch, hosts_per_switch):
    switch_num = 1
    host_list = []
    for num in range(0, nodes):
      host_id = self.id_gen(num+1, num+1).name_str()
      host_opts = self.def_nopts(self.LAYER_HOST, host_id)
      h = self.addHost(host_id, **host_opts)
      lg.debug("Adding host: %s\n" % (host_id))
      host_list.append(h)
      if num % hosts_per_switch == 0:
        switch_id = self.id_gen(switch_num, 255).name_str()
        switch_opts = self.def_nopts(self.LAYER_EDGE, switch_id)
        switch = self.addSwitch(switch_id, **switch_opts)
        lg.debug("Adding switch: %s\n" % (switch_id))
        for host in host_list:
          self.addLink(host, switch, cls=TCLink, bw=self.bw)
          lg.debug("Adding link: %s to %s\n" % (str(host), str(switch)))
        host_list = []
        switch_num += 1

    # Use integer division so range() receives an int (Python 3).
    for num in range(nodes // hosts_per_switch, switches):
      switch_id = self.id_gen(num+1, 255).name_str()
      lg.debug("Adding switch: %s\n" % (switch_id))
      switch_opts = self.def_nopts(self.LAYER_EDGE, switch_id)
      switch = self.addSwitch(switch_id, **switch_opts)
Code Example #8
 def _run_cmds(self, prefix: str = "ip -6 route add ") -> int:
     for cmd in self.cmds:
         cmd = prefix + cmd
         if self.table is not None:
             cmd = cmd + " table {num}".format(num=self.table.num)
         out, err, code = self.source.pexec(shlex.split(cmd))
         log.debug("Installing route on router %s: '%s'\n" %
                   (self.source.name, cmd))
         if code:
             log.error('Cannot install SRv6Route', self,
                       '[rcode:', str(code), ']:\n', cmd, '\nstdout:',
                       str(out), '\nstderr:', str(err))
             return code
     return -1
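A hedged illustration of the command assembly above, using hypothetical values for one entry of self.cmds and for self.table.num.

prefix = "ip -6 route add "
cmd = prefix + "fc00:2::/64 via fc00:12::2"   # hypothetical entry of self.cmds
cmd = cmd + " table {num}".format(num=100)    # hypothetical self.table.num
print(cmd)  # ip -6 route add fc00:2::/64 via fc00:12::2 table 100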
Code Example #9
File: jellyfish.py  Project: hodiapa/cs244-jellyfish
  def addHosts(self, switches, nodes, ports_per_switch, hosts_per_switch):
    switch_num = 1
    host_list = []
    for num in range(0, nodes):
      host_id = self.id_gen(num+1, num+1).name_str()
      host_opts = self.def_nopts(self.LAYER_HOST, host_id)
      h = self.addHost(host_id, **host_opts)
      lg.debug("Adding host: %s\n" % (host_id))
      host_list.append(h)
      if num % hosts_per_switch == 0:
        switch_id = self.id_gen(switch_num, 255).name_str()
        switch_opts = self.def_nopts(self.LAYER_EDGE, switch_id)
        switch = self.addSwitch(switch_id, **switch_opts)
        lg.debug("Adding switch: %s\n" % (switch_id))
        for host in host_list:
          self.addLink(host, switch, bw=self.bw)
          lg.debug("Adding link: %s to %s\n" % (str(host), str(switch)))
        host_list = []
        switch_num += 1

    # Use integer division so range() receives an int (Python 3).
    for num in range(nodes // hosts_per_switch, switches):
      switch_id = self.id_gen(num+1, 255).name_str()
      lg.debug("Adding switch: %s\n" % (switch_id))
      switch_opts = self.def_nopts(self.LAYER_EDGE, switch_id)
      switch = self.addSwitch(switch_id, **switch_opts)
Code Example #10
File: __router.py  Project: Ananas120/ipmininet
    def _mklogdirs(self, logdir) -> Tuple[str, str, int]:
        """Creates directories for the given logdir.

           :param logdir: The log directory path to create
           :return: (stdout, stderr, return_code)
        """
        lg.debug('{}: Creating logdir {}.\n'.format(self.name, logdir))
        cmd = 'mkdir -p {}'.format(logdir)
        stdout, stderr, return_code = self._processes.pexec(shlex.split(cmd))
        if not return_code:
            lg.debug('{}: Logdir {} successfully created.\n'.format(
                self.name, logdir))
        else:
            lg.error('{}: Could not create logdir {}. Stderr: \n'
                     '{}\n'.format(self.name, logdir, stderr))
        return (stdout, stderr, return_code)
Code Example #11
 def _run(self):
     self.running = True
     try:
         while True:
             if self.stop_event and self.stop_event.is_set():
                 break
             # Cleanup expired arp entries
             arp_table_snapshot = self.arp_table.copy()
             for ipk in arp_table_snapshot:
                 if ipk not in self.arp_table:
                     continue
                 if self.arp_table[ipk]['expiry'] < datetime.now():
                     lg.debug('%s is expired. cleanup now.' % ipk)
                     del self.arp_table[ipk]
                     self.sw.deleteTableEntry(
                         table_name='PWOSPFIngress.arp_table',
                         match_fields={'meta.gateway': ipk})
     except KeyboardInterrupt:
         pass
Code Example #12
    def updateArpTable(self, ip, mac):
        write = True
        if ip in self.arp_table:
            if self.arp_table[ip]['mac'] != mac:
                lg.debug('%s cache should be changed. cleanup now.' % ip)
                self.sw.deleteTableEntry(table_name='PWOSPFIngress.arp_table',
                                         match_fields={'meta.gateway': ip})
            else:
                write = False
        self.arp_table[ip] = {
            'mac': mac,
            'expiry': datetime.now() + timedelta(seconds=self.arp_timeout)
        }

        if write:
            self.sw.insertTableEntry(
                table_name='PWOSPFIngress.arp_table',
                match_fields={'meta.gateway': ip},
                action_name='PWOSPFIngress.update_dst_mac',
                action_params={'dstEth': mac})
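A hedged usage sketch of the caching behaviour implemented above; the manager instance and addresses are hypothetical.

mgr.updateArpTable('10.0.1.2', '00:00:00:00:01:02')  # new IP: entry written to the switch
mgr.updateArpTable('10.0.1.2', '00:00:00:00:01:02')  # same MAC: only the expiry is refreshed
mgr.updateArpTable('10.0.1.2', '00:00:00:00:01:03')  # MAC changed: old entry deleted, new one written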
Code Example #13
File: base.py  Project: slardinois/ipmininet
    def render(self, cfg, **kwargs):
        """Render the configuration file for this daemon

        :param cfg: The global config for the node
        :param kwargs: Additional keywords args. will be passed directly
                       to the template"""
        self.files.append(self.cfg_filename)
        log.debug('Generating %s\n' % self.cfg_filename)
        try:
            return template_lookup.get_template(self.template_filename)\
                                  .render(node=cfg,
                                          ip_statement=ip_statement,
                                          **kwargs)
        except:
            # Display template errors in a less cryptic way
            log.error("Couldn't render a config file (",
                      self.template_filename, ')')
            log.error(mako.exceptions.text_error_template().render())
            raise ValueError('Cannot render a configuration [%s: %s]' %
                             (self._node.name, self.NAME))
Code Example #14
File: base.py  Project: oliviertilmans/ipmininet
    def render(self, cfg, **kwargs):
        """Render the configuration file for this daemon

        :param cfg: The global config for the node
        :param kwargs: Additional keywords args. will be passed directly
                       to the template"""
        self.files.append(self.cfg_filename)
        log.debug('Generating %s\n' % self.cfg_filename)
        try:
            return template_lookup.get_template(self.template_filename)\
                                  .render(node=cfg,
                                          ip_statement=ip_statement,
                                          **kwargs)
        except:
            # Display template errors in a less cryptic way
            log.error("Couldn't render a config file (",
                      self.template_filename, ')')
            log.error(mako.exceptions.text_error_template().render())
            raise ValueError('Cannot render a configuration [%s: %s]' % (
                self._node.name, self.NAME))
Code Example #15
 def debug(self, content, pre='> ', post=''):
     mnLog.debug(content, pre, post)
Code Example #16
 def debugln(self, content, pre='> ', post=''):
     mnLog.debug(content, pre, (post + '\n'))
Code Example #17
File: ipnet.py  Project: tyler-marr/ipmininet
    def _allocate_subnets(subnets: List[Union[IPv4Network, IPv6Network]],
                          domains: List['BroadcastDomain'],
                          domainlen='len_v4', net_key='net',
                          size_key='max_v4prefixlen', max_prefixlen=24,
                          allocated_subnets: Iterable[Union[IPv4Network,
                                                            IPv6Network]] = ()):
        """Allocate subnets to broadcast domains.

        We keep the subnets sorted as x < y wrt the available number of
        addresses in the subnet so that the bigger domains
        take the smallest subnets before subdividing them.
        As the domains range from the biggest to the smallest, and the subnets
        from the smallest to the biggest, the biggest domain will take the
        first subnet that is able to contain it, and split it into several
        subnets until it is restricted to its prefix.
        The next domain is then necessarily of the same size (it reuses one of
        the split subnets) or smaller (it uses a previously split subnet or
        splits a bigger one). This avoids wasting addresses (wrt. the specified
        max_prefixlen) at the cost of a (roughly) quadratic behavior.

        :param subnets: a list of ip_network of available subnets. This list
                        will be modified to account for the new allocations.
        :param domains: a list of BroadcastDomain
        :param domainlen: The name of the method used to retrieve the length
                          of the broadcast domain (address count)
        :param net_key: the key to use to set the allocated subnet in the
                        broadcast domain.
        :param size_key: the key to use to retrieve the maximal prefix length
                         suitable for a broadcast domain
        :param max_prefixlen: The maximal prefixlen that can be allocated,
                                e.g. to not allocate /126 for IPv6 P2P links
        :param allocated_subnets: The subnets that are already allocated and
                                  cannot be allocated to another domain
        :return: iterator of (domain, subnet)"""
        _domainlen = methodcaller(domainlen)
        domains.sort(key=_domainlen, reverse=True)
        _prefixlen = attrgetter('prefixlen')
        subnets.sort(key=_prefixlen, reverse=True)
        ip_version = 4 if net_key == 'net' else 6
        for d in domains:
            if not d.use_ip_version(ip_version):
                continue
            if not subnets:
                raise ValueError('No subnet left in the prefix space for all '
                                 'broadcast domains.')
            plen = min(max_prefixlen, getattr(d, size_key))
            if plen < subnets[-1].prefixlen:
                raise ValueError('Could not find a subnet big enough for a '
                                 'broadcast domain.')
            log.debug('Allocating prefix', plen, 'for interfaces',
                      d.interfaces)
            # Try to find a suitable subnet in the list
            for i, net in enumerate(subnets):
                nets = []
                # if the subnet is too big for the prefix, perform a left
                # expansion (only expand one at a time to keep subnets as
                # aggregated as possible).
                while plen > net.prefixlen:
                    # Get list of subnets and append to list of previous
                    # expanded subnets as it is bigger wrt. prefixlen
                    net, next_net = tuple(net.subnets(prefixlen_diff=1))
                    # If not a subnet of an allocated subnet
                    if len(list(filter(lambda y: is_subnet_of(next_net, y),
                                       allocated_subnets))) == 0:
                        nets.append(next_net)
                # Check if we have an appropriately-sized subnet
                if plen == net.prefixlen:
                    # If the network overlaps with an allocated subnet,
                    # we pass it
                    if len(list(filter(lambda y: is_subnet_of(net, y)
                                                 or is_subnet_of(y, net),
                                       allocated_subnets))) == 0:
                        # Register the allocation
                        setattr(d, net_key, net)
                    # Delete the expanded/used subnet
                    del subnets[i]
                    # Insert the created subnets if any
                    subnets.extend(nets)
                    # Sort the array again
                    subnets.sort(key=_prefixlen, reverse=True)
                    # Proceed to the next broadcast domain
                    break
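A small standalone illustration of the "left expansion" step above, using only the standard ipaddress module; the starting prefix and target length are hypothetical.

from ipaddress import ip_network

net = ip_network('10.0.0.0/16')  # available subnet, larger than needed
plen = 24                        # prefix length required by the broadcast domain
spare = []
while plen > net.prefixlen:
    # Split in two; keep the left half, stash the right half for later domains.
    net, right = tuple(net.subnets(prefixlen_diff=1))
    spare.append(right)
print(net)        # 10.0.0.0/24, allocated to the domain
print(spare[0])   # 10.0.128.0/17, returned to the pool of free subnets
print(spare[-1])  # 10.0.1.0/24, the smallest leftover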
Code Example #18
File: controller.py  Project: fno2010/pwospf-p4
    def onPacket(self, pkt):
        if lg.getEffectiveLevel() <= LEVELS['debug']:
            pkt.show2()
        if CPUMetadata not in pkt:
            lg.warn(
                "Should only receive packets from switch with special header")
            return

        # Ignore packets that the CPU sends:
        if pkt[CPUMetadata].fromCpu == 1: return

        try:
            if ARP in pkt:
                if pkt[ARP].op == ARP_OP_REPLY:
                    self.arp_manager.updateArpTable(pkt[ARP].psrc,
                                                    pkt[ARP].hwsrc)
                    # self.send(pkt, 0)
                elif pkt[ARP].op == ARP_OP_REQ:
                    self.arp_manager.updateArpTable(pkt[ARP].psrc,
                                                    pkt[ARP].hwsrc)
                    self.arpReply(pkt)
            elif IP in pkt:
                is_local_ip = pkt[IP].dst in [
                    p.IP() for p in self.sw.data_ports.values()
                ]
                if pkt[CPUMetadata].ingressPort not in self.sw.data_ports:
                    lg.warn(
                        '%s drops a packet received from an invalid port\n' %
                        self.sw.name)
                    return
                elif ICMP in pkt and pkt[ICMP].type == ICMP_TYPE_ECHO and pkt[
                        ICMP].code == 0:
                    lg.info('%s received an ICMP echo to %s:\n' %
                            (self.sw.name, pkt[IP].dst))
                    if lg.getEffectiveLevel() <= LEVELS['debug']:
                        pkt.show()
                    if is_local_ip:
                        # Reply ICMP echo request
                        self.icmpReply(pkt)
                elif pkt[IP].proto == PROTO_PWOSPF:
                    lg.debug('%s received a PWOSPF packet\n' % self.sw.name)
                    try:
                        pwospf_pkt = PWOSPF_Hdr(pkt[Raw])
                    except Exception:
                        lg.debug(
                            '%s cannot parse this PWOSPF packet correctly\n' %
                            self.sw.name)
                        return
                    if lg.getEffectiveLevel() <= LEVELS['debug']:
                        pwospf_pkt.show()
                    if pwospf_pkt.areaid != self.sw.area_id:
                        lg.debug(
                            '%s drops PWOSPF packet from a different area\n' %
                            self.sw.name)
                        return
                    if pwospf_pkt.routerid == self.sw.router_id:
                        lg.debug(
                            '%s drops PWOSPF packet generated by itself\n' %
                            self.sw.name)
                        return
                    if PWOSPF_Hello in pwospf_pkt:
                        inport = self.sw.data_ports[
                            pkt[CPUMetadata].ingressPort]
                        neigh_id = pwospf_pkt[PWOSPF_Hdr].routerid
                        neigh_ip = pkt[IP].src
                        helloint = pwospf_pkt[PWOSPF_Hello].helloint
                        netmask = pwospf_pkt[PWOSPF_Hello].netmask
                        # print(inport.intf.name, neigh_id, neigh_ip, datetime.now(), netmask, helloint)
                        if inport.ownIP(neigh_ip):
                            inport.updateNeigh(neigh_id, neigh_ip,
                                               datetime.now(), netmask,
                                               helloint)
                        else:
                            lg.debug(
                                '%s drops a Hello packet from a different subnet\n'
                                % self.sw.name)
                        # print(inport.intf.name, inport.neighbors)
                        return
                    if PWOSPF_LSU in pwospf_pkt:
                        self.pwospf_manager.handleLSU(pkt)
                        return
                if not is_local_ip and pkt[
                        CPUMetadata].egressPort not in self.sw.data_ports:
                    lg.warn(
                        '%s drops a packet targeting an invalid port\n' %
                        self.sw.name)
                    return
                else:
                    outport = self.sw.data_ports[pkt[CPUMetadata].egressPort]
                    dstprefix = ipprefix(pkt[IP].dst, outport.Netmask())
                    route = self.sw.pwospf_table.get(dstprefix)
                    # print(dstprefix, route)
                    if route is None:
                        return
                    gateway = route[1]
                    if gateway == '0.0.0.0':
                        gateway = pkt[IP].dst
                    arp_entry = self.arp_manager.arp_table.get(gateway)
                    lg.info('%s prepares ARP entry: %s\n' %
                            (self.sw.name, arp_entry))
                    if arp_entry is None:
                        lg.info('Missing ARP, request first\n')
                        self.arpRequest(gateway, pkt[CPUMetadata].egressPort)
                    self.pending_processor.future_send(
                        pkt, gateway,
                        time.time() + self.timeout)
        except:
            lg.warn(
                'Some exceptions were raised while handling the incoming packet; enable debug mode to see details\n'
            )
            if lg.getEffectiveLevel() <= LEVELS['warning']:
                traceback.print_exc()
Code Example #19
File: ipnet.py  Project: oliviertilmans/ipmininet
    def _allocate_subnets(subnets, domains, domainlen='len_v4',
                          net_key='net', size_key='max_v4prefixlen',
                          max_prefixlen=24):
        """Allocate subnets to broadcast domains.

        We keep the subnets sorted as x < y wrt the available number of
        addresses in the subnet so that the bigger domains
        take the smallest subnets before subdividing them.
        As the domains range from the biggest to the smallest, and the subnets
        from the smallest to the biggest, the biggest domain will take the
        first subnet that is able to contain it, and split it into several
        subnets until it is restricted to its prefix.
        The next domain is then necessarily of the same size (it reuses one of
        the split subnets) or smaller (it uses a previously split subnet or
        splits a bigger one). This avoids wasting addresses (wrt. the specified
        max_prefixlen) at the cost of a (roughly) quadratic behavior.

        :param subnets: a list of ip_network of available subnets. This list
                        will be modified to account for the new allocations.
        :param domains: a list of BroadcastDomain
        :param domainlen: The name of the method used to retrieve the length
                          of the broadcast domain (address count)
        :param net_key: the key to use to set the allocated subnet in the
                        broadcast domain.
        :param size_key: the key to use to retrieve the maximal prefix length
                         suitable for a broadcast domain
        :param max_prefixlen: The maximal prefixlen that can be allocated,
                                e.g. to not allocate /126 for IPv6 P2P links
        :return: iterator of (domain, subnet)"""
        _domainlen = methodcaller(domainlen)
        domains.sort(key=_domainlen, reverse=True)
        _prefixlen = attrgetter('prefixlen')
        subnets.sort(key=_prefixlen, reverse=True)
        for d in domains:
            if not subnets:
                raise ValueError('No subnet left in the prefix space for all '
                                 'broadcast domains.')
            plen = min(max_prefixlen, getattr(d, size_key))
            if plen < subnets[-1].prefixlen:
                raise ValueError('Could not find a subnet big enough for a '
                                 'broadcast domain.')
            log.debug('Allocating prefix', plen, 'for interfaces',
                      d.interfaces)
            # Try to find a suitable subnet in the list
            for i, net in enumerate(subnets):
                nets = []
                # if the subnet is too big for the prefix, perform a left
                # expansion (only expand one at a time to keep subnets as
                # aggregated as possible).
                while plen > net.prefixlen:
                    # Get list of subnets and append to list of previous
                    # expanded subnets as it is bigger wrt. prefixlen
                    net, next_net = tuple(net.subnets(prefixlen_diff=1))
                    nets.append(next_net)
                # Check if we have an appropriately-sized subnet
                if plen == net.prefixlen:
                    # Register the allocation
                    setattr(d, net_key, net)
                    # Delete the expanded/used subnet
                    del subnets[i]
                    # Insert the created subnets, if any
                    subnets.extend(nets)
                    # Sort the array again
                    subnets.sort(key=_prefixlen, reverse=True)
                    # Proceed to the next broadcast domain
                    break