Example #1
def transfer(host, username, local, remote = None, key_filename = None, password = None):
    log.debug("Transferring lab to %s" % host)
    log.info("Transferring Netkit lab")
    if not remote:
        remote = local # same filename
    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy( paramiko.AutoAddPolicy())
    try:
        if key_filename:
            log.debug("Connecting to %s with %s and key %s" % (host, username, key_filename))
            ssh.connect(host, username = username, key_filename = key_filename)
        elif password:
            log.info("Connecting to %s with %s" % (host, username))
            ssh.connect(host, username = username, password=password)
        else: 
            log.error("No password, no key assigned for deployment")
            exit(1)
    except paramiko.SSHException:
        log.error("Could not get access to host")
        exit(1)
    log.debug("Opening SSH for SFTP")
    ftp = ssh.open_sftp()
    log.debug("Putting file %s to %s" % (local, remote))
    ftp.put(local, remote)
    log.debug("Put file %s to %s" % (local, remote))
    ftp.close()
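
A call to the helper above might look like the following sketch; the host, username, and file names are illustrative placeholders, and either key_filename or password must be supplied, otherwise the function exits.

transfer('203.0.113.10', 'netkit', 'netkit_lab.tar.gz',
         key_filename='/home/netkit/.ssh/id_rsa')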
Example #2
def manual_ipv6_infrastructure_allocation(anm):
    """Applies manual IPv6 allocation"""

    import netaddr
    g_ipv6 = anm['ipv6']
    g_in = anm['input']
    log.info('Using specified IPv6 infrastructure allocation')

    for node in g_ipv6.l3devices():
        for interface in node.physical_interfaces():
            if not interface['input'].is_bound:
                continue  # unbound interface
            if not interface['ipv6'].is_bound:
                continue
            ip_address = netaddr.IPAddress(interface['input'].ipv6_address)
            prefixlen = interface['input'].ipv6_prefixlen
            interface.ip_address = ip_address
            interface.prefixlen = prefixlen
            cidr_string = '%s/%s' % (ip_address, prefixlen)
            interface.subnet = netaddr.IPNetwork(cidr_string)

    broadcast_domains = [d for d in g_ipv6 if d.broadcast_domain]

    # TODO: allow this to work with specified ip_address/subnet as well as
    # ip_address/prefixlen

    global_infra_block = None
    try:
        # Note this is only picking up the block if explicitly set in g_in
        infra_subnet = g_in.data.ipv6_infra_subnet
        infra_prefix = g_in.data.ipv6_infra_prefix
        global_infra_block = sn_preflen_to_network(infra_subnet, infra_prefix)
    except Exception, e:
        log.info("Unable to parse specified ipv4 infra subnets %s/%s")
Example #3
def compile_network(anm):
    nidb = create_nidb(anm)
    g_phy = anm['phy']

    for target_data in config.settings['Compile Targets'].values():
        host = target_data['host']
        platform = target_data['platform']
        if platform == "netkit":
            import autonetkit.compilers.platform.netkit as pl_netkit
            platform_compiler = pl_netkit.NetkitCompiler(nidb, anm, host)
        elif platform == "VIRL":
            try:
                import autonetkit_cisco.compilers.platform.cisco as pl_cisco
                platform_compiler = pl_cisco.CiscoCompiler(nidb, anm, host)
            except ImportError:
                log.debug("Unable to load VIRL platform compiler")
        elif platform == "dynagen":
            import autonetkit.compilers.platform.dynagen as pl_dynagen
            platform_compiler = pl_dynagen.DynagenCompiler(nidb, anm, host)
        elif platform == "junosphere":
            import autonetkit.compilers.platform.junosphere as pl_junosphere
            platform_compiler = pl_junosphere.JunosphereCompiler(
                nidb, anm, host)

        if any(g_phy.nodes(host=host, platform=platform)):
            log.info("Compiling configurations for %s on %s" %
                     (platform, host))
            platform_compiler.compile()  # only compile if hosts set
        else:
            log.debug("No devices set for %s on %s" % (platform, host))

    return nidb
Example #4
 def starting_host(protocol, index, data):
     m = re.search('\\"(\S+)\\"', data.group(index))
     if m:
         hostname = m.group(1)
         log.info(data.group(index)) #TODO: use regex to strip out just the machine name
         body = {"starting": hostname}
         messaging.publish_json(body)
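
The monitor callback above pulls the machine name out of a quoted string in the console output; a minimal standalone sketch of that regex (the sample line is made up):

import re

line = 'Starting "r1" ...'
m = re.search(r'\"(\S+)\"', line)
if m:
    print(m.group(1))  # prints: r1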
Example #5
 def do_something(thread, host, conn):
     conn.set_timeout(timeout)
     conn.add_monitor(r'Starting (\S+)', starting_host)
     conn.add_monitor(r'The lab has been started', lab_started)
     #conn.data_received_event.connect(data_received)
     conn.execute('cd %s' % cd_dir)
     conn.execute('lcrash -k')
     conn.execute("lclean")
     conn.execute('cd') # back to home directory tar file copied to
     conn.execute('tar -xzf %s' % tar_file)
     conn.execute('cd %s' % cd_dir)
     conn.execute('vlist')
     conn.execute("lclean")
     log.info("Starting lab")
     start_command = 'lstart -p5 -o --con0=none'
     try:
         conn.execute(start_command)
     except InvalidCommandException, error:
         if "already running" in str(error):
             time.sleep(1)
             #print "Already Running" #TODO: handle appropriately
             #print "Halting previous lab"
             #conn.execute("vclean -K")
             #print "Halted previous lab"
             #conn.execute("vstart taptunnelvm --con0=none --eth0=tap,172.16.0.1,172.16.0.2") # TODO: don't hardcode this
             #print "Starting lab"
             conn.execute(start_command)
Example #6
def mpls_te(anm):
    g_in = anm['input']
    g_phy = anm['phy']
    g_l3 = anm['layer3']

    # add regardless, so allows quick check of node in anm['mpls_te'] in compilers

    g_mpls_te = anm.add_overlay('mpls_te')
    if not any(True for n in g_in.routers() if n.mpls_te_enabled):
        log.debug('No nodes with mpls_te_enabled set')
        return

    # te head end set if here

    g_mpls_te.add_nodes_from(g_in.routers())

    # build up edge list sequentially, to provide meaningful messages for multipoint links

    multipoint_edges = [e for e in g_l3.edges() if e.multipoint]
    if len(multipoint_edges):
        log.info('Excluding multi-point edges from MPLS TE topology: %s' %
                 ', '.join(str(e) for e in multipoint_edges))

    edges_to_add = set(g_l3.edges()) - set(multipoint_edges)
    g_mpls_te.add_edges_from(edges_to_add)
Example #7
def check_server_asns(anm):
    """Checks that servers have appropriate ASN allocated.
    Warns and auto-corrects servers connected to routers of a different AS
    #TODO: provide manual over-ride for this auto-correct.
    """
    # TODO: Move to validate module?
    g_phy = anm["phy"]

    for server in g_phy.servers():
        # TODO: remove now have external_connector device_type?
        if server.device_subtype in ("SNAT", "FLAT"):
            continue  # Don't warn on ASN for NAT elements
        l3_neighbors = list(server["layer3"].neighbors())
        l3_neighbor_asns = set(n.asn for n in l3_neighbors)
        if server.asn not in l3_neighbor_asns:
            neighs_with_asn = ["%s: AS %s" % (n, n.asn) for n in l3_neighbors]
            # tuples for warning message
            server.log.warning("Server does not belong to same ASN " "as neighbors %s" % (neighs_with_asn))

            if len(l3_neighbors) == 1:
                # single ASN of neighbor -> auto correct
                if server["input"].default_asn:
                    neigh_asn = l3_neighbor_asns.pop()
                    log.warning("Updating server %s AS from %s" " to %s", server, server.asn, neigh_asn)
                    server.asn = neigh_asn
                else:
                    log.info("Server %s ASN %s explictly set by user, " "not auto-correcting", server, server.asn)
Example #8
    def add_overlay(self, name, nodes=None, graph=None, directed=False,
            multi_edge=False, retain=None):
        """Adds overlay graph of name name"""
        if graph:
            if not directed and graph.is_directed():
                log.info("Converting graph %s to undirected" % name)
                graph = nx.Graph(graph)

        elif directed:
            if multi_edge:
                graph = nx.MultiDiGraph()
            else:
                graph = nx.DiGraph()
        else:
            if multi_edge:
                graph = nx.MultiGraph()
            else:
                graph = nx.Graph()

        self._overlays[name] = graph
        overlay = OverlayGraph(self, name)
        overlay.allocate_interfaces()
        if nodes:
            retain = retain or []  # default is an empty list
            overlay.add_nodes_from(nodes, retain)
        return overlay
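
The branching above simply picks a NetworkX graph class from the directed and multi_edge flags; a small self-contained sketch of the same selection (the helper name is illustrative, not part of the API):

import networkx as nx

def graph_for_flags(directed=False, multi_edge=False):
    # mirrors the class selection in add_overlay
    if directed:
        return nx.MultiDiGraph() if multi_edge else nx.DiGraph()
    return nx.MultiGraph() if multi_edge else nx.Graph()

print(type(graph_for_flags(directed=True, multi_edge=True)).__name__)  # MultiDiGraph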
Example #9
    def compile(self):
        log.info("Compiling Dynagen for %s" % self.host)
        g_phy = self.anm['phy']
        G_graphics = self.anm['graphics']
        ios_compiler = IosClassicCompiler(self.nidb, self.anm)
        for phy_node in g_phy.nodes('is_router', host=self.host, syntax='ios'):
            nidb_node = self.nidb.node(phy_node)
            graphics_node = G_graphics.node(phy_node)
            nidb_node.render.template = os.path.join("templates", "ios.mako")
            nidb_node.render.dst_folder = os.path.join(
                "rendered", self.host, "dynagen", self.config_dir)
            nidb_node.render.dst_file = "%s.cfg" % ank.name_folder_safe(
                phy_node.label)

            # TODO: may want to normalise x/y
            nidb_node.x = graphics_node.x
            nidb_node.y = graphics_node.y

            # Allocate edges
            # assign interfaces
            # Note this could take external data
            int_ids = self.interface_ids()
            for interface in nidb_node.physical_interfaces:
                interface.id = int_ids.next()

            ios_compiler.compile(nidb_node)

        self.allocate_ports()
        self.lab_topology()
Example #10
def update_http(anm = None, nidb = None, http_url = None):
    if http_url is None:
        http_url = default_http_url

    if anm and nidb:
        body = autonetkit.ank_json.dumps(anm, nidb)
    elif anm:
        body = autonetkit.ank_json.dumps(anm)
    else:
        import json
        body = json.dumps({}) # blank to test visualisation server running

    uuid = get_uuid(anm)

    params = urllib.urlencode({
        'body': body,
        'type': 'anm',
        'uuid': uuid,
        })
    try:
        data = urllib.urlopen(http_url, params).read()
        log.debug(data)
    except IOError, e:
        log.info("Unable to connect to visualisation server %s" % http_url)
        return
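
The POST itself uses the Python 2 urllib API, as the examples here do; a minimal sketch of the same pattern, assuming a local visualisation server URL and a placeholder UUID:

import json
import urllib

body = json.dumps({})  # empty payload, as used to probe the server
params = urllib.urlencode({'body': body, 'type': 'anm', 'uuid': 'test-uuid'})
try:
    response = urllib.urlopen('http://localhost:8000/publish', params).read()
except IOError:
    response = None  # visualisation server not reachable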
Example #11
 def publish_http_post(self, exchange, routing_key, body):
     print "called"
     params = urllib.urlencode({'body': body})
     try:
         data = urllib.urlopen(self.http_url, params).read()
     except IOError, e:
         log.info("Unable to connect to HTTP Server %s" % self.http_url)
Example #12
def compile_network(anm):
    nidb = create_nidb(anm)
    g_phy = anm['phy']

    for target_data in config.settings['Compile Targets'].values():
        host = target_data['host']
        platform = target_data['platform']
        if platform == "netkit":
            import autonetkit.compilers.platform.netkit as pl_netkit
            platform_compiler = pl_netkit.NetkitCompiler(nidb, anm, host)
        elif platform == "VIRL":
            try:
                import autonetkit_cisco.compilers.platform.cisco as pl_cisco
                platform_compiler = pl_cisco.CiscoCompiler(nidb, anm, host)
            except ImportError:
                log.debug("Unable to load VIRL platform compiler")
        elif platform == "dynagen":
            import autonetkit.compilers.platform.dynagen as pl_dynagen
            platform_compiler = pl_dynagen.DynagenCompiler(nidb, anm, host)
        elif platform == "junosphere":
            import autonetkit.compilers.platform.junosphere as pl_junosphere
            platform_compiler = pl_junosphere.JunosphereCompiler(
                nidb, anm, host)

        if any(g_phy.nodes(host=host, platform=platform)):
            log.info("Compiling configurations for %s on %s" % (platform, host))
            platform_compiler.compile()  # only compile if hosts set
        else:
            log.debug("No devices set for %s on %s" % (platform, host))

    return nidb
Example #13
def transfer(host, username, local, remote=None, key_filename=None):
    log.debug('Transferring lab to %s' % host)
    log.info('Transferring Netkit lab')
    if not remote:
        remote = local  # same filename
    import paramiko

    # import logging
    # logging.getLogger("paramiko").setLevel(logging.DEBUG)

    ssh = paramiko.SSHClient()

    # ssh.set_log_channel("ANK")

    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if key_filename:
        log.debug('Connecting to %s with %s and key %s' % (host,
                  username, key_filename))
        ssh.connect(host, username=username, key_filename=key_filename)
    else:
        log.info('Connecting to %s with %s' % (host, username))
        ssh.connect(host, username=username)
    log.info('Opening SSH for SFTP')
    ftp = ssh.open_sftp()
    log.info('Putting file %s to %s' % (local, remote))
    ftp.put(local, remote)
    log.info('Put file %s to %s' % (local, remote))
    ftp.close()
Example #14
def update_http(
    anm=None,
    nidb=None,
    http_url=None,
    uuid=None,
    ):
    if http_url is None:
        http_url = default_http_url

    if anm and nidb:
        body = autonetkit.ank_json.dumps(anm, nidb)
    elif anm:
        body = autonetkit.ank_json.dumps(anm)
    else:
        import json
        body = json.dumps({})  # blank to test visualisation server running

    if uuid is None:
        uuid = get_uuid(anm)

    params = urllib.urlencode({'body': body, 'type': 'anm',
                              'uuid': uuid})
    try:
        data = urllib.urlopen(http_url, params).read()
        log.debug(data)
    except IOError, e:
        log.info('Unable to connect to visualisation server %s'
                 % http_url)
        return
Example #15
def allocate(G_phy, G_bgp):
    log.info("Allocating route reflectors")
    graph_phy = G_phy._graph
    for asn, devices in G_phy.groupby("asn").items():
        routers = [d for d in devices if d.is_router]
        router_ids = ank_utils.unwrap_nodes(routers)

        subgraph_phy = graph_phy.subgraph(router_ids)
        if len(subgraph_phy) == 1:
            continue  # single node in graph, no ibgp

        betw_cen = nx.degree_centrality(subgraph_phy)

        ordered = sorted(subgraph_phy.nodes(), key = lambda x: betw_cen[x], reverse = True)

        rr_count = len(subgraph_phy)/5 # Take top 20% to be route reflectors
        route_reflectors = ordered[:rr_count] # most connected 20%
        rr_clients = ordered[rr_count:] # the other routers
        route_reflectors = list(ank_utils.wrap_nodes(G_bgp, route_reflectors))
        rr_clients = list(ank_utils.wrap_nodes(G_bgp, rr_clients))

        G_bgp.update(route_reflectors, route_reflector = True) # mark as route reflector
        # rr <-> rr
        over_links = [(rr1, rr2) for rr1 in route_reflectors for rr2 in route_reflectors if rr1 != rr2] 
        G_bgp.add_edges_from(over_links, type = 'ibgp', direction = 'over')
        # client -> rr
        up_links = [(client, rr) for (client, rr) in itertools.product(rr_clients, route_reflectors)]
        G_bgp.add_edges_from(up_links, type = 'ibgp', direction = 'up')
        # rr -> client
        down_links = [(rr, client) for (client, rr) in up_links] # opposite of up
        G_bgp.add_edges_from(down_links, type = 'ibgp', direction = 'down')

    log.debug("iBGP done")
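
The selection above ranks routers by degree centrality and takes roughly the top 20% as route reflectors; a self-contained NetworkX sketch of that ranking on a random toy graph:

import networkx as nx

g = nx.gnm_random_graph(20, 40, seed=42)
centrality = nx.degree_centrality(g)
ordered = sorted(g.nodes(), key=lambda n: centrality[n], reverse=True)
rr_count = max(len(g) // 5, 1)          # top 20%, at least one
route_reflectors = ordered[:rr_count]   # most connected nodes
rr_clients = ordered[rr_count:]         # everything else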
Example #16
def manual_ipv6_infrastructure_allocation(anm):
    """Applies manual IPv6 allocation"""

    import netaddr
    g_ipv6 = anm['ipv6']
    g_in = anm['input']
    log.info('Using specified IPv6 infrastructure allocation')

    for node in g_ipv6.l3devices():
        for interface in node.physical_interfaces():
            if not interface['input'].is_bound:
                continue  # unbound interface
            if not interface['ipv6'].is_bound:
                continue
            ip_address = netaddr.IPAddress(interface['input'
                                                     ].ipv6_address)
            prefixlen = interface['input'].ipv6_prefixlen
            interface.ip_address = ip_address
            interface.prefixlen = prefixlen
            cidr_string = '%s/%s' % (ip_address, prefixlen)
            interface.subnet = netaddr.IPNetwork(cidr_string)

    broadcast_domains = [d for d in g_ipv6 if d.broadcast_domain]

    # TODO: allow this to work with specified ip_address/subnet as well as
    # ip_address/prefixlen

    global_infra_block = None
    try:
        # Note this is only picking up the block if explicitly set in g_in
        infra_subnet = g_in.data.ipv6_infra_subnet
        infra_prefix = g_in.data.ipv6_infra_prefix
        global_infra_block = sn_preflen_to_network(infra_subnet, infra_prefix)
    except Exception, e:
        log.info("Unable to parse specified ipv4 infra subnets %s/%s")
Example #17
def check_server_asns(anm):
    """Checks that servers have appropriate ASN allocated.
    Warns and auto-corrects servers which are connected to routers of a different AS.
    #TODO: provide manual over-ride for this auto-correct.
    """
    #TODO: Move to validate module?
    g_phy = anm['phy']

    for server in g_phy.servers():
        if server.device_subtype in ("SNAT", "FLAT"):
            continue  # Don't warn on ASN for NAT elements
        l3_neighbors = list(server['l3_conn'].neighbors())
        l3_neighbor_asns = set(n.asn for n in l3_neighbors)
        if server.asn not in l3_neighbor_asns:
            neighs_with_asn = ["%s: AS %s" % (n, n.asn) for n in l3_neighbors
                               ]  # tuples for warning message
            server.log.warning(
                "Server does not belong to same ASN as neighbors %s" %
                (neighs_with_asn))

            if len(l3_neighbors) == 1:
                # single ASN of neighbor -> auto correct
                if server['input'].default_asn:
                    neigh_asn = l3_neighbor_asns.pop()
                    log.warning("Updating server %s AS from %s to %s" %
                                (server, server.asn, neigh_asn))
                    server.asn = neigh_asn
                else:
                    log.info(
                        "Server %s ASN %s explictly set by user, not auto-correcting"
                        % (server, server.asn))
Example #18
def transfer(host, username, local, remote=None, key_filename=None):
    log.debug('Transferring lab to %s' % host)
    log.info('Transferring Netkit lab')
    if not remote:
        remote = local  # same filename
    import paramiko

    # import logging
    # logging.getLogger("paramiko").setLevel(logging.DEBUG)

    ssh = paramiko.SSHClient()

    # ssh.set_log_channel("ANK")

    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if key_filename:
        log.debug('Connecting to %s with %s and key %s' %
                  (host, username, key_filename))
        ssh.connect(host, username=username, key_filename=key_filename)
    else:
        log.info('Connecting to %s with %s' % (host, username))
        ssh.connect(host, username=username)
    log.info('Opening SSH for SFTP')
    ftp = ssh.open_sftp()
    log.info('Putting file %s to %s' % (local, remote))
    ftp.put(local, remote)
    log.info('Put file %s to %s' % (local, remote))
    ftp.close()
Example #19
    def allocate_input_interfaces(self):
        """allocates edges to interfaces"""
        #TODO: move this to ank utils? or extra step in the anm?
        if self._overlay_id != "input":
            log.debug("Tried to allocate interfaces to %s" % overlay_id)
            return

        if all(len(node['input'].raw_interfaces) > 0 for node in self) \
            and all(len(edge['input'].raw_interfaces) > 0 for edge in
                    self.edges()):
            log.debug("Input interfaces allocated")
            return  # interfaces allocated
        else:
            log.info('Automatically assigning input interfaces')

        # Initialise loopback zero on node
        for node in self:
            node.raw_interfaces = {
                0: {
                    'description': 'loopback',
                    'category': 'loopback'
                }
            }

        ebunch = sorted(self.edges())
        for edge in ebunch:
            src = edge.src
            dst = edge.dst
            src_int_id = src._add_interface('%s to %s' %
                                            (src.label, dst.label))
            dst_int_id = dst._add_interface('%s to %s' %
                                            (dst.label, src.label))
            edge.raw_interfaces = {src.id: src_int_id, dst.id: dst_int_id}
Example #20
def highlight(nodes, edges):
    def nfilter(n):
        try:
            return n.id
        except AttributeError:
            return n # likely already a node id (string)

    def efilter(e):
        try:
            return (e.src.id, e.dst.id)
        except AttributeError:
            return e # likely already edge (src, dst) id tuple (string)

    nodes = [nfilter(n) for n in nodes]
    edges = [efilter(e) for e in edges]
    import json
    body = json.dumps({
        'highlight': {
        'nodes': nodes,
        'edges': edges,
        }
        })

    params = urllib.urlencode({
        'body': body
        })

    #TODO: split this common function out, create at runtime so don't need to keep reading config
    host = config.settings['Http Post']['server']
    port = config.settings['Http Post']['port']
    http_url = "http://%s:%s/publish" % (host, port)
    try:
        data = urllib.urlopen(http_url, params).read()
    except IOError, e:
        log.info("Unable to connect to HTTP Server %s: e" % (http_url, e))
Example #21
    def allocate_input_interfaces(self):
        """allocates edges to interfaces"""
        # TODO: move this to ank utils? or extra step in the anm?
        if self._overlay_id != "input":
            log.debug("Tried to allocate interfaces to %s" % overlay_id)
            return

        if all(len(node['input'].raw_interfaces) > 0 for node in self) \
            and all(len(edge['input'].raw_interfaces) > 0 for edge in
                    self.edges()):
            log.debug("Input interfaces allocated")
            return  # interfaces allocated
        else:
            log.info('Automatically assigning input interfaces')

        # Initialise loopback zero on node
        for node in self:
            node.raw_interfaces = {0:
                                   {'description': 'loopback', 'category': 'loopback'}}

        ebunch = sorted(self.edges())
        for edge in ebunch:
            src = edge.src
            dst = edge.dst
            src_int_id = src._add_interface('%s to %s' % (src.label,
                                                          dst.label))
            dst_int_id = dst._add_interface('%s to %s' % (dst.label,
                                                          src.label))
            edge.raw_interfaces = {
                src.id: src_int_id,
                dst.id: dst_int_id}
Example #22
    def add_overlay(self, name, nodes=None, graph=None, directed=False,
            multi_edge=False, retain=None):
        """Adds overlay graph of name name"""
        if graph:
            if not directed and graph.is_directed():
                log.info("Converting graph %s to undirected" % name)
                graph = nx.Graph(graph)

        elif directed:
            if multi_edge:
                graph = nx.MultiDiGraph()
            else:
                graph = nx.DiGraph()
        else:
            if multi_edge:
                graph = nx.MultiGraph()
            else:
                graph = nx.Graph()

        self._overlays[name] = graph
        overlay = OverlayGraph(self, name)
        overlay.allocate_interfaces()
        if nodes:
            retain = retain or []  # default is an empty list
            overlay.add_nodes_from(nodes, retain)

        return overlay
Example #23
    def compile(self):
        log.info("Compiling Netkit for %s" % self.host)
        G_phy = self.anm.overlay.phy
        quagga_compiler = QuaggaCompiler(self.nidb, self.anm)
#TODO: this should be all l3 devices not just routers
        for phy_node in G_phy.nodes('is_router', host = self.host, syntax='quagga'):
            folder_name = ank.name_folder_safe(phy_node.label)
            nidb_node = self.nidb.node(phy_node)
            nidb_node.render.base = "templates/quagga"
            nidb_node.render.template = "templates/netkit_startup.mako"
            nidb_node.render.dst_folder = "rendered/%s/%s" % (self.host, "netkit")
            nidb_node.render.base_dst_folder = "rendered/%s/%s/%s" % (self.host, "netkit", folder_name)
            nidb_node.render.dst_file = "%s.startup" % folder_name 

# allocate zebra information
            nidb_node.zebra.password = "******"
            
            # Allocate edges
            # assign interfaces
            # Note this could take external data
            int_ids = self.interface_ids()
            for edge in self.nidb.edges(nidb_node):
                edge.id = int_ids.next()
# and allocate tap interface
            nidb_node.tap.id = int_ids.next()

            quagga_compiler.compile(nidb_node)

        # and lab.conf
        self.allocate_tap_ips()
        self.lab_topology()
Example #24
def mpls_te(anm):
    g_in = anm['input']
    g_phy = anm['phy']
    g_l3 = anm['layer3']

    # add regardless, so allows quick check of node in anm['mpls_te'] in compilers

    g_mpls_te = anm.add_overlay('mpls_te')
    if not any(True for n in g_in.routers() if n.mpls_te_enabled):
        log.debug('No nodes with mpls_te_enabled set')
        return

    # te head end set if here

    g_mpls_te.add_nodes_from(g_in.routers())

    # build up edge list sequentially, to provide meaningful messages for multipoint links

    multipoint_edges = [e for e in g_l3.edges() if e.multipoint]
    if len(multipoint_edges):
        log.info('Excluding multi-point edges from MPLS TE topology: %s'
           % ', '.join(str(e) for e in multipoint_edges))

    edges_to_add = set(g_l3.edges()) - set(multipoint_edges)
    g_mpls_te.add_edges_from(edges_to_add)
Example #25
    def compile(self):
        log.info("Compiling Dynagen for %s" % self.host)
        g_phy = self.anm['phy']
        G_graphics = self.anm['graphics']
        ios_compiler = IosClassicCompiler(self.nidb, self.anm)
        for phy_node in g_phy.nodes('is_router', host=self.host, syntax='ios'):
            nidb_node = self.nidb.node(phy_node)
            graphics_node = G_graphics.node(phy_node)
            nidb_node.render.template = os.path.join("templates", "ios.mako")
            nidb_node.render.dst_folder = os.path.join(
                "rendered", self.host, "dynagen", self.config_dir)
            nidb_node.render.dst_file = "%s.cfg" % ank.name_folder_safe(
                phy_node.label)

            # TODO: may want to normalise x/y
            nidb_node.x = graphics_node.x
            nidb_node.y = graphics_node.y

            # Allocate edges
            # assign interfaces
            # Note this could take external data
            int_ids = self.interface_ids()
            for interface in nidb_node.physical_interfaces:
                interface.id = int_ids.next()

            ios_compiler.compile(nidb_node)

        self.allocate_ports()
        self.lab_topology()
Example #26
def manage_network(input_graph_string, timestamp, build_options, reload_build=False, grid = None):
    """Build, compile, render network as appropriate"""
    # import build_network_simple as build_network
    import autonetkit.build_network as build_network
    if reload_build:
        # remap?
        build_network = reload(build_network)

    if build_options['build']:
        if input_graph_string:
            graph = build_network.load(input_graph_string)
        elif grid:
            graph = build_network.grid_2d(grid)

        anm = build_network.build(graph)
        if not build_options['compile']:
            update_http(anm)

        if build_options['validate']:
            import autonetkit.ank_validate
            autonetkit.ank_validate.validate(anm)

    if build_options['compile']:
        if build_options['archive']:
            anm.save()
        nidb = compile_network(anm)

        update_http(anm, nidb)
        log.debug("Sent ANM to web server")
        if build_options['archive']:
            nidb.save()
        # render.remove_dirs(["rendered"])
        if build_options['render']:
            render.render(nidb)

    if not(build_options['build'] or build_options['compile']):
        # Load from last run
        import autonetkit.anm
        anm = autonetkit.anm.AbstractNetworkModel()
        anm.restore_latest()
        nidb = NIDB()
        nidb.restore_latest()
        update_http(anm, nidb)

    if build_options['diff']:
        import autonetkit.diff
        nidb_diff = autonetkit.diff.nidb_diff()
        import json
        data = json.dumps(nidb_diff, cls=ank_json.AnkEncoder, indent=4)
        log.info("Wrote diff to diff.json")
        with open("diff.json", "w") as fh:  # TODO: make file specified in config
            fh.write(data)

    if build_options['deploy']:
        deploy_network(anm, nidb, input_graph_string)

    if build_options['measure']:
        measure_network(anm, nidb)

    log.info("Finished")
Example #27
def allocate(G_phy, G_bgp):
    log.info("Allocating route reflectors")
    graph_phy = G_phy._graph
    for asn, devices in G_phy.groupby("asn").items():
        routers = [d for d in devices if d.is_router]
        router_ids = list(ank_utils.unwrap_nodes(routers))
        mapping_id_to_device = dict(zip(
            router_ids, routers))  # to reverse lookup id back to device

        subgraph_phy = graph_phy.subgraph(router_ids)
        if len(subgraph_phy) == 1:
            continue  # single node in graph, no ibgp

        betw_cen = nx.degree_centrality(subgraph_phy)

        ordered = sorted(subgraph_phy.nodes(),
                         key=lambda x: betw_cen[x],
                         reverse=True)

        rr_count = len(
            subgraph_phy) / 4 or 1  # Take top 25% (at least one) to be route reflectors
        route_reflectors = ordered[:rr_count]  # most connected x%
        log.debug("Chose route_reflectors %s" % route_reflectors)
        rr_clients = ordered[rr_count:]  # the other routers
        route_reflectors = list(ank_utils.wrap_nodes(G_bgp, route_reflectors))
        rr_clients = list(ank_utils.wrap_nodes(G_bgp, rr_clients))

        # distances (shortest path, physical graph) from rrs to clients
        path_lengths = {}
        for rr in route_reflectors:
            path = nx.single_source_shortest_path_length(subgraph_phy, rr)
            path_mapped = dict(
                (mapping_id_to_device[id], length)
                for (id, length) in path.items())  # ids to devices
            path_lengths[rr] = path_mapped

        G_bgp.update(route_reflectors,
                     route_reflector=True)  # mark as route reflector
        # rr <-> rr
        over_links = [(rr1, rr2) for rr1 in route_reflectors
                      for rr2 in route_reflectors if rr1 != rr2]
        G_bgp.add_edges_from(over_links, type='ibgp', direction='over')

        for client in rr_clients:
            ranked_rrs = sorted(route_reflectors,
                                key=lambda rr: path_lengths[rr][client])
            parent_count = 2  # number of parents to connect to for each rr client
            parent_rrs = ranked_rrs[:parent_count]
            log.info("Route reflectors for %s are %s " % (client, parent_rrs))

            for parent in parent_rrs:
                # client -> rr
                #up_links = [(client, rr) for (client, rr) in itertools.product(rr_clients, route_reflectors)]
                G_bgp.add_edge(client, parent, type='ibgp', direction='up')
                # rr -> client
                #down_links = [(rr, client) for (client, rr) in up_links] # opposite of up
                G_bgp.add_edge(parent, client, type='ibgp', direction='down')

    log.debug("iBGP done")
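
The ranking of parent route reflectors above relies on hop counts from each reflector to its clients; a minimal sketch of the underlying NetworkX call on a toy path graph:

import networkx as nx

g = nx.path_graph(5)  # nodes 0-1-2-3-4 in a line
lengths = dict(nx.single_source_shortest_path_length(g, 0))
print(lengths)  # {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}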
Example #28
    def allocate_interfaces(self):
        """allocates edges to interfaces"""

        if self._overlay_id in ('input', 'phy'):
            if all(len(node['input']._interfaces) > 0 for node in self) \
                and all(len(edge['input']._interfaces) > 0 for edge in
                        self.edges()):
                input_interfaces_allocated = True
            else:
                log.info('Automatically assigning input interfaces')
                input_interfaces_allocated = False

        if self._overlay_id == 'input':

            # only return if allocated here

            if input_interfaces_allocated:
                return   # already allocated

        # int_counter = (n for n in itertools.count() if n not in

        if self._overlay_id == 'phy':

            # check if nodes added

            nodes = list(self)
            edges = list(self.edges())
            if len(nodes) and len(edges):

                # allocate called once physical graph populated

                if input_interfaces_allocated:
                    for node in self:
                        input_interfaces = node['input']._interfaces
                        if len(input_interfaces):
                            node._interfaces = input_interfaces

                    for edge in self.edges():
                        edge._interfaces = edge['input']._interfaces
                        input_interfaces = edge['input']._interfaces
                        if len(input_interfaces):
                            edge._interfaces = input_interfaces
                    return

        self._init_interfaces()

        ebunch = sorted(self.edges())

        for edge in ebunch:
            src = edge.src
            dst = edge.dst
            src_int_id = src._add_interface('%s to %s' % (src.label,
                                                          dst.label))
            dst_int_id = dst._add_interface('%s to %s' % (dst.label,
                                                          src.label))
            edge._interfaces = {}
            edge._interfaces[src.id] = src_int_id
            edge._interfaces[dst.id] = dst_int_id
Example #29
def deploy_network():
    log.info("Deploying network")
    tar_file = deploy.package("rendered/nectar1/netkit/", "netkit")
    server = "trc1.trc.adelaide.edu.au"
    deploy.transfer(server, "sknight", tar_file, tar_file)
    print "server", server
    cd_dir = "rendered/nectar1/netkit/"
    deploy.extract(server, tar_file, cd_dir)
Example #30
def validate(anm):
    tests_passed = True
    tests_passed = validate_ipv4(anm) and tests_passed

    if tests_passed:
        log.info("All validation tests passed.")
    else:
        log.warning("Some validation tests failed.")
Example #31
def validate(anm):
    tests_passed = True
    tests_passed = validate_ipv4(anm) and tests_passed

    if tests_passed:
        log.info("All validation tests passed.")
    else:
        log.warning("Some validation tests failed.")
Example #32
 def starting_host(protocol, index, data):
     #print "Starting", data.group(index)
     log.info(data.group(index))
     body = {"starting": data.group(index)}
     www_channel.basic_publish(exchange='www',
             routing_key = "client",
             body= json.dumps(body))
     pass
Example #33
def deploy_network():
    log.info("Deploying network")
    tar_file = deploy.package("rendered/nectar1/netkit/", "netkit")
    server = "trc1.trc.adelaide.edu.au"
    deploy.transfer(server, "sknight", tar_file, tar_file)
    print "server", server
    cd_dir = "rendered/nectar1/netkit/"
    deploy.extract(server, tar_file, cd_dir)
Example #34
 def publish_http_post(self, exchange, routing_key, body):
     params = urllib.urlencode({
         'body': body
         })
     try:
         data = urllib.urlopen(self.http_url, params).read()
     except IOError, e:
         log.info("Unable to connect to HTTP Server %s" % self.http_url)
Example #35
def allocate_ips(G_ip, infra_block=None, loopback_block=None, secondary_loopback_block=None):
    log.info('Allocating Host loopback IPs')
    # TODO: move the following step to the l3 graph
    assign_asn_to_interasn_cds(G_ip)

    allocate_loopbacks(sorted(G_ip), loopback_block)
    allocate_infra(sorted(G_ip), infra_block)
    allocate_secondary_loopbacks(sorted(G_ip), secondary_loopback_block)
Example #36
def manual_ipv6_infrastructure_allocation(anm):
    """Applies manual IPv6 allocation"""

    import netaddr

    g_ipv6 = anm["ipv6"]
    log.info("Using specified IPv6 infrastructure allocation")

    for node in g_ipv6.l3devices():
        for interface in node.physical_interfaces:
            if not interface["input"].is_bound:
                continue  # unbound interface
            ip_address = netaddr.IPAddress(interface["input"].ipv6_address)
            prefixlen = interface["input"].ipv6_prefixlen
            interface.ip_address = ip_address
            interface.prefixlen = prefixlen
            cidr_string = "%s/%s" % (ip_address, prefixlen)
            interface.subnet = netaddr.IPNetwork(cidr_string)

    broadcast_domains = [d for d in g_ipv6 if d.broadcast_domain]

    # TODO: allow this to work with specified ip_address/subnet as well as ip_address/prefixlen

    from netaddr import IPNetwork

    for coll_dom in broadcast_domains:
        connected_interfaces = [edge.dst_int for edge in coll_dom.edges()]
        cd_subnets = [IPNetwork("%s/%s" % (i.subnet.network, i.prefixlen)) for i in connected_interfaces]

        if len(cd_subnets) == 0:
            log.warning("Collision domain %s is not connected to any nodes" % coll_dom)
            continue

        try:
            assert len(set(cd_subnets)) == 1
        except AssertionError:
            mismatch_subnets = "; ".join("%s: %s/%s" % (i, i.subnet.network, i.prefixlen) for i in connected_interfaces)
            log.warning("Non matching subnets from collision domain %s: %s" % (coll_dom, mismatch_subnets))
        else:
            coll_dom.subnet = cd_subnets[0]  # take first entry

        # apply to remote interfaces

        for edge in coll_dom.edges():
            edge.dst_int.subnet = coll_dom.subnet

    # also need to form aggregated IP blocks (used for e.g. routing prefix
    # advertisement)
    # import autonetkit
    # autonetkit.update_http(anm)

    infra_blocks = {}
    for (asn, devices) in g_ipv6.groupby("asn").items():
        broadcast_domains = [d for d in devices if d.broadcast_domain]
        subnets = [cd.subnet for cd in broadcast_domains if cd.subnet is not None]  # only if subnet is set
        infra_blocks[asn] = netaddr.cidr_merge(subnets)

    g_ipv6.data.infra_blocks = infra_blocks
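
The interface assignment and the per-ASN aggregation above both lean on netaddr; a small standalone sketch of those calls, using made-up documentation addresses:

import netaddr

ip = netaddr.IPAddress('2001:db8::1')
subnet = netaddr.IPNetwork('%s/%s' % (ip, 64))
print(subnet.cidr)  # 2001:db8::/64

merged = netaddr.cidr_merge([netaddr.IPNetwork('2001:db8:0:2::/64'),
                             netaddr.IPNetwork('2001:db8:0:3::/64')])
print(merged)       # [IPNetwork('2001:db8:0:2::/63')]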
Example #37
def deploy_network(anm, nidb, input_graph_string=None):

    # TODO: make this driven from config file
    log.info("Deploying Network")

    # TODO: pick up platform, host, filenames from nidb (as set in there)
    deploy_hosts = config.settings['Deploy Hosts']
    for hostname, host_data in deploy_hosts.items():
        for platform, platform_data in host_data.items():
            if not any(nidb.nodes(host=hostname, platform=platform)):
                log.debug(
                    "No hosts for (host, platform) (%s, %s), skipping deployment"
                    % (hostname, platform))
                continue

            if not platform_data['deploy']:
                log.debug("Not deploying to %s on %s" % (platform, hostname))
                continue

            config_path = os.path.join("rendered", hostname, platform)

            if hostname == "internal":
                try:
                    from autonetkit_cisco import deploy as cisco_deploy
                except ImportError:
                    pass  # development module, may not be available
                if platform == "cisco":
                    create_new_xml = False
                    if not input_graph_string:
                        create_new_xml = True  # no input, eg if came from grid
                    elif anm['input'].data['file_type'] == "graphml":
                        create_new_xml = True  # input from graphml, create XML

                    if create_new_xml:
                        cisco_deploy.create_xml(anm, nidb, input_graph_string)
                    else:
                        cisco_deploy.package(nidb, config_path,
                                             input_graph_string)
                continue

            username = platform_data['username']
            key_file = platform_data['key file']
            host = platform_data['host']

            if platform == "netkit":
                import autonetkit.deploy.netkit as netkit_deploy
                tar_file = netkit_deploy.package(config_path, "nklab")
                netkit_deploy.transfer(host, username, tar_file, tar_file,
                                       key_file)
                netkit_deploy.extract(host,
                                      username,
                                      tar_file,
                                      config_path,
                                      timeout=60,
                                      key_filename=key_file)
            if platform == "cisco":
                #TODO: check why using nklab here
                cisco_deploy.package(config_path, "nklab")
Example #38
def package(src_dir, target="netkit_lab"):
    log.info("Packaging %s" % src_dir)
    import tarfile
    import os
    tar_filename = "%s.tar.gz" % target
    tar = tarfile.open(os.path.join(tar_filename), "w:gz")
    tar.add(src_dir)
    tar.close()
    return tar_filename
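
A call mirroring the deployment examples elsewhere in this listing might look as follows; the source directory is assumed to exist, and the archive is written to the current working directory.

tar_file = package("rendered/nectar1/netkit/", "netkit")
# writes netkit.tar.gz and returns the filename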
Example #39
def package(src_dir, target='netkit_lab'):
    log.info('Packaging %s' % src_dir)
    import tarfile
    import os
    tar_filename = '%s.tar.gz' % target
    tar = tarfile.open(os.path.join(tar_filename), 'w:gz')
    tar.add(src_dir)
    tar.close()
    return tar_filename
Example #40
def validate(anm):
    log.info("Validating overlay topologies")
    tests_passed = True
    tests_passed = validate_ipv4(anm) and tests_passed

    try:
        from autonetkit_cisco import ank_validate as cisco_validate
    except ImportError, e:
        log.debug("Unable to load autonetkit_cisco %s" % e)
Example #41
def package(src_dir, target = "netkit_lab"):
    log.info("Packaging %s" % src_dir)
    import tarfile
    import os
    tar_filename = "%s.tar.gz" % target
    tar = tarfile.open(os.path.join(tar_filename), "w:gz")
    tar.add(src_dir)
    tar.close()
    return tar_filename
Example #42
def package(src_dir, target='netkit_lab'):
    log.info('Packaging %s' % src_dir)
    import tarfile
    import os
    tar_filename = '%s.tar.gz' % target
    tar = tarfile.open(os.path.join(tar_filename), 'w:gz')
    tar.add(src_dir)
    tar.close()
    return tar_filename
Example #43
    def static_routes(self, node):
        node.static_routes_v4 = [] # initialise for case of no routes -> simplifies template logic
        node.static_routes_v6 = [] # initialise for case of no routes -> simplifies template logic
        if not self.anm['phy'].data.enable_routing:
            log.debug("Routing disabled, not configuring static routes for Ubuntu server %s" % node)
            return

        l3_conn_node = self.anm['l3_conn'].node(node)
        phy_node = self.anm['phy'].node(node)
        gateway_list = [n for n in l3_conn_node.neighbors()
            if n.is_router]
        if not len(gateway_list):
            log.warning("Server %s is not directly connected to any routers" % node)
        else:
            gateway = gateway_list[0] # choose the first gateway
            if len(gateway_list) > 1:
                log.info("Server %s is multi-homed, using gateway %s" % (node, gateway))

        #TODO: warn if server has no neighbors in same ASN (either in design or verification steps)
        #TODO: need to check that servers don't have any direct ebgp connections

        gateway_edge_l3 = self.anm['l3_conn'].edge(node, gateway)
        server_interface = gateway_edge_l3.src_int
        server_interface_id = self.nidb.interface(server_interface).id

        gateway_interface = gateway_edge_l3.dst_int

        gateway_ipv4 = gateway_ipv6 = None
        if node.ip.use_ipv4:
            gateway_ipv4 = gateway_interface['ipv4'].ip_address
        if node.ip.use_ipv6:
            gateway_ipv6 = gateway_interface['ipv6'].ip_address

        #TODO: look at aggregation
        #TODO: catch case of ip addressing being disabled

        #TODO: handle both ipv4 and ipv6

        # IGP advertised infrastructure pool from same AS
        for infra_route in self.anm['ipv4'].data['infra_blocks'][phy_node.asn]:
            node.static_routes_v4.append({
                    "network": infra_route,
                    "gw": gateway_ipv4,
                    "interface": server_interface_id,
                    "description": "Route to infra subnet in local AS %s via %s" % (phy_node.asn, gateway),
                    })

        # eBGP advertised loopbacks in all (same + other) ASes
        for asn, asn_routes in self.anm['ipv4'].data['loopback_blocks'].items():
            for asn_route in asn_routes:
                node.static_routes_v4.append({
                    "network": asn_route,
                    "gw": gateway_ipv4,
                    "interface": server_interface_id,
                    "description": "Route to loopback subnet in AS %s via %s" % (asn, gateway),
                    })
Example #44
def allocate_loopbacks(g_ip, address_block = None):
    if not address_block:
        address_block = netaddr.IPNetwork("192.168.0.0/22")
    log.info("Allocating v4 Primary Host loopback IPs")
    ip_tree = IpTree(address_block)
    ip_tree.add_nodes(g_ip.nodes("is_l3device"))
    ip_tree.build()
    #loopback_tree = ip_tree.json()
    ip_tree.assign()
    g_ip.data.loopback_blocks = ip_tree.group_allocations()
Example #45
def allocate_loopbacks(g_ip, address_block=None):
    if not address_block:
        address_block = netaddr.IPNetwork("192.168.0.0/22")
    log.info("Allocating v4 Primary Host loopback IPs")
    ip_tree = IpTree(address_block)
    ip_tree.add_nodes(g_ip.nodes("is_l3device"))
    ip_tree.build()
    #loopback_tree = ip_tree.json()
    ip_tree.assign()
    g_ip.data.loopback_blocks = ip_tree.group_allocations()
Example #46
    def compile(self):
        log.info("Compiling Netkit for %s" % self.host)
        g_phy = self.anm['phy']
        quagga_compiler = QuaggaCompiler(self.nidb, self.anm)
# TODO: this should be all l3 devices not just routers
        for phy_node in g_phy.l3devices(host=self.host, syntax='quagga'):
            folder_name = naming.network_hostname(phy_node)
            nidb_node = self.nidb.node(phy_node)
            nidb_node.add_stanza("render")
            #TODO: order by folder and file template src/dst
            nidb_node.render.base = os.path.join("templates","quagga")
            nidb_node.render.template = os.path.join("templates",
                "netkit_startup.mako")
            nidb_node.render.dst_folder = os.path.join("rendered",
                self.host, "netkit")
            nidb_node.render.base_dst_folder = os.path.join("rendered",
                self.host, "netkit", folder_name)
            nidb_node.render.dst_file = "%s.startup" % folder_name

            nidb_node.render.custom = {
                    'abc': 'def.txt'
                    }

# allocate zebra information
            nidb_node.add_stanza("zebra")
            if nidb_node.is_router():
                nidb_node.zebra.password = "******"
            hostname = folder_name
            if hostname[0] in string.digits:
                hostname = "r" + hostname
            nidb_node.hostname = hostname  # can't have . in quagga hostnames
            nidb_node.add_stanza("ssh")
            nidb_node.ssh.use_key = True  # TODO: make this set based on presence of key

            # Note this could take external data
            int_ids = itertools.count(0)
            for interface in nidb_node.physical_interfaces:
                numeric_id = int_ids.next()
                interface.numeric_id = numeric_id
                interface.id = self.index_to_int_id(numeric_id)

# and allocate tap interface
            nidb_node.add_stanza("tap")
            nidb_node.tap.id = self.index_to_int_id(int_ids.next())

            quagga_compiler.compile(nidb_node)

            if nidb_node.bgp:
                nidb_node.bgp.debug = True
                static_routes = []
                nidb_node.zebra.static_routes = static_routes

        # and lab.conf
        self.allocate_tap_ips()
        self.lab_topology()
Example #47
def extract(host, username, tar_file, cd_dir, timeout = 3600, key_filename = None, password = None, verbosity = 0):
    """Extract and start lab"""
    log.debug("Extracting and starting lab on %s" % (host))
    log.info("Extracting and starting Netkit lab")
    from Exscript import Account
    from Exscript.util.start import start
    from Exscript.util.match import first_match
    from Exscript import PrivateKey
    from Exscript.protocols.Exception import InvalidCommandException, LoginFailure

    messaging = ank_messaging.AnkMessaging()

    def starting_host(protocol, index, data):
        m = re.search('\\"(\S+)\\"', data.group(index))
        if m:
            hostname = m.group(1)
            log.info(data.group(index)) #TODO: use regex to strip out just the machine name
            body = {"starting": hostname}
            messaging.publish_json(body)

    def lab_started(protocol, index, data):
        log.info("Lab started on %s" % host)
        body = {"lab started": host}
        messaging.publish_json(body)

    def make_not_found(protocol, index, data):
        log.warning("Make not installed on remote host %s. Please install make and retry." % host)
        return

    def start_lab(thread, host, conn):
        conn.set_timeout(timeout)
        conn.add_monitor(r'Starting (\S+)', starting_host)
        conn.add_monitor(r'The lab has been started', lab_started)
        conn.add_monitor(r'make: not found', make_not_found)
        #conn.data_received_event.connect(data_received)
        conn.execute('cd %s' % cd_dir)
        conn.execute('lhalt -q')
        conn.execute('lcrash -k')
        conn.execute("lclean")
        conn.execute('cd') # back to home directory tar file copied to
        conn.execute('rm -Rf rendered')
        conn.execute('tar -xzf %s' % tar_file)
        conn.execute('cd %s' % cd_dir)
        conn.execute('vlist')
        conn.execute("lclean")
        log.info("Starting lab")
        start_command = 'lstart -p20 -o--con0=none'
        try:
            conn.execute(start_command)
        except InvalidCommandException, error:
            if "already running" in str(error):
                time.sleep(1)
                conn.execute(start_command)
        first_match(conn, r'^The lab has been started')
        conn.send("exit")
Example #48
    def compile(self):
        log.info("Compiling Netkit for %s" % self.host)
        g_phy = self.anm['phy']
        quagga_compiler = QuaggaCompiler(self.nidb, self.anm)
# TODO: this should be all l3 devices not just routers
        for phy_node in g_phy.l3devices(host=self.host, syntax='quagga'):
            folder_name = naming.network_hostname(phy_node)
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            #TODO: order by folder and file template src/dst
            DmNode.render.base = os.path.join("templates","quagga")
            DmNode.render.template = os.path.join("templates",
                "netkit_startup.mako")
            DmNode.render.dst_folder = os.path.join("rendered",
                self.host, "netkit")
            DmNode.render.base_dst_folder = os.path.join("rendered",
                self.host, "netkit", folder_name)
            DmNode.render.dst_file = "%s.startup" % folder_name

            DmNode.render.custom = {
                    'abc': 'def.txt'
                    }

# allocate zebra information
            DmNode.add_stanza("zebra")
            if DmNode.is_router():
                DmNode.zebra.password = "******"
            hostname = folder_name
            if hostname[0] in string.digits:
                hostname = "r" + hostname
            DmNode.hostname = hostname  # can't have . in quagga hostnames
            DmNode.add_stanza("ssh")
            DmNode.ssh.use_key = True  # TODO: make this set based on presence of key

            # Note this could take external data
            int_ids = itertools.count(0)
            for interface in DmNode.physical_interfaces:
                numeric_id = int_ids.next()
                interface.numeric_id = numeric_id
                interface.id = self.index_to_int_id(numeric_id)

# and allocate tap interface
            DmNode.add_stanza("tap")
            DmNode.tap.id = self.index_to_int_id(int_ids.next())

            quagga_compiler.compile(DmNode)

            if DmNode.bgp:
                DmNode.bgp.debug = True
                static_routes = []
                DmNode.zebra.static_routes = static_routes

        # and lab.conf
        self.allocate_tap_ips()
        self.lab_topology()
Example #49
def substitute_ips(data, rev_map, interfaces=False):
    import re
    re_ip_address = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
    re_ip_loopback = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/32"
    re_ip_subnet = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2}"
    if not len(data):
        log.info("No data provided to IP substitution, returning")
        return data

    def replace_ip(match):
        match = match.group()
        if match in rev_map['infra_interfaces']:
            iface = rev_map['infra_interfaces'][match]
            if interfaces:
                named = "%s.%s" % (iface.id, iface.node)
                return named
            else:
                return str(iface.node)
        return match  # no match, return the raw IP

    def replace_loopbacks(match):
        match = match.group()
        # strip off the /32
        loopback_ip = match[:-3]
        if loopback_ip in rev_map['loopbacks']:
            node = rev_map['loopbacks'][loopback_ip]
            return str(node)
        return match  # no match, return the raw IP

    def replace_loopbacks_no_mask(match):
        #TODO: refactor
        match = match.group()
        # no mask to strip in this case
        loopback_ip = match
        if loopback_ip in rev_map['loopbacks']:
            node = rev_map['loopbacks'][loopback_ip]
            return str(node)
        return match  # no match, return the raw IP

    def replace_subnet(match):
        match = match.group()

        if match in rev_map['subnets']:
            subnet = rev_map['subnets'][match]
            return str(subnet)
        return match  # no match, return the raw IP

    # do loopbacks first
    data = re.sub(re_ip_loopback, replace_loopbacks, data)
    data = re.sub(re_ip_address, replace_ip, data)
    # try for ip addresses in loopback
    data = re.sub(re_ip_address, replace_loopbacks_no_mask, data)
    # and for subnets ie ip/netmask
    return re.sub(re_ip_subnet, replace_subnet, data)
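
The substitutions above all use re.sub with a replacement function; a tiny self-contained sketch of that pattern (the text and the fixed replacement are made up, where the real code looks the match up in rev_map):

import re

RE_SUBNET = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2}'

def replace_subnet(match):
    # the real code maps the matched subnet back to a named object
    return 'SUBNET'

print(re.sub(RE_SUBNET, replace_subnet, 'route to 10.0.0.0/30 via eth0'))
# route to SUBNET via eth0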
Example #50
def publish_data(data, type_key):
    params = urllib.urlencode({
        'body': data,
        'type': type_key,
    })

    #TODO: split this common function out, create at runtime so don't need to keep reading config
    try:
        data = urllib.urlopen(http_url, params).read()
    except IOError, e:
        log.info("Unable to connect to HTTP Server %s: %s" % (http_url, e))
Example #51
def validate(anm):
    tests_passed = True
    tests_passed = validate_ipv4(anm) and tests_passed

    validate_ibgp(anm)
    all_nodes_have_asn(anm)

    if tests_passed:
        log.info("All validation tests passed.")
    else:
        log.warning("Some validation tests failed.")
Example #52
def compare_output_expected(topology_name, automated=True):
    log.info("Testing %s" % topology_name)
    input_filename = "%s.graphml" % topology_name

    dirname, filename = os.path.split(os.path.abspath(__file__))
    input_file = os.path.join(dirname, input_filename)

    arg_string = "-f %s --quiet --render" % input_file
    args = console_script.parse_options(arg_string)

    console_script.main(args)
Example #53
def allocate_ips(G_ip,
                 infra_block=None,
                 loopback_block=None,
                 secondary_loopback_block=None):
    log.info('Allocating Host loopback IPs')
    #TODO: move the following step to the l3 graph
    assign_asn_to_interasn_cds(G_ip)

    allocate_loopbacks(sorted(G_ip), loopback_block)
    allocate_infra(sorted(G_ip), infra_block)
    allocate_vrf_loopbacks(sorted(G_ip), secondary_loopback_block)
Example #54
def build_bgp(anm):
    """Build iBGP end eBGP overlays"""
    # eBGP
    g_in = anm['input']
    g_l3 = anm['layer3']

    if not anm['phy'].data.enable_routing:
        log.info("Routing disabled, not configuring BGP")
        return

    build_ebgp(anm)
    build_ebgp_v4(anm)
    build_ebgp_v6(anm)

    """TODO: remove from here once compiler updated"""
    g_bgp = anm.add_overlay("bgp", directed=True)
    g_bgp.add_nodes_from(g_l3.routers())
    edges_to_add = [e for e in g_l3.edges()
                    if e.src in g_bgp and e.dst in g_bgp]
    g_bgp.add_edges_from(edges_to_add, bidirectional=True)
    ank_utils.copy_int_attr_from(g_l3, g_bgp, "multipoint")

    # remove ibgp links

    """TODO: remove up to here once compiler updated"""
    ank_utils.copy_attr_from(
        g_in, g_bgp, "custom_config_bgp", dst_attr="custom_config")

    # log.info("Building eBGP")
    ebgp_nodes = [d for d in g_bgp if any(
        edge.type == 'ebgp' for edge in d.edges())]
    g_bgp.update(ebgp_nodes, ebgp=True)

    for ebgp_edge in g_bgp.edges(type="ebgp"):
        for interface in ebgp_edge.interfaces():
            interface.ebgp = True

    for edge in g_bgp.edges(type='ibgp'):
        # TODO: need interface querying/selection. rather than hard-coded ids
        # TODO: create a new port (once API allows) rather than binding to
        # loopback zero
        edge.bind_interface(edge.src, 0)

    # TODO: need to initialise interface zero to be a loopback rather than physical type
    # TODO: what is this for?
    for node in g_bgp:
        for interface in node.interfaces():
            interface.multipoint = any(e.multipoint for e in interface.edges())

    # log.info("Building iBGP")
    build_ibgp(anm)
    build_ibgp_v4(anm)
    build_ibgp_v6(anm)
Example #55
def render(nidb):
    #TODO: config option for single or multi threaded
    log.info("Rendering Network")
    folder_cache = cache_folders(nidb)
    render_single(nidb, folder_cache)
    #render_multi(nidb, folder_cache)
    render_topologies(nidb)

#TODO: Also cache for topologies

    folder_cache_dir = folder_cache['_folder_cache_dir']
    shutil.rmtree(folder_cache_dir)
Example #56
def highlight(nodes, edges, paths=None):
    if not paths:
        paths = []

    def nfilter(n):
        try:
            return n.id
        except AttributeError:
            return n  # likely already a node id (string)

    def efilter(e):
        try:
            return (e.src.id, e.dst.id)
        except AttributeError:
            return e  # likely already edge (src, dst) id tuple (string)

    nodes = [nfilter(n) for n in nodes]
    edges = [efilter(e) for e in edges]
    filtered_paths = []
    for path in paths:
        #TODO: tidy this logic
        if isinstance(path, dict) and 'path' in path:
            path_data = path  # use as-is
        else:
            import random
            is_verified = bool(random.randint(0, 1))
            path_data = {'path': path, 'verified': is_verified}

        path_data['path'] = [nfilter(n) for n in path_data['path']]
        filtered_paths.append(path_data)

    #TODO: remove "highlight" from json, use as url params to distinguish
    import json
    body = json.dumps({
        'nodes': nodes,
        'edges': edges,
        'paths': filtered_paths,
    })

    params = urllib.urlencode({
        'body': body,
        'type': 'highlight',
    })

    #TODO: split this common function out, create at runtime so don't need to keep reading config
    try:
        data = urllib.urlopen(http_url, params).read()
    except IOError, e:
        log.info("Unable to connect to HTTP Server %s: e" % (http_url, e))
Example #57
def allocate_vrf_loopbacks(g_ip, address_block=None):
    if not address_block:
        address_block = netaddr.IPNetwork("172.16.0.0/24")
    log.info("Allocating v4 Secondary Host loopback IPs")
    ip_tree = IpTree(address_block)
    secondary_loopbacks = [
        i for n in g_ip.nodes() for i in n.loopback_interfaces
        if not i.is_loopback_zero
    ]

    vrf_loopbacks = [i for i in secondary_loopbacks if i['vrf'].vrf_name]

    ip_tree.add_nodes(vrf_loopbacks)
    ip_tree.build()
    #secondary_loopback_tree = ip_tree.json()
    ip_tree.assign()
Example #58
    def compile(self):
        log.info("Compiling Junosphere for %s" % self.host)
        G_phy = self.anm.overlay.phy
        junos_compiler = JunosCompiler(self.nidb, self.anm)
        for phy_node in G_phy.nodes('is_router', host = self.host, syntax='junos'):
            nidb_node = self.nidb.node(phy_node)
            nidb_node.render.template = "templates/junos.mako"
            nidb_node.render.dst_folder = "rendered/%s/%s" % (self.host, "junosphere")
            nidb_node.render.dst_file = "%s.conf" % ank.name_folder_safe(phy_node.label)

            int_ids = self.interface_ids()
            for edge in self.nidb.edges(nidb_node):
                edge.unit = 0
                edge.id = int_ids.next()

            junos_compiler.compile(nidb_node)
Example #59
    def compile(self):
        log.info("Compiling Cisco for %s" % self.host)
        G_phy = self.anm.overlay.phy
        ios_compiler = IosCompiler(self.nidb, self.anm)
        for phy_node in G_phy.nodes('is_router', host = self.host, syntax='ios'):
            nidb_node = self.nidb.node(phy_node)
            nidb_node.render.template = "templates/ios.mako"
            nidb_node.render.dst_folder = os.path.join(self.host, self.timestamp)
            nidb_node.render.dst_file = "%s.conf" % ank.name_folder_safe(phy_node.label)

            # Assign interfaces
            int_ids = self.interface_ids_ios()
            for edge in self.nidb.edges(nidb_node):
                edge.id = int_ids.next()

            ios_compiler.compile(nidb_node)