Example #1
def copy_attr_from(overlay_src, overlay_dst, src_attr, dst_attr = None, nbunch = None, type = None):
    #TODO: add dest format, eg to convert to int
    if not dst_attr:
        dst_attr = src_attr

    graph_src = unwrap_graph(overlay_src)
    graph_dst = unwrap_graph(overlay_dst)
    if not nbunch:
        nbunch = graph_src.nodes()

    for n in nbunch:
        try:
            val = graph_src.node[n][src_attr]
        except KeyError:
            #TODO: check if because node doesn't exist in dest, or because attribute doesn't exist in graph_src
            log.debug("Unable to copy node attribute %s for %s in %s" % (src_attr, n, overlay_src))
        else:
            #TODO: use a dtype to take an int, float, etc
            if type is float:
                val = float(val)
            elif type is int:
                val = int(val)

            if n in graph_dst:
                graph_dst.node[n][dst_attr] = val
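
A brief usage sketch for copy_attr_from. The overlay and attribute names below are illustrative, apart from the custom_config pattern, which mirrors the call in the build_rip example later in this listing.

g_in = anm['input']      # source overlay (illustrative)
g_ospf = anm['ospf']     # destination overlay (illustrative)

# copy a per-node 'cost' attribute, coercing values to int
copy_attr_from(g_in, g_ospf, 'cost', type=int)

# copy under a different destination attribute name
copy_attr_from(g_in, g_ospf, 'custom_config_ospf', dst_attr='custom_config')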
Example #2
def send_commands(server, commands, message_key, send_port=5559,
                  receive_port=5562):
    import zmq
    import json
    import autonetkit.log as log
    message_key = str(message_key)
    import autonetkit.ank_json as ank_json

    context = zmq.Context()
    zmq_socket = context.socket(zmq.PUSH)
    zmq_socket.connect("tcp://%s:%s" % (server, send_port))

    context = zmq.Context()
    results_receiver = context.socket(zmq.SUB)
    results_receiver.connect("tcp://%s:%s" % (server, receive_port))
    results_receiver.setsockopt(zmq.SUBSCRIBE, message_key)
    # NOTE: need to connect *before* send commands in order to capture replies

    for command in commands:
        command["message_key"] = message_key

        work_message = json.dumps(command, cls=ank_json.AnkEncoder, indent=4)
        # print "sending", work_message
        log.debug("Sending %s to %s" % (command['command'], command['host']))
        zmq_socket.send_json(work_message)

    zmq_socket.close()
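
A hedged usage sketch for send_commands. Judging from the log.debug call, each command is a dict with at least 'command' and 'host' keys; the server address, message key, and command contents below are placeholders.

commands = [
    {"host": "192.168.1.10", "command": "traceroute 10.0.0.2"},
    {"host": "192.168.1.11", "command": "show ip route"},
]
send_commands("measure.example.com", commands, message_key="1234abcd")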
Example #3
def mpls_te(anm):
    g_in = anm['input']
    g_phy = anm['phy']
    g_l3 = anm['layer3']

    # add the overlay regardless, so compilers can quickly check for a node in anm['mpls_te']

    g_mpls_te = anm.add_overlay('mpls_te')
    if not any(True for n in g_in.routers() if n.mpls_te_enabled):
        log.debug('No nodes with mpls_te_enabled set')
        return

    # te head end set if here

    g_mpls_te.add_nodes_from(g_in.routers())

    # build up edge list sequentially, to provide meaningful messages for multipoint links

    multipoint_edges = [e for e in g_l3.edges() if e.multipoint]
    if len(multipoint_edges):
        log.info('Excluding multi-point edges from MPLS TE topology: %s'
           % ', '.join(str(e) for e in multipoint_edges))

    edges_to_add = set(g_l3.edges()) - set(multipoint_edges)
    g_mpls_te.add_edges_from(edges_to_add)
Example #4
    def nailed_up_routes(self, node):
        log.debug('Configuring nailed up routes')
        phy_node = self.anm['phy'].node(node)

        if node.is_ebgp_v4 and node.ip.use_ipv4:
            infra_blocks = self.anm['ipv4'].data['infra_blocks'].get(
                phy_node.asn) or []
            for infra_route in infra_blocks:
                stanza = ConfigStanza(
                    prefix=str(infra_route.network),
                    netmask=str(infra_route.netmask),
                    nexthop="Null0",
                    metric=254,
                )
                node.ipv4_static_routes.append(stanza)

        if node.is_ebgp_v6 and node.ip.use_ipv6:
            infra_blocks = self.anm['ipv6'].data['infra_blocks'].get(
                phy_node.asn) or []
            # TODO: setup schema with defaults
            for infra_route in infra_blocks:
                stanza = ConfigStanza(
                    prefix=str(infra_route),
                    nexthop="Null0",
                    metric=254,
                )
                node.ipv6_static_routes.append(stanza)
Example #5
def manage_network(input_graph_string, timestamp, build_options, reload_build=False, grid = None):
    """Build, compile, render network as appropriate"""
    # import build_network_simple as build_network
    import autonetkit.build_network as build_network
    if reload_build:
        # remap?
        build_network = reload(build_network)

    if build_options['build']:
        if input_graph_string:
            graph = build_network.load(input_graph_string)
        elif grid:
            graph = build_network.grid_2d(grid)

        anm = build_network.build(graph)
        if not build_options['compile']:
            update_http(anm)

        if build_options['validate']:
            import autonetkit.ank_validate
            autonetkit.ank_validate.validate(anm)

    if build_options['compile']:
        if build_options['archive']:
            anm.save()
        nidb = compile_network(anm)

        update_http(anm, nidb)
        log.debug("Sent ANM to web server")
        if build_options['archive']:
            nidb.save()
        # render.remove_dirs(["rendered"])
        if build_options['render']:
            render.render(nidb)

    if not(build_options['build'] or build_options['compile']):
        # Load from last run
        import autonetkit.anm
        anm = autonetkit.anm.AbstractNetworkModel()
        anm.restore_latest()
        nidb = NIDB()
        nidb.restore_latest()
        update_http(anm, nidb)

    if build_options['diff']:
        import autonetkit.diff
        nidb_diff = autonetkit.diff.nidb_diff()
        import json
        data = json.dumps(nidb_diff, cls=ank_json.AnkEncoder, indent=4)
        log.info("Wrote diff to diff.json")
        with open("diff.json", "w") as fh:  # TODO: make file specified in config
            fh.write(data)

    if build_options['deploy']:
        deploy_network(anm, nidb, input_graph_string)

    if build_options['measure']:
        measure_network(anm, nidb)

    log.info("Finished")
Example #6
def build_isis(anm):
    """Build isis overlay"""
    g_in = anm["input"]
    if not any(n.igp == "isis" for n in g_in):
        log.debug("No ISIS nodes")
        return
    g_ipv4 = anm["ipv4"]
    g_isis = anm.add_overlay("isis")
    g_isis.add_nodes_from(g_in.nodes("is_router", igp="isis"), retain=["asn"])
    g_isis.add_nodes_from(g_in.nodes("is_switch"), retain=["asn"])
    g_isis.add_edges_from(g_in.edges(), retain=["edge_id"])
    # Merge and explode switches
    ank_utils.aggregate_nodes(g_isis, g_isis.nodes("is_switch"), retain="edge_id")
    exploded_edges = ank_utils.explode_nodes(g_isis, g_isis.nodes("is_switch"), retain="edge_id")
    for edge in exploded_edges:
        edge.multipoint = True

    g_isis.remove_edges_from([link for link in g_isis.edges() if link.src.asn != link.dst.asn])

    for node in g_isis:
        ip_node = g_ipv4.node(node)
        node.net = ip_to_net_ent_title_ios(ip_node.loopback)
        node.process_id = 1  # default

    for link in g_isis.edges():
        link.metric = 1  # default
        # link.hello = 5 # for debugging, TODO: read from graph

    for edge in g_isis.edges():
        for interface in edge.interfaces():
            interface.metric = edge.metric
            interface.multipoint = edge.multipoint
Example #7
def copy_edge_attr_from(overlay_src, overlay_dst, src_attr, dst_attr=None, type=None, default=None):
    # note this won't work if merge/aggregate edges

    if not dst_attr:
        dst_attr = src_attr

    for edge in overlay_src.edges():
        try:
            val = edge.get(src_attr)
            if val is None:
                val = default
        except KeyError:

            # TODO: check if because edge doesn't exist in dest, or because
            # attribute doesn't exist in graph_src

            log.debug("Unable to copy edge attribute %s for (%s, %s) in %s", src_attr, edge.src, edge.dst, overlay_src)

        else:

            # TODO: use a dtype to take an int, float, etc

            if type is float:
                val = float(val)
            elif type is int:
                val = int(val)

            try:
                overlay_dst.edge(edge).set(dst_attr, val)
            except AttributeError:
                # fail to debug - as attribute may not have been set
                log.debug("Unable to set edge attribute on %s in %s", edge, overlay_dst)
Example #8
def remove_dirs(dirs):
    for directory in dirs:
        log.debug("Removing directory %s" % directory)
        try:
            shutil.rmtree(directory)
        except OSError, e:
            log.warning("Unable to remove %s, %s" % (directory, e))
Example #9
def allocate_secondary_loopbacks(g_ip, address_block=None):
    if not address_block:
        address_block = netaddr.IPNetwork('172.16.0.0/24')

    secondary_loopbacks = [
        i for n in g_ip.l3devices() for i in n.loopback_interfaces()
        if not i.is_loopback_zero and i['ip'].allocate is not False
    ]

    if not len(secondary_loopbacks):
        return  # nothing to set
    log.debug('Allocating v4 Secondary Host loopback IPs')
    log.debug('Allocating v4 Secondary Host loopback IPs to %s',
              secondary_loopbacks)
    ip_tree = IpTree(address_block)

    #vrf_loopbacks = [i for i in secondary_loopbacks if i['vrf'].vrf_name]

    ip_tree.add_nodes(sorted(secondary_loopbacks))

    ip_tree.build()

    # secondary_loopback_tree = ip_tree.json()

    ip_tree.assign()
Example #10
def compile_network(anm):
    # log.info("Creating base network model")
    nidb = create_nidb(anm)
    g_phy = anm['phy']
    # log.info("Compiling to targets")

    for target_data in config.settings['Compile Targets'].values():
        host = target_data['host']
        platform = target_data['platform']
        if platform == 'netkit':
            import autonetkit.compilers.platform.netkit as pl_netkit
            platform_compiler = pl_netkit.NetkitCompiler(nidb, anm, host)
        elif platform == 'VIRL':
            try:
                import autonetkit_cisco.compilers.platform.cisco as pl_cisco
                platform_compiler = pl_cisco.CiscoCompiler(nidb, anm, host)
            except ImportError:
                log.debug('Unable to load VIRL platform compiler')
        elif platform == 'dynagen':
            import autonetkit.compilers.platform.dynagen as pl_dynagen
            platform_compiler = pl_dynagen.DynagenCompiler(nidb, anm, host)
        elif platform == 'junosphere':
            import autonetkit.compilers.platform.junosphere as pl_junosphere
            platform_compiler = pl_junosphere.JunosphereCompiler(
                nidb, anm, host)

        if any(g_phy.nodes(host=host, platform=platform)):
            # log.info('Compiling configurations for %s on %s'
            # % (platform, host))
            platform_compiler.compile()  # only compile if hosts set
        else:
            log.debug('No devices set for %s on %s' % (platform, host))

    return nidb
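
compile_network iterates the 'Compile Targets' section of the AutoNetkit config. A sketch of the shape it expects, inferred from the target_data['host'] and target_data['platform'] lookups above; normally this comes from the config file, and the values here are placeholders.

config.settings['Compile Targets'] = {
    'default': {
        'host': 'localhost',
        'platform': 'netkit',
    },
}
nidb = compile_network(anm)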
Example #11
def send_commands(server,
                  commands,
                  message_key,
                  send_port=5559,
                  receive_port=5562):
    import zmq
    import json
    import autonetkit.log as log
    message_key = str(message_key)
    import autonetkit.ank_json as ank_json

    context = zmq.Context()
    zmq_socket = context.socket(zmq.PUSH)
    zmq_socket.connect("tcp://%s:%s" % (server, send_port))

    context = zmq.Context()
    results_receiver = context.socket(zmq.SUB)
    results_receiver.connect("tcp://%s:%s" % (server, receive_port))
    results_receiver.setsockopt(zmq.SUBSCRIBE, message_key)
    # NOTE: need to connect *before* send commands in order to capture replies

    for command in commands:
        command["message_key"] = message_key

        work_message = json.dumps(command, cls=ank_json.AnkEncoder, indent=4)
        # print "sending", work_message
        log.debug("Sending %s to %s" % (command['command'], command['host']))
        zmq_socket.send_json(work_message)

    zmq_socket.close()
Example #12
    def _init_interfaces(self, nbunch=None):
        """Initialises interfaces"""
        if not nbunch:
            nbunch = [n for n in self._graph.nodes()]

        try:
            nbunch = list(unwrap_nodes(nbunch))
        except AttributeError:
            pass  # don't need to unwrap

        phy_graph = self._anm.overlay_nx_graphs["phy"]

        for node in nbunch:
            try:
                phy_interfaces = phy_graph.node[node]["_interfaces"]
                interface_data = {
                    'description': None,
                    'type': 'physical',
                }
                # need to do dict() to copy, otherwise all point to same memory location -> clobber
                data = dict(
                    (key, dict(interface_data)) for key in phy_interfaces)
                self._graph.node[node]['_interfaces'] = data
            except KeyError:
                # no counterpart in physical graph, initialise
                log.debug("Initialise interfaces for %s in %s" % (
                    node, self._overlay_id))
                self._graph.node[node]['_interfaces'] = {0:
                                                         {'description': 'loopback',
                                                             'type': 'loopback'}}
Example #13
def copy_edge_attr_from(overlay_src, overlay_dst, src_attr,
                        dst_attr=None, type=None, default=None):
    # note this won't work if merge/aggregate edges

    if not dst_attr:
        dst_attr = src_attr

    for edge in overlay_src.edges():
        try:
            val = edge.get(src_attr)
            if val is None:
                val = default
        except KeyError:

            # TODO: check if because edge doesn't exist in dest, or because
            # attribute doesn't exist in graph_src

            log.debug('Unable to copy edge attribute %s for (%s, %s) in %s',
                      src_attr, edge.src, edge.dst, overlay_src)

        else:

            # TODO: use a dtype to take an int, float, etc

            if type is float:
                val = float(val)
            elif type is int:
                val = int(val)

            try:
                overlay_dst.edge(edge).set(dst_attr, val)
            except AttributeError:
                # fail to debug - as attribute may not have been set
                log.debug('Unable to set edge attribute on %s in %s',
                          edge, overlay_dst)
Example #14
def compile_network(anm):
    nidb = create_nidb(anm)
    g_phy = anm['phy']

    for target_data in config.settings['Compile Targets'].values():
        host = target_data['host']
        platform = target_data['platform']
        if platform == "netkit":
            import autonetkit.compilers.platform.netkit as pl_netkit
            platform_compiler = pl_netkit.NetkitCompiler(nidb, anm, host)
        elif platform == "VIRL":
            try:
                import autonetkit_cisco.compilers.platform.cisco as pl_cisco
                platform_compiler = pl_cisco.CiscoCompiler(nidb, anm, host)
            except ImportError:
                log.debug("Unable to load VIRL platform compiler")
        elif platform == "dynagen":
            import autonetkit.compilers.platform.dynagen as pl_dynagen
            platform_compiler = pl_dynagen.DynagenCompiler(nidb, anm, host)
        elif platform == "junosphere":
            import autonetkit.compilers.platform.junosphere as pl_junosphere
            platform_compiler = pl_junosphere.JunosphereCompiler(
                nidb, anm, host)

        if any(g_phy.nodes(host=host, platform=platform)):
            log.info("Compiling configurations for %s on %s" %
                     (platform, host))
            platform_compiler.compile()  # only compile if hosts set
        else:
            log.debug("No devices set for %s on %s" % (platform, host))

    return nidb
Example #15
    def default(self, obj):
        if isinstance(obj, set):
            return str(obj)
        if isinstance(obj, netaddr.IPAddress):
            return str(obj)
        if isinstance(obj, netaddr.IPNetwork):
            return str(obj)
        if isinstance(obj, autonetkit.anm.OverlayNode):
            #TODO: add documentation about serializing anm nodes
            log.warning("%s is anm overlay_node. Use attribute rather than object in compiler." % obj)
            return str(obj)
        if isinstance(obj, autonetkit.plugins.ipv4.TreeNode):
            #TODO: add documentation about serializing anm nodes
            return str(obj)
        if isinstance(obj, autonetkit.anm.OverlayEdge):
            #TODO: add documentation about serializing anm nodes
            log.warning("%s is anm overlay_edge. Use attribute rather than object in compiler." % obj)
            return str(obj)
        if isinstance(obj, autonetkit.nidb.nidb_node_category):
            #TODO: add documentation about serializing anm nodes
            log.debug("%s is nidb nidb_node_category. Use attribute rather than object in compiler." % obj)
            return str(obj)

        return json.JSONEncoder.default(self, obj)
Example #16
def allocate(G_phy, G_bgp):
    log.info("Allocating route reflectors")
    graph_phy = G_phy._graph
    for asn, devices in G_phy.groupby("asn").items():
        routers = [d for d in devices if d.is_router]
        router_ids = ank_utils.unwrap_nodes(routers)

        subgraph_phy = graph_phy.subgraph(router_ids)
        if len(subgraph_phy) == 1:
            continue  # single node in graph, no iBGP needed

        degree_cen = nx.degree_centrality(subgraph_phy)

        ordered = sorted(subgraph_phy.nodes(), key=lambda x: degree_cen[x], reverse=True)

        rr_count = len(subgraph_phy)/5 # Take top 20% to be route reflectors
        route_reflectors = ordered[:rr_count] # most connected 20%
        rr_clients = ordered[rr_count:] # the other routers
        route_reflectors = list(ank_utils.wrap_nodes(G_bgp, route_reflectors))
        rr_clients = list(ank_utils.wrap_nodes(G_bgp, rr_clients))

        G_bgp.update(route_reflectors, route_reflector = True) # mark as route reflector
        # rr <-> rr
        over_links = [(rr1, rr2) for rr1 in route_reflectors for rr2 in route_reflectors if rr1 != rr2] 
        G_bgp.add_edges_from(over_links, type = 'ibgp', direction = 'over')
        # client -> rr
        up_links = [(client, rr) for (client, rr) in itertools.product(rr_clients, route_reflectors)]
        G_bgp.add_edges_from(up_links, type = 'ibgp', direction = 'up')
        # rr -> client
        down_links = [(rr, client) for (client, rr) in up_links] # opposite of up
        G_bgp.add_edges_from(down_links, type = 'ibgp', direction = 'down')

    log.debug("iBGP done")
Example #17
def apply_design_rules(anm):
    """Applies appropriate design rules to ANM"""
    g_in = anm['input']

    build_phy(anm)
    g_phy = anm['phy']

    import autonetkit
    autonetkit.update_http(anm)
    build_l3_connectivity(anm)

    check_server_asns(anm)
    autonetkit.update_http(anm)

    build_vrf(anm)  # must run before IP allocation so the VRF loopbacks are added first
    from autonetkit.design.ip import build_ip, build_ipv4, build_ipv6
    build_ip(anm)  # ip infrastructure topology

    # TODO: set defaults at the start, rather than inline, i.e. set g_in.data.address_family then use later

    address_family = g_in.data.address_family or "v4"  # default is v4
    # TODO: can remove the infrastructure now that g_ip is created separately
    if address_family == "None":
        log.info("IP addressing disabled, disabling routing protocol configuration")
        anm['phy'].data.enable_routing = False

    if address_family == "None":
        log.info("IP addressing disabled, skipping IPv4")
        anm.add_overlay("ipv4") # create empty so rest of code follows through
        g_phy.update(g_phy, use_ipv4 = False)
    elif address_family in ("v4", "dual_stack"):
        build_ipv4(anm, infrastructure = True)
        g_phy.update(g_phy, use_ipv4 = True)
    elif address_family == "v6":
        # Allocate v4 loopbacks for router ids
        build_ipv4(anm, infrastructure = False)
        g_phy.update(g_phy, use_ipv4 = False)

    #TODO: Create a collision domain overlay for ip addressing - l2 overlay?
    if address_family == "None":
        log.info("IP addressing disabled, not allocating IPv6")
        anm.add_overlay("ipv6") # create empty so rest of code follows through
        g_phy.update(g_phy, use_ipv6 = False)
    elif address_family in ("v6", "dual_stack"):
        build_ipv6(anm)
        g_phy.update(g_phy, use_ipv6 = True)
    else:
        anm.add_overlay("ipv6") # placeholder for compiler logic

    default_igp = g_in.data.igp or "ospf"
    non_igp_nodes = [n for n in g_in if not n.igp]
    # TODO: should this be modifying g_in?
    g_in.update(non_igp_nodes, igp=default_igp)  # store igp onto each node

    ank_utils.copy_attr_from(g_in, g_phy, "include_csr")

    try:
        from autonetkit_cisco import build_network as cisco_build_network
    except ImportError, e:
        log.debug("Unable to load autonetkit_cisco %s" % e)
Example #18
    def __init__(self, host = None):
        try:
            if use_rabbitmq:
                log.debug("Using Rabbitmq with server %s " % host)
                self.connection = pika.BlockingConnection(pika.ConnectionParameters(
                    host = host))
                self.channel = self.connection.channel()
                self.channel.exchange_declare(exchange='www',
                        type='direct')
                self.publish = self.publish_pika
                self.publish_compressed = self.publish_compressed_pika

            if use_http_post:
                host = config.settings['Http Post']['server']
                port = config.settings['Http Post']['port']
                self.http_url = "http://%s:%s/publish" % (host, port)
                self.publish = self.publish_http_post
                self.publish_compressed = self.publish_http_post

            if not (use_rabbitmq or use_http_post):
                log.debug("Not using Rabbitmq or telnet")
                self.publish = self.publish_blank_stub
                self.publish_compressed = self.publish_blank_stub
        except socket.timeout: #TODO: check if these should move up to the use_rabbitmq block
            log.warning("Socket Timeout: not using Rabbitmq")
            self.publish = self.publish_blank_stub
            self.publish_compressed = self.publish_blank_stub
        except socket.error:
            log.warning("Socket Error: not using Rabbitmq")
            self.publish = self.publish_blank_stub
            self.publish_compressed = self.publish_blank_stub
Example #19
    def asn(self):
        """Returns ASN of this node"""
        # TODO: make a function (not property)
        # TODO: refactor, for nodes created such as virtual switches

        try:
            return self._graph.node[self.node_id]['asn']  # not in this graph
        except KeyError:

            # try from phy

            try:
                return self.anm.overlay_nx_graphs['phy'].node[
                    self.node_id]['asn']
            except KeyError:
                if self.node_id not in self.anm.overlay_nx_graphs['phy']:
                    message = \
                        'Node id %s not found in physical overlay' \
                        % self.node_id
                    if self.overlay_id == 'input':

                        # don't warn, most likely node not copied across

                        log.debug(message)
                    else:
                        log.debug(message)
                    return
Example #20
    def __init__(self, host=None):
        try:
            if use_rabbitmq:
                log.debug("Using Rabbitmq with server %s " % host)
                self.connection = pika.BlockingConnection(
                    pika.ConnectionParameters(host=host))
                self.channel = self.connection.channel()
                self.channel.exchange_declare(exchange='www', type='direct')
                self.publish = self.publish_pika
                self.publish_compressed = self.publish_compressed_pika

            if use_http_post:
                host = config.settings['Http Post']['server']
                port = config.settings['Http Post']['port']
                self.http_url = "http://%s:%s/publish" % (host, port)
                self.publish = self.publish_http_post
                self.publish_compressed = self.publish_http_post

            if not (use_rabbitmq or use_http_post):
                log.debug("Not using Rabbitmq or telnet")
                self.publish = self.publish_blank_stub
                self.publish_compressed = self.publish_blank_stub
        except socket.timeout:  #TODO: check if these should move up to the use_rabbitmq block
            log.warning("Socket Timeout: not using Rabbitmq")
            self.publish = self.publish_blank_stub
            self.publish_compressed = self.publish_blank_stub
        except socket.error:
            log.warning("Socket Error: not using Rabbitmq")
            self.publish = self.publish_blank_stub
            self.publish_compressed = self.publish_blank_stub
Example #21
def aggregate_nodes(nm_graph, nodes, retain=None):
    """Combines connected into a single node"""
    if retain is None:
        retain = []

    try:
        retain.lower()
        retain = [retain]  # was a string, put into list
    except AttributeError:
        pass  # already a list

    nodes = list(unwrap_nodes(nodes))
    graph = unwrap_graph(nm_graph)
    subgraph = graph.subgraph(nodes)
    if not len(subgraph.edges()):

        # print "Nothing to aggregate for %s: no edges in subgraph"

        pass
    total_added_edges = []
    if graph.is_directed():
        component_nodes_list = nx.strongly_connected_components(subgraph)
    else:
        component_nodes_list = nx.connected_components(subgraph)
    for component_nodes in component_nodes_list:
        if len(component_nodes) > 1:
            component_nodes = [nm_graph.node(n) for n in component_nodes]

            # TODO: could choose most connected, or most central?
            # TODO: refactor so use nodes_to_remove

            nodes_to_remove = list(component_nodes)
            base = nodes_to_remove.pop()  # choose a base device to retain
            log.debug("Retaining %s, removing %s", base, nodes_to_remove)

            external_edges = []
            for node in nodes_to_remove:
                external_edges += [e for e in node.edges() if e.dst not in component_nodes]
                # all edges out of component

            log.debug("External edges %s", external_edges)
            edges_to_add = []
            for edge in external_edges:
                dst = edge.dst
                data = dict((key, edge._data.get(key)) for key in retain)
                ports = edge.raw_interfaces
                dst_int_id = ports[dst.node_id]

                # TODO: bind to (and maybe add) port on the new switch?

                data["_ports"] = {dst.node_id: dst_int_id}

                append = (base.node_id, dst.node_id, data)
                edges_to_add.append(append)

            nm_graph.add_edges_from(edges_to_add)
            total_added_edges += edges_to_add
            nm_graph.remove_nodes_from(nodes_to_remove)

    return wrap_edges(nm_graph, total_added_edges)
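
Usage mirrors the calls in the build_isis and build_ebgp examples elsewhere in this listing: collapse the connected switches of an overlay into single nodes, keeping edge_id on the rewired edges.

switches = g_isis.nodes("is_switch")
new_edges = aggregate_nodes(g_isis, switches, retain="edge_id")
log.debug("Aggregation added edges: %s", new_edges)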
Example #22
def update_http(
    anm=None,
    nidb=None,
    http_url=None,
    uuid=None,
    ):
    if http_url is None:
        http_url = default_http_url

    if anm and nidb:
        body = autonetkit.ank_json.dumps(anm, nidb)
    elif anm:
        body = autonetkit.ank_json.dumps(anm)
    else:
        import json
        body = json.dumps({})  # blank to test visualisation server running

    if uuid is None:
        uuid = get_uuid(anm)

    params = urllib.urlencode({'body': body, 'type': 'anm',
                              'uuid': uuid})
    try:
        data = urllib.urlopen(http_url, params).read()
        log.debug(data)
    except IOError, e:
        log.info('Unable to connect to visualisation server %s'
                 % http_url)
        return
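
Typical calls, mirroring manage_network earlier in this listing; when uuid is omitted it falls back to get_uuid(anm).

update_http(anm)        # push the ANM only, before compilation
update_http(anm, nidb)  # push the ANM plus the NIDB after compilation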
Example #23
    def nailed_up_routes(self, node):
        log.debug('Configuring nailed up routes')
        phy_node = self.anm['phy'].node(node)

        if node.is_ebgp_v4 and node.ip.use_ipv4:
            infra_blocks = self.anm['ipv4'].data['infra_blocks'].get(
                phy_node.asn) or []
            for infra_route in infra_blocks:
                stanza = ConfigStanza(
                    prefix=str(infra_route.network),
                    netmask=str(infra_route.netmask),
                    nexthop="Null0",
                    metric=254,
                )
                node.ipv4_static_routes.append(stanza)

        if node.is_ebgp_v6 and node.ip.use_ipv6:
            infra_blocks = self.anm['ipv6'].data['infra_blocks'].get(
                phy_node.asn) or []
            # TODO: setup schema with defaults
            for infra_route in infra_blocks:
                stanza = ConfigStanza(
                    prefix=str(infra_route),
                    nexthop="Null0",
                    metric=254,
                )
                node.ipv6_static_routes.append(stanza)
Example #24
    def asn(self):
        """Returns ASN of this node"""
        # TODO: make a function (not property)

        try:
            return self._graph.node[self.node_id]['asn']  # not in this graph
        except KeyError:

            # try from phy

            try:
                return self.anm.overlay_nx_graphs['phy'].node[
                    self.node_id]['asn']
            except KeyError:
                if self.node_id not in self.anm.overlay_nx_graphs['phy']:
                    message = \
                        'Node id %s not found in physical overlay' \
                        % self.node_id
                    if self.overlay_id == 'input':

                        # don't warn, most likely node not copied across

                        log.debug(message)
                    else:
                        log.warning(message)
                    return
Example #25
    def _init_interfaces(self, nbunch=None):
        """Initialises interfaces"""
        if not nbunch:
            nbunch = [n for n in self._graph.nodes()]

        try:
            nbunch = list(unwrap_nodes(nbunch))
        except AttributeError:
            pass  # don't need to unwrap

        phy_graph = self._anm.overlay_nx_graphs["phy"]

        for node in nbunch:
            try:
                phy_interfaces = phy_graph.node[node]["_interfaces"]
                interface_data = {
                    'description': None,
                    'type': 'physical',
                }
                # need to do dict() to copy, otherwise all point to same memory location -> clobber
                data = dict(
                    (key, dict(interface_data)) for key in phy_interfaces)
                self._graph.node[node]['_interfaces'] = data
            except KeyError:
                # no counterpart in physical graph, initialise
                log.debug("Initialise interfaces for %s in %s" % (
                    node, self._overlay_id))
                self._graph.node[node]['_interfaces'] = {0:
                        {'description': 'loopback',
                            'type': 'loopback'}}
Example #26
    def restore(self, pickle_file):
        import gzip
        log.debug("Restoring %s" % pickle_file)
        with gzip.open(pickle_file, "r") as fh:
            # data = json.load(fh)
            data = fh.read()
            self._graph = ank_json.ank_json_loads(data)
Example #27
def remove_parallel_switch_links(anm):
    g_phy = anm['phy']
    subs = ank_utils.connected_subgraphs(g_phy, g_phy.switches())
    for component in subs:
        log.debug("Checking for multiple links to switch cluster %s" %
                  str(sorted(component)))

        # Collect all links into this cluster
        external_edges = []
        for switch in component:
            for edge in switch.edges():
                if edge.dst not in component:
                    external_edges.append(edge)

        # Group by the node they link to
        from collections import defaultdict
        check_dict = defaultdict(list)
        for edge in external_edges:
            check_dict[edge.dst].append(edge)

        # Check to see if any nodes have more than one link into this aggregate
        for dst, edges in check_dict.items():
            if len(edges) > 1:
                edges_to_remove = sorted(edges)[1:]  # remove all but first
                interfaces = ", ".join(
                    sorted(str(edge.dst_int['phy']) for edge in edges))
                interfaces_to_disconnect = ", ".join(
                    sorted(
                        str(edge.dst_int['phy']) for edge in edges_to_remove))
                dst.log.warning(
                    "Multiple edges exist to same switch cluster: %s (%s). Removing edges from interfaces %s"
                    % (str(sorted(component)), interfaces,
                       interfaces_to_disconnect))

                g_phy.remove_edges_from(edges_to_remove)
Example #28
def mpls_te(anm):
    g_in = anm['input']
    g_phy = anm['phy']
    g_l3 = anm['layer3']

    # add the overlay regardless, so compilers can quickly check for a node in anm['mpls_te']

    g_mpls_te = anm.add_overlay('mpls_te')
    if not any(True for n in g_in.routers() if n.mpls_te_enabled):
        log.debug('No nodes with mpls_te_enabled set')
        return

    # te head end set if here

    g_mpls_te.add_nodes_from(g_in.routers())

    # build up edge list sequentially, to provide meaningful messages for multipoint links

    multipoint_edges = [e for e in g_l3.edges() if e.multipoint]
    if len(multipoint_edges):
        log.info('Excluding multi-point edges from MPLS TE topology: %s' %
                 ', '.join(str(e) for e in multipoint_edges))

    edges_to_add = set(g_l3.edges()) - set(multipoint_edges)
    g_mpls_te.add_edges_from(edges_to_add)
Example #29
def manage_network(input_graph_string, timestamp,
                   build_options, reload_build=False, grid=None):
    """Build, compile, render network as appropriate"""
    # import build_network_simple as build_network
    import autonetkit.build_network as build_network
    if reload_build:
        # remap?
        build_network = reload(build_network)

    if build_options['build']:
        if input_graph_string:
            graph = build_network.load(input_graph_string)
        elif grid:
            graph = build_network.grid_2d(grid)

        anm = build_network.build(graph)
        if not build_options['compile']:
            update_vis(anm)

        if build_options['validate']:
            import autonetkit.ank_validate
            try:
                autonetkit.ank_validate.validate(anm)
            except Exception, e:
                log.warning("Unable to validate topologies: %s" % e)
                log.debug("Unable to validate topologies", exc_info=True)
Example #30
    def allocate_input_interfaces(self):
        """allocates edges to interfaces"""
        #TODO: move this to ank utils? or extra step in the anm?
        if self._overlay_id != "input":
            log.debug("Tried to allocate interfaces to %s" % overlay_id)
            return

        if all(len(node['input'].raw_interfaces) > 0 for node in self) \
            and all(len(edge['input'].raw_interfaces) > 0 for edge in
                    self.edges()):
            log.debug("Input interfaces allocated")
            return  # interfaces allocated
        else:
            log.info('Automatically assigning input interfaces')

        # Initialise loopback zero on node
        for node in self:
            node.raw_interfaces = {
                0: {
                    'description': 'loopback',
                    'category': 'loopback'
                }
            }

        ebunch = sorted(self.edges())
        for edge in ebunch:
            src = edge.src
            dst = edge.dst
            src_int_id = src._add_interface('%s to %s' %
                                            (src.label, dst.label))
            dst_int_id = dst._add_interface('%s to %s' %
                                            (dst.label, src.label))
            edge.raw_interfaces = {src.id: src_int_id, dst.id: dst_int_id}
Example #31
def remove_dirs(dirs):
    for directory in dirs:
        log.debug("Removing directory %s" % directory)
        try:
            shutil.rmtree(directory)
        except OSError, e:
            log.warning("Unable to remove %s, %s" % (directory, e))
Example #32
def build_rip(anm):
    """Build rip overlay"""
    g_in = anm['input']
    g_l3 = anm['layer3']
    g_rip = anm.add_overlay("rip")
    g_phy = anm['phy']

    if not anm['phy'].data.enable_routing:
        g_rip.log.info("Routing disabled, not configuring rip")
        return

    if not any(n.igp == "rip" for n in g_phy):
        log.debug("No rip nodes")
        return
    rip_nodes = [n for n in g_l3 if n['phy'].igp == "rip"]
    g_rip.add_nodes_from(rip_nodes)
    g_rip.add_edges_from(g_l3.edges(), warn=False)
    ank_utils.copy_int_attr_from(g_l3, g_rip, "multipoint")

    ank_utils.copy_attr_from(
        g_in, g_rip, "custom_config_rip", dst_attr="custom_config")

    g_rip.remove_edges_from(
        [link for link in g_rip.edges() if link.src.asn != link.dst.asn])

    for node in g_rip:
        node.process_id = node.asn

    for link in g_rip.edges():
        link.metric = 1  # default

    for edge in g_rip.edges():
        for interface in edge.interfaces():
            interface.metric = edge.metric
            interface.multipoint = edge.multipoint
Example #33
def manage_network(input_graph_string,
                   timestamp,
                   build_options,
                   reload_build=False,
                   grid=None):
    """Build, compile, render network as appropriate"""
    # import build_network_simple as build_network
    import autonetkit.build_network as build_network
    if reload_build:
        # remap?
        build_network = reload(build_network)

    if build_options['build']:
        if input_graph_string:
            graph = build_network.load(input_graph_string)
        elif grid:
            graph = build_network.grid_2d(grid)

        anm = build_network.build(graph)
        if not build_options['compile']:
            update_vis(anm)

        if build_options['validate']:
            import autonetkit.ank_validate
            try:
                autonetkit.ank_validate.validate(anm)
            except Exception, e:
                log.warning("Unable to validate topologies: %s" % e)
                log.debug("Unable to validate topologies", exc_info=True)
Example #34
def build_ip(anm):
    g_ip = anm.add_overlay('ip')
    g_l2_bc = anm['layer2_bc']
    g_phy = anm['phy']
    # Retain arbitrary ASN allocation for IP addressing
    g_ip.add_nodes_from(g_l2_bc, retain=["asn", "broadcast_domain"])
    g_ip.add_edges_from(g_l2_bc.edges())

    #TODO:
    for bc in g_ip.nodes("broadcast_domain"):
        bc.allocate = True

    for bc in g_ip.nodes("broadcast_domain"):
        # Skip allocation if any neighbouring interface is L2 encapsulated
        for edge in bc.edges():
            if edge.dst_int['phy'].l2_encapsulated:
                log.debug("Removing IP allocation for broadcast_domain %s "
                         "as neighbor %s is L2 encapsulated", bc, edge.dst)

                #g_ip.remove_node(bc)
                bc.allocate = False

                # and mark on connected interfaces
                for neigh_int in bc.neighbor_interfaces():
                    neigh_int.allocate = False

                break


    # copy over skipped loopbacks
    # TODO: check if loopback copy attr
    for node in g_ip.l3devices():
        for interface in node.loopback_interfaces():
            if interface['phy'].allocate is not None:
                interface['ip'].allocate = interface['phy'].allocate
Example #35
def expand(G_in):
    """ Expands out graph products. G is the source "backbone" graph. H_x is the "PoP template" graphs
    """
    graph_unwrapped = ank_utils.unwrap_graph(G_in)
    G = graph_unwrapped.copy()

    ank.set_node_default(G_in, G_in)

    template_names = set(node.pop_template for node in G_in)
    template_names.discard("None")
    template_names.discard(None)
    if not len(template_names):
        log.debug("No PoP templates set")
        return  # no templates set

    # Load these templates
    templates = {}
    for template in template_names:
        template_filename = os.path.join("pop_templates",
                                         "%s.graphml" % template)
        try:
            pop_graph = autonetkit.load.graphml.load_graphml(
                template_filename
            )  #TODO: pass in properties eg edge type = physical
        except Exception, e:
            log.warning("Unable to load pop template %s: %s" % (template, e))
            return
        pop_graph = pop_graph.to_undirected()  # undirected for now, TODO: document this
        templates[template] = pop_graph
Example #36
def assign_loopback_ip_pool(anm):
    g_in = anm['input']
    g_ipv4 = anm['ipv4']
    pool_provided = False
    if g_in.data.ignite is not None:
        pool = g_in.data.ignite
        if 'loopback_subnet' in pool:
            pool_provided = True
            loopback_pool_id = pool['loopback_subnet']
    if not pool_provided:
        log.debug("loopback Pool not provided")
        return
    for l3_device in g_ipv4.l3devices():
        #call pool func:Will be completed when pool func is written
        #log an error if something fails
        loopback_ip = allocate_pool_entry(loopback_pool_id, l3_device.name, None)
        pos_mask = loopback_ip.find('/')
        if pos_mask != -1:
            network = loopback_ip[:pos_mask]
            mask = int(loopback_ip[pos_mask+1:])
        else:
            # no prefix length supplied - use the whole string and default to /32
            network = loopback_ip
            mask = 32

        l3_device.loopback = network
       # l3_device.loopback_prefix = mask
        #    interface.ipv4_prefixlen = mask

    log.debug("Allocated IP's from loopback Pool")
Example #37
def extract_ipv6_blocks(anm):

    # TODO: set all these blocks globally in config file, rather than repeated
    # in load, build_network, compile, etc

    from autonetkit.ank import sn_preflen_to_network
    from netaddr import IPNetwork
    g_in = anm['input']

    ipv6_defaults = SETTINGS["IP Addressing"]["v6"]

    try:
        infra_subnet = g_in.data.ipv6_infra_subnet
        infra_prefix = g_in.data.ipv6_infra_prefix
        infra_block = sn_preflen_to_network(infra_subnet, infra_prefix)
    except Exception, error:
        infra_block = IPNetwork(
            '%s/%s' %
            (ipv6_defaults["infra_subnet"], ipv6_defaults["infra_prefix"]))
        if infra_subnet is None or infra_prefix is None:
            log.debug('Using default IPv6 infra_subnet %s', infra_block)
        else:
            log.warning(
                'Unable to obtain IPv6 infra_subnet from input graph: %s, using default %s'
                % (error, infra_block))
Example #38
def update_http(anm = None, nidb = None, http_url = None):
    if http_url is None:
        http_url = default_http_url

    if anm and nidb:
        body = autonetkit.ank_json.dumps(anm, nidb)
    elif anm:
        body = autonetkit.ank_json.dumps(anm)
    else:
        import json
        body = json.dumps({}) # blank to test visualisation server running

    uuid = get_uuid(anm)

    params = urllib.urlencode({
        'body': body,
        'type': 'anm',
        'uuid': uuid,
        })
    try:
        data = urllib.urlopen(http_url, params).read()
        log.debug(data)
    except IOError, e:
        log.info("Unable to connect to visualisation server %s" % http_url)
        return
Example #39
def assign_interface_ip_pool(anm):
    g_in = anm["input"]

    pool_provided = False
    if g_in.data.ignite is not None:
        pool = g_in.data.ignite
        if "infra_subnet" in pool:
            pool_provided = True
            infra_pool_id = pool["infra_subnet"]
    if not pool_provided:
        log.debug("Infra Pool not provided")
        return

    l3_devices = [d for d in g_in if d.device_type in ("router", "server")]
    for device in l3_devices:
        physical_interfaces = list(device.edge_interfaces())
        for interface in physical_interfaces:
            # call pool func:Will be completed when pool func is written
            # log an error if something fails
            infra_ip = allocate_pool_entry(infra_pool_id, device.name, None)
            # print infra_ip, infra_pool_id, device.name
            pos_mask = infra_ip.find("/")
            if pos_mask != -1:
                network = infra_ip[:pos_mask]
                mask = int(infra_ip[pos_mask + 1 :])
            else:
                # no prefix length supplied - use the whole string and default to /32
                network = infra_ip
                mask = 32

            interface.ipv4_address = network
            interface.ipv4_prefixlen = mask

    log.debug("Allocated IP's from Infra Pool")
Example #40
def build_ebgp(anm):
    g_in = anm['input']
    g_phy = anm['phy']
    g_ebgp = anm.add_overlay("ebgp", directed=True)
    g_ebgp.add_nodes_from(g_in.nodes("is_router"))
    ebgp_edges = [e for e in g_in.edges() if not e.attr_equal("asn")]
    g_ebgp.add_edges_from(ebgp_edges, bidirectional=True, type='ebgp')

    ebgp_switches = [
        n for n in g_in.nodes("is_switch")
        if not ank_utils.neigh_equal(g_phy, n, "asn")
    ]
    g_ebgp.add_nodes_from(ebgp_switches, retain=['asn'])
    log.debug("eBGP switches are %s" % ebgp_switches)
    g_ebgp.add_edges_from(
        (e for e in g_in.edges()
         if e.src in ebgp_switches or e.dst in ebgp_switches),
        bidirectional=True,
        type='ebgp')
    ank_utils.aggregate_nodes(g_ebgp, ebgp_switches, retain="edge_id")
    ebgp_switches = list(g_ebgp.nodes(
        "is_switch"))  # need to recalculate as may have aggregated
    log.debug("aggregated eBGP switches are %s" % ebgp_switches)
    exploded_edges = ank_utils.explode_nodes(g_ebgp,
                                             ebgp_switches,
                                             retain="edge_id")
    for edge in exploded_edges:
        edge.multipoint = True
Example #41
def assign_interface_ip_pool(anm):
    g_in = anm['input']

    pool_provided = False
    if g_in.data.ignite is not None:
        pool = g_in.data.ignite
        if 'infra_subnet' in pool:
            pool_provided = True
            infra_pool_id = pool['infra_subnet']
    if not pool_provided:
        log.debug("Infra Pool not provided")
        return

    l3_devices = [d for d in g_in if d.device_type in ('router', 'server')]
    for device in l3_devices:
        physical_interfaces = list(device.edge_interfaces())
        for interface in physical_interfaces:
            # call pool func: will be completed when pool func is written
            # log an error if something fails
            infra_ip = allocate_pool_entry(infra_pool_id, device.name, None)
            # print infra_ip, infra_pool_id, device.name
            pos_mask = infra_ip.find('/')
            if pos_mask != -1:
                network = infra_ip[:pos_mask]
                mask = int(infra_ip[pos_mask+1:])
            else:
                # no prefix length supplied - use the whole string and default to /32
                network = infra_ip
                mask = 32

            interface.ipv4_address = network
            interface.ipv4_prefixlen = mask

    log.debug("Allocated IP's from Infra Pool")
Example #42
    def default(self, obj):
        if isinstance(obj, set):
            return str(obj)
        if isinstance(obj, netaddr.IPAddress):
            return str(obj)
        if isinstance(obj, netaddr.IPNetwork):
            return str(obj)
        if isinstance(obj, autonetkit.anm.OverlayNode):
            #TODO: add documentation about serializing anm nodes
            log.warning(
                "%s is anm overlay_node. Use attribute rather than object in compiler."
                % obj)
            return str(obj)
        if isinstance(obj, autonetkit.plugins.ipv4.TreeNode):
            #TODO: add documentation about serializing anm nodes
            return str(obj)
        if isinstance(obj, autonetkit.anm.OverlayEdge):
            #TODO: add documentation about serializing anm nodes
            log.warning(
                "%s is anm overlay_edge. Use attribute rather than object in compiler."
                % obj)
            return str(obj)
        if isinstance(obj, autonetkit.nidb.nidb_node_category):
            #TODO: add documentation about serializing anm nodes
            log.debug(
                "%s is nidb nidb_node_category. Use attribute rather than object in compiler."
                % obj)
            return str(obj)
        if isinstance(obj, nx.classes.Graph):
            return json_graph.node_link_data(obj)

        return json.JSONEncoder.default(self, obj)
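
This default() hook appears to belong to the AnkEncoder used elsewhere in this listing (send_commands and the diff step of manage_network pass cls=ank_json.AnkEncoder); a minimal usage sketch with illustrative data:

import json
import netaddr
import autonetkit.ank_json as ank_json

data = {"prefix": netaddr.IPNetwork("192.0.2.0/24"), "routers": set(["r1", "r2"])}
body = json.dumps(data, cls=ank_json.AnkEncoder, indent=4)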
Example #43
def compile_network(anm):
    nidb = create_nidb(anm)
    g_phy = anm['phy']

    for target_data in config.settings['Compile Targets'].values():
        host = target_data['host']
        platform = target_data['platform']
        if platform == "netkit":
            import autonetkit.compilers.platform.netkit as pl_netkit
            platform_compiler = pl_netkit.NetkitCompiler(nidb, anm, host)
        elif platform == "VIRL":
            try:
                import autonetkit_cisco.compilers.platform.cisco as pl_cisco
                platform_compiler = pl_cisco.CiscoCompiler(nidb, anm, host)
            except ImportError:
                log.debug("Unable to load VIRL platform compiler")
        elif platform == "dynagen":
            import autonetkit.compilers.platform.dynagen as pl_dynagen
            platform_compiler = pl_dynagen.DynagenCompiler(nidb, anm, host)
        elif platform == "junosphere":
            import autonetkit.compilers.platform.junosphere as pl_junosphere
            platform_compiler = pl_junosphere.JunosphereCompiler(
                nidb, anm, host)

        if any(g_phy.nodes(host=host, platform=platform)):
            log.info("Compiling configurations for %s on %s" % (platform, host))
            platform_compiler.compile()  # only compile if hosts set
        else:
            log.debug("No devices set for %s on %s" % (platform, host))

    return nidb
Example #44
    def _ports(self):
        """Returns underlying interface dict"""
        try:
            return self._graph.node[self.node_id]["_ports"]
        except KeyError:
            log.debug("No interfaces initialised for %s" % self)
            return
Example #45
def transfer(host, username, local, remote=None, key_filename=None):
    log.debug('Transferring lab to %s' % host)
    log.info('Transferring Netkit lab')
    if not remote:
        remote = local  # same filename
    import paramiko

    # import logging
    # logging.getLogger("paramiko").setLevel(logging.DEBUG)

    ssh = paramiko.SSHClient()

    # ssh.set_log_channel("ANK")

    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if key_filename:
        log.debug('Connecting to %s with %s and key %s' % (host,
                  username, key_filename))
        ssh.connect(host, username=username, key_filename=key_filename)
    else:
        log.info('Connecting to %s with %s' % (host, username))
        ssh.connect(host, username=username)
    log.info('Opening SSH for SFTP')
    ftp = ssh.open_sftp()
    log.info('Putting file %s to %s' % (local, remote))
    ftp.put(local, remote)
    log.info('Put file %s to %s' % (local, remote))
    ftp.close()
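
A hedged usage sketch for transfer; the host, username, paths, and key file below are placeholders.

transfer("netkit-host.example.com", "ank_user",
         local="netkit_lab.tar.gz",
         remote="/home/ank_user/netkit_lab.tar.gz",
         key_filename="/home/ank_user/.ssh/id_rsa")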
Example #46
def load(input_graph_string, defaults=True):

    # TODO: look at XML header for file type
    import autonetkit.load.graphml as graphml
    import autonetkit.load.load_json as load_json
    try:
        input_graph = graphml.load_graphml(input_graph_string,
                                           defaults=defaults)
    except autonetkit.exception.AnkIncorrectFileFormat:
        try:
            input_graph = load_json.load_json(input_graph_string,
                                              defaults=defaults)
        except (ValueError, autonetkit.exception.AnkIncorrectFileFormat):
            # try a different reader
            try:
                from autonetkit_cisco import load as cisco_load
            except ImportError, error:
                log.debug("Unable to load autonetkit_cisco %s", error)
                return  # module not present (development module)
            else:
                input_graph = cisco_load.load(input_graph_string)
                # add local deployment host
                SETTINGS['General']['deploy'] = True
                SETTINGS['Deploy Hosts']['internal'] = {
                    'VIRL': {
                        'deploy': True,
                    },
                }
Example #47
def assign_loopback_ip_pool(anm):
    g_in = anm["input"]
    g_ipv4 = anm["ipv4"]
    pool_provided = False
    if g_in.data.ignite is not None:
        pool = g_in.data.ignite
        if "loopback_subnet" in pool:
            pool_provided = True
            loopback_pool_id = pool["loopback_subnet"]
    if not pool_provided:
        log.debug("loopback Pool not provided")
        return
    for l3_device in g_ipv4.l3devices():
        # call pool func:Will be completed when pool func is written
        # log an error if something fails
        loopback_ip = allocate_pool_entry(loopback_pool_id, l3_device.name, None)
        pos_mask = loopback_ip.find("/")
        if pos_mask != -1:
            network = loopback_ip[:pos_mask]
            mask = int(loopback_ip[pos_mask + 1 :])
        else:
            # no prefix length supplied - use the whole string and default to /32
            network = loopback_ip
            mask = 32

        l3_device.loopback = network
    # l3_device.loopback_prefix = mask
    #    interface.ipv4_prefixlen = mask

    log.debug("Allocated IP's from loopback Pool")
Example #48
def build_ebgp(anm):
    g_in = anm['input']
    g_phy = anm['phy']
    g_ebgp = anm.add_overlay("ebgp", directed=True)
    g_ebgp.add_nodes_from(g_in.nodes("is_router"))
    ebgp_edges = [e for e in g_in.edges() if not e.attr_equal("asn")]
    g_ebgp.add_edges_from(ebgp_edges, bidirectional=True, type='ebgp')

    ebgp_switches = [n for n in g_in.nodes("is_switch")
            if not ank_utils.neigh_equal(g_phy, n, "asn")]
    g_ebgp.add_nodes_from(ebgp_switches, retain=['asn'])
    log.debug("eBGP switches are %s" % ebgp_switches)
    g_ebgp.add_edges_from((e for e in g_in.edges()
            if e.src in ebgp_switches or e.dst in ebgp_switches),
    bidirectional=True, type='ebgp')
    ank_utils.aggregate_nodes(g_ebgp, ebgp_switches, retain="edge_id")
    # need to recalculate as may have aggregated
    ebgp_switches = list(g_ebgp.nodes("is_switch"))
    log.debug("aggregated eBGP switches are %s" % ebgp_switches)
    exploded_edges = ank_utils.explode_nodes(g_ebgp, ebgp_switches,
            retain="edge_id")
    same_asn_edges = []
    for edge in exploded_edges:
        if edge.src.asn == edge.dst.asn:
            same_asn_edges.append(edge)
        else:
            edge.multipoint = True
    """TODO: remove up to here once compiler updated"""

    g_ebgp.remove_edges_from(same_asn_edges)
Example #49
def transfer(host, username, local, remote=None, key_filename=None):
    log.debug('Transferring lab to %s' % host)
    log.info('Transferring Netkit lab')
    if not remote:
        remote = local  # same filename
    import paramiko

    # import logging
    # logging.getLogger("paramiko").setLevel(logging.DEBUG)

    ssh = paramiko.SSHClient()

    # ssh.set_log_channel("ANK")

    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if key_filename:
        log.debug('Connecting to %s with %s and key %s' %
                  (host, username, key_filename))
        ssh.connect(host, username=username, key_filename=key_filename)
    else:
        log.info('Connecting to %s with %s' % (host, username))
        ssh.connect(host, username=username)
    log.info('Opening SSH for SFTP')
    ftp = ssh.open_sftp()
    log.info('Putting file %s to %s' % (local, remote))
    ftp.put(local, remote)
    log.info('Put file %s to %s' % (local, remote))
    ftp.close()
Example #50
    def allocate_input_interfaces(self):
        """allocates edges to interfaces"""
        # TODO: move this to ank utils? or extra step in the anm?
        if self._overlay_id != "input":
            log.debug("Tried to allocate interfaces to %s" % overlay_id)
            return

        if all(len(node['input'].raw_interfaces) > 0 for node in self) \
            and all(len(edge['input'].raw_interfaces) > 0 for edge in
                    self.edges()):
            log.debug("Input interfaces allocated")
            return  # interfaces allocated
        else:
            log.info('Automatically assigning input interfaces')

        # Initialise loopback zero on node
        for node in self:
            node.raw_interfaces = {0:
                                   {'description': 'loopback', 'category': 'loopback'}}

        ebunch = sorted(self.edges())
        for edge in ebunch:
            src = edge.src
            dst = edge.dst
            src_int_id = src._add_interface('%s to %s' % (src.label,
                                                          dst.label))
            dst_int_id = dst._add_interface('%s to %s' % (dst.label,
                                                          src.label))
            edge.raw_interfaces = {
                src.id: src_int_id,
                dst.id: dst_int_id}
Beispiel #51
0
def load(input_graph_string):
    # TODO: look at XML header for file type
    import autonetkit.load.graphml as graphml
    import autonetkit.load.load_json as load_json
    try:
        input_graph = graphml.load_graphml(input_graph_string)
    except autonetkit.exception.AnkIncorrectFileFormat:
        try:
            input_graph = load_json.load_json(input_graph_string)
        except (ValueError, autonetkit.exception.AnkIncorrectFileFormat):
            # try a different reader
            try:
                from autonetkit_cisco import load as cisco_load
            except ImportError, error:
                log.debug("Unable to load autonetkit_cisco %s", error)
                return  # module not present (development module)
            else:
                input_graph = cisco_load.load(input_graph_string)
                # add local deployment host
                SETTINGS['General']['deploy'] = True
                SETTINGS['Deploy Hosts']['internal'] = {
                    'VIRL': {
                        'deploy': True,
                    },
                }
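
A minimal sketch of calling load. The filename is a placeholder, the parameter name suggests the function expects the file contents as a string rather than a path, and the excerpt is cut off before any return, so the assignment assumes the full function returns the parsed graph:

with open("topology.graphml") as fh:
    input_graph = load(fh.read())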
Beispiel #52
0
def explode_nodes(overlay_graph, nodes, retain = []):
    """Explodes all nodes in nodes
    TODO: explain better
    TODO: Add support for digraph - check if overlay_graph.is_directed()
    """
    log.debug("Exploding nodes")
    try:
        retain.lower()
        retain = [retain] # was a string, put into list
    except AttributeError:
        pass # already a list

    graph = unwrap_graph(overlay_graph)
    nodes = unwrap_nodes(nodes)
    added_edges = []
#TODO: need to keep track of edge_ids here also?
    nodes = list(nodes)
    for node in nodes:
        log.debug("Exploding from %s" % node)
        neighbors = list(graph.neighbors(node))  # list() so the pairing below can iterate it twice
        neigh_edge_pairs = ( (s,t) for s in neighbors for t in neighbors if s != t)
        edges_to_add = []
        for (src, dst) in neigh_edge_pairs:
            src_to_node_data = dict( (key, graph[src][node][key]) for key in retain)
            node_to_dst_data = dict( (key, graph[node][dst][key]) for key in retain)
            src_to_node_data.update(node_to_dst_data)
            edges_to_add.append((src, dst, src_to_node_data))

        graph.add_edges_from(edges_to_add)
        added_edges.extend(edges_to_add)  # extend (not append) so wrap_edges gets a flat edge list

        graph.remove_node(node)

    return wrap_edges(overlay_graph, added_edges)
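
What explode_nodes does to a single node, shown with plain networkx on toy labels (the overlay wrapping and edge-id bookkeeping above is omitted):

import networkx as nx

g = nx.Graph()
g.add_edges_from([("r1", "sw1"), ("r2", "sw1"), ("r3", "sw1")])

neighbors = list(g.neighbors("sw1"))
# replace sw1 with a full mesh between its neighbours, then drop it
g.add_edges_from((s, t) for s in neighbors for t in neighbors if s != t)
g.remove_node("sw1")
# g.edges() now contains r1-r2, r1-r3, r2-r3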
Beispiel #53
0
    def _interfaces(self):
        """Returns underlying interface dict"""
        try:
            return self._graph.node[self.node_id]["_interfaces"]
        except KeyError:
            log.debug("No interfaces initialised for %s" % self)
            return []
Beispiel #54
0
def expand(G_in):
    """ Expands out graph products. G is the source "backbone" graph. H_x is the "PoP template" graphs
    """
    graph_unwrapped = ank_utils.unwrap_graph(G_in)
    G = graph_unwrapped.copy()

    ank.set_node_default(G_in, G_in)

    template_names = set(node.pop_template for node in G_in)
    template_names.discard("None")
    template_names.discard(None)
    if not len(template_names):
        log.debug("No PoP templates set")
        return # no templates set

# Load these templates
    templates = {}
    for template in template_names:
        template_filename = os.path.join("pop_templates", "%s.graphml" % template)
        try:
            pop_graph = autonetkit.load.graphml.load_graphml(template_filename) #TODO: pass in properties eg edge type = physical
        except Exception, e:
            log.warning("Unable to load pop template %s: %s" % (template, e))
            return
        pop_graph = pop_graph.to_undirected() # Undirected for now TODO: document this
        templates[template] = pop_graph
Beispiel #55
0
def copy_attr_from(overlay_src, overlay_dst, src_attr, dst_attr = None, nbunch = None, type = None, default = None):
    #TODO: add dest format, eg to convert to int
    if not dst_attr:
        dst_attr = src_attr

    graph_src = unwrap_graph(overlay_src)
    graph_dst = unwrap_graph(overlay_dst)
    if not nbunch:
        nbunch = graph_src.nodes()

    for n in nbunch:
        try:
            val = graph_src.node[n].get(src_attr, default)
        except KeyError:
            #TODO: check if because node doesn't exist in dest, or because attribute doesn't exist in graph_src
            log.debug("Unable to copy node attribute %s for %s in %s" % (src_attr, n, overlay_src))
        else:
            #TODO: use a dtype to take an int, float, etc
            if type is float:
                val = float(val)
            elif type is int:
                val = int(val)

            if n in graph_dst:
                graph_dst.node[n][dst_attr] = val
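
A hypothetical call, assuming an anm with 'input' and 'phy' overlays as in the other examples: it copies each node's asn across, coerces it to int, and falls back to the given default where the attribute is missing:

copy_attr_from(anm['input'], anm['phy'], "asn", type=int, default=1)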
Beispiel #56
0
    def restore(self, pickle_file):
        import gzip
        log.debug("Restoring %s" % pickle_file)
        with gzip.open(pickle_file, "r") as fh:
            # data = json.load(fh)
            data = fh.read()
            self._graph = ank_json.ank_json_loads(data)
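
A hypothetical call on the object exposing restore (here assumed to be a nidb-like instance); the path is a placeholder for a previously saved gzipped JSON snapshot:

nidb.restore("versions/nidb/nidb_snapshot.json.gz")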
Beispiel #57
0
def allocate(G_phy, G_bgp):
    log.info("Allocating route reflectors")
    graph_phy = G_phy._graph
    for asn, devices in G_phy.groupby("asn").items():
        routers = [d for d in devices if d.is_router]
        router_ids = list(ank_utils.unwrap_nodes(routers))
        mapping_id_to_device = dict(zip(
            router_ids, routers))  # to reverse lookup id back to device

        subgraph_phy = graph_phy.subgraph(router_ids)
        if len(subgraph_phy) == 1:
            continue  # single node in graph, no ibgp

        degree_cen = nx.degree_centrality(subgraph_phy)

        ordered = sorted(subgraph_phy.nodes(),
                         key=lambda x: degree_cen[x],
                         reverse=True)

        # Take the most connected quarter (at least one router) as route reflectors
        rr_count = len(subgraph_phy) / 4 or 1
        route_reflectors = ordered[:rr_count]
        log.debug("Chose route_reflectors %s" % route_reflectors)
        rr_clients = ordered[rr_count:]  # the other routers
        route_reflectors = list(ank_utils.wrap_nodes(G_bgp, route_reflectors))
        rr_clients = list(ank_utils.wrap_nodes(G_bgp, rr_clients))

        # distances (shortest path, physical graph) from rrs to clients
        path_lengths = {}
        for rr in route_reflectors:
            path = nx.single_source_shortest_path_length(subgraph_phy, rr)
            path_mapped = dict(
                (mapping_id_to_device[id], length)
                for (id, length) in path.items())  # ids to devices
            path_lengths[rr] = path_mapped

        G_bgp.update(route_reflectors,
                     route_reflector=True)  # mark as route reflector
        # rr <-> rr
        over_links = [(rr1, rr2) for rr1 in route_reflectors
                      for rr2 in route_reflectors if rr1 != rr2]
        G_bgp.add_edges_from(over_links, type='ibgp', direction='over')

        for client in rr_clients:
            ranked_rrs = sorted(route_reflectors,
                                key=lambda rr: path_lengths[rr][client])
            parent_count = 2  # number of parents to connect to for each rr client
            parent_rrs = ranked_rrs[:parent_count]
            log.info("Route reflectors for %s are %s " % (client, parent_rrs))

            for parent in parent_rrs:
                # client -> rr
                #up_links = [(client, rr) for (client, rr) in itertools.product(rr_clients, route_reflectors)]
                G_bgp.add_edge(client, parent, type='ibgp', direction='up')
                # rr -> client
                #down_links = [(rr, client) for (client, rr) in up_links] # opposite of up
                G_bgp.add_edge(parent, client, type='ibgp', direction='down')

    log.debug("iBGP done")
Beispiel #58
0
def deploy_network(anm, nidb, input_graph_string=None):

    # TODO: make this driven from config file
    log.info("Deploying Network")

    # TODO: pick up platform, host, filenames from nidb (as set in there)
    deploy_hosts = config.settings['Deploy Hosts']
    for hostname, host_data in deploy_hosts.items():
        for platform, platform_data in host_data.items():
            if not any(nidb.nodes(host=hostname, platform=platform)):
                log.debug(
                    "No hosts for (host, platform) (%s, %s), skipping deployment"
                    % (hostname, platform))
                continue

            if not platform_data['deploy']:
                log.debug("Not deploying to %s on %s" % (platform, hostname))
                continue

            config_path = os.path.join("rendered", hostname, platform)

            if hostname == "internal":
                try:
                    from autonetkit_cisco import deploy as cisco_deploy
                except ImportError:
                    continue  # development module may not be available; skip this host
                if platform == "cisco":
                    create_new_xml = False
                    if not input_graph_string:
                        create_new_xml = True  # no input, eg if came from grid
                    elif anm['input'].data['file_type'] == "graphml":
                        create_new_xml = True  # input from graphml, create XML

                    if create_new_xml:
                        cisco_deploy.create_xml(anm, nidb, input_graph_string)
                    else:
                        cisco_deploy.package(nidb, config_path,
                                             input_graph_string)
                continue

            username = platform_data['username']
            key_file = platform_data['key file']
            host = platform_data['host']

            if platform == "netkit":
                import autonetkit.deploy.netkit as netkit_deploy
                tar_file = netkit_deploy.package(config_path, "nklab")
                netkit_deploy.transfer(host, username, tar_file, tar_file,
                                       key_file)
                netkit_deploy.extract(host,
                                      username,
                                      tar_file,
                                      config_path,
                                      timeout=60,
                                      key_filename=key_file)
            if platform == "cisco":
                #TODO: check why using nklab here
                cisco_deploy.package(config_path, "nklab")
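
A hedged sketch of the 'Deploy Hosts' settings structure this loop iterates over. The host name, address, and key path are placeholders, but the keys match those read above ('deploy', 'username', 'key file', 'host'), and config is the same settings module the function already uses:

config.settings['Deploy Hosts'] = {
    'my_host': {
        'netkit': {
            'deploy': True,
            'username': 'netkit',
            'key file': '/home/netkit/.ssh/id_rsa',
            'host': '203.0.113.10',
        },
    },
}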
Beispiel #59
0
def apply_design_rules(anm):
    """Applies appropriate design rules to ANM"""
    g_in = anm['input']

    build_phy(anm)
    g_phy = anm['phy']

    build_l3_connectivity(anm)
    check_server_asns(anm)

    from autonetkit.design.mpls import build_vrf
    build_vrf(anm)  # need to do before to add loopbacks before ip allocations
    from autonetkit.design.ip import build_ip, build_ipv4, build_ipv6
    #TODO: replace this with layer2 overlay topology creation
    build_ip(anm)  # ip infrastructure topology

    #TODO: set defaults at the start, rather than inline, ie set g_in.data.address_family then use later

    address_family = g_in.data.address_family or "v4"  # default is v4
    #TODO: can remove the infrastructure now create g_ip seperately
    if address_family == "None":
        log.info(
            "IP addressing disabled, disabling routing protocol configuration")
        anm['phy'].data.enable_routing = False

    if address_family == "None":
        log.info("IP addressing disabled, skipping IPv4")
        anm.add_overlay("ipv4")  # create empty so rest of code follows through
        g_phy.update(g_phy, use_ipv4=False)
    elif address_family in ("v4", "dual_stack"):
        build_ipv4(anm, infrastructure=True)
        g_phy.update(g_phy, use_ipv4=True)
    elif address_family == "v6":
        # Allocate v4 loopbacks for router ids
        build_ipv4(anm, infrastructure=False)
        g_phy.update(g_phy, use_ipv4=False)

    #TODO: Create a collision domain overlay for ip addressing - l2 overlay?
    if address_family == "None":
        log.info("IP addressing disabled, not allocating IPv6")
        anm.add_overlay("ipv6")  # create empty so rest of code follows through
        g_phy.update(g_phy, use_ipv6=False)
    elif address_family in ("v6", "dual_stack"):
        build_ipv6(anm)
        g_phy.update(g_phy, use_ipv6=True)
    else:
        anm.add_overlay("ipv6")  # placeholder for compiler logic

    default_igp = g_in.data.igp or "ospf"
    ank_utils.set_node_default(g_in, igp=default_igp)

    ank_utils.copy_attr_from(g_in, g_phy, "include_csr")

    try:
        from autonetkit_cisco import build_network as cisco_build_network
    except ImportError, e:
        log.debug("Unable to load autonetkit_cisco %s" % e)