Example #1
    def asn(self):
        """Returns ASN of this node"""
        # TODO: make a function (not property)

        try:
            return self._graph.node[self.node_id]['asn']  # not in this graph
        except KeyError:

            # try from phy

            try:
                return self.anm.overlay_nx_graphs['phy'].node[self.node_id]['asn']
            except KeyError:
                if self.node_id not in self.anm.overlay_nx_graphs['phy']:
                    message = \
                        'Node id %s not found in physical overlay' \
                        % self.node_id
                    if self.overlay_id == 'input':

                        # don't warn, most likely node not copied across

                        log.debug(message)
                    else:
                        log.warning(message)
                    return
Example #2
def check_server_asns(anm):
    """Checks that servers have appropriate ASN allocated.
    Warns and auto-corrects servers connected to routers of a different AS
    #TODO: provide manual over-ride for this auto-correct.
    """
    # TODO: Move to validate module?
    g_phy = anm["phy"]

    for server in g_phy.servers():
        # TODO: remove now have external_connector device_type?
        if server.device_subtype in ("SNAT", "FLAT"):
            continue  # Don't warn on ASN for NAT elements
        l3_neighbors = list(server["layer3"].neighbors())
        l3_neighbor_asns = set(n.asn for n in l3_neighbors)
        if server.asn not in l3_neighbor_asns:
            neighs_with_asn = ["%s: AS %s" % (n, n.asn) for n in l3_neighbors]
            # formatted neighbor/ASN strings for the warning message
            server.log.warning("Server does not belong to same ASN " "as neighbors %s" % (neighs_with_asn))

            if len(l3_neighbors) == 1:
                # single ASN of neighbor -> auto correct
                if server["input"].default_asn:
                    neigh_asn = l3_neighbor_asns.pop()
                    log.warning("Updating server %s AS from %s" " to %s", server, server.asn, neigh_asn)
                    server.asn = neigh_asn
                else:
                    log.info("Server %s ASN %s explictly set by user, " "not auto-correcting", server, server.asn)
Example #3
def render_inline(node, render_template_file, to_memory=True,
                  render_dst_file=None):
    """Generic rendering of a node attribute rather than the standard location.
    Needs to be called by render_node.
    Doesn't support base folders - only single attributes.
    Note: supports rendering to memory (ie back to nidb rather than file)
    """

    node.log.debug("Rendering template %s" % (render_template_file))
    version_banner = format_version_banner()

    date = time.strftime("%Y-%m-%d %H:%M", time.localtime())

    if render_template_file:
        try:
            render_template = TEMPLATE_LOOKUP.get_template(
                render_template_file)
        except SyntaxException, error:
            log.warning("Unable to render %s: "
                        "Syntax error in template: %s" % (node, error))
            return

        if to_memory:
            # Render directly to DeviceModel
            render_output = render_template.render(
                node=node,
                version_banner=version_banner,
                date=date,
            )

            return render_output
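The TEMPLATE_LOOKUP used above appears to be a Mako template lookup; a minimal standalone sketch of the same render call is shown below (the "templates" directory and "router.mako" file are placeholder names, not AutoNetkit's actual layout):

# Hedged sketch: assumes Mako is installed; directory and template names are placeholders
from mako.lookup import TemplateLookup
from mako.exceptions import SyntaxException

TEMPLATE_LOOKUP = TemplateLookup(directories=["templates"])
try:
    template = TEMPLATE_LOOKUP.get_template("router.mako")
    output = template.render(node=None, version_banner="", date="")
    print(output)
except SyntaxException as error:
    print("Syntax error in template: %s" % error)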
Example #4
 def _interface(self):
     """Return data dict for the interface"""
     try:
         return self._node["_interfaces"][self.interface_id]
     except KeyError:
         log.warning("Unable to find interface %s in %s" % (self.interface_id, self.node))
         return None
Example #5
    def neighbor_interfaces(self, *args, **kwargs):
        #TODO: implement filtering for args and kwargs
        if len(args) or len(kwargs):
            log.warning("Attribute-based filtering not currently supported" 
                    " for neighbor_interfaces")

        return iter(edge.dst_int for edge in self.edges())
Example #6
    def default(self, obj):
        if isinstance(obj, set):
            return str(obj)
        if isinstance(obj, netaddr.IPAddress):
            return str(obj)
        if isinstance(obj, netaddr.IPNetwork):
            return str(obj)
        if isinstance(obj, autonetkit.nidb.DmNode):
            #TODO: need to unserialize nidb nodes...
            return str(obj)
        if isinstance(obj, autonetkit.anm.NmEdge):
            log.warning("%s is anm overlay_edge. Use attribute rather than object in compiler." % obj)
            return str(obj)
        if isinstance(obj, autonetkit.nidb.ConfigStanza):
            retval = obj.to_json()
            return retval
        if isinstance(obj, autonetkit.nidb.DmInterface):
            #TODO: check this is consistent with deserialization
            return str(obj)
        if isinstance(obj, nx.classes.Graph):
            #TODO: remove now?
            return json_graph.node_link_data(obj)

        if isinstance(obj, logging.LoggerAdapter):
            #TODO: filter this out in the to_json methods
            return ""

        return json.JSONEncoder.default(self, obj)
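A default() hook like the one above is only invoked for objects json cannot serialise natively, and is typically wired in through the cls argument of json.dumps. A minimal sketch of that wiring (the encoder class name below is hypothetical, not AutoNetkit's):

import json

class AnkJsonEncoder(json.JSONEncoder):
    def default(self, obj):
        # fall back to the string representation for anything json can't handle natively
        try:
            return json.JSONEncoder.default(self, obj)
        except TypeError:
            return str(obj)

data = {"asn": 65000, "interfaces": {"eth0", "eth1"}}  # a set is not JSON-serialisable by default
print(json.dumps(data, cls=AnkJsonEncoder, indent=2))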
Example #7
    def default(self, obj):
        if isinstance(obj, set):
            return str(obj)
        if isinstance(obj, netaddr.IPAddress):
            return str(obj)
        if isinstance(obj, netaddr.IPNetwork):
            return str(obj)
        if isinstance(obj, autonetkit.anm.OverlayNode):
            #TODO: add documentation about serializing anm nodes
            log.warning("%s is anm overlay_node. Use attribute rather than object in compiler." % obj)
            return str(obj)
        if isinstance(obj, autonetkit.plugins.ipv4.TreeNode):
            #TODO: add documentation about serializing anm nodes
            return str(obj)
        if isinstance(obj, autonetkit.anm.OverlayEdge):
            #TODO: add documentation about serializing anm nodes
            log.warning("%s is anm overlay_edge. Use attribute rather than object in compiler." % obj)
            return str(obj)
        if isinstance(obj, autonetkit.nidb.nidb_node_category):
            #TODO: add documentation about serializing anm nodes
            log.debug("%s is nidb nidb_node_category. Use attribute rather than object in compiler." % obj)
            return str(obj)

        

        return json.JSONEncoder.default(self, obj)
Example #8
 def publish_telnet(self, exchange, routing_key, body):
     try:
         tn = telnetlib.Telnet("localhost", self.telnet_port)
         tn.write(body)
         tn.close()
     except socket.error:
         log.warning("Unable to connect to telnet on localhost at %s" % self.telnet_port)
Example #9
def manage_network(input_graph_string, timestamp, build=True,
                   visualise=True, compile=True, validate=True, render=True,
                   monitor=False, deploy=False, measure=False, diff=False,
                   archive=False, grid=None, ):
    """Build, compile, render network as appropriate"""

    # import build_network_simple as build_network

    import autonetkit.build_network as build_network

    if build:
        if input_graph_string:
            graph = build_network.load(input_graph_string)
        elif grid:
            graph = build_network.grid_2d(grid)

        # TODO: integrate the code to visualise on error (enable in config)
        anm = None
        try:
            anm = build_network.build(graph)
        except Exception, e:
            # Send the visualisation to help debugging
            try:
                if visualise:
                    import autonetkit
                    autonetkit.update_vis(anm)
            except Exception, e:
                # problem with vis -> could be coupled with original exception -
                # raise original
                log.warning("Unable to visualise: %s" % e)
            raise  # raise the original exception
Example #10
    def _sync_loopbacks(self, interface_id):
        """Syncs a newly added loopback across all overlays the
        node is in"""
        for overlay in self.anm:
            if self not in overlay:
                continue

            if overlay._overlay_id in ("phy", self.overlay_id):
                # don't copy to self or phy as already there
                continue

            if overlay._overlay_id in ("graphics",):  # note trailing comma: tuple membership, not substring test
                """
                TODO: debug why get problem for graphics
                 File "/autonetkit/autonetkit/anm/node.py", line 257, in _sync_loopbacks
                    o_node._ports[interface_id] = {"category": "loopback"}
                IndexError: list assignment index out of range
                None
                """
                # skip the graphics overlay until the issue above is debugged
                continue

            o_node = overlay.node(self)
            if interface_id in o_node._ports:
                # something has gone wrong - allocated the id over the top
                # shouldn't happen since the next free id is from the phy

                log.warning("Internal consistency error with copying loopback "
                    "interface %s to %s in %s", interface_id, self, overlay)
                continue

            o_node._ports[interface_id] = {"category": "loopback"}
Example #11
    def __setattr__(self, key, val):
        """Sets interface property"""

        try:
            self._interface[key] = val
        except KeyError, e:
            log.warning(e)
Example #12
 def _nx_node_data(self):
     """Return NetworkX node data for the node"""
     try:
         return self._graph.node[self.node_id]
     except Exception, e:
         log.warning("Error accessing node data %s for node %s: %s" %
                     (self.overlay_id, self.node_id, e))
Example #13
    def interface(self, key):
        """Returns interface based on interface id"""

        try:
            if key.interface_id in self._interface_ids():
                return NmPort(self.anm, self.overlay_id,
                              self.node_id, key.interface_id)
        except AttributeError:

            # try with key as id

            try:
                if key in self._interface_ids():
                    return NmPort(self.anm, self.overlay_id,
                                  self.node_id, key)
            except AttributeError:

                # no match for either

                log.warning('Unable to find interface %s in %s '
                            % (key, self))
                return None

        # try searching for the "id" attribute of the interface eg
        # GigabitEthernet0/0 if set
        search = list(self.interfaces(id=key))
        # TODO: warn if more than one match ie len > 1
        if len(search):
            return search[0]  # first result

        search = list(self.interfaces(description=key))
        # TODO: warn if more than one match ie len > 1
        if len(search):
            return search[0]  # first result
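For reference, the lookup above falls through several keys in order; a sketch of the calls it supports (the node and argument names below are hypothetical):

# node.interface(other_port)             # another port object: matched on its interface_id
# node.interface(2)                      # a raw interface id
# node.interface("GigabitEthernet0/0")   # the interface's "id" attribute, if set
# node.interface("to r2")                # finally, the interface description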
Example #14
def check_layer2(anm):
    """Sanity checks on topology"""
    from collections import defaultdict
    g_l2 = anm['layer2']

    # check for igp and ebgp on same switch
    for switch in sorted(g_l2.switches()):
        neigh_asns = defaultdict(int)
        for neigh in switch.neighbors():
            if neigh.asn is None:
                continue  # don't add if not set
            neigh_asns[neigh.asn] += 1

        # IGP if two or more neighbors share the same ASN
        is_igp = any(asns > 1 for asns in neigh_asns.values())
        # eBGP if more than one unique neigh ASN
        is_ebgp = len(neigh_asns.keys()) > 1
        if is_igp and is_ebgp:
            log.warning("Switch %s contains both IGP and eBGP neighbors",
                        switch)

    # check for multiple links from nodes to switch
    for switch in sorted(g_l2.switches()):
        for neighbor in sorted(switch.neighbors()):
            edges = g_l2.edges(switch, neighbor)
            if len(edges) > 1:
                # more than one edge between the (src, dst) pair -> parallel
                log.warning("Multiple edges (%s) between %s and device %s",
                            len(edges), switch, neighbor)
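The IGP/eBGP test above reduces to counting neighbor ASNs; a tiny worked example of the two conditions:

from collections import defaultdict

neigh_asns = defaultdict(int)
for asn in (1, 1, 2):  # two neighbors in AS1, one in AS2
    neigh_asns[asn] += 1

is_igp = any(count > 1 for count in neigh_asns.values())   # True: AS1 seen twice
is_ebgp = len(neigh_asns.keys()) > 1                       # True: two distinct ASNs
print(is_igp and is_ebgp)                                   # True -> would trigger the warning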
Example #15
def expand(G_in):
    """ Expands out graph products. G is the source "backbone" graph. H_x is the "PoP template" graphs
    """
    graph_unwrapped = ank_utils.unwrap_graph(G_in)
    G = graph_unwrapped.copy()

    ank.set_node_default(G_in, G_in)

    template_names = set(node.pop_template for node in G_in)
    template_names.discard("None")
    template_names.discard(None)
    if not len(template_names):
        log.debug("No PoP templates set")
        return # no templates set

    # Load these templates
    templates = {}
    for template in template_names:
        template_filename = os.path.join("pop_templates", "%s.graphml" % template)
        try:
            pop_graph = autonetkit.load.graphml.load_graphml(template_filename) #TODO: pass in properties eg edge type = physical
        except Exception, e:
            log.warning("Unable to load pop template %s: %s" % (template, e))
            return
        pop_graph = pop_graph.to_undirected() # Undirected for now TODO: document this
        templates[template] = pop_graph
Example #16
def remove_dirs(dirs):
    for directory in dirs:
        log.debug("Removing directory %s" % directory)
        try:
            shutil.rmtree(directory)
        except OSError, e:
            log.warning("Unable to remove %s, %s" % (directory, e))
Example #17
def manage_network(input_graph_string, timestamp,
    build_options, reload_build=False, grid=None):
    """Build, compile, render network as appropriate"""
    # import build_network_simple as build_network
    import autonetkit.build_network as build_network
    if reload_build:
        # remap?
        build_network = reload(build_network)

    if build_options['build']:
        if input_graph_string:
            graph = build_network.load(input_graph_string)
        elif grid:
            graph = build_network.grid_2d(grid)

        anm = build_network.build(graph)
        if not build_options['compile']:
            update_vis(anm)

        if build_options['validate']:
            import autonetkit.ank_validate
            try:
                autonetkit.ank_validate.validate(anm)
            except Exception, e:
                log.warning("Unable to validate topologies: %s" % e)
                log.debug("Unable to validate topologies", exc_info=True)
Example #18
 def default(self, obj):
     if isinstance(obj, set):
         return str(obj)
     if isinstance(obj, netaddr.IPAddress):
         return str(obj)
     if isinstance(obj, netaddr.IPNetwork):
         return str(obj)
     if isinstance(obj, autonetkit.anm.OverlayNode):
         #TODO: add documentation about serializing anm nodes
         log.warning("%s is anm overlay_node. Use attribute rather than object in compiler." % obj)
         return str(obj)
     if isinstance(obj, autonetkit.plugins.ipv4.TreeNode):
         #TODO: add documentation about serializing anm nodes
         return str(obj)
     if isinstance(obj, autonetkit.compiler.Domain):
         #TODO: add documentation about serializing anm nodes
         return str(obj)
     try:
         if isinstance(obj, autonetkit.anm.overlay_edge):
             #TODO: add documentation about serializing anm nodes
             log.warning("%s is anm overlay_edge. Use attribute rather than object in compiler." % obj)
             return str(obj)
     except Exception, error:
         print("except ank_json line 34: "+str(obj))  # parfait le lien 13-14 cause probleme
         print("except ank_json line 35: "+str(error))
Example #19
    def __init__(self, host = None):
        try:
            if use_rabbitmq:
                log.debug("Using Rabbitmq with server %s " % host)
                self.connection = pika.BlockingConnection(pika.ConnectionParameters(
                    host = host))
                self.channel = self.connection.channel()
                self.channel.exchange_declare(exchange='www',
                        type='direct')
                self.publish = self.publish_pika
                self.publish_compressed = self.publish_compressed_pika

            if use_http_post:
                host = config.settings['Http Post']['server']
                port = config.settings['Http Post']['port']
                self.http_url = "http://%s:%s/publish" % (host, port)
                self.publish = self.publish_http_post
                self.publish_compressed = self.publish_http_post

            if not (use_rabbitmq or use_http_post):
                log.debug("Not using Rabbitmq or telnet")
                self.publish = self.publish_blank_stub
                self.publish_compressed = self.publish_blank_stub
        except socket.timeout: #TODO: check if these should move up to the use_rabbitmq block
            log.warning("Socket Timeout: not using Rabbitmq")
            self.publish = self.publish_blank_stub
            self.publish_compressed = self.publish_blank_stub
        except socket.error:
            log.warning("Socket Error: not using Rabbitmq")
            self.publish = self.publish_blank_stub
            self.publish_compressed = self.publish_blank_stub
Example #20
    def add_nodes_from(self, nbunch, retain=[], **kwargs):
        try:
            retain.lower()
            retain = [retain] # was a string, put into list
        except AttributeError:
            pass # already a list

        nbunch = list(nbunch)
        nodes_to_add = nbunch # retain for interface copying

        if len(retain):
            add_nodes = []
            for n in nbunch:
                data = dict( (key, n.get(key)) for key in retain)
                add_nodes.append( (n.node_id, data) )
            nbunch = add_nodes
        else:
            log.warning("Cannot add node ids directly to NIDB: must add overlay nodes")
        self._graph.add_nodes_from(nbunch, **kwargs)

        for node in nodes_to_add:
            #TODO: add an interface_retain for attributes also
            int_dict = {i.interface_id: {'type': i.type} for i in node.interfaces()}
            self._graph.node[node.node_id]["_interfaces"] = int_dict
Example #21
    def add_edges_from(self, ebunch, retain=None, **kwargs):
        """Used to copy edges from ANM -> NIDB
        Note: won't support (yet) copying from one NIDB to another
        #TODO: allow copying from one NIDB to another
        (check for DmNode as well as NmNode)

        To keep congruency, only allow copying edges from ANM
        can't add NIDB edges directly (node, node) or (port, port)
        workflow: if need this, create a new overlay and copy from there
        """
        from autonetkit.anm import NmEdge
        if not retain:
            retain = []
        try:
            retain.lower()
            retain = [retain]  # was a string, put into list
        except AttributeError:
            pass  # already a list

        # TODO: this needs to support parallel links
        for in_edge in ebunch:
            """Edge could be one of:
            - NmEdge - copied in and returned as a DmEdge
            """
            # This is less efficient than nx add_edges_from, but cleaner logic
            # TODO: could put the interface data into retain?
            data = {'_ports': {}}  # to retain
            ekey = 0

            # convert input to a NmEdge
            if isinstance(in_edge, NmEdge):
                edge = in_edge  # simple case
                ekey = edge.ekey
                src = edge.src.node_id
                dst = edge.dst.node_id

                # and copy retain data
                retain.append('_ports')
                # TODO: explicitly copy ports as raw_interfaces?
                data = dict((key, edge.get(key)) for key in retain)

                # this is the only case where copy across data
                # but want to copy attributes for all cases

            # TODO: add check that edge.src and edge.dst exist
            if not(src in self and dst in self):
                log.warning("Not adding edge, %s to %s, "
                            "src and/or dst not in overlay %s" % (src, dst, self))
                continue

            # TODO: warn if not multigraph and edge already exists - don't
            # add/clobber
            data.update(**kwargs)

            if self.is_multigraph():
                self._graph.add_edge(src, dst, key=ekey,
                                     attr_dict=dict(data))
            else:
                self._graph.add_edge(src, dst, attr_dict=dict(data))
Example #22
 def init_logging(self, my_type):
     try:
         self_id = str(self)
     except Exception, e:
         #TODO: log warning here
         import autonetkit.log as log
         log.warning("Unable to set per-element logger %s", e)
         self_id = ""
Example #23
    def _graph(self):
        """Return graph the node belongs to"""

        try:
            return self.anm.overlay_nx_graphs[self.overlay_id]
        except Exception, e:
            log.warning("Error accessing overlay %s for node %s: %s" %
                        (self.overlay_id, self.node_id, e))
Example #24
def validate(anm):
    tests_passed = True
    tests_passed = validate_ipv4(anm) and tests_passed

    if tests_passed:
        log.info("All validation tests passed.")
    else:
        log.warning("Some validation tests failed.")
Example #25
def manual_ipv6_infrastructure_allocation(anm):
    """Applies manual IPv6 allocation"""

    import netaddr

    g_ipv6 = anm["ipv6"]
    log.info("Using specified IPv6 infrastructure allocation")

    for node in g_ipv6.l3devices():
        for interface in node.physical_interfaces:
            if not interface["input"].is_bound:
                continue  # unbound interface
            ip_address = netaddr.IPAddress(interface["input"].ipv6_address)
            prefixlen = interface["input"].ipv6_prefixlen
            interface.ip_address = ip_address
            interface.prefixlen = prefixlen
            cidr_string = "%s/%s" % (ip_address, prefixlen)
            interface.subnet = netaddr.IPNetwork(cidr_string)

    broadcast_domains = [d for d in g_ipv6 if d.broadcast_domain]

    # TODO: allow this to work with specified ip_address/subnet as well as ip_address/prefixlen

    from netaddr import IPNetwork

    for coll_dom in broadcast_domains:
        connected_interfaces = [edge.dst_int for edge in coll_dom.edges()]
        cd_subnets = [IPNetwork("%s/%s" % (i.subnet.network, i.prefixlen)) for i in connected_interfaces]

        if len(cd_subnets) == 0:
            log.warning("Collision domain %s is not connected to any nodes" % coll_dom)
            continue

        try:
            assert len(set(cd_subnets)) == 1
        except AssertionError:
            mismatch_subnets = "; ".join("%s: %s/%s" % (i, i.subnet.network, i.prefixlen) for i in connected_interfaces)
            log.warning("Non matching subnets from collision domain %s: %s" % (coll_dom, mismatch_subnets))
        else:
            coll_dom.subnet = cd_subnets[0]  # take first entry

        # apply to remote interfaces

        for edge in coll_dom.edges():
            edge.dst_int.subnet = coll_dom.subnet

    # also need to form aggregated IP blocks (used for e.g. routing prefix
    # advertisement)
    # import autonetkit
    # autonetkit.update_http(anm)

    infra_blocks = {}
    for (asn, devices) in g_ipv6.groupby("asn").items():
        broadcast_domains = [d for d in devices if d.broadcast_domain]
        subnets = [cd.subnet for cd in broadcast_domains if cd.subnet is not None]  # only if subnet is set
        infra_blocks[asn] = netaddr.cidr_merge(subnets)

    g_ipv6.data.infra_blocks = infra_blocks
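The aggregated infra_blocks rely on netaddr.cidr_merge, which collapses adjacent or overlapping subnets into supernets; a minimal illustration (addresses are placeholders):

import netaddr

subnets = [netaddr.IPNetwork("2001:db8::/64"), netaddr.IPNetwork("2001:db8:0:1::/64")]
print(netaddr.cidr_merge(subnets))  # [IPNetwork('2001:db8::/63')]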
Example #26
 def interface(self, key):
     """Returns interface based on interface id"""
     try:
         if key.interface_id in self._interface_ids:
             return overlay_interface(self.anm, self.overlay_id, 
                     self.node_id, key.interface_id)
     except AttributeError:
         log.warning("Unable to find interface %s in %s " % (key, self))
         return None
Example #27
def copy_edge_attr_from(overlay_src, overlay_dst, attr):
    graph_src = unwrap_graph(overlay_src)
    graph_dst = unwrap_graph(overlay_dst)
    for src, dst in graph_src.edges():
        try:
            graph_dst[src][dst][attr] = graph_src[src][dst][attr]
        except KeyError:
            #TODO: check if because edge doesn't exist in dest, or because attribute doesn't exist in graph_src
            log.warning("Unable to copy edge attribute %s for (%s, %s) in %s" % (attr, src, dst, overlay_src))
Example #28
def most_frequent(iterable):
    """returns most frequent item in iterable"""
    # from http://stackoverflow.com/q/1518522
    g = itertools.groupby
    try:
        return max(g(sorted(iterable)),
                   key=lambda (x, v): (len(list(v)), -iterable.index(x)))[0]
    except ValueError, e:
        log.warning("Unable to calculate most_frequent, %s" % e)
        return None
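For reference, the helper returns the item with the highest count, breaks ties by earliest position in the input, and degrades to None on empty input:

# most_frequent(["a", "b", "b", "a"])  -> "a"   (counts tie, "a" appears first)
# most_frequent(["a", "b", "b"])       -> "b"
# most_frequent([])                    -> logs a warning and returns None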
Example #29
    def static_routes(self, node):
        node.static_routes_v4 = [] # initialise for case of no routes -> simplifies template logic
        node.static_routes_v6 = [] # initialise for case of no routes -> simplifies template logic
        if not self.anm['phy'].data.enable_routing:
            log.debug("Routing disabled, not configuring static routes for Ubuntu server %s" % node)
            return

        l3_conn_node = self.anm['l3_conn'].node(node)
        phy_node = self.anm['phy'].node(node)
        gateway_list = [n for n in l3_conn_node.neighbors()
            if n.is_router]
        if not len(gateway_list):
            log.warning("Server %s is not directly connected to any routers" % node)
            return  # no gateway to route via
        else:
            gateway = gateway_list[0] # choose first gateway
            if len(gateway_list) > 1:
                log.info("Server %s is multi-homed, using gateway %s" % (node, gateway))

        #TODO: warn if server has no neighbors in same ASN (either in design or verification steps)
        #TODO: need to check that servers don't have any direct ebgp connections

        gateway_edge_l3 = self.anm['l3_conn'].edge(node, gateway)
        server_interface = gateway_edge_l3.src_int
        server_interface_id = self.nidb.interface(server_interface).id

        gateway_interface = gateway_edge_l3.dst_int

        gateway_ipv4 = gateway_ipv6 = None
        if node.ip.use_ipv4:
            gateway_ipv4 = gateway_interface['ipv4'].ip_address
        if node.ip.use_ipv6:
            gateway_ipv6 = gateway_interface['ipv6'].ip_address

        #TODO: look at aggregation
        #TODO: catch case of ip addressing being disabled

        #TODO: handle both ipv4 and ipv6

        # IGP advertised infrastructure pool from same AS
        for infra_route in self.anm['ipv4'].data['infra_blocks'][phy_node.asn]:
            node.static_routes_v4.append({
                    "network": infra_route,
                    "gw": gateway_ipv4,
                    "interface": server_interface_id,
                    "description": "Route to infra subnet in local AS %s via %s" % (phy_node.asn, gateway),
                    })

        # eBGP advertised loopbacks in all (same + other) ASes
        for asn, asn_routes in self.anm['ipv4'].data['loopback_blocks'].items():
            for asn_route in asn_routes:
                node.static_routes_v4.append({
                    "network": asn_route,
                    "gw": gateway_ipv4,
                    "interface": server_interface_id,
                    "description": "Route to loopback subnet in AS %s via %s" % (asn, gateway),
                    })
Example #30
    def isis(self, node):
        super(IosBaseCompiler, self).isis(node)
        for interface in node.physical_interfaces():
            isis_int = self.anm['isis'].interface(interface)
            edges = isis_int.edges()
            if not isis_int.is_bound:
                # Could occur for VRFs
                log.debug("No ISIS connections for interface %s" % interface)
                continue

            # TODO: change this to be is_bound and is_multipoint
            if isis_int.multipoint:
                log.warning('Extended IOS config support not valid for multipoint ISIS connections on %s'
                            % interface)
                continue

                # TODO multipoint handling?

            edge = edges[0]
            dst = edge.dst
            if not dst.is_router():
                log.debug('Connection to non-router host not added to IGP')
                continue

            src_type = node.device_subtype
            dst_type = dst['phy'].device_subtype
            if src_type == 'IOS XRv':
                if dst_type == 'IOSv':
                    interface.isis.hello_padding_disable = True
                elif dst_type == 'CSR1000v':
                    interface.isis.hello_padding_disable = True
                elif dst_type == 'NX-OSv':
                    interface.isis.hello_padding_disable = True

            if src_type == 'IOSv':
                if dst_type == 'IOS XRv':
                    interface.isis.mtu = 1430

            if src_type == 'CSR1000v':
                if dst_type == 'IOS XRv':
                    interface.isis.mtu = 1430

            if src_type == 'NX-OSv':
                if dst_type == 'IOS XRv':
                    interface.mtu = 1430  # for all of interface
                    interface.isis.hello_padding_disable = True
                elif dst_type == 'IOSv':
                    interface.isis.hello_padding_disable = True
                elif dst_type == 'CSR1000v':
                    interface.isis.hello_padding_disable = True

            interface.isis_mtu = interface.isis.mtu
            interface.hello_padding_disable = \
                interface.isis.hello_padding_disable
Example #31
# YAML helpers from http://stackoverflow.com/questions/8640959

try:
    import yaml
except ImportError:
    import autonetkit.log as log
    log.warning('Yaml Parsing requires pyyaml installed')

from collections import OrderedDict

class quoted(str): pass

def quoted_presenter(dumper, data):
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')

class literal(str): pass

def literal_presenter(dumper, data):
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')

def ordered_dict_presenter(dumper, data):
    return dumper.represent_dict(data.items())

def add_representers():
    yaml.add_representer(quoted, quoted_presenter)
    yaml.add_representer(literal, literal_presenter)
    yaml.add_representer(OrderedDict, ordered_dict_presenter)
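A small usage sketch of these representers (assuming pyyaml is installed; the keys and values are placeholders):

add_representers()
doc = OrderedDict([
    ("hostname", quoted("r1.example.com")),    # rendered with double quotes
    ("motd", literal("line one\nline two\n")), # rendered as a literal block scalar
])
print(yaml.dump(doc, default_flow_style=False))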
Example #32
 def make_not_found(protocol, index, data):
     log.warning(
         "Make not installed on remote host %s. Please install make and retry."
         % host)
     return
Example #33
def build_ipv4(anm, infrastructure=True):
    """Builds IPv4 graph"""

    import autonetkit.plugins.ipv4 as ipv4
    import netaddr
    g_ipv4 = anm.add_overlay('ipv4')
    g_ip = anm['ip']
    g_in = anm['input']
    g_ipv4.add_nodes_from(g_ip, retain=['label', 'broadcast_domain'])  # retain if collision domain or not

    # Copy ASN attribute chosen for collision domains (used in alloc algorithm)

    ank_utils.copy_attr_from(g_ip, g_ipv4, 'asn',
                             nbunch=g_ipv4.nodes('broadcast_domain'))
    g_ipv4.add_edges_from(g_ip.edges())

    # check if ip ranges have been specified on g_in

    (infra_block, loopback_block, vrf_loopback_block) = \
        extract_ipv4_blocks(anm)

#TODO: don't present if using manual allocation
    block_message = "IPv4 allocations: Infrastructure: %s, Loopback: %s" % (infra_block, loopback_block)
    if any(i for n in g_ip.nodes() for i in
           n.loopback_interfaces() if not i.is_loopback_zero):
        block_message += " Secondary Loopbacks: %s" % vrf_loopback_block

    log.info(block_message)

    # See if IP addresses specified on each interface

    # do we need this still in the ANM? - different because of the input graph, but could map back to the self overlay first, then phy?
    l3_devices = [d for d in g_in if d.device_type in ('router', 'server')]

    manual_alloc_devices = set()
    for device in l3_devices:
        physical_interfaces = list(device.physical_interfaces())
        allocated = list(interface.ipv4_address for interface in physical_interfaces if interface.is_bound)
        if all(interface.ipv4_address for interface in
               physical_interfaces if interface.is_bound):
            manual_alloc_devices.add(device)  # add as a manual allocated device

    if manual_alloc_devices == set(l3_devices):
        manual_alloc_ipv4_infrastructure = True
    else:
        manual_alloc_ipv4_infrastructure = False
        # warn if any set
        allocated = []
        unallocated = []
        for node in l3_devices:
            allocated += sorted([i for i in node.physical_interfaces() if i.is_bound and i.ipv4_address])
            unallocated += sorted([i for i in node.physical_interfaces() if i.is_bound and not i.ipv4_address])

        #TODO: what if IP is set but not a prefix?
        if len(allocated):
            #TODO: if set is > 50% of nodes then list those that are NOT set
            log.warning("Using automatic IPv4 interface allocation. IPv4 interface addresses specified on interfaces %s will be ignored." % allocated)

    # TODO: need to set allocate_ipv4 by default in the readers

    if manual_alloc_ipv4_infrastructure:
        manual_ipv4_infrastructure_allocation(anm)
    else:
        ipv4.allocate_infra(g_ipv4, infra_block)

    if g_in.data.alloc_ipv4_loopbacks is False:
        manual_ipv4_loopback_allocation(anm)
    else:
        # Check if some nodes are allocated
        allocated = sorted([n for n in g_ip if n['input'].loopback_v4])
        unallocated = sorted([n for n in g_ip if not n['input'].loopback_v4])
        if len(allocated):
            log.warning("Using automatic IPv4 loopback allocation. IPv4 loopback addresses specified on nodes %s will be ignored." % allocated)
            #TODO: if set is > 50% of nodes then list those that are NOT set
        ipv4.allocate_loopbacks(g_ipv4, loopback_block)

    # TODO: need to also support secondary_loopbacks for IPv6
    # TODO: only call if secondaries are set

    ipv4.allocate_vrf_loopbacks(g_ipv4, vrf_loopback_block)

    # TODO: replace this with direct allocation to interfaces in ip alloc plugin
    #TODO: add option for nonzero interfaces on node - ie node.secondary_loopbacks

    for node in g_ipv4.routers():
        node.loopback_zero.ip_address = node.loopback
        node.loopback_zero.subnet = netaddr.IPNetwork("%s/32" % node.loopback)
        for interface in node.loopback_interfaces():
            if not interface.is_loopback_zero:
                interface.ip_address = interface.loopback #TODO: fix this inconsistency elsewhere
Example #34
def all_nodes_have_asn(anm):
    g_phy = anm['phy']
    for node in g_phy.l3devices():
        if node.asn is None:
            log.warning("No ASN set for physical device %s" % node)
Example #35
    def static_routes(self, node):
        # initialise for case of no routes -> simplifies template logic
        node.static_routes_v4 = []
        node.host_routes_v4 = []
        node.static_routes_v6 = []
        node.host_routes_v6 = []
        if not self.anm['phy'].data.enable_routing:
            log.info(
                'Routing disabled, not configuring static routes for Ubuntu server %s'
                % node)
            return

        if self.anm['phy'].node(node).dont_configure_static_routing:
            log.info('Static routing disabled for server %s' % node)
            return

        l3_node = self.anm['layer3'].node(node)
        gateway_list = [n for n in l3_node.neighbors() if n.is_router()]
        if not len(gateway_list):
            log.warning('Server %s is not directly connected to any routers' %
                        node)
            return
        elif len(gateway_list) > 1:
            log.info('Server %s is multi-homed: using gateways %s' %
                     (node, sorted(gateway_list)))

        # TODO: warn if server has no neighbors in same ASN (either in design or verification steps)
        # TODO: need to check that servers don't have any direct ebgp connections

        cloud_init_static_routes = []

        for gateway in sorted(gateway_list):
            gateway_edge_l3 = self.anm['layer3'].edge(node, gateway)
            server_interface = gateway_edge_l3.src_int
            server_interface_id = self.nidb.interface(server_interface).id

            gateway_interface = gateway_edge_l3.dst_int

            gateway_ipv4 = gateway_ipv6 = None
            node.add_stanza("ip")
            if node.ip.use_ipv4:
                gateway_ipv4 = gateway_interface['ipv4'].ip_address
            if node.ip.use_ipv6:
                gateway_ipv6 = gateway_interface['ipv6'].ip_address

            # TODO: look at aggregation
            # TODO: catch case of ip addressing being disabled

            # TODO: handle both ipv4 and ipv6

            # IGP advertised infrastructure pool from same AS
            static_routes_v4 = []
            host_routes_v4 = []
            for (asn,
                 asn_routes) in self.anm['ipv4'].data['infra_blocks'].items():

                # host_routes_v4
                for infra_route in asn_routes:
                    route_entry = {
                        'network': infra_route,
                        'prefix': infra_route.network,
                        'gw': gateway_ipv4,
                        'interface': server_interface_id,
                        'description': 'Route to infra subnet in AS %s via %s'
                            % (asn, gateway),
                    }
                    route_entry = ConfigStanza(**route_entry)
                    if infra_route.prefixlen == 32:
                        host_routes_v4.append(route_entry)
                    else:
                        static_routes_v4.append(route_entry)

            # eBGP advertised loopbacks in all (same + other) ASes

            for (asn, asn_routes
                 ) in self.anm['ipv4'].data['loopback_blocks'].items():
                for asn_route in asn_routes:
                    route_entry = {
                        'network': asn_route,
                        'prefix': asn_route.network,
                        'gw': gateway_ipv4,
                        'interface': server_interface_id,
                        'description': 'Route to loopback subnet in AS %s via %s' \
                            % (asn, gateway),
                        }
                    route_entry = ConfigStanza(**route_entry)
                    if asn_route.prefixlen == 32:
                        host_routes_v4.append(route_entry)
                    else:
                        static_routes_v4.append(route_entry)

            # TODO: combine the above logic into single step rather than creating dict then formatting with it

            for entry in static_routes_v4:
                formatted = 'route add -net %s gw %s dev %s' \
                    % (entry.network, entry.gw, entry.interface)
                cloud_init_static_routes.append(formatted)
            for entry in host_routes_v4:
                formatted = 'route add -host %s gw %s dev %s' \
                    % (entry.prefix, entry.gw, entry.interface)
                cloud_init_static_routes.append(formatted)

        node.add_stanza("cloud_init")
        node.cloud_init.static_routes = cloud_init_static_routes
Example #36
    def interfaces(self, node):
        node.interfaces = []

        node.loopback_zero.id = self.lo_interface
        node.loopback_zero.description = 'Loopback'
        phy_node = self.anm['phy'].node(node)
        node.loopback_zero.custom_config = phy_node.loopback_zero.custom_config

        if node.ip.use_ipv4:
            ipv4_node = self.anm['ipv4'].node(node)
            node.loopback_zero.ipv4_address = ipv4_node.loopback
            node.loopback_zero.ipv4_subnet = node.loopback_subnet

        #TODO: be consistent with the cidr name so it can be used in the cisco ios xr templates
        #if node.ip.use_ipv6:
        #ipv6_node = self.anm['ipv6'].node(node)
        #node.loopback_zero.ipv6_address = ipv6_node.loopback
        #node.loopback_zero.ipv6_subnet = node.loopback_subnet

        for interface in node.physical_interfaces():
            phy_int = self.anm['phy'].interface(interface)
            interface.physical = True

            # TODO: allocate ID in platform compiler

            if not phy_int:
                # for instance if added as management interface to nidb in compile
                continue

            interface.custom_config = phy_int.custom_config

            interface.description = phy_int.description
            remote_edges = phy_int.edges()
            if len(remote_edges):
                interface.description = 'to %s' \
                    % remote_edges[0].dst.label

            # TODO: fix the description to use mapped label

            if node.ip.use_ipv4:
                ipv4_int = phy_int['ipv4']
                if ipv4_int.is_bound:

                    # interface is connected

                    interface.use_ipv4 = True
                    interface.ipv4_address = ipv4_int.ip_address
                    interface.ipv4_subnet = ipv4_int.subnet
                    interface.ipv4_cidr = \
                        sn_preflen_to_network(interface.ipv4_address,
                            interface.ipv4_subnet.prefixlen)

            if node.ip.use_ipv6:
                ipv6_int = phy_int['ipv6']
                if ipv6_int.is_bound:

                    # interface is connected

                    interface.use_ipv6 = True

                    # TODO: for consistency, make ipv6_cidr

                    interface.ipv6_subnet = ipv6_int.subnet
                    try:
                        interface.ipv6_address = \
                            sn_preflen_to_network(ipv6_int.ip_address,
                                interface.ipv6_subnet.prefixlen)
                    except AttributeError:
                        log.warning("Unable to format IPv6 address for interface %s" % interface)

        for interface in node.loopback_interfaces():

            # TODO: check if nonzero is different to __eq__

            if interface == node.loopback_zero:
                continue
            else:
                phy_int = self.anm['phy'].interface(interface)
                if node.ip.use_ipv4:
                    ipv4_int = phy_int['ipv4']
                    interface.use_ipv4 = True

                    interface.ipv4_address = ipv4_int.loopback
                    interface.ipv4_subnet = node.loopback_subnet
                    interface.ipv4_cidr = \
                        sn_preflen_to_network(interface.ipv4_address,
                            interface.ipv4_subnet.prefixlen)

                if node.ip.use_ipv6:
                    ipv6_int = phy_int['ipv6']
                    interface.use_ipv6 = True

                    # TODO: for consistency, make ipv6_cidr
                    # interface.ipv6_subnet = ipv6_int.loopback # TODO: do we need for consistency?

                    interface.ipv6_address = \
                        sn_preflen_to_network(ipv6_int.loopback, 128)

                # secondary loopbacks
                # TODO: check why vrf names not showing up for all
                # print vrf_interface.vrf_name

            continue
Example #37
    def assign_management_interfaces(self):
        g_phy = self.anm['phy']
        lab_topology = self.nidb.topology[self.host]
        oob_management_ips = {}

        #TODO: make this a separate function
        from netaddr import IPNetwork, IPRange

        mgmt_address_start = g_phy.data.mgmt_address_start 
        mgmt_address_end = g_phy.data.mgmt_address_end 
        mgmt_prefixlen = int(g_phy.data.mgmt_prefixlen)

        #TODO: need to check if range is insufficient
        mgmt_ips = (IPRange(mgmt_address_start, mgmt_address_end))
        mgmt_ips_iter = iter(mgmt_ips) # to iterate over

        mgmt_address_start_network = IPNetwork(mgmt_address_start) # as /32 for supernet
        mgmt_address_end_network = IPNetwork(mgmt_address_end) # as /32 for supernet
        # retrieve the first supernet, as this is the range requested. subsequent are the subnets
        start_subnet = mgmt_address_start_network.supernet(mgmt_prefixlen)[0] # retrieve first
        end_subnet = mgmt_address_end_network.supernet(mgmt_prefixlen)[0] # retrieve first

        try: # validation
            assert(start_subnet == end_subnet)
            log.debug("Verified: Cisco management subnets match")
        except AssertionError:
            log.warning("Error: Cisco management subnets do not match: %s and %s, using start subnet"
                    % (start_subnet, end_subnet))

        mgmt_subnet = start_subnet
        hosts_to_allocate = sorted(self.nidb.nodes('is_router', host=self.host))
        dhcp_subtypes = {"os"}
        dhcp_hosts = [h for h in hosts_to_allocate if h.device_subtype in dhcp_subtypes]
        non_dhcp_hosts = [h for h in hosts_to_allocate if h.device_subtype not in dhcp_subtypes]

        try: # validation
            assert(len(mgmt_ips) >= len(non_dhcp_hosts))
            log.debug("Verified: Cisco management IP range is sufficient size %s for %s hosts"
                    % (len(mgmt_ips), len(non_dhcp_hosts)))
        except AssertionError:
            log.warning("Error: Cisco management IP range is insufficient size %s for %s hosts"
                    % (len(mgmt_ips), len(non_dhcp_hosts)))
            # TODO: need to use default range
            return

        for nidb_node in hosts_to_allocate:
            for interface in nidb_node.physical_interfaces:
                if interface.management:
                    interface.description = "OOB Management"
                    interface.physical = True
                    if nidb_node in dhcp_hosts:
                        interface.use_dhcp = True
                        oob_management_ips[str(nidb_node)] = "dhcp"
                    else:
                        ipv4_address = mgmt_ips_iter.next()
                        interface.ipv4_address = ipv4_address
                        interface.ipv4_subnet = mgmt_subnet
                        interface.ipv4_cidr = sn_preflen_to_network(ipv4_address, mgmt_prefixlen)
                        oob_management_ips[str(nidb_node)] = ipv4_address

        lab_topology.oob_management_ips = oob_management_ips
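The range and supernet handling above is plain netaddr; a reduced sketch of the same calls (the addresses are placeholders):

from netaddr import IPNetwork, IPRange

mgmt_ips = IPRange("192.168.0.10", "192.168.0.50")
print(len(mgmt_ips))                               # 41 usable management addresses
print(IPNetwork("192.168.0.10").supernet(24)[0])   # 192.168.0.0/24, the containing subnet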
Example #38
def build_ibgp_vpn_v4(anm):
    """Based on the ibgp_v4 hierarchy rules.
    Exceptions:
    1. Remove links to (PE, RRC) nodes

    CE nodes are excluded from RR hierarchy ibgp creation through pre-process step

    """
    # TODO: remove the bgp layer and have just ibgp and ebgp
    # TODO: build from design rules, currently just builds from ibgp links in
    # bgp layer
    g_bgp = anm['bgp']
    g_ibgp_v4 = anm['ibgp_v4']
    g_vrf = anm['vrf']
    g_ibgp_vpn_v4 = anm.add_overlay("ibgp_vpn_v4", directed=True)

    v6_vrf_nodes = [n for n in g_vrf
                    if n.vrf is not None and n['phy'].use_ipv6 is True]
    if len(v6_vrf_nodes):
        message = ", ".join(str(s) for s in v6_vrf_nodes)
        log.warning("This version of AutoNetkit does not support IPv6 MPLS VPNs. "
                    "The following nodes have IPv6 enabled but will not have an associated IPv6 MPLS VPN topology created: %s" % message)

    ibgp_v4_nodes = list(g_ibgp_v4.nodes())
    pe_nodes = set(g_vrf.nodes(vrf_role="PE"))
    pe_rrc_nodes = {n for n in ibgp_v4_nodes if
                    n in pe_nodes and n.ibgp_role == "RRC"}
    # TODO: warn if pe_rrc_nodes?
    ce_nodes = set(g_vrf.nodes(vrf_role="CE"))

    if len(pe_nodes) == len(ce_nodes) == len(pe_rrc_nodes) == 0:
        # no vrf nodes to connect
        return

    # TODO: extend this to only connect nodes which are connected in VRFs, so
    # don't set to others


    ibgp_vpn_v4_nodes = (n for n in ibgp_v4_nodes
                         if n not in ce_nodes)
    g_ibgp_vpn_v4.add_nodes_from(ibgp_vpn_v4_nodes, retain=["ibgp_role"])
    g_ibgp_vpn_v4.add_edges_from(g_ibgp_v4.edges(), retain="direction")

    for node in g_ibgp_vpn_v4:
        if node.ibgp_role in ("HRR", "RR"):
            node.retain_route_target = True

    ce_edges = [e for e in g_ibgp_vpn_v4.edges()
                if e.src in ce_nodes or e.dst in ce_nodes]

    # mark ibgp direction
    ce_pe_edges = []
    pe_ce_edges = []
    for edge in g_ibgp_vpn_v4.edges():
        if (edge.src.vrf_role, edge.dst.vrf_role) == ("CE", "PE"):
            edge.direction = "up"
            edge.vrf = edge.src.vrf
            ce_pe_edges.append(edge)
        elif (edge.src.vrf_role, edge.dst.vrf_role) == ("PE", "CE"):
            edge.direction = "down"
            edge.vrf = edge.dst.vrf
            pe_ce_edges.append(edge)

    # TODO: Document this
    g_ibgpv4 = anm['ibgp_v4']
    g_ibgpv6 = anm['ibgp_v6']
    g_ibgpv4.remove_edges_from(ce_edges)
    g_ibgpv6.remove_edges_from(ce_edges)
    g_ibgpv4.add_edges_from(ce_pe_edges, retain=["direction", "vrf"])
    g_ibgpv4.add_edges_from(pe_ce_edges, retain=["direction", "vrf"])
    g_ibgpv6.add_edges_from(ce_pe_edges, retain=["direction", "vrf"])
    g_ibgpv6.add_edges_from(pe_ce_edges, retain=["direction", "vrf"])
    for edge in pe_ce_edges:
        # mark as exclude so don't include in standard ibgp config stanzas
        if g_ibgpv4.has_edge(edge):
            edge['ibgp_v4'].exclude = True
        if g_ibgpv6.has_edge(edge):
            edge['ibgp_v6'].exclude = True

# legacy
    g_bgp = anm['bgp']
    g_bgp.remove_edges_from(ce_edges)
    g_bgp.add_edges_from(ce_pe_edges, retain=["direction", "vrf", "type"])
    g_bgp.add_edges_from(pe_ce_edges, retain=["direction", "vrf", "type"])
Example #39
                if input_filemonitor.next():
                    rebuild = True
                # if build_filemonitor.next():
                    #reload_build = True
                    #rebuild = True

                if rebuild:
                    try:
                        log.info("Input graph updated, recompiling network")
                        with open(options.file, "r") as fh:
                            input_string = fh.read()  # read updates
                        manage_network(input_string,
                                       timestamp, build_options, reload_build)
                        log.info("Monitoring for updates...")
                    except Exception, e:
                        log.warning("Unable to build network %s" % e)
                        traceback.print_exc()

        except KeyboardInterrupt:
            log.info("Exiting")


def create_nidb(anm):
    nidb = NIDB()
    g_phy = anm['phy']
    g_ip = anm['ip']
    g_graphics = anm['graphics']
    nidb.add_nodes_from(
        g_phy, retain=['label', 'host', 'platform', 'Network', 'update', 'asn'])

    cd_nodes = [n for n in g_ip.nodes(
Example #40
            if visualise:
                # log.info("Visualising network")
                # import autonetkit
                # autonetkit.update_vis(anm)
                pass

        if not compile and visualise:
            autonetkit.update_vis(anm)
            pass

        if validate:
            import autonetkit.ank_validate
            try:
                autonetkit.ank_validate.validate(anm)
            except Exception, e:
                log.warning('Unable to validate topologies: %s' % e)
                log.debug('Unable to validate topologies', exc_info=True)

    if compile:
        if archive:
            anm.save()
        nidb = compile_network(anm)
        autonetkit.update_vis(anm, nidb)

        #autonetkit.update_vis(anm, nidb)
        log.debug('Sent ANM to web server')
        if archive:
            nidb.save()

        # render.remove_dirs(["rendered"])
Example #41
    def compile_devices(self):
        import re
        g_phy = self.anm['phy']

        to_memory, use_mgmt_interfaces, dst_folder = self._parameters()

        if use_mgmt_interfaces:
            log.debug("Allocating VIRL management interfaces")
        else:
            log.debug("Not allocating VIRL management interfaces")

        pc_only_config = False
        vxlan_global_config = None
        if g_phy.data.vxlan_global_config is not None:
            vxlan_global_config = g_phy.data.vxlan_global_config
        if g_phy.data.pc_only is not None:
            pc_only_config = g_phy.data.pc_only
        use_ignite_pool = False
        if g_phy.data.mgmt_block is not None:
            mgmt_address_block = netaddr.IPNetwork(g_phy.data.mgmt_block).iter_hosts()
            mgmt_address_mask = (netaddr.IPNetwork(g_phy.data.mgmt_block)).netmask
        else:
            pool = g_phy.data.ignite
            if pool is not None:
                if 'mgmt_pool_id' in pool and pool['mgmt_pool_id'] is not None:
                    mgmt_pool_id = pool['mgmt_pool_id']
                    use_ignite_pool = True

        if g_phy.data.vpcid_block is not None:
            vpc_re = "([0-9]+)(-)([0-9]+)"
            #vpc_id_start = int(re.search(vpc_re, g_phy.data.vpcid_block).group(1))
            #vpc_id_end = int(re.search(vpc_re, g_phy.data.vpcid_block).group(3))
            #vpc_id_range = vpc_id_end-vpc_id_start

        vpc_id_range = 1000
        # TODO: need to copy across the interface name from edge to the interface

        # TODO: merge common router code, so end up with three loops: routers, ios
        # routers, ios_xr routers

        # TODO: Split out each device compiler into own function

        # TODO: look for unused code paths here - especially for interface
        # allocation

        # store autonetkit_cisco version
        log.debug("Generating device configurations")
        from pkg_resources import get_distribution

        # Copy across indices for external connectors (e.g may want to copy
        # configs)
        external_connectors = [n for n in g_phy
                               if n.host == self.host and n.device_type == "external_connector"]
        for phy_node in external_connectors:
            DmNode = self.nidb.node(phy_node)
            DmNode.indices = phy_node.indices

        g_input = self.anm['input']

        managed_switches = [n for n in g_phy.switches()
                            if n.host == self.host
                            and n.device_subtype == "managed"]
        for phy_node in managed_switches:
            DmNode = self.nidb.node(phy_node)
            DmNode.indices = phy_node.indices

        for phy_node in g_phy.l3devices(host=self.host):
            loopback_ids = self.loopback_interface_ids()
            # allocate loopbacks to routes (same for all ios variants)
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            DmNode.indices = phy_node.indices


            DmNode.add_stanza("syslog")
            DmNode.add_stanza('mgmt')
            if use_ignite_pool:
                mgmt_ip = allocate_pool_entry(mgmt_pool_id, phy_node.name, None)
                pos_mask = mgmt_ip.find('/')
                if pos_mask != -1:
                    network = mgmt_ip[:pos_mask]
                    mask = int(mgmt_ip[pos_mask+1:])
                else:
                    network = mgmt_ip  # no prefix length supplied, assume a host address
                    mask = 32
                DmNode.mgmt.ip = network + '/' + str(mask)
                DmNode.mgmt.mask = ""
            elif g_phy.data.mgmt_block is not None:
                DmNode.mgmt.ip = mgmt_address_block.next()
                DmNode.mgmt.mask = mgmt_address_mask

#           for node_data in phy_node._graph.node:

            for interface in DmNode.loopback_interfaces():
                if interface != DmNode.loopback_zero:
                    interface.id = loopback_ids.next()

            # numeric ids
            numeric_int_ids = self.numeric_interface_ids()
            for interface in DmNode.physical_interfaces():
                phy_numeric_id = phy_node.interface(interface).numeric_id
                if phy_numeric_id is None:
                    # TODO: remove numeric ID code
                    interface.numeric_id = numeric_int_ids.next()
                else:
                    interface.numeric_id = int(phy_numeric_id)

                phy_specified_id = phy_node.interface(interface).specified_id
                if phy_specified_id is not None:
                    interface.id = phy_specified_id


            # numeric ids
            numeric_po_ids = self.numeric_portchannel_ids()
            for interface in DmNode.portchannel_interfaces():
                po_interface = phy_node.interface(interface)
                interface.numeric_id = int(
                    po_interface.id[po_interface.id.rfind('_') + 1:])

                po_specified_id = phy_node.interface(interface).specified_id
                if po_specified_id is not None:
                    interface.id = po_specified_id
                interface.pc = True
                for po_mem_int in DmNode.physical_interfaces():
                    po_mem_interface = phy_node.interface(po_mem_int)
                    if po_mem_interface.id in po_interface.members:
                        po_mem_int.channel_group = interface.numeric_id
                        po_interface_int = po_interface._interface
                        if 'subcat_prot' in po_interface_int:
                            if po_interface.subcat_prot == "vpc":
                                po_mem_int.keepalive_port_vpc = True  # is a member port of VPC peer link
                                interface.virt_port_channel = True  # is a VPC interface
                                DmNode.add_stanza('vpc')
                            if po_interface.subcat_prot == "vpc-member":
                                po_mem_int.member_port_vpc = True
                                interface.vpc_member_id = interface.numeric_id
                                interface.member_vpc = True

        ##adding rp's
        if vxlan_global_config is not None and 'rendezvous_point' in vxlan_global_config:
            for rp in vxlan_global_config['rendezvous_point']:
                for phy_node in g_phy.l3devices(host=self.host):
                    DmNode = self.nidb.node(phy_node)
                    if phy_node.id == rp['node_id']:
                        rp['node_id'] = DmNode.interfaces[0]._port['ipv4_address']

            #from autonetkit.compilers.device.ubuntu import UbuntuCompiler
        #from autonetkit_cisco.compilers.device.ubuntu import UbuntuCompiler

        #ubuntu_compiler = UbuntuCompiler(self.nidb, self.anm)
        for phy_node in g_phy.servers(host=self.host):
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            DmNode.add_stanza("ip")

            #interface.id = self.numeric_to_interface_label_linux(interface.numeric_id)
            # print "numeric", interface.numeric_id, interface.id
            DmNode.ip.use_ipv4 = phy_node.use_ipv4
            DmNode.ip.use_ipv6 = phy_node.use_ipv6

            # TODO: clean up interface handling
            numeric_int_ids = self.numeric_interface_ids()
            for interface in DmNode.physical_interfaces():
                phy_numeric_id = phy_node.interface(interface).numeric_id
                if phy_numeric_id is None:
                    # TODO: remove numeric ID code
                    interface.numeric_id = numeric_int_ids.next()
                else:
                    interface.numeric_id = int(phy_numeric_id)

                phy_specified_id = phy_node.interface(interface).specified_id
                if phy_specified_id is not None:
                    interface.id = phy_specified_id

            # numeric ids
            numeric_po_ids = self.numeric_portchannel_ids()
            for interface in DmNode.portchannel_interfaces():
                phy_numeric_id = phy_node.interface(interface).numeric_id
                if phy_numeric_id is None:
                    # TODO: remove numeric ID code
                    interface.numeric_id = numeric_po_ids.next()
                else:
                    interface.numeric_id = int(phy_numeric_id)

                phy_specified_id = phy_node.interface(interface).specified_id
                if phy_specified_id is not None:
                    interface.id = phy_specified_id

                # TODO: make this part of the base device compiler, which
                # server/router inherits

            # note these are physical interfaces; configure after previous
            # config steps
            if use_mgmt_interfaces:
                mgmt_int = DmNode.add_interface(
                    management=True, description="eth0")
                mgmt_int_id = "eth0"
                mgmt_int.id = mgmt_int_id

                # render route config
            DmNode = self.nidb.node(phy_node)
            #ubuntu_compiler.compile(DmNode)

            if not phy_node.dont_configure_static_routing:
                DmNode.render.template = os.path.join(
                    "templates", "linux", "static_route.mako")
                if to_memory:
                    DmNode.render.to_memory = True
                else:
                    DmNode.render.dst_folder = dst_folder
                    DmNode.render.dst_file = "%s.conf" % naming.network_hostname(
                        phy_node)

        # TODO: refactor out common logic

        ios_compiler = IosClassicCompiler(self.nidb, self.anm)
        host_routers = g_phy.routers(host=self.host)
        ios_nodes = (n for n in host_routers if n.syntax in ("ios", "ios_xe"))
        for phy_node in ios_nodes:
            if (phy_node.devsubtype == "core" or phy_node.devsubtype == "border"):
                continue
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")

            DmNode.render.template = os.path.join("templates", "ios.mako")
            to_memory = False

            if to_memory:
                DmNode.render.to_memory = True
            else:
                DmNode.render.dst_folder = dst_folder
                DmNode.render.dst_file = "%s.conf" % naming.network_hostname(
                    phy_node)

            # TODO: write function that assigns interface number excluding
            # those already taken

            # Assign interfaces
            # port-channel labels use the IOS mapping for all subtypes, so bind
            # it up front so it is defined in the port-channel loop below
            numeric_to_portchannel_interface_label = \
                self.numeric_to_portchannel_interface_label_ios
            if phy_node.device_subtype == "IOSv":
                int_ids = self.interface_ids_ios()
                numeric_to_interface_label = self.numeric_to_interface_label_ios
            elif phy_node.device_subtype == "CSR1000v":
                int_ids = self.interface_ids_csr1000v()
                numeric_to_interface_label = self.numeric_to_interface_label_ra
            else:
                # default if no subtype specified
                # TODO: need to set default in the load module
                log.warning("Unexpected subtype %s for %s" %
                            (phy_node.device_subtype, phy_node))
                int_ids = self.interface_ids_ios()
                numeric_to_interface_label = self.numeric_to_interface_label_ios

            if use_mgmt_interfaces:
                if phy_node.device_subtype == "IOSv":
                    # TODO: make these configured in the internal config file
                    # for platform/device_subtype keying
                    mgmt_int_id = "GigabitEthernet0/0"
                if phy_node.device_subtype == "CSR1000v":
                    mgmt_int_id = "GigabitEthernet1"

            for interface in DmNode.physical_interfaces():
                # TODO: use this code block once for all routers
                if not interface.id:
                    interface.id = numeric_to_interface_label(
                        interface.numeric_id)
                elif interface.id[0] in ('e', 'E'):
                    port_re = "([a-zA-Z]+)([0-9]+/[0-9]+)"
                    match = re.search(port_re, interface.id)
                    interface.id = "%s %s" % (match.group(1), match.group(2))

            for interface in DmNode.portchannel_interfaces():
                # TODO: use this code block once for all routers
                #if not interface.id:
                interface.id = numeric_to_portchannel_interface_label(
                        interface.numeric_id)

            ios_compiler.compile(DmNode)
            if use_mgmt_interfaces:
                mgmt_int = DmNode.add_interface(management=True)
                mgmt_int.id = mgmt_int_id

        nxos_compiler = NxOsCompiler(self.nidb, self.anm)
        for phy_node in g_phy.routers(host=self.host, syntax='nx_os'):
            if (phy_node.devsubtype == "core" or phy_node.devsubtype == "border"):
                continue
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            if pc_only_config:
                DmNode.render.template = os.path.join("templates", "nexus_os_pc_only.mako")
            else:
                DmNode.render.template = os.path.join("templates", "nexus_os.mako")
            #if to_memory:
            #    DmNode.render.to_memory = True
            #else:
            DmNode.render.dst_folder = dst_folder
            DmNode.render.dst_file = "%s.conf" % phy_node.name

            # Assign interfaces
            int_ids = self.interface_ids_nxos()
            numeric_to_interface_label = self.numeric_to_interface_label_nxos
            numeric_to_portchannel_interface_label = self.numeric_to_portchannel_interface_label_nxos
            for interface in DmNode.physical_interfaces():
                if not interface.id:
                    interface.id = self.numeric_to_interface_label_nxos(
                        interface.numeric_id)
                elif interface.id[0] in ('e', 'E'):
                    port_re = "([a-zA-Z]+)([0-9]+/[0-9]+)"
                    match = re.search(port_re, interface.id)
                    interface.id = "%s %s" % (match.group(1), match.group(2))
                else:
                    interface.id = 'Ethernet ' + interface.id


            for interface in DmNode.portchannel_interfaces():
                # TODO: use this code block once for all routers
                #if not interface.id:
                interface.id = numeric_to_portchannel_interface_label(
                        interface.numeric_id)

            DmNode.supported_features = ConfigStanza(
                mpls_te=False, mpls_oam=False, vrf=False)

            nxos_compiler.compile(DmNode)
            # TODO: make this work other way around

            if use_mgmt_interfaces:
                mgmt_int_id = "mgmt0"
                mgmt_int = DmNode.add_interface(management=True)
                mgmt_int.id = mgmt_int_id

        staros_compiler = StarOsCompiler(self.nidb, self.anm)
        for phy_node in g_phy.routers(host=self.host, syntax='StarOS'):
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            DmNode.render.template = os.path.join("templates", "staros.mako")
            if to_memory:
                DmNode.render.to_memory = True
            else:
                DmNode.render.dst_folder = dst_folder
                DmNode.render.dst_file = "%s.conf" % naming.network_hostname(
                    phy_node)

            # Assign interfaces
            int_ids = self.interface_ids_nxos()
            for interface in DmNode.physical_interfaces():
                if not interface.id:
                    interface.id = self.numeric_to_interface_label_star_os(
                        interface.numeric_id)

            staros_compiler.compile(DmNode)
            # TODO: make this work other way around

            if use_mgmt_interfaces:
                mgmt_int_id = "ethernet 1/1"
                mgmt_int = DmNode.add_interface(management=True)
                mgmt_int.id = mgmt_int_id
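
# Illustrative sketch: a standalone version of the management-address handling
# used for DmNode.mgmt above, splitting an "address/prefixlen" string and
# defaulting to /32 when no prefix is present. Names and values here are
# hypothetical, not taken from the project.
import netaddr

def split_mgmt_address(mgmt_ip):
    """Return (address, prefixlen) for an 'a.b.c.d[/len]' string."""
    if '/' in mgmt_ip:
        address, prefixlen = mgmt_ip.split('/', 1)
        return address, int(prefixlen)
    return mgmt_ip, 32  # no prefix supplied: treat as a single host

address, prefixlen = split_mgmt_address("192.0.2.10/24")
netmask = netaddr.IPNetwork("%s/%s" % (address, prefixlen)).netmask
# netmask is IPAddress('255.255.255.0')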
Example #42
0
#TODO: make sure is an abspath here so don't wipe user directory!!!
    if render_output_dir and not os.path.isdir(render_output_dir):
        try:
            os.makedirs(render_output_dir)
        except OSError, e:
            if e.strerror == "File exists":
                pass  # created by another process, safe to ignore
            else:
                raise e

    if render_template_file:
        try:
            render_template = TEMPLATE_LOOKUP.get_template(
                render_template_file)
        except SyntaxException, error:
            log.warning("Unable to render %s: "
                        "Syntax error in template: %s" % (node, error))
            return

        if node.render.dst_file:
            dst_file = os.path.join(render_output_dir, node.render.dst_file)
            with open(dst_file, 'wb') as dst_fh:
                try:
                    dst_fh.write(
                        render_template.render(
                            node=node,
                            version_banner=version_banner,
                            date=date,
                        ))
                except KeyError, error:
                    log.warning("Unable to render %s:"
                                " %s not set" % (node, error))
Example #43
0
                % (e, infra_block))

    try:
        loopback_subnet = g_in.data.ipv4_loopback_subnet
        loopback_prefix = g_in.data.ipv4_loopback_prefix
        loopback_block = sn_preflen_to_network(loopback_subnet,
                                               loopback_prefix)
    except Exception, e:
        loopback_block = IPNetwork('%s/%s' %
                                   (ipv4_defaults["loopback_subnet"],
                                    ipv4_defaults["loopback_prefix"]))
        if loopback_subnet is None or loopback_prefix is None:
            log.debug('Using default IPv4 loopback_subnet %s' % loopback_block)
        else:
            log.warning(
                'Unable to obtain IPv4 loopback_subnet from input graph: %s, using default %s'
                % (e, loopback_block))

    try:
        vrf_loopback_subnet = g_in.data.ipv4_vrf_loopback_subnet
        vrf_loopback_prefix = g_in.data.ipv4_vrf_loopback_prefix
        vrf_loopback_block = sn_preflen_to_network(vrf_loopback_subnet,
                                                   vrf_loopback_prefix)
    except Exception, e:
        vrf_loopback_block = IPNetwork('%s/%s' %
                                       (ipv4_defaults["vrf_loopback_subnet"],
                                        ipv4_defaults["vrf_loopback_prefix"]))
        if vrf_loopback_subnet is None or vrf_loopback_prefix is None:
            log.debug('Using default IPv4 vrf_loopback_subnet %s' %
                      vrf_loopback_block)
        else:
Example #44
0
    def compile(self):
        settings = autonetkit.config.settings
        to_memory = settings['Compiler']['Cisco']['to memory']
#TODO: need to copy across the interface name from edge to the interface
        g_phy = self.anm['phy']
        use_mgmt_interfaces = g_phy.data.mgmt_interfaces_enabled 
        if use_mgmt_interfaces:
            log.info("Allocating management interfaces for Cisco")
        else:
            log.info("Not allocating management interfaces for Cisco")

        log.info("Compiling Cisco for %s" % self.host)
        ios_compiler = IosClassicCompiler(self.nidb, self.anm)
        now = datetime.now()
        if settings['Compiler']['Cisco']['timestamp']:
            timestamp = now.strftime("%Y%m%d_%H%M%S_%f")
            dst_folder = os.path.join("rendered", self.host, timestamp, "cisco")
        else:
            dst_folder = os.path.join("rendered", self.host, "cisco")
# TODO: merge common router code, so end up with three loops: routers, ios
# routers, ios2 routers

        # store autonetkit_cisco version
        from pkg_resources import get_distribution
        ank_cisco_version = get_distribution("autonetkit_cisco").version

        for phy_node in g_phy.nodes('is_router', host=self.host):
            loopback_ids = self.loopback_interface_ids()
            # allocate loopbacks to routes (same for all ios variants)
            nidb_node = self.nidb.node(phy_node)
            nidb_node.ank_cisco_version = ank_cisco_version

            for interface in nidb_node.loopback_interfaces:
                if interface != nidb_node.loopback_zero:
                    interface.id = loopback_ids.next()

        for phy_node in g_phy.nodes('is_router', host=self.host, syntax='ios'):
            specified_int_names = phy_node.specified_int_names
            nidb_node = self.nidb.node(phy_node)
            nidb_node.render.template = os.path.join("templates","ios.mako")
            if to_memory:
                nidb_node.render.to_memory = True
            else:
                nidb_node.render.dst_folder = dst_folder
                nidb_node.render.dst_file = "%s.conf" % naming.network_hostname(
                    phy_node)

            # Assign interfaces
            if phy_node.device_subtype == "os":
                int_ids = self.interface_ids_ios()
            elif phy_node.device_subtype == "ra":
                int_ids = self.interface_ids_ra()
            else:
                # default if no subtype specified
                #TODO: need to set default in the load module
                log.warning("Unexpected subtype %s" % phy_node.device_subtype)
                int_ids = self.interface_ids_ios()
                
            if use_mgmt_interfaces:
                mgmt_int_id = int_ids.next()  # 0/0 is used for management ethernet

            for interface in nidb_node.physical_interfaces:
                if specified_int_names:
                    interface.id = phy_node.interface(interface).name
                # TODO: need to determine if interface name already specified
                else:
                    interface.id = int_ids.next()

            ios_compiler.compile(nidb_node)
            if use_mgmt_interfaces:
                mgmt_int = nidb_node.add_interface(management = True)
                mgmt_int.id = mgmt_int_id

        ios2_compiler = Ios2Compiler(self.nidb, self.anm)
        for phy_node in g_phy.nodes('is_router', host=self.host, syntax='ios2'):
            specified_int_names = phy_node.specified_int_names
            nidb_node = self.nidb.node(phy_node)
            nidb_node.render.template = os.path.join("templates","ios2","router.conf.mako")
            if to_memory:
                nidb_node.render.to_memory = True
            else:
                nidb_node.render.dst_folder = dst_folder
                nidb_node.render.dst_file = "%s.conf" % naming.network_hostname(
                    phy_node)

            # Assign interfaces
            int_ids = self.interface_ids_ios2()
            for interface in nidb_node.physical_interfaces:
                if specified_int_names:
                    interface.id = phy_node.interface(interface).name
                # TODO: need to determine if interface name already specified
                else:
                    interface.id = int_ids.next()

            ios2_compiler.compile(nidb_node)

            if use_mgmt_interfaces:
                mgmt_int_id = "mgmteth0/0/CPU0/0"
                mgmt_int = nidb_node.add_interface(management = True)
                mgmt_int.id = mgmt_int_id

        nxos_compiler = NxOsCompiler(self.nidb, self.anm)
        for phy_node in g_phy.nodes('is_router', host=self.host, syntax='nx_os'):
            specified_int_names = phy_node.specified_int_names
            nidb_node = self.nidb.node(phy_node)
            nidb_node.render.template = os.path.join("templates", "nx_os.mako")
            if to_memory:
                nidb_node.render.to_memory = True
            else:
                nidb_node.render.dst_folder = dst_folder
                nidb_node.render.dst_file = "%s.conf" % naming.network_hostname(
                    phy_node)

            # Assign interfaces
            int_ids = self.interface_ids_nxos()
            for interface in nidb_node.physical_interfaces:
                if specified_int_names:
                    interface.id = phy_node.interface(interface).name
                # TODO: need to determine if interface name already specified
                else:
                    interface.id = int_ids.next()

            if use_mgmt_interfaces:
                mgmt_int_id = "mgmt0"
                mgmt_int = nidb_node.add_interface(management = True)
                mgmt_int.id = mgmt_int_id

            nxos_compiler.compile(nidb_node)

        other_nodes = [phy_node for phy_node in g_phy.nodes('is_router', host=self.host)
                       if phy_node.syntax not in ("ios", "ios2")]
        for node in other_nodes:
            #TODO: check why we need this
            phy_node = g_phy.node(node)
            nidb_node = self.nidb.node(phy_node)
            nidb_node.input_label = phy_node.id  # set specifically for now for other variants

# TODO: use more os.path.join for render folders
# TODO: Split compilers into seperate modules

        if use_mgmt_interfaces:
            self.assign_management_interfaces()
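
# Illustrative sketch: the loops above pull interface ids from generators such
# as self.interface_ids_ios() via .next(). A minimal generator in that style;
# the exact naming scheme is an assumption, not the project's.
import itertools

def interface_ids_ios():
    """Yield 'GigabitEthernet0/0', 'GigabitEthernet0/1', ... indefinitely."""
    for port in itertools.count(0):
        yield "GigabitEthernet0/%s" % port

int_ids = interface_ids_ios()
first = next(int_ids)   # 'GigabitEthernet0/0'
second = next(int_ids)  # 'GigabitEthernet0/1'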
Example #45
0
def build_ipv6(anm):
    """Builds IPv6 graph, using nodes and edges from IP graph"""
    import netaddr
    import autonetkit.plugins.ipv6 as ipv6

    # uses the nodes and edges from ipv4

    g_ipv6 = anm.add_overlay('ipv6')
    g_ip = anm['ip']
    g_in = anm['input']
    g_ipv6.add_nodes_from(g_ip, retain=['label', 'asn', 'broadcast_domain'])  # retain if collision domain or not
    g_ipv6.add_edges_from(g_ip.edges())

    # TODO: tidy up naming consistency of secondary_loopback_block and vrf_loopback_block
    (infra_block, loopback_block, secondary_loopback_block) = \
        extract_ipv6_blocks(anm)

    block_message = "IPv6 allocations: Infrastructure: %s, Loopback: %s" % (infra_block, loopback_block)
    if any(i for n in g_ip.nodes() for i in n.loopback_interfaces()
           if not i.is_loopback_zero):
        block_message += " Secondary Loopbacks: %s" % secondary_loopback_block
    log.info(block_message)

    # TODO: replace this with direct allocation to interfaces in ip alloc plugin
    allocated = sorted([n for n in g_ip if n['input'].loopback_v6])
    if len(allocated) == len(g_ip.l3devices()):
        # all allocated
        # TODO: need to infer subnet for manual_ipv6_loopback_allocation
        log.info("Using user-specified IPv6 loopback addresses")
        manual_ipv6_loopback_allocation(anm)
    else:
        if len(allocated):
            log.warning("Using automatic IPv6 loopback allocation. IPv6 loopback addresses specified on nodes %s will be ignored." % allocated)
        else:
            log.info("Automatically assigning IPv6 loopback addresses")

        ipv6.allocate_loopbacks(g_ipv6, loopback_block)

    l3_devices = [d for d in g_in if d.device_type in ('router', 'server')]

    manual_alloc_devices = set()
    for device in l3_devices:
        physical_interfaces = list(device.physical_interfaces())
        allocated = list(interface.ipv6_address for interface in physical_interfaces if interface.is_bound)
        if all(interface.ipv6_address for interface in
               physical_interfaces if interface.is_bound):
            manual_alloc_devices.add(device)  # add as a manual allocated device

    if manual_alloc_devices == set(l3_devices):
        log.info("Using user-specified IPv6 infrastructure addresses")
        manual_alloc_ipv6_infrastructure = True
    else:
        manual_alloc_ipv6_infrastructure = False
        # warn if any set
        allocated = []
        unallocated = []
        for node in l3_devices:
            allocated += sorted([i for i in node.physical_interfaces() if i.is_bound and i.ipv6_address])
            unallocated += sorted([i for i in node.physical_interfaces() if i.is_bound and not i.ipv6_address])

        #TODO: what if IP is set but not a prefix?
        if len(allocated):
            #TODO: if set is > 50% of nodes then list those that are NOT set
            log.warning("Using automatic IPv6 interface allocation. IPv6 interface addresses specified on interfaces %s will be ignored." % allocated)
        else:
            log.info("Automatically assigning IPv6 infrastructure addresses")

    if manual_alloc_ipv6_infrastructure:
        manual_ipv6_infrastructure_allocation(anm)
    else:
        ipv6.allocate_infra(g_ipv6, infra_block)
        #TODO: see if this is still needed or if can allocate direct from the ipv6 allocation plugin
        for node in g_ipv6.l3devices():
            for interface in node:
                edges = list(interface.edges())
                if len(edges):
                    edge = edges[0]  # first (only) edge
                    interface.ip_address = edge.ip  # TODO: make this consistent
                    interface.subnet = edge.dst.subnet  # from collision domain

    ipv6.allocate_vrf_loopbacks(g_ipv6, secondary_loopback_block)

    for node in g_ipv6.routers():
        #TODO: test this code
        node.loopback_zero.ip_address = node.loopback
        node.loopback_zero.subnet = netaddr.IPNetwork("%s/32" % node.loopback)
        for interface in node.loopback_interfaces():
            if not interface.is_loopback_zero:
                interface.ip_address = interface.loopback #TODO: fix this inconsistency elsewhere
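
# Illustrative sketch: a standalone version of the automatic loopback step
# above, handing out one address per router from a loopback block. The router
# names and block are hypothetical; the real allocation is done by
# autonetkit.plugins.ipv6.
import netaddr

loopback_block = netaddr.IPNetwork("2001:db8::/64")
routers = ["r1", "r2", "r3"]

hosts = loopback_block.iter_hosts()
loopbacks = dict((router, next(hosts)) for router in routers)
# e.g. {'r1': IPAddress('2001:db8::1'), 'r2': IPAddress('2001:db8::2'), ...}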
Example #46
0
def manual_ipv6_infrastructure_allocation(anm):
    """Applies manual IPv6 allocation"""

    import netaddr
    g_ipv6 = anm['ipv6']
    log.info('Using specified IPv6 infrastructure allocation')

    for node in g_ipv6.l3devices():
        for interface in node.physical_interfaces():
            if not interface['input'].is_bound:
                continue  # unbound interface
            ip_address = netaddr.IPAddress(
                interface['input'].ipv6_address)
            prefixlen = interface['input'].ipv6_prefixlen
            interface.ip_address = ip_address
            interface.prefixlen = prefixlen
            cidr_string = '%s/%s' % (ip_address, prefixlen)
            interface.subnet = netaddr.IPNetwork(cidr_string)

    broadcast_domains = [d for d in g_ipv6 if d.broadcast_domain]

    # TODO: allow this to work with specified ip_address/subnet as well as ip_address/prefixlen

    from netaddr import IPNetwork
    for coll_dom in broadcast_domains:
        connected_interfaces = [edge.dst_int for edge in
                                coll_dom.edges()]
        cd_subnets = [IPNetwork('%s/%s' % (i.subnet.network,
                      i.prefixlen)) for i in connected_interfaces]


        if len(cd_subnets) == 0:
            log.warning("Collision domain %s is not connected to any nodes" % coll_dom)
            continue

        try:
            assert len(set(cd_subnets)) == 1
        except AssertionError:
            mismatch_subnets = '; '.join(
                '%s: %s/%s' % (i, i.subnet.network, i.prefixlen)
                for i in connected_interfaces)
            log.warning('Non-matching subnets from collision domain %s: %s'
                        % (coll_dom, mismatch_subnets))
        else:
            coll_dom.subnet = cd_subnets[0]  # take first entry

        # apply to remote interfaces

        for edge in coll_dom.edges():
            edge.dst_int.subnet = coll_dom.subnet

    # also need to form aggregated IP blocks (used for e.g. routing prefix
    # advertisement)
    # import autonetkit
    # autonetkit.update_vis(anm)

    infra_blocks = {}
    for (asn, devices) in g_ipv6.groupby('asn').items():
        broadcast_domains = [d for d in devices if d.broadcast_domain]
        subnets = [cd.subnet for cd in broadcast_domains
                   if cd.subnet is not None]  # only if subnet is set
        infra_blocks[asn] = netaddr.cidr_merge(subnets)

    g_ipv6.data.infra_blocks = infra_blocks
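
# Illustrative sketch: the per-ASN aggregation above uses netaddr.cidr_merge to
# collapse broadcast-domain subnets into covering prefixes. A standalone
# example with hypothetical subnets:
import netaddr

subnets = [netaddr.IPNetwork("2001:db8:0:0::/64"),
           netaddr.IPNetwork("2001:db8:0:1::/64")]
merged = netaddr.cidr_merge(subnets)
# merged is [IPNetwork('2001:db8::/63')]: two adjacent /64s collapse to a /63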
Example #47
0
        if infra_subnet is None or infra_prefix is None:
            log.debug('Using default IPv6 infra_subnet %s'% infra_block)
        else:
            log.warning('Unable to obtain IPv6 infra_subnet from input graph: %s, using default %s'% (e, infra_block))

    try:
        loopback_subnet = g_in.data.ipv6_loopback_subnet
        loopback_prefix = g_in.data.ipv6_loopback_prefix
        loopback_block = sn_preflen_to_network(loopback_subnet,
                loopback_prefix)
    except Exception, e:
        loopback_block = IPNetwork('%s/%s' % (ipv6_defaults["loopback_subnet"], ipv6_defaults["loopback_prefix"]))
        if loopback_subnet is None or loopback_prefix is None:
            log.debug('Using default IPv6 loopback_subnet %s'% loopback_block)
        else:
            log.warning('Unable to obtain IPv6 loopback_subnet from input graph: %s, using default %s'% (e, loopback_block))

    try:
        vrf_loopback_subnet = g_in.data.ipv6_vrf_loopback_subnet
        vrf_loopback_prefix = g_in.data.ipv6_vrf_loopback_prefix
        vrf_loopback_block = sn_preflen_to_network(vrf_loopback_subnet,
                vrf_loopback_prefix)
    except Exception, e:
        vrf_loopback_block = IPNetwork('%s/%s' % (ipv6_defaults["vrf_loopback_subnet"], ipv6_defaults["vrf_loopback_prefix"]))
        if vrf_loopback_subnet is None or vrf_loopback_prefix is None:
            log.debug('Using default IPv6 vrf_loopback_subnet %s'% vrf_loopback_block)
        else:
            log.warning('Unable to obtain IPv6 vrf_loopback_subnet from input graph: %s, using default %s'% (e, vrf_loopback_block))

    return (infra_block, loopback_block, vrf_loopback_block)
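
# Illustrative sketch: each try/except above follows the same pattern of
# combining a subnet and prefix length when both are present, otherwise
# falling back to a default. A compact standalone helper; the default value
# here is hypothetical.
from netaddr import IPNetwork

def block_or_default(subnet, prefixlen, default="fd00::/32"):
    if subnet is None or prefixlen is None:
        return IPNetwork(default)
    return IPNetwork("%s/%s" % (subnet, prefixlen))

block_or_default("2001:db8::", 32)  # IPNetwork('2001:db8::/32')
block_or_default(None, None)        # IPNetwork('fd00::/32')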
Example #48
0
def validate_ipv4(anm):
    #TODO: make this generic to also handle IPv6
    if not anm.has_overlay("ipv4"):
        log.debug("No IPv4 overlay created, skipping ipv4 validation")
        return
    g_ipv4 = anm['ipv4']
    # interface IP uniqueness
    tests_passed = True

    #TODO: only include bound interfaces

    # check globally unique ip addresses
    all_ints = [i for n in g_ipv4.l3devices()
            for i in n.physical_interfaces()
            if i.is_bound] # don't include unbound interfaces
    all_int_ips = [i.ip_address for i in all_ints]

    if all_unique(all_int_ips):
        g_ipv4.log.debug("All interface IPs globally unique")
    else:
        tests_passed = False
        duplicate_ips = set(duplicate_items(all_int_ips))
        duplicate_ints = [n for n in all_ints
                          if n.ip_address in duplicate_ips]
        duplicates = ", ".join("%s: %s" % (i.node, i.ip_address)
                               for i in duplicate_ints)
        g_ipv4.log.warning("Global duplicate IP addresses %s" % duplicates)

    for cd in g_ipv4.nodes("broadcast_domain"):
        cd.log.debug("Verifying subnet and interface IPs")
        neigh_ints = list(cd.neighbor_interfaces())
        neigh_ints = [i for i in neigh_ints if i.node.is_l3device()]
        neigh_int_subnets = [i.subnet for i in neigh_ints]
        if all_same(neigh_int_subnets):
            # log ok
            pass
        else:
            subnets = ", ".join("%s: %s" % (i.node, i.subnet)
                                for i in neigh_ints)
            tests_passed = False
            log.warning("Different subnets on %s. %s" %
                        (cd, subnets))
            # log warning

        ip_subnet_mismatches = [i for i in neigh_ints
                    if i.ip_address not in i.subnet]
        if len(ip_subnet_mismatches):
            tests_passed = False
            mismatches = ", ".join("%s not in %s on %s" %
                    (i.ip_address, i.subnet, i.node)
                    for i in ip_subnet_mismatches)
            cd.log.warning("Mismatched IP subnets: %s" %
                    mismatches)
        else:
            cd.log.debug("All subnets match")

        neigh_int_ips = [i.ip_address for i in neigh_ints]
        if all_unique(neigh_int_ips):
            cd.log.debug("All interface IP addresses are unique")
        else:
            tests_passed = False
            duplicate_ips = set(duplicate_items(neigh_int_ips))
            duplicate_ints = [n for n in neigh_ints
                    if n.ip_address in duplicate_ips]
            duplicates = ", ".join("%s: %s" % (i.node, i.ip_address)
                    for i in duplicate_ints)
            cd.log.warning("Duplicate IP addresses: %s" % duplicates)

    if tests_passed:
        g_ipv4.log.debug("All IP tests passed.")
    else:
        g_ipv4.log.warning("Some IP tests failed.")

    return tests_passed
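
# Illustrative sketch: validate_ipv4 above relies on all_unique() and
# duplicate_items() helpers. Minimal versions with the behaviour the checks
# assume; these are sketches, not necessarily the project's implementations.
from collections import Counter

def all_unique(items):
    items = list(items)
    return len(items) == len(set(items))

def duplicate_items(items):
    return [item for item, count in Counter(items).items() if count > 1]

all_unique(["10.0.0.1", "10.0.0.2"])                   # True
duplicate_items(["10.0.0.1", "10.0.0.1", "10.0.0.2"])  # ['10.0.0.1']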
Example #49
0
    def add_edges_from(self,
                       ebunch,
                       bidirectional=False,
                       retain=None,
                       warn=True,
                       **kwargs):
        """Add edges. Unlike NetworkX, can only add an edge if both
        src and dst in graph already.
        If they are not, then they will not be added (silently ignored)


        Retains interface mappings if they are present (this is why ANK
            stores the interface reference on the edges, as it simplifies
            cross-layer access, as well as split, aggregate, etc. retaining the
            interface bindings)

        Bidirectional will add the edge in both directions. Useful if going
        from an undirected graph to a directed one, eg G_in to G_bgp
        #TODO: explain "retain" and ["retain"] logic

        if user wants to add from another overlay, first go g_x.edges()
        then add from the result

        allow (src, dst, ekey), (src, dst, ekey, data) for the ank utils
        """

        if not retain:
            retain = []
        try:
            retain.lower()
            retain = [retain]  # was a string, put into list
        except AttributeError:
            pass  # already a list

        if self.is_multigraph():
            #used_keys = self._graph.adj[u][v]
            from collections import defaultdict
            used_keys = defaultdict(dict)

        all_edges = []
        for in_edge in ebunch:
            """Edge could be one of:
            - NmEdge
            - (NmNode, NmNode)
            - (NmPort, NmPort)
            - (NmNode, NmPort)
            - (NmPort, NmNode)
            - (string, string)
            """
            # This is less efficient than nx add_edges_from, but cleaner logic
            # TODO: could put the interface data into retain?
            data = {'_ports': {}}  # to retain
            ekey = None  # default is None (nx auto-allocates next int)

            # convert input to a NmEdge
            src = dst = None
            if isinstance(in_edge, NmEdge):
                edge = in_edge  # simple case
                ekey = edge.ekey  # explicitly set ekey
                src = edge.src.node_id
                dst = edge.dst.node_id

                # and copy retain data
                data = dict((key, edge.get(key)) for key in retain)
                ports = {
                    k: v
                    for k, v in edge.raw_interfaces.items() if k in self._graph
                }  # only if exists in this overlay
                # TODO: debug log if skipping a binding?
                data['_ports'] = ports

                # this is the only case where copy across data
                # but want to copy attributes for all cases

            elif len(in_edge) == 2:
                in_a, in_b = in_edge[0], in_edge[1]

                if isinstance(in_a, NmNode) and isinstance(in_b, NmNode):
                    src = in_a.node_id
                    dst = in_b.node_id

                elif isinstance(in_a, NmPort) and isinstance(in_b, NmPort):
                    src = in_a.node.node_id
                    dst = in_b.node.node_id
                    ports = {}
                    if src in self:
                        ports[src] = in_a.interface_id
                    if dst in self:
                        ports[dst] = in_b.interface_id
                    data['_ports'] = ports

                elif isinstance(in_a, NmNode) and isinstance(in_b, NmPort):
                    src = in_a.node_id
                    dst = in_b.node.node_id
                    ports = {}
                    if dst in self:
                        ports[dst] = in_b.interface_id
                    data['_ports'] = ports

                elif isinstance(in_a, NmPort) and isinstance(in_b, NmNode):
                    src = in_a.node.node_id
                    dst = in_b.node_id
                    ports = {}
                    if src in self:
                        ports[src] = in_a.interface_id
                    data['_ports'] = ports

                elif in_a in self and in_b in self:
                    src = in_a
                    dst = in_b

            elif len(in_edge) == 3:
                # (src, dst, ekey) format
                # or (src, dst, data) format
                in_a, in_b, in_c = in_edge[0], in_edge[1], in_edge[2]
                if in_a in self and in_b in self:
                    src = in_a
                    dst = in_b
                    # TODO: document the following logic
                    if self.is_multigraph() and not isinstance(in_c, dict):
                        ekey = in_c
                    else:
                        data = in_c

            elif len(in_edge) == 4:
                # (src, dst, ekey, data) format
                in_a, in_b = in_edge[0], in_edge[1]
                if in_a in self and in_b in self:
                    src = in_a
                    dst = in_b
                    ekey = in_edge[2]
                    data = in_edge[3]

            # TODO: if edge not set at this point, give error/warn

            # TODO: add check that edge.src and edge.dst exist
            if (src is None or dst is None) and warn:
                log.warning("Unsupported edge %s" % str(in_edge))
            if not (src in self and dst in self):
                if warn:
                    self.log.debug(
                        "Not adding edge %s, src/dst not in overlay" %
                        str(in_edge))
                continue

            # TODO: warn if not multigraph and edge already exists - don't
            # add/clobber
            #TODO: double check this logic + add test case
            data.update(**kwargs)
            if self.is_multigraph() and ekey is None:
                # specifically allocate a key
                if src in used_keys and dst in used_keys[src]:
                    pass  # already established
                else:
                    try:
                        used_keys[src][dst] = self._graph.adj[src][dst].keys()
                    except KeyError:
                        # no edges exist
                        used_keys[src][dst] = []

                # now have the keys mapping
                ekey = len(used_keys[src][dst])
                while ekey in used_keys[src][dst]:
                    ekey += 1

                used_keys[src][dst].append(ekey)

            edges_to_add = []
            if self.is_multigraph():
                edges_to_add.append((src, dst, ekey, dict(data)))
                if bidirectional:
                    edges_to_add.append((dst, src, ekey, dict(data)))
            else:
                edges_to_add.append((src, dst, dict(data)))
                if bidirectional:
                    edges_to_add.append((dst, src, dict(data)))

            #TODO: warn if not multigraph

            self._graph.add_edges_from(edges_to_add)
            all_edges += edges_to_add

        if self.is_multigraph():
            return [
                NmEdge(self.anm, self._overlay_id, src, dst, ekey)
                if ekey is not None else NmEdge(self.anm, self._overlay_id,
                                                src, dst)  # default no ekey set
                for src, dst, ekey, _ in all_edges
            ]
        else:
            return [
                NmEdge(self.anm, self._overlay_id, src, dst)
                for src, dst, _ in all_edges
            ]
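
# Illustrative sketch: in the multigraph branch above, a new integer edge key
# for a (src, dst) pair is chosen by starting at the number of existing keys
# and skipping collisions. The same idea in isolation:
def next_edge_key(used_keys):
    """Return an integer key not present in used_keys."""
    ekey = len(used_keys)
    while ekey in used_keys:
        ekey += 1
    return ekey

next_edge_key([])      # 0
next_edge_key([0, 1])  # 2
next_edge_key([0, 2])  # 3 (starts at len(used_keys) and skips existing keys)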
Example #50
0
    def compile_devices(self):
        g_phy = self.anm['phy']

        to_memory, use_mgmt_interfaces, dst_folder = self._parameters()
        if use_mgmt_interfaces:
            log.info("Allocating VIRL management interfaces")
        else:
            log.info("Not allocating VIRL management interfaces")
#TODO: need to copy across the interface name from edge to the interface

# TODO: merge common router code, so end up with three loops: routers, ios
# routers, ios_xr routers

#TODO: Split out each device compiler into own function

#TODO: look for unused code paths here - especially for interface allocation

# store autonetkit_cisco version
        log.info("Generating device configurations")
        from pkg_resources import get_distribution

        # Copy across indices for external connectors (e.g may want to copy configs)
        external_connectors = [
            n for n in g_phy
            if n.host == self.host and n.device_type == "external_connector"
        ]
        for phy_node in external_connectors:
            DmNode = self.nidb.node(phy_node)
            DmNode.indices = phy_node.indices

        for phy_node in g_phy.l3devices(host=self.host):
            loopback_ids = self.loopback_interface_ids()
            # allocate loopbacks to routes (same for all ios variants)
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            DmNode.indices = phy_node.indices

            for interface in DmNode.loopback_interfaces():
                if interface != DmNode.loopback_zero:
                    interface.id = loopback_ids.next()

            # numeric ids
            numeric_int_ids = self.numeric_interface_ids()
            for interface in DmNode.physical_interfaces():
                phy_numeric_id = phy_node.interface(interface).numeric_id
                if phy_numeric_id is None:
                    #TODO: remove numeric ID code
                    interface.numeric_id = numeric_int_ids.next()
                else:
                    interface.numeric_id = int(phy_numeric_id)

                phy_specified_id = phy_node.interface(interface).specified_id
                if phy_specified_id is not None:
                    interface.id = phy_specified_id

        #from autonetkit.compilers.device.ubuntu import UbuntuCompiler
        from autonetkit_cisco.compilers.device.ubuntu import UbuntuCompiler

        ubuntu_compiler = UbuntuCompiler(self.nidb, self.anm)
        for phy_node in g_phy.servers(host=self.host):
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            DmNode.add_stanza("ip")

            #TODO: look at server syntax also, same as for routers
            for interface in DmNode.physical_interfaces():
                phy_specified_id = phy_node.interface(interface).specified_id
                if phy_specified_id is not None:
                    interface.id = phy_specified_id

                #interface.id = self.numeric_to_interface_label_linux(interface.numeric_id)
                #print "numeric", interface.numeric_id, interface.id
                DmNode.ip.use_ipv4 = phy_node.use_ipv4
                DmNode.ip.use_ipv6 = phy_node.use_ipv6

                #TODO: clean up interface handling
            numeric_int_ids = self.numeric_interface_ids()
            for interface in DmNode.physical_interfaces():
                phy_int = phy_node.interface(interface)
                phy_numeric_id = phy_node.interface(interface).numeric_id
                if phy_numeric_id is None:
                    #TODO: remove numeric ID code
                    interface.numeric_id = numeric_int_ids.next()
                else:
                    interface.numeric_id = int(phy_numeric_id)

                phy_specified_id = phy_node.interface(interface).specified_id
                if phy_specified_id is not None:
                    interface.id = phy_specified_id

                #TODO: make this part of the base device compiler, which server/router inherits

            if use_mgmt_interfaces:  # note these are physical interfaces; configure after previous config steps
                mgmt_int = DmNode.add_interface(management=True,
                                                description="eth0")
                mgmt_int_id = "eth0"
                mgmt_int.id = mgmt_int_id

                # render route config
            DmNode = self.nidb.node(phy_node)
            ubuntu_compiler.compile(DmNode)

            if not phy_node.dont_configure_static_routing:
                DmNode.render.template = os.path.join("templates", "linux",
                                                      "static_route.mako")
                if to_memory:
                    DmNode.render.to_memory = True
                else:
                    DmNode.render.dst_folder = dst_folder
                    DmNode.render.dst_file = "%s.conf" % naming.network_hostname(
                        phy_node)

        #TODO: refactor out common logic

        ios_compiler = IosClassicCompiler(self.nidb, self.anm)
        host_routers = g_phy.routers(host=self.host)
        ios_nodes = (n for n in host_routers if n.syntax in ("ios", "ios_xe"))
        for phy_node in ios_nodes:
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            DmNode.render.template = os.path.join("templates", "ios.mako")
            if to_memory:
                DmNode.render.to_memory = True
            else:
                DmNode.render.dst_folder = dst_folder
                DmNode.render.dst_file = "%s.conf" % naming.network_hostname(
                    phy_node)

            #TODO: write function that assigns interface number excluding those already taken

            # Assign interfaces
            if phy_node.device_subtype == "IOSv":
                int_ids = self.interface_ids_ios()
                numeric_to_interface_label = self.numeric_to_interface_label_ios
            elif phy_node.device_subtype == "CSR1000v":
                int_ids = self.interface_ids_csr1000v()
                numeric_to_interface_label = self.numeric_to_interface_label_ra
            else:
                # default if no subtype specified
                #TODO: need to set default in the load module
                log.warning("Unexpected subtype %s for %s" %
                            (phy_node.device_subtype, phy_node))
                int_ids = self.interface_ids_ios()
                numeric_to_interface_label = self.numeric_to_interface_label_ios

            if use_mgmt_interfaces:
                if phy_node.device_subtype == "IOSv":
                    #TODO: make these configured in the internal config file
                    # for platform/device_subtype keying
                    mgmt_int_id = "GigabitEthernet0/0"
                if phy_node.device_subtype == "CSR1000v":
                    mgmt_int_id = "GigabitEthernet1"

            for interface in DmNode.physical_interfaces():
                #TODO: use this code block once for all routers
                if not interface.id:
                    interface.id = numeric_to_interface_label(
                        interface.numeric_id)

            ios_compiler.compile(DmNode)
            if use_mgmt_interfaces:
                mgmt_int = DmNode.add_interface(management=True)
                mgmt_int.id = mgmt_int_id

        try:
            from autonetkit_cisco.compilers.device.cisco import IosXrCompiler
            ios_xr_compiler = IosXrCompiler(self.nidb, self.anm)
        except ImportError:
            ios_xr_compiler = IosXrCompiler(self.nidb, self.anm)

        for phy_node in g_phy.routers(host=self.host, syntax='ios_xr'):
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            DmNode.render.template = os.path.join("templates", "ios_xr",
                                                  "router.conf.mako")
            if to_memory:
                DmNode.render.to_memory = True
            else:
                DmNode.render.dst_folder = dst_folder
                DmNode.render.dst_file = "%s.conf" % naming.network_hostname(
                    phy_node)

            # Assign interfaces
            int_ids = self.interface_ids_ios_xr()
            for interface in DmNode.physical_interfaces():
                if not interface.id:
                    interface.id = self.numeric_to_interface_label_ios_xr(
                        interface.numeric_id)

            ios_xr_compiler.compile(DmNode)

            if use_mgmt_interfaces:
                mgmt_int_id = "mgmteth0/0/CPU0/0"
                mgmt_int = DmNode.add_interface(management=True)
                mgmt_int.id = mgmt_int_id

        nxos_compiler = NxOsCompiler(self.nidb, self.anm)
        for phy_node in g_phy.routers(host=self.host, syntax='nx_os'):
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            DmNode.render.template = os.path.join("templates", "nx_os.mako")
            if to_memory:
                DmNode.render.to_memory = True
            else:
                DmNode.render.dst_folder = dst_folder
                DmNode.render.dst_file = "%s.conf" % naming.network_hostname(
                    phy_node)

            # Assign interfaces
            int_ids = self.interface_ids_nxos()
            for interface in DmNode.physical_interfaces():
                if not interface.id:
                    interface.id = self.numeric_to_interface_label_nxos(
                        interface.numeric_id)

            DmNode.supported_features = ConfigStanza(mpls_te=False,
                                                     mpls_oam=False,
                                                     vrf=False)

            nxos_compiler.compile(DmNode)
            #TODO: make this work other way around

            if use_mgmt_interfaces:
                mgmt_int_id = "mgmt0"
                mgmt_int = DmNode.add_interface(management=True)
                mgmt_int.id = mgmt_int_id

        staros_compiler = StarOsCompiler(self.nidb, self.anm)
        for phy_node in g_phy.routers(host=self.host, syntax='StarOS'):
            DmNode = self.nidb.node(phy_node)
            DmNode.add_stanza("render")
            DmNode.render.template = os.path.join("templates", "staros.mako")
            if to_memory:
                DmNode.render.to_memory = True
            else:
                DmNode.render.dst_folder = dst_folder
                DmNode.render.dst_file = "%s.conf" % naming.network_hostname(
                    phy_node)

            # Assign interfaces
            int_ids = self.interface_ids_nxos()
            for interface in DmNode.physical_interfaces():
                if not interface.id:
                    interface.id = self.numeric_to_interface_label_star_os(
                        interface.numeric_id)

            staros_compiler.compile(DmNode)
            #TODO: make this work other way around

            if use_mgmt_interfaces:
                mgmt_int_id = "ethernet 1/1"
                mgmt_int = DmNode.add_interface(management=True)
                mgmt_int.id = mgmt_int_id
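
# Illustrative sketch: each platform above maps a flat numeric interface id to
# a platform label (numeric_to_interface_label_ios, _nxos, ...). A hypothetical
# mapping of that shape, assuming a fixed port count per slot:
def numeric_to_interface_label(numeric_id, ports_per_slot=4,
                               prefix="GigabitEthernet"):
    slot, port = divmod(numeric_id, ports_per_slot)
    return "%s%s/%s" % (prefix, slot, port)

numeric_to_interface_label(0)  # 'GigabitEthernet0/0'
numeric_to_interface_label(5)  # 'GigabitEthernet1/1'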
Example #51
0
"""Zmq based measurement server"""
# based on
# https://learning-0mq-with-pyzmq.readthedocs.org/en/latest/pyzmq/patterns/pushpull.html

# TODO: rewrite as callbacks rather than threads
import autonetkit.log as log

try:
    import zmq
except ImportError:
    log.warning("Unable to import zmq")
import json
import socket as python_socket
import telnetlib
from threading import Thread
import time
import sys


def streamer_device(port_in, port_out):
    from zmq.devices import ProcessDevice

    pd = ProcessDevice(zmq.QUEUE, zmq.PULL, zmq.PUSH)
    pd.bind_in('tcp://*:%s' % port_in)
    pd.bind_out('tcp://*:%s' % port_out)
    pd.setsockopt_in(zmq.IDENTITY, 'PULL')

    pd.setsockopt_out(zmq.IDENTITY, 'PUSH')
    pd.start()
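
# Illustrative sketch: streamer_device() above forwards messages from PUSH
# producers to PULL consumers. A minimal producer/consumer pair for the same
# pattern; the ports are hypothetical and must match the device's
# bind_in/bind_out, and recv_json() blocks until a message flows through.
import zmq

context = zmq.Context()

sender = context.socket(zmq.PUSH)
sender.connect("tcp://127.0.0.1:5559")    # the device's port_in

receiver = context.socket(zmq.PULL)
receiver.connect("tcp://127.0.0.1:5560")  # the device's port_out

sender.send_json({"host": "r1", "command": "show ip route"})
message = receiver.recv_json()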

Example #52
0
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import time

import autonetkit.ank_messaging as ank_messaging
import autonetkit.config as config
import autonetkit.log as log

try:
    import Exscript
except ImportError:
    log.warning('Deployment requires Exscript: pip install https://github.com/knipknap/exscript/tarball/master'
                )


def deploy(
    host,
    username,
    dst_folder,
    key_filename=None,
    parallel_count=5,
    ):
    tar_file = package(dst_folder)
    transfer(host, username, tar_file, key_filename=key_filename)
    extract(
        host,
        username,
        tar_file,
        dst_folder,
        key_filename=key_filename,
Example #53
0
    def __setitem__(self, key, value):
        if isinstance(value, dict):
            log.warning(
                "Adding dictionary %s: did you mean to add a config_stanza?" %
                key)
        self._odict[key] = value
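
# Illustrative sketch: what the check above guards against is assigning a
# plain dict where a ConfigStanza is expected. A hypothetical minimal container
# with the same __setitem__ behaviour, backed by an OrderedDict:
from collections import OrderedDict

class StanzaStore(object):
    def __init__(self):
        self._odict = OrderedDict()

    def __setitem__(self, key, value):
        if isinstance(value, dict):
            print("warning: adding dictionary %s: "
                  "did you mean to add a config_stanza?" % key)
        self._odict[key] = value

store = StanzaStore()
store["ospf"] = {"area": 0}  # triggers the warning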
Example #54
0
def build_ibgp(anm):
    g_in = anm['input']
    g_bgp = anm['bgp']

    # TODO: build direct to ibgp graph - can construct combined bgp for vis
    #TODO: normalise input property

    ank_utils.copy_attr_from(g_in, g_bgp, "ibgp_role")
    ank_utils.copy_attr_from(g_in,
                             g_bgp,
                             "ibgp_l2_cluster",
                             "hrr_cluster",
                             default=None)
    ank_utils.copy_attr_from(g_in,
                             g_bgp,
                             "ibgp_l3_cluster",
                             "rr_cluster",
                             default=None)

    # TODO: add more detailed logging

    for n in g_bgp:
        # Tag with label to make logic clearer
        if n.ibgp_role is None:
            n.ibgp_role = "Peer"

            # TODO: if top-level, then don't mark as RRC

    ibgp_nodes = [n for n in g_bgp if n.ibgp_role != "Disabled"]

    # Notify user of non-ibgp nodes
    non_ibgp_nodes = [n for n in g_bgp if n.ibgp_role == "Disabled"]
    if 0 < len(non_ibgp_nodes) < 10:
        log.info("Skipping iBGP for iBGP disabled nodes: %s", non_ibgp_nodes)
    elif len(non_ibgp_nodes) >= 10:
        log.info("Skipping iBGP for more than 10 iBGP disabled nodes:"
                 "refer to visualization for resulting topology.")

    # warn for any nodes that have RR set but no rr_cluster, or HRR set and no
    # hrr_cluster
    rr_mismatch = [
        n for n in ibgp_nodes if n.ibgp_role == "RR" and n.rr_cluster is None
    ]
    if len(rr_mismatch):
        log.warning(
            "Some routers are set as RR but have no rr_cluster: %s. Please specify an rr_cluster for peering."
            % ", ".join(str(n) for n in rr_mismatch))

    hrr_mismatch = [
        n for n in ibgp_nodes if n.ibgp_role == "HRR" and n.hrr_cluster is None
    ]
    if len(hrr_mismatch):
        log.warning(
            "Some routers are set as HRR but have no hrr_cluster: %s. Please specify an hrr_cluster for peering."
            % ", ".join(str(n) for n in hrr_mismatch))

    for _, asn_devices in ank_utils.groupby("asn", ibgp_nodes):
        asn_devices = list(asn_devices)

        # iBGP peer peers with
        peers = [n for n in asn_devices if n.ibgp_role == "Peer"]
        rrs = [n for n in asn_devices if n.ibgp_role == "RR"]
        hrrs = [n for n in asn_devices if n.ibgp_role == "HRR"]
        rrcs = [n for n in asn_devices if n.ibgp_role == "RRC"]

        over_links = []
        up_links = []
        down_links = []

        # 0. RRCs can only belong to either an rr_cluster or a hrr_cluster
        invalid_rrcs = [
            r for r in rrcs
            if r.rr_cluster is not None and r.hrr_cluster is not None
        ]
        if len(invalid_rrcs):
            message = ", ".join(str(r) for r in invalid_rrcs)
            log.warning(
                "RRCs can only have either a rr_cluster or hrr_cluster set. "
                "The following have both set, and only the rr_cluster will be used: %s",
                message)

        # TODO: do we also want to warn for RRCs with no cluster set? Do we also exclude these?
        # TODO: do we also want to warn for HRRs and RRs with no cluster set?
        # Do we also exclude these?

        # 1. Peers:
        # 1a. Peers connect over to peers
        over_links += [(s, t) for s in peers for t in peers]
        # 1b. Peers connect over to RRs
        over_links += [(s, t) for s in peers for t in rrs]

        # 2. RRs:
        # 2a. RRs connect over to Peers
        over_links += [(s, t) for s in rrs for t in peers]
        # 2b. RRs connect over to RRs
        over_links += [(s, t) for s in rrs for t in rrs]
        # 2c. RRs connect down to RRCs in same rr_cluster
        down_links += [(s, t) for s in rrs for t in rrcs
                       if s.rr_cluster is not None
                       and s.rr_cluster == t.rr_cluster]
        # 2d. RRs connect down to HRRs in the same rr_cluster
        down_links += [(s, t) for s in rrs for t in hrrs
                       if s.rr_cluster is not None
                       and s.rr_cluster == t.rr_cluster]

        # 3. HRRs
        # 3a. HRRs connect up to RRs in the same rr_cluster
        up_links += [(s, t) for s in hrrs for t in rrs
                     if s.rr_cluster is not None
                     and s.rr_cluster == t.rr_cluster]
        # 3b. HRRs connect down to RRCs in the same hrr_cluster (provided the
        # RRC has no rr_cluster set)
        down_links += [
            (s, t) for s in hrrs for t in rrcs
            if s.hrr_cluster is not None
            and s.hrr_cluster == t.hrr_cluster and t.rr_cluster is None
        ]

        # 4. RRCs
        # 4a. RRCs connect up to RRs in the same rr_cluster (regardless of
        # whether the RRC also has an hrr_cluster set)
        up_links += [(s, t) for s in rrcs for t in rrs
                     if s.rr_cluster is not None
                     and s.rr_cluster == t.rr_cluster]
        # 4b. RRCs connect up to HRRs in the same hrr_cluster (provided the RRC
        # has no rr_cluster set)
        up_links += [
            (s, t) for s in rrcs for t in hrrs
            if s.hrr_cluster is not None
            and s.hrr_cluster == t.hrr_cluster and s.rr_cluster is None
        ]

        # Remove self-links
        over_links = [(s, t) for s, t in over_links if s != t]
        up_links = [(s, t) for s, t in up_links if s != t]
        down_links = [(s, t) for s, t in down_links if s != t]

        g_bgp.add_edges_from(over_links, type='ibgp', direction='over')
        g_bgp.add_edges_from(up_links, type='ibgp', direction='up')
        g_bgp.add_edges_from(down_links, type='ibgp', direction='down')
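
To make the session classification above easier to follow, here is a minimal standalone sketch of the same over/up/down idea, using plain dicts in place of overlay nodes. The device names and cluster values are illustrative assumptions, and the HRR tier is omitted for brevity; this is not AutoNetkit code.

# Hedged sketch of the over/up/down session classification, with plain dicts
# standing in for overlay nodes. Names and clusters below are illustrative.
def classify_ibgp_sessions(devices):
    peers = [d for d in devices if d['role'] == 'Peer']
    rrs = [d for d in devices if d['role'] == 'RR']
    rrcs = [d for d in devices if d['role'] == 'RRC']

    # peers and RRs form a full mesh of 'over' sessions
    over = [(s, t) for s in peers + rrs for t in peers + rrs if s is not t]
    # RRs peer 'down' to clients in the same rr_cluster ...
    down = [(s, t) for s in rrs for t in rrcs
            if s['rr_cluster'] is not None
            and s['rr_cluster'] == t['rr_cluster']]
    # ... and clients peer back 'up' to those RRs
    up = [(t, s) for (s, t) in down]
    return over, up, down

devices = [
    {'name': 'rr1', 'role': 'RR', 'rr_cluster': 1},
    {'name': 'rr2', 'role': 'RR', 'rr_cluster': 1},
    {'name': 'rrc1', 'role': 'RRC', 'rr_cluster': 1},
    {'name': 'p1', 'role': 'Peer', 'rr_cluster': None},
]
over, up, down = classify_ibgp_sessions(devices)
# over: rr1<->rr2, rr1<->p1, rr2<->p1 (both directions)
# down: (rr1, rrc1), (rr2, rrc1); up: (rrc1, rr1), (rrc1, rr2)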
Example #55
0
File: ank.py Project: sk2/ANK-NG
def load_graphml(filename):
    import string
    import os
    import itertools  # used below when generating unique label suffixes
    """
    pickle_dir = os.getcwd() + os.sep + "cache"
    if not os.path.isdir(pickle_dir):
        os.mkdir(pickle_dir)

    path, file_with_ext = os.path.split(filename)
    file_name_only, extension = os.path.splitext(filename)

    pickle_file = "%s/%s.pickle" % (pickle_dir, file_name_only)
    if (os.path.isfile(pickle_file) and
        os.stat(filename).st_mtime < os.stat(pickle_file).st_mtime):
        # Pickle file exists, and source_file is older
        graph = nx.read_gpickle(pickle_file)
    else:
        # No pickle file, or is outdated
        try:
            graph = nx.read_graphml(filename)
        except IOError:
            print "Unable to read GraphML", filename
            return
        nx.write_gpickle(graph, pickle_file)
#TODO: node labels if not set, need to set from a sequence, ensure unique... etc
    """
    try:
        graph = nx.read_graphml(filename)
    except IOError:
        log.warning("Unable to read GraphML %s" % filename)
        return
    graph.graph['timestamp'] = os.stat(filename).st_mtime

    # remove selfloops
    graph.remove_edges_from(edge for edge in graph.selfloop_edges())

    letters_single = (c for c in string.lowercase)  # a, b, c, ... z
    letters_double = (
        "%s%s" % (a, b)
        for (a, b) in itertools.product(string.lowercase, string.lowercase)
    )  # aa, ab, ... zz
    letters = itertools.chain(
        letters_single, letters_double)  # a, b, c, .. z, aa, ab, ac, ... zz
    #TODO: need to get set of current labels, and only return if not in this set

    #TODO: add cloud, host, etc
    # prefixes for unlabelled devices, ie router -> r_a
    label_prefixes = {
        'router': 'r',
        'switch': 'sw',
        'server': 'se',
    }

    current_labels = set(graph.node[node].get("label")
                         for node in graph.nodes_iter())
    unique_label = (letter for letter in letters
                    if letter not in current_labels)

    # set our own defaults if not set
    #TODO: store these in config file
    ank_node_defaults = {'asn': 1, 'device_type': 'router'}
    node_defaults = graph.graph['node_default']
    for key, val in ank_node_defaults.items():
        if key not in node_defaults or node_defaults[key] == "None":
            node_defaults[key] = val

    for node in graph:
        for key, val in node_defaults.items():
            if key not in graph.node[node]:
                graph.node[node][key] = val

    # and ensure asn is integer, x and y are floats
    for node in graph:
        graph.node[node]['asn'] = int(graph.node[node]['asn'])
        try:
            x = float(graph.node[node]['x'])
        except KeyError:
            x = 0
        graph.node[node]['x'] = x
        try:
            y = float(graph.node[node]['y'])
        except KeyError:
            y = 0
        graph.node[node]['y'] = y
        try:
            graph.node[node]['label']
        except KeyError:
            device_type = graph.node[node]['device_type']
            graph.node[node]['label'] = "%s_%s" % (label_prefixes[device_type],
                                                   unique_label.next())

    ank_edge_defaults = {
        'type': 'physical',
    }
    edge_defaults = graph.graph['edge_default']
    for key, val in ank_edge_defaults.items():
        if key not in edge_defaults or edge_defaults[key] == "None":
            edge_defaults[key] = val

    for src, dst in graph.edges():
        for key, val in edge_defaults.items():
            if key not in graph[src][dst]:
                graph[src][dst][key] = val

    # allocate edge_ids
    for src, dst in graph.edges():
        graph[src][dst]['edge_id'] = "%s_%s" % (graph.node[src]['label'],
                                                graph.node[dst]['label'])


    # apply defaults, relabel nodes, other handling
    # TODO: split this into a separate module
    # relabel based on label: assume unique by now
    mapping = dict((n, d['label']) for n, d in graph.nodes(data=True))
    nx.relabel_nodes(graph, mapping, copy=False)
    return graph
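
A hedged usage sketch for load_graphml; the filename is hypothetical, and the assertions only illustrate the defaults applied above.

# Hedged usage sketch; "topology.graphml" is a hypothetical input file.
graph = load_graphml("topology.graphml")
if graph is not None:
    # after loading, every node carries at least asn, device_type, label, x, y
    for node, data in graph.nodes(data=True):
        assert isinstance(data['asn'], int)
        assert data['label']  # nodes are relabelled by their label attribute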
Example #56
0
def validate_ipv4(anm):
    #TODO: make this generic to also handle IPv6
    g_ipv4 = anm['ipv4']
    # interface IP uniqueness
    tests_passed = True

    # check globally unique ip addresses
    all_ints = [
        i for n in g_ipv4.nodes("is_l3device") for i in n.physical_interfaces
    ]
    all_int_ips = [i.ip_address for i in all_ints]

    if all_unique(all_int_ips):
        log.debug("All interface IPs globally unique")
    else:
        tests_passed = False
        duplicate_ips = set(duplicate_items(all_int_ips))
        duplicate_ints = [n for n in all_ints if n.ip_address in duplicate_ips]
        duplicates = ", ".join("%s: %s" % (i.node, i.ip_address)
                               for i in duplicate_ints)
        log.warning("Global duplicate IP addresses %s" % duplicates)

    for cd in g_ipv4.nodes("collision_domain"):
        log.debug("Verifying subnet and interface IPs for %s" % cd)
        neigh_ints = list(cd.neighbor_interfaces())
        neigh_int_subnets = [i.subnet for i in neigh_ints]
        if all_same(neigh_int_subnets):
            pass  # subnets match: nothing to log
        else:
            subnets = ", ".join("%s: %s" % (i.node, i.subnet)
                                for i in neigh_ints)
            tests_passed = False
            log.warning("Different subnets on %s. %s" % (cd, subnets))

        ip_subnet_mismatches = [
            i for i in neigh_ints if i.ip_address not in i.subnet
        ]
        if len(ip_subnet_mismatches):
            tests_passed = False
            mismatches = ", ".join("%s not in %s on %s" %
                                   (i.ip_address, i.subnet, i.node)
                                   for i in ip_subnet_mismatches)
            log.warning("Mismatched IP subnets for %s: %s" % (cd, mismatches))
        else:
            log.debug("All subnets match for %s" % cd)

        neigh_int_ips = [i.ip_address for i in neigh_ints]
        if all_unique(neigh_int_ips):
            log.debug("All interface IP addresses unique for %s" % cd)
        else:
            tests_passed = False
            duplicate_ips = set(duplicate_items(neigh_int_ips))
            duplicate_ints = [
                n for n in neigh_ints if n.ip_address in duplicate_ips
            ]
            duplicates = ", ".join("%s: %s" % (i.node, i.ip_address)
                                   for i in duplicate_ints)
            log.warning("Duplicate IP addresses on %s. %s" % (cd, duplicates))

    if tests_passed:
        log.info("All IPv4 tests passed.")
    else:
        log.warning("Some IPv4 tests failed.")

    return tests_passed
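
The helpers all_unique, all_same and duplicate_items are referenced above but defined elsewhere; a minimal sketch of the behaviour they are assumed to have:

from collections import Counter

# Assumed behaviour of the helper functions used by validate_ipv4 (sketch only).
def all_unique(items):
    items = list(items)
    return len(items) == len(set(items))  # no value occurs more than once

def all_same(items):
    return len(set(items)) <= 1  # every value equal (True for empty input)

def duplicate_items(items):
    # values that occur more than once, each reported once
    return [item for item, count in Counter(items).items() if count > 1]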
Example #57
0
def jsonify_anm_with_graphics(anm, nidb=None):
    """ Returns a dictionary of json-ified overlay graphs, with graphics data appended to each overlay"""
    from collections import defaultdict
    import math
    anm_json = {}
    test_anm_data = {}
    graphics_graph = anm["graphics"]._graph.copy()
    phy_graph = anm["phy"]._graph  # to access ASNs
    """simple layout of deps - more advanced layout could
    export to dot and import to omnigraffle, etc
    """
    g_deps = anm['_dependencies']
    nm_graph = g_deps._graph
    # build tree
    layers = defaultdict(list)
    nodes_by_layer = {}
    if len(nm_graph) > 0:
        topo_sort = nx.topological_sort(nm_graph)
        # trim out any nodes with no successors

        tree_root = topo_sort[0]
        # Note: topological order guarantees that when a node is reached, all
        # of its predecessors have already been assigned a level
        for node in topo_sort:
            preds = nm_graph.predecessors(node)
            if len(preds):
                pred_level = max(nm_graph.node[p].get('level') for p in preds)
            else:
                # a root node
                pred_level = -1  # this node becomes level 0
            level = pred_level + 1
            nm_graph.node[node]['level'] = level
            layers[level].append(node)

            data = nm_graph.node[node]
            data['y'] = 100 * data['level']
            data['device_type'] = "ank_internal"

        MIDPOINT = 50  # base x position to lay each layer out from
        for layer, nodes in layers.items():
            # TODO: since sort is stable, first sort by parent x (avoids
            # zig-zags)
            nodes = sorted(nodes,
                           reverse=True,
                           key=lambda x: nm_graph.degree(x))
            for index, node in enumerate(nodes):
                # TODO: work out why weird offset due to the math.pow *
                #node_x = MIDPOINT  + 125*index * math.pow(-1, index)
                node_x = MIDPOINT + 125 * index
                nm_graph.node[node]['x'] = node_x
                nodes_by_layer[node] = layer

    import random
    attribute_cache = defaultdict(dict)
    # the attributes to copy
    # TODO: check behaviour for None if explicitly set
    # TODO: need to check if attribute is set in overlay..... using API
    copy_attrs = ["x", "y", "asn", "label", "device_type", "device_subtype"]
    for node, in_data in phy_graph.nodes(data=True):
        out_data = {
            key: in_data.get(key)
            for key in copy_attrs if key in in_data
        }
        attribute_cache[node].update(out_data)

    # Update for graphics (over-rides phy)
    for node, in_data in graphics_graph.nodes(data=True):
        out_data = {
            key: in_data.get(key)
            for key in copy_attrs if key in in_data
        }
        attribute_cache[node].update(out_data)

    # append label from the overlay node's string representation
    for node in anm['phy']:
        attribute_cache[node.id]['label'] = str(node)

    overlay_ids = sorted(anm.overlays(),
                         key=lambda x: nodes_by_layer.get(x, 0))

    for overlay_id in overlay_ids:
        try:
            #make a shallow copy
            # input_graph = anm[overlay_id]._graph
            # nm_graph = shallow_copy_nx_graph(input_graph)
            nm_graph = anm[overlay_id]._graph.copy()
        except Exception, e:
            log.warning("Unable to copy overlay %s: %s", overlay_id, e)
            continue

        if overlay_id == "_dependencies":
            # convert to undirected for visual clarity
            nm_graph = nx.Graph(nm_graph)

        for node in nm_graph:
            node_data = dict(attribute_cache.get(node, {}))
            # update with node data from this overlay
            # TODO: check is not None won't clobber specifically set in
            # overlay...
            graph_node_data = nm_graph.node[node]
            overlay_node_data = {
                key: graph_node_data.get(key)
                for key in graph_node_data
            }
            node_data.update(overlay_node_data)

            # check for any non-set properties
            if node_data.get("x") is None:
                new_x = random.randint(0, 800)
                node_data['x'] = new_x
                # store for other graphs to use
                log.debug("Allocated random x %s to node %s in overlay %s" %
                          (new_x, node, overlay_id))
                attribute_cache[node]['x'] = new_x
            else:
                # cache for next time, such as vswitch in l2 for l2_bc
                attribute_cache[node]['x'] = node_data['x']
            if node_data.get("y") is None:
                new_y = random.randint(0, 800)
                node_data['y'] = new_y
                # store for other graphs to use
                attribute_cache[node]['y'] = new_y
                log.debug("Allocated random y %s to node %s in overlay %s" %
                          (new_y, node, overlay_id))
            else:
                attribute_cache[node]['y'] = node_data['y']

            # TODO: may want to re-introduce graphics to store cross-layer data for virtual nodes
            # and cache device type and device subtype
            # TODO: catch for each, if node not in cache
            try:
                attribute_cache[node]['device_type'] = node_data['device_type']
            except KeyError:
                pass  # not set
            try:
                attribute_cache[node]['device_subtype'] = node_data[
                    'device_subtype']
            except KeyError:
                pass  # not set

            if node_data.get("label") == node:
                # try from cache
                node_data['label'] = attribute_cache.get(node, {}).get("label")
            if node_data.get("label") is None:
                node_data['label'] = str(node)  # don't need to cache

            # store on graph
            nm_graph.node[node] = node_data

            try:
                del nm_graph.node[node]['id']
            except KeyError:
                pass

            if nidb:
                nidb_graph = nidb.raw_graph()
                if node in nidb:
                    DmNode_data = nidb_graph.node[node]
                    try:
                        # TODO: check why not all nodes have _ports initialised
                        overlay_interfaces = nm_graph.node[node]["_ports"]
                    except KeyError:
                        continue  # skip copying interface data for this node

                    for interface_id in overlay_interfaces.keys():
                        # TODO: use raw_interfaces here
                        try:
                            nidb_interface_id = DmNode_data['_ports'][
                                interface_id]['id']
                        except KeyError:
                            # TODO: check why arrive here - something not
                            # initialised?
                            continue
                        nm_graph.node[node]['_ports'][interface_id][
                            'id'] = nidb_interface_id
                        id_brief = shortened_interface(nidb_interface_id)
                        nm_graph.node[node]['_ports'][interface_id][
                            'id_brief'] = id_brief

        anm_json[overlay_id] = ank_json_dumps(nm_graph)
        test_anm_data[overlay_id] = nm_graph
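
A small standalone sketch of the level-assignment step above, assuming the networkx 1.x API used throughout this code: each node sits one level below the deepest of its predecessors.

import networkx as nx

# Toy dependency graph; the overlay names are illustrative only.
deps = nx.DiGraph([("input", "phy"), ("phy", "layer3"), ("phy", "bgp")])
for node in nx.topological_sort(deps):   # returns a list in networkx 1.x
    preds = deps.predecessors(node)      # also a list in networkx 1.x
    pred_level = max(deps.node[p]['level'] for p in preds) if preds else -1
    deps.node[node]['level'] = pred_level + 1
# resulting levels: input=0, phy=1, layer3=2, bgp=2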
Example #58
0
        graph.node[node]['label'] = 'none___%s' % index

    duplicates = [(k, v) for (k, v) in label_counts.items()
                  if k and len(v) > 1]
    for (label, nodes) in duplicates:
        for node in nodes:

            # TODO: need to check they don't all have same ASN... if so then warn

            try:
                graph.node[node]['label'] = '%s_%s' % (
                    graph.node[node]['label'], graph.node[node]['asn'])
            except KeyError:
                log.warning(
                    'Unable to set new label for duplicate node %s: %s' %
                    (node, graph.node[node].get('label')))

    boolean_attributes = set(k for (n, d) in graph.nodes(data=True)
                             for (k, v) in d.items() if isinstance(v, bool))

    for node in graph:
        for attr in boolean_attributes:
            if attr not in graph.node[node]:
                graph.node[node][attr] = False

    boolean_attributes = set(k for (n1, d1) in graph.edge.items()
                             for (n2, d2) in d1.items()
                             for (k, v) in d2.items() if isinstance(v, bool))
    for (n1, d1) in graph.edge.items():
        for (n2, d2) in d1.items():
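
label_counts is used above but defined outside this fragment; it is assumed to map each label to the list of nodes carrying it, roughly as in this sketch (which reuses the fragment's graph):

from collections import defaultdict

# Hedged sketch of how label_counts is assumed to be built for the graph above.
label_counts = defaultdict(list)
for node, data in graph.nodes(data=True):
    label_counts[data.get('label')].append(node)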
Example #59
0
def get_uuid(anm):
    try:
        return config.settings['Http Post']['uuid']
    except KeyError:
        log.warning('UUID not set, returning singleuser uuid')
        return 'singleuser'
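
A hedged sketch of the settings layout get_uuid expects; the section and key names come from the lookup above, while the value is purely illustrative.

# Illustrative only: populate the expected config section, then read it back.
config.settings['Http Post'] = {'uuid': 'demo-session-uuid'}
assert get_uuid(anm=None) == 'demo-session-uuid'  # anm is unused by get_uuid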
Example #60
0
def send(nidb, command, hosts, server="measure_client", threads=3):
    # netaddr IP addresses not JSON serializable
    hosts = [str(h) for h in hosts]
    log.debug("Measure: %s %s" % (hosts, command))

    pika_host = config.settings['Rabbitmq']['server']

    messaging = ank_messaging.AnkMessaging()

    try:
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=pika_host))
        channel = connection.channel()

        channel.exchange_declare(exchange='measure', type='direct')
    except pika.exceptions.AMQPConnectionError:
        log.warning(
            "Unable to connect to RabbitMQ on %s, exiting measurement" %
            pika_host)
        return

    data = {
        'command': command,
        "hosts": hosts,
        "threads": threads,
    }

    body = json.dumps(data)
    channel.basic_publish(exchange='measure', routing_key=server, body=body)
    #connection.close()

    hosts_received = set(hosts)

    # parsing function mappings
    parsing = {
        'vtysh -c "show ip route"': process_data.sh_ip_route,
        "traceroute": process_data.traceroute,
    }

    parse_result = []

    # wait for responses
    result = channel.queue_declare(exclusive=True)
    queue_name = result.method.queue
    channel.queue_bind(exchange='measure',
                       queue=queue_name,
                       routing_key="result")

    for method_frame, properties, body in channel.consume(queue_name):
        data = json.loads(body)
        completed = False
        for host, host_data in data.items():
            for command, command_result in host_data.items():
                command_result = command_result.replace("\\r\\n", "\n")
                if command in parsing:
                    log.info("%s %s" % (host, command))
                    parse_command = parsing[command]
                    host = process_data.reverse_tap_lookup(nidb, host)
                    parse_result = parse_command(host, nidb, command_result)
                    completed = True

                elif "traceroute" in command:
                    dst = command.split()[-1]  # last argument is the dst ip
                    src_host = process_data.reverse_tap_lookup(nidb, host)
                    dst_host = process_data.reverse_lookup(nidb, dst)
                    log.info("Trace from %s to %s" % (src_host, dst_host[1]))
                    parse_command = parsing["traceroute"]
                    log.info(command_result)
                    trace_result = parse_command(src_host, nidb,
                                                 command_result)
                    trace_result.insert(0, src_host)
                    log.info(trace_result)
                    parse_result.append(trace_result)
                    # TODO: compare objects directly rather than via str(),
                    # either here or in the anm object comparison (eg on label)
                    if str(trace_result[-1]) == str(dst_host[1]):
                        # TODO: make this use a custom ANK serializer function
                        trace_result = [str(t.id) for t in trace_result
                                        if t]  # make serializable
                        import autonetkit.ank_messaging
                        autonetkit.ank_messaging.highlight(
                            [], [], paths=[trace_result])
                    else:
                        log.info("Partial trace, not sending to webserver: %s",
                                 trace_result)
                elif "show ip ospf interface" in command:
                    print command_result
                    completed = True
                elif "conf t" in command:
                    print command_result
                    completed = True
                else:
                    print "No parser defined for command %s" % command
                    print "Raw output:"
                    print command_result
                    completed = True

        if host in hosts_received:
            hosts_received.remove(host)  # remove from list of waiting hosts

        if not len(hosts_received):
            completed = True

        if completed:
            break

    return parse_result
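
The measurement client that answers these requests is not shown here; a hypothetical minimal counterpart, reusing only the exchange and routing-key names from the code above, might look roughly like this:

import json
import pika

# Hypothetical counterpart to send(): consume commands, publish raw results.
# Only the 'measure' exchange and routing keys mirror the code above; the
# host and the result payload are assumptions.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='measure', type='direct')
queue = channel.queue_declare(exclusive=True).method.queue
channel.queue_bind(exchange='measure', queue=queue, routing_key='measure_client')

for method_frame, properties, body in channel.consume(queue):
    request = json.loads(body)
    # run request['command'] on each host here (omitted in this sketch)
    reply = dict((host, {request['command']: 'raw output'})
                 for host in request['hosts'])
    channel.basic_publish(exchange='measure', routing_key='result',
                          body=json.dumps(reply))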