def create_ixp_radix(basedata):
    """Build a radix tree mapping IXP peering-LAN prefixes to IXP names.

    Args:
        basedata: dict with an 'ixps' mapping of ixp_name -> entry, where
            each entry has a 'peeringlans' list of prefix strings.

    Returns:
        Radix tree whose nodes carry {'name': ixp_name} in node.data.
    """
    ixp_radix = Radix()
    # .items() instead of the Python 2-only .iteritems(); this file uses
    # Python 3 print() elsewhere, so iteritems() would raise AttributeError.
    for ixp_name, ixp_entry in basedata['ixps'].items():
        for prefix in ixp_entry['peeringlans']:
            node = ixp_radix.add(prefix)
            node.data['name'] = ixp_name
    return ixp_radix
Exemple #2
0
def create_ixp_radix(conf):
    """Return a radix tree of IXP peering-LAN prefixes tagged with IXP names."""
    tree = Radix()
    for entry in conf['ixps']:
        ixp_name = entry['name']
        for lan in entry['peeringlans']:
            tree.add(lan).data['name'] = ixp_name
    return tree
Exemple #3
0
def has_announced_bigger(asn_stable_pfx, pfx):
    """Return True if any prefix in asn_stable_pfx is covered by pfx."""
    tree = Radix()
    tree.add(pfx)
    return any(tree.search_covered(candidate) for candidate in asn_stable_pfx)
def create_ixp_radix(conf):
    """Build a Radix tree keyed by IXP peering LANs; node.data['name'] holds the IXP name."""
    radix_tree = Radix()
    for ixp_record in conf['ixps']:
        for peering_lan in ixp_record['peeringlans']:
            rnode = radix_tree.add(peering_lan)
            rnode.data['name'] = ixp_record['name']
    return radix_tree
Exemple #5
0
 def radix_tree(self) -> Radix:
     """Return a radix tree (from the py-radix library) for fast IP-IX lookups."""
     tree = Radix()
     for entry in self.objects:
         for pfx in entry.prefixes:
             tree.add(pfx.prefix).data["ix"] = entry.ix
     return tree
def load_bgp_file(ymd):
    """Load the BGP dump for date ymd into a radix tree.

    Locates the configured BGP file (falling back to a same-date file with a
    different hour count), then reads "prefix origin-AS" lines from the
    gzipped file.

    Returns:
        Radix tree; each node's data['origin'] is the set of origin ASes
        seen for that prefix.
    """
    bgp_fn = c.bgp_fn(ymd)  # Find suitable BGP file
    print("bgp_fn = %s" % bgp_fn)
    bfa = bgp_fn.split('.')
    print("bfa = %s" % bfa)
    bfa_files = glob.glob(bfa[0] + '*')
    print("bfa_files %s" % bfa_files)

    for fn in bfa_files:
        print("  %s" % fn)
        # startswith() instead of .index() == 0: same test, and it cannot
        # raise ValueError if the prefix is somehow absent.
        if fn.startswith(bfa[0]):
            print("    date matches")
            fna = fn.split('.')
            print("fna %s" % fna)
            if bfa[1] == fna[1]:
                print("    exact match - use it")
                break
            else:
                print("    Different number of hours, OK")
                bgp_fn = fn
    print("    using file %s <<<" % bgp_fn)
    n = 0
    rtree = Radix()  # Load the BGP file into rtree
    with gzip.open(bgp_fn, 'r') as zif:
        for ln in zif:
            n += 1
            line = ln.decode("utf-8", "replace")
            la = line.strip().split()
            if len(la) != 2:  # BGP record had no Origin AS !
                print("line len != 2 >%s<" % la)
                # Bug fix: previously fell through and reprocessed the stale
                # pfx from the previous iteration (NameError on first line).
                continue
            pfx = str(la[0])
            origin = str(la[1])

            try:
                rnode = rtree.search_exact(pfx)
                if rnode:
                    rnode.data['origin'].add(origin)
                else:
                    rnode = rtree.add(pfx)
                    rnode.data['origin'] = set([origin])
            except ValueError:  # narrowed from bare except: malformed prefix only
                print("search_exact failed for pfx = %s" % pfx)
            if n % 20000 == 0:
                print(".", end='')
                sys.stdout.flush()

    sys.stderr.write("finished loading BGP data\n")
    sys.stderr.write("Loaded %d BGP lines" % n)
    return rtree
Exemple #7
0
    def __init__(self):
        """Create a BGP protocol.

        Initialises FSM/peer bookkeeping, per-AFI/SAFI adjacency RIBs,
        and the send/receive statistics counters.
        """
        self.fsm = None
        self.peer_id = None

        self.disconnected = False
        self._receive_buffer = b''  # raw bytes not yet parsed into messages
        # presumably flipped when the 4-byte-AS capability is negotiated — confirm
        self.fourbytesas = False
        self.add_path_ipv4_receive = False
        self.add_path_ipv4_send = False
        # one adj-RIB-in/out dict per configured AFI/SAFI
        self.adj_rib_in = {k: {} for k in CONF.bgp.afi_safi}
        self.adj_rib_out = {k: {} for k in CONF.bgp.afi_safi}
        # radix tree for prefix lookups into the IPv4 adj-RIB-in
        self.adj_rib_in_ipv4_tree = Radix()

        # statistic
        self.msg_sent_stat = {
            'Opens': 0,
            'Notifications': 0,
            'Updates': 0,
            'Keepalives': 0,
            'RouteRefresh': 0
        }
        self.msg_recv_stat = {
            'Opens': 0,
            'Notifications': 0,
            'Updates': 0,
            'Keepalives': 0,
            'RouteRefresh': 0
        }
        # per-family version counters, bumped on send/receive
        self.send_version = {
            'ipv4': 0,
            'flowspec': 0,
            'sr_policy': 0,
            'mpls_vpn': 0
        }
        self.receive_version = {
            'ipv4': 0,
            'flowspec': 0,
            'sr_policy': 0,
            'mpls_vpn': 0
        }

        self.flowspec_send_dict = {}
        self.flowspec_receive_dict = {}
        self.sr_send_dict = {}
        self.sr_receive_dict = {}
        self.mpls_vpn_send_dict = {}
        self.mpls_vpn_receive_dict = {}
    def __init__(self, fpath_geoip, fpath_known_networks):
        """Load known networks into a radix tree and open the GeoIP database."""
        # load known networks into radix tree
        self.rt = Radix()
        with open(fpath_known_networks, 'rb') as known_net_file:
            for net in json.load(known_net_file):
                # Only keep prefixes that we know their country
                if 'country' in net:
                    self.rt.add(net['net']).data['cc'] = net['country']

        # load geoIP data
        self.gi = GeoIP.open(fpath_geoip, GeoIP.GEOIP_STANDARD)
Exemple #9
0
def init_result():
    '''
    initialises the structure that hold data for collector peers to be compared
    '''
    result = {}
    for peer_id in (1, 2):
        result[peer_id] = {
            'radix': Radix(),           # radix tree per peer
            'path_len_cnt': Counter(),  # AS-path length counter
            'path_asn_cnt': Counter(),  # distinct-ASN counter (differs from
                                        # path length because of prepending)
            'asn_xpending': 0,          # inpending/prepending (path_len != asn_count)
        }
    return result
class Delegated:
    """Radix tree plus ASN dict populated from an RIR 'delegated' file."""

    def __init__(self):
        # initialize a radix tree
        self.rtree = Radix()
        # initialize an asn dict
        self.asn = {}

    def _asnfunc(self, item):
        """Record one ASN record, keyed by its 'prefix' field."""
        self.asn[item['prefix']] = item

    def _ipfunc(self, item):
        """Insert one IP record into the radix tree under 'prefix/len'."""
        key = '%s/%s' % (item['prefix'], item['len'])

        try:
            # set the radix instances up keyed on prefix/len.
            rn = self.rtree.add(key)
        except Exception as e:
            sys.stderr.write("Delegated.delegatedRadix().rtree.add(%s): %s\n" %
                             (key, str(e)))
            sys.exit(1)

        # copy the record's fields onto the node (node data is a plain dict)
        for k, v in item.items():
            rn.data[k] = v
        # and add a counter
        rn.data['cnt'] = 0

    def delegatedRadix(self, fd):
        '''
        given a file, read it as a delegated file into the radix tree.
        '''
        # NOTE(review): removed an unused local "import sys" that was here;
        # _ipfunc relies on the module-level sys import either way.
        _delegatedSwitcher(
            self._asnfunc, self._ipfunc,
            _delegatedFieldSelector([
                'type', 'prefix', 'len', 'cc', 'orgid', 'date', 'rir',
                'source', 'status'
            ], _readDelegated(fd)))
Exemple #11
0
    def __init__(self):
        """Set up the reserved- and routable-prefix radix trees (IPv4)."""
        # Patricia trie for reserved prefixes ipv4
        self.__reserved_tree_ipv4 = Radix()
        reserved_v4 = (
            "0.0.0.0/8", "1.1.1.0/24", "10.0.0.0/8", "100.64.0.0/10",
            "127.0.0.0/8", "169.254.0.0/16", "172.16.0.0/12", "192.0.0.0/24",
            "192.0.2.0/24", "192.88.99.0/24", "192.168.0.0/16",
            "198.18.0.0/15", "198.51.100.0/24", "203.0.113.0/24",
            "224.0.0.0/4", "240.0.0.0/4", "255.255.255.255/32",
        )
        for cidr in reserved_v4:
            self.__reserved_tree_ipv4.add(cidr)

        # routable address space
        self.__routable_tree_ipv4 = Radix()
class AdjRibPostPolicy(RIB):
    """Adj-RIB-Post-Policy: stores policy-processed routes in a radix tree."""

    def __init__(self):
        self.radixRib = Radix()
        self.policy = PolicyHandler()

    def process_adjInRib(self, node_hash, redisClient):
        """Rebuild this RIB from the node's adjInRib stored in redis.

        Each route is run through the policy handler, then its path dict
        (keyed by path hash) is stored on the radix node for the prefix.
        The previously duplicated if/else bodies are deduplicated here.
        """
        # Got an event for the node, obtain the current RIB from redis based off node hash
        adjInRib = ast.literal_eval(redisClient.hget(node_hash, 'adjInRib'))

        if not adjInRib:
            return

        for route in adjInRib:
            route = self.policy.process_route(route)
            prefix = str(route['prefix'])
            ribnode = self.radixRib.search_exact(prefix)
            is_new = ribnode is None
            if is_new:
                # Particular prefix/prefix-len does not exist in tree, create the node
                ribnode = self.radixRib.add(prefix)

            # Keep only the path information (path hash as the key)
            for key in ('prefix', 'prefix_len'):
                del route[key]

            if not is_new:
                # At this stage actions aren't going to reach this RIB, so
                # empty out the existing path list before updating it.
                ribnode.data.clear()
            ribnode.data.update(route['paths'])
Exemple #13
0
def test_fill_ro_struct():
    """fill_ro_struct loads the ro_file fixture into the expected nodes."""
    ro_path = os.path.join(PATH, "conflict_annotation", "inputs", "ro_file")
    tree = Radix()
    fill_ro_struct(ro_path, tree)
    observed = [(node.prefix, node.data) for node in tree]
    assert observed == [
        ('60.136.0.0/16', {17676: {'jpnic'}}),
        ('60.145.0.0/16', {17676: {'jpnic'}}),
        ('60.145.0.0/24', {17676: {'jpnic'}}),
        ('192.0.0.0/16', {17: {'jpnic'}}),
    ]
Exemple #14
0
def test_fill_roa_struct():
    """fill_roa_struct loads the roa_file fixture into the expected nodes."""
    roa_path = os.path.join(PATH, "conflict_annotation", "inputs", "roa_file")
    tree = Radix()
    fill_roa_struct(roa_path, tree)
    observed = [(node.prefix, node.data) for node in tree]
    assert observed == [
        ('86.63.224.0/19', {35238: 19}),
        ('91.91.0.0/16', {35238: 16}),
        ('192.113.0.0/16', {16074: 16}),
        ('194.209.159.0/24', {16079: 32}),
        ('212.234.194.0/24', {16071: 28, 16072: 24}),
    ]
Exemple #15
0
def test_annotate_if_valid_both():
    """annotate_if_route_objects marks both announce and conflict as valid."""
    ro_path = os.path.join(PATH, "conflict_annotation", "inputs", "ro_file")
    ro_rad_tree = Radix()
    fill_ro_struct(ro_path, ro_rad_tree)
    input_dict = {
        "timestamp": 1445817600.0,
        "collector": "rrc01",
        "peer_as": 13030,
        "peer_ip": "195.66.224.175",
        "type": "F",
        "announce": {
            "prefix": "60.145.0.0/28",
            "asn": 17676,
            "as_path": "13030 3491 4651 9737 23969"
        },
        "conflict_with": {
            "prefix": "192.0.0.0",
            "asn": 17
        },
        "asn": 17676
    }
    # Expected output is the input plus a "valid" annotation on both the
    # announce and the conflicting route (built before the in-place mutation).
    expected = {
        **input_dict,
        "announce": {**input_dict["announce"], "valid": ['jpnic']},
        "conflict_with": {**input_dict["conflict_with"], "valid": ['jpnic']},
    }
    annotate_if_route_objects(ro_rad_tree, input_dict)
    assert input_dict == expected
Exemple #16
0
def test_annotate_if_valid_rpki_ok1():
    """annotate_if_roa marks both announce and conflict as ROA-valid."""
    roa_path = os.path.join(PATH, "conflict_annotation", "inputs", "roa_file")
    rpki_rad_tree = Radix()
    fill_roa_struct(roa_path, rpki_rad_tree)
    input_dict = {
        "timestamp": 1445817600.0,
        "collector": "rrc01",
        "peer_as": 13030,
        "peer_ip": "195.66.224.175",
        "type": "F",
        "announce": {
            "prefix": "212.234.194.0/24",
            "asn": 16071,
            "as_path": "13030 3491 4651 9737 23969"
        },
        "conflict_with": {
            "prefix": "212.234.194.0/24",
            "asn": 16072
        },
        "asn": 16072
    }
    # Expected output is the input plus a "valid" annotation on both the
    # announce and the conflicting route (built before the in-place mutation).
    expected = {
        **input_dict,
        "announce": {**input_dict["announce"], "valid": ["roa"]},
        "conflict_with": {**input_dict["conflict_with"], "valid": ["roa"]},
    }
    annotate_if_roa(rpki_rad_tree, input_dict)
    assert input_dict == expected
Exemple #17
0
    def _load_banned_networks(self, list_file):
        """Populate self._banned_networks from a file of CIDR lines.

        Leaves it as None (feature disabled) when no file is given or
        py-radix is not importable; blank lines and '#' comments are skipped.
        """
        self._banned_networks = None
        if not list_file:
            return

        try:
            from radix import Radix
        except ImportError:
            self.log.warning(
                "py-radix is not installed, can't ban networks",
            )
            return

        try:
            with open(list_file, "r") as fh:
                self._banned_networks = Radix()
                count = 0
                for raw in fh:
                    entry = raw.strip()
                    if not entry or entry.startswith("#"):
                        continue
                    try:
                        self._banned_networks.add(entry)
                        count += 1
                    except (TypeError, ValueError):
                        self.log.warning(
                            "Invalid banned network: %s", entry,
                        )

            self.log.info("Loaded %s banned networks", count)
        except Exception as e:
            self.log.warning(
                "Failed to load banned network list: %s", e,
            )
 def __init__(self):
     # radix tree holding this RIB's routes, keyed by prefix
     self.radixRib = Radix()
Exemple #19
0
    print_naked_characteristics(r, missing1_naked, missing2_naked)
    print_path_len_stats(r, len(pfxset1), len(pfxset2))
    print_up_path_similarities(r, overlap)


# External bgpdump binary and report cut-off for AS-path lengths.
CMD_BGPDUMP = "/Users/emile/bin/bgpdump"
MAX_REPORTED_PATH_LEN = 6

if __name__ == '__main__':
    main()

# NOTE(review): unconditional module-level exit — nothing below ever runs.
sys.exit(0)

### OLD
peers = Counter()
tree = {1: Radix(), 2: Radix()}
meta = {}
for who in (1, 2):
    meta[who] = {}
    meta[who]['age'] = Counter()
meta[1]['asn'] = ASN1
meta[2]['asn'] = ASN2


def aap():
    # NOTE(review): dead legacy code (unreachable after the sys.exit above);
    # references globals `fields` and `who` that are not defined in this file.
    last_change_ts = int(fields[1])
    ts_5m = (last_change_ts / 300) * 300
    pfx = fields[5]
    asn = fields[6]
    meta[who]['age'][ts_5m] += 1
    asns = asn.split(" ")
Exemple #20
0
from collections import defaultdict, Counter
import re
import GeoIP
import itertools
from networkx.readwrite import json_graph
import random
import os
import datetime
from radix import Radix
import math


# Module-level state shared by the traceroute/graph helpers below.
probe_addr = dict()  # presumably probe id -> address — confirm against callers
g = GeoIP.open("data/GeoIPASNum.dat", GeoIP.GEOIP_STANDARD)  # ASN GeoIP db
remapped_addr = {}
rt = Radix()  # radix tree of known networks
net_idx = {}
known_as = {}


def dpath(fname):
    # Join fname onto the configured data directory; `args` is a module-level
    # namespace defined elsewhere (presumably argparse) — confirm.
    return os.path.join(args.datadir, fname)


def name_hop(node, probe_id, seq):
    if node == "unknown_hop":
        return "Hop %s-%s" % (probe_id, seq)

    try:
        if ipaddr.IPv4Address(node).is_private:
            return "Private %s-%s" % (probe_id, seq)
Exemple #21
0
class Prefixes(object):
    """IPv4 prefix classification: reserved vs. routable address space."""

    def __init__(self):
        # Patricia trie for reserved prefixes ipv4
        self.__reserved_tree_ipv4 = Radix()
        for cidr in ("0.0.0.0/8", "1.1.1.0/24", "10.0.0.0/8",
                     "100.64.0.0/10", "127.0.0.0/8", "169.254.0.0/16",
                     "172.16.0.0/12", "192.0.0.0/24", "192.0.2.0/24",
                     "192.88.99.0/24", "192.168.0.0/16", "198.18.0.0/15",
                     "198.51.100.0/24", "203.0.113.0/24", "224.0.0.0/4",
                     "240.0.0.0/4", "255.255.255.255/32"):
            self.__reserved_tree_ipv4.add(cidr)

        # routable address space, filled via add_routable_prefix()
        self.__routable_tree_ipv4 = Radix()

    def check_prefix_is_reserved(self, prefix):
        """True if prefix falls inside any reserved range."""
        return self.__reserved_tree_ipv4.search_best(prefix) is not None

    def check_prefix_is_routable(self, prefix):
        """True if prefix falls inside known routable space."""
        return self.__routable_tree_ipv4.search_best(prefix) is not None

    def add_routable_prefix(self, prefix, mask):
        """Register prefix/mask as routable; ignores non-dotted-quad input."""
        if len(prefix.split('.')) != 4:
            return
        self.__routable_tree_ipv4.add(prefix + '/' + mask)
Exemple #22
0
 def __init__(self):
     # radix tree of prefixes plus an interned-peer dictionary
     self.radix = Radix()
     self.peers = dict()
 def __init__(self):
     # radix tree RIB plus the policy engine applied to incoming routes
     self.radixRib = Radix()
     self.policy = PolicyHandler()
Exemple #24
0
class EmulatedRIB(object):
    """Emulated RIB using a Radix object."""

    def __init__(self):
        self.radix = Radix()
        self.peers = dict()

    def update(self, prefix, peer, value):
        """Update the information stored concerning a specific prefix."""
        # Intern the peer object so all nodes share one key per peer.
        peer_sym = self.peers.get(peer)
        if peer_sym is None:
            peer_sym = self.peers[peer] = peer
        node = self.radix.add(prefix)
        node.data[peer_sym] = value
        return node

    def lookup(self, prefix, peer):
        """Return the value stored for (prefix, peer), or None."""
        peer_sym = self.peers.get(peer)
        if peer_sym is None:
            return None
        node = self.radix.search_exact(prefix)
        return node.data.get(peer_sym) if node is not None else None

    def pop(self, prefix, peer):
        """Remove and return the value for (prefix, peer); drop empty nodes."""
        node = self.radix.search_exact(prefix)
        if node is None:
            return None
        val = node.data.pop(peer, None)
        if not node.data:
            self.radix.delete(prefix)
        return val

    def delete(self, prefix):
        """Remove the node for prefix from the tree."""
        return self.radix.delete(prefix)

    def search_all_containing(self, prefix):
        """All nodes whose prefix covers the given prefix."""
        return self.radix.search_covering(prefix) or []

    def search_all_contained(self, prefix):
        """All nodes whose prefix is covered by the given prefix."""
        return self.radix.search_covered(prefix) or []

    def search_exact(self, prefix):
        return self.radix.search_exact(prefix)

    def nodes(self):
        return self.radix.nodes()

    def prefixes(self):
        return self.radix.prefixes()
                    .format(size, hex(version_ihl), identifier, hex(protocol),
                            socket.inet_ntoa(src), socket.inet_ntoa(dst),
                            match))

                if protocol is 0x01:  # ICMP protocol
                    icmp_parse(packet[20:])

                os.write(fd, packet)
            elif event & select.EPOLLOUT:
                print("EPOLLOUT")
                packet = os.read(fineno, 1024)
                print(len(packet), packet)


if __name__ == '__main__':
    # Build the routing table, allocate the TUN device, and run the loop.
    rtree = Radix()
    if os.geteuid() != 0:
        print("Need root privileges.")
        exit(0)

    ftun, mtu = tun_alloc(TUN_IFNAME.encode())
    with open(IP_FILE) as f:
        route_prepare(rtree, f.readlines())
    try:
        monitor_prepare(TUN_IFNAME)
        loop(ftun, mtu, rtree)
    except Exception:
        print(traceback.format_exc())
    finally:
        # Always undo interface/monitor setup, even on a crash.
        clean_prepare(TUN_IFNAME)
 def __init__(self):
     """Set up an empty radix tree and ASN dict."""
     # initialize a radix tree
     self.rtree = Radix()
     # initialize an asn dict
     self.asn = {}
Exemple #27
0
class BannedNetworks:
    """Decide whether hosts are banned, by domain suffix or by IP network.

    Verdicts are cached in _banned_hosts; positive entries expire after
    cache_seconds, failed resolutions after neg_cache_seconds.
    """

    def __init__(
        self,
        list_file,
        banned_domains,
        cache_seconds=60*60*24,
        neg_cache_seconds=60,
    ):
        self.log = logging.getLogger(self.__class__.__name__)
        self._banned_hosts = dict()  # host -> cached ban verdict (bool)
        self._load_banned_networks(list_file)
        self.banned_domains = banned_domains
        self.cache_seconds = cache_seconds
        self.neg_cache_seconds = neg_cache_seconds

    @asyncio.coroutine
    def is_banned_network(self, host, loop):
        """Resolve host and check each address against the banned networks.

        Returns True/False, or None when resolution fails (the failure is
        cached as False for neg_cache_seconds).
        """
        try:
            addresses = yield from loop.getaddrinfo(
                host, None, proto=IPPROTO_TCP,
            )
        except gaierror as e:
            self.log.warning("Failed to resolve %s: %s", host, e)
            self._banned_hosts[host] = False
            loop.call_later(
                self.neg_cache_seconds,
                lambda: self._banned_hosts.pop(host, None),
            )
            return None

        is_banned = False
        for _, _, _, _, (ip, *remaining) in addresses:
            # search_best: longest-prefix match in the banned-network tree
            is_banned = bool(self._banned_networks.search_best(ip))
            if is_banned:
                break

        return is_banned

    def is_banned_domain(self, host):
        """True if host equals or is a subdomain of any banned domain."""
        for domain in self.banned_domains:
            if host == domain or host.endswith("." + domain):
                return True

        return False

    @asyncio.coroutine
    def is_banned(self, host):
        """Main entry point: is this host banned by domain or network?"""
        if not self._banned_networks:
            return False

        host = host.lower()
        cached_result = self._banned_hosts.get(host, None)
        if cached_result is not None:
            return cached_result

        loop = asyncio.get_event_loop()
        # Cheap domain check first; only resolve when that doesn't match.
        is_banned = self.is_banned_domain(host)
        if not is_banned:
            is_banned = yield from self.is_banned_network(host, loop)

        if is_banned is None:
            # Failed to resolve
            return False

        self._banned_hosts[host] = is_banned
        loop.call_later(
            self.cache_seconds,
            lambda: self._banned_hosts.pop(host, None),
        )
        return is_banned

    def _load_banned_networks(self, list_file):
        """Load CIDR entries from list_file into a radix tree.

        Leaves self._banned_networks as None (feature disabled) when no
        file is given or py-radix is not importable.
        """
        self._banned_networks = None
        if not list_file:
            return

        try:
            from radix import Radix
        except ImportError:
            self.log.warning(
                "py-radix is not installed, can't ban networks",
            )
            return

        try:
            with open(list_file, "r") as f:
                self._banned_networks = Radix()
                networks = 0
                for l in f:
                    l = l.strip()
                    if not l or l.startswith("#"):
                        continue

                    try:
                        self._banned_networks.add(l)
                    except (TypeError, ValueError) as e:
                        self.log.warning(
                            "Invalid banned network: %s", l,
                        )
                    else:
                        networks += 1

            self.log.info("Loaded %s banned networks", networks)
        except Exception as e:
            self.log.warning(
                "Failed to load banned network list: %s", e,
            )
Exemple #28
0
 def __init__(self):
     # radix tree of prefixes plus an interned-peer dictionary
     self.radix = Radix()
     self.peers = dict()
Exemple #29
0
# Benchmark harness: run each sorting implementation on a copy of the same
# array and print the elapsed time each records in .time_taken.
# NOTE(review): `arr` and the insertion-sort `obj` are defined above this
# excerpt; `Radix` here is a radix-SORT class from a local module, not py-radix.
print("Insertion: \t{}s".format(obj.time_taken))

arr_copy = arr.copy()
from merge import MergeSort
obj = MergeSort(arr_copy)
obj.sort()
print("Merge Sort: \t{}s".format(obj.time_taken))

arr_copy = arr.copy()
from heap import HeapSort
obj = HeapSort(arr_copy)
obj.sort()
print("Heap Sort: \t{}s".format(obj.time_taken))

arr_copy = arr.copy()
from bst import Bst
obj = Bst(arr_copy)
obj.sort()
print("BST Sort: \t{}s".format(obj.time_taken))

arr_copy = arr.copy()
from avl import AVL
obj = AVL(arr_copy)
obj.sort()
print("AVL Sort: \t{}s".format(obj.time_taken))

arr_copy = arr.copy()
from radix import Radix
obj = Radix(arr_copy)
obj.sort()
print("Radix Sort: \t{}s".format(obj.time_taken))
Exemple #30
0
class EmulatedRIB(object):
    """Emulated RIB using a Radix object."""

    def __init__(self):
        self.radix = Radix()
        self.peers = dict()

    def update(self, prefix, peer, value):
        """Update the information stored concerning a specific prefix."""
        peer_sym = self.peers.get(peer)
        if peer_sym is None:
            # first time we see this peer: intern it
            peer_sym = self.peers[peer] = peer
        rnode = self.radix.add(prefix)
        rnode.data[peer_sym] = value
        return rnode

    def lookup(self, prefix, peer):
        """Return the value stored for (prefix, peer), or None."""
        peer_sym = self.peers.get(peer)
        if peer_sym is None:
            return None
        rnode = self.radix.search_exact(prefix)
        if rnode is None:
            return None
        return rnode.data.get(peer_sym)

    def pop(self, prefix, peer):
        """Remove and return the value for (prefix, peer); drop empty nodes."""
        rnode = self.radix.search_exact(prefix)
        if rnode is None:
            return None
        value = rnode.data.pop(peer, None)
        if not rnode.data:
            self.radix.delete(prefix)
        return value

    def delete(self, prefix):
        """Remove the node for prefix from the tree."""
        return self.radix.delete(prefix)

    def search_all_containing(self, prefix):
        """All nodes whose prefix covers the given prefix."""
        return self.radix.search_covering(prefix) or []

    def search_all_contained(self, prefix):
        """All nodes whose prefix is covered by the given prefix."""
        return self.radix.search_covered(prefix) or []

    def search_exact(self, prefix):
        return self.radix.search_exact(prefix)

    def nodes(self):
        return self.radix.nodes()

    def prefixes(self):
        return self.radix.prefixes()
 def radix_tree(self):
     """Build a radix tree mapping each prefix in self._data to its origins."""
     tree = Radix()
     for pfx, origin_set in self._data:
         node = tree.add(pfx)
         node.data["origins"] = origin_set
     return tree
class LocalRib(RIB):
    """Local RIB: routes from adj-RIB-in post-policy after path selection."""

    def __init__(self):
        self.radixRib = Radix()
        self.pathselection = PathSelection()

    @staticmethod
    def _localrib_attrs(route, peers):
        """Build node attributes (nexthop/event/family/admin_distance) from
        a route's paths; iBGP peers (local_asn == remote_asn) get admin
        distance 200, eBGP peers 20. Factored out of the formerly duplicated
        if/else bodies in process_adjInRibPP."""
        attrs = {}
        for path in route['paths']:
            path_info = route['paths'][path]
            peer_hash = path_info['peer_hash']

            attrs['nexthop'] = path_info['nexthop']
            attrs['event'] = path_info['action']
            attrs['family'] = route['family']

            if peer_hash in peers:
                logger.debug(peers[peer_hash]['local_asn'])
                logger.debug(peers[peer_hash]['remote_asn'])

                if peers[peer_hash]['local_asn'] == peers[peer_hash]['remote_asn']:
                    attrs['admin_distance'] = 200
                else:
                    attrs['admin_distance'] = 20
        return attrs

    def process_adjInRibPP(self, node_hash, redisClient):
        """Rebuild the local RIB from the node's adjInRibPP stored in redis."""
        # Got an event for the node, obtain the current RIB from redis based off node hash
        adjInRibPP = ast.literal_eval(redisClient.hget(node_hash,
                                                       'adjInRibPP'))
        # Fetch the peers from Redis to determine local_asn vs remote_asn
        peers = ast.literal_eval(redisClient.hget(node_hash, 'peers'))

        if not adjInRibPP:
            return

        for route in adjInRibPP:
            route = self.pathselection.process_route(route)
            prefix = str(route['prefix'])
            ribnode = self.radixRib.search_exact(prefix)
            is_new = ribnode is None
            if is_new:
                # Particular prefix/prefix-len does not exist in tree, create the node
                ribnode = self.radixRib.add(prefix)

            # Keep only the path information
            for key in ('prefix', 'prefix_len'):
                del route[key]

            localrib_attr = self._localrib_attrs(route, peers)

            if not is_new:
                # Actions do not reach this RIB. Flush the existing dictionary
                # before updating it (especially for the delete action).
                ribnode.data.clear()
            ribnode.data.update(localrib_attr)
Exemple #33
0
class BGP(protocol.Protocol):
    """Protocol class for BGP 4"""
    def __init__(self):
        """Set up per-connection BGP state: FSM hooks, RIB tables and counters."""
        self.fsm = None
        self.peer_id = None

        self.disconnected = False
        self._receive_buffer = b''
        self.fourbytesas = False
        self.add_path_ipv4_receive = False
        self.add_path_ipv4_send = False
        # one adj-RIB-in / adj-RIB-out table per configured address family
        self.adj_rib_in = {family: {} for family in CONF.bgp.afi_safi}
        self.adj_rib_out = {family: {} for family in CONF.bgp.afi_safi}
        self.adj_rib_in_ipv4_tree = Radix()

        # statistic: per-message-type sent/received counters
        message_kinds = ('Opens', 'Notifications', 'Updates', 'Keepalives',
                         'RouteRefresh')
        self.msg_sent_stat = {kind: 0 for kind in message_kinds}
        self.msg_recv_stat = {kind: 0 for kind in message_kinds}

        # version counters, bumped whenever the matching table changes
        tracked_families = ('ipv4', 'flowspec', 'sr_policy', 'mpls_vpn')
        self.send_version = {family: 0 for family in tracked_families}
        self.receive_version = {family: 0 for family in tracked_families}

        # per-family announce/withdraw tracking tables (key -> attributes)
        self.flowspec_send_dict = {}
        self.flowspec_receive_dict = {}
        self.sr_send_dict = {}
        self.sr_receive_dict = {}
        self.mpls_vpn_send_dict = {}
        self.mpls_vpn_receive_dict = {}

    @property
    def handler(self):
        """Message handler owned by the factory (looked up lazily)."""
        # this is due to self.factory is assigned at runtime
        return self.factory.handler

    def init_rib(self):
        """Reset both adjacency RIBs to empty per-family tables."""
        self.adj_rib_in = {family: {} for family in CONF.bgp.afi_safi}
        self.adj_rib_out = {family: {} for family in CONF.bgp.afi_safi}

    def connectionMade(self):
        """
        Starts the initial negotiation of the protocol
        """
        self.init_rib()
        # Set transport socket options
        self.transport.setTcpNoDelay(True)
        # set tcp option if you want
        #  self.transport.getHandle().setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG, md5sig)

        LOG.info("[%s]TCP Connection established", self.factory.peer_addr)

        # Set the local BGP id from the local IP address if it's not set
        if self.factory.bgp_id is None:
            try:
                local_host_addr = netaddr.IPAddress(
                    self.transport.getHost().host)
                # NOTE(review): for an IPv6 local address the 32-bit BGP
                # identifier is faked from 127.0.0.1 -- confirm intentional
                if 'IPv6' in local_host_addr.info.__iter__():
                    self.factory.bgp_id = int(netaddr.IPAddress('127.0.0.1'))
                else:
                    self.factory.bgp_id = int(local_host_addr)
            except Exception as e:
                LOG.error(e)
                error_str = traceback.format_exc()
                LOG.debug(error_str)
                # fall back to a fixed identifier rather than fail the session
                self.factory.bgp_id = int(netaddr.IPAddress('127.0.0.1'))
        try:
            # tell the finite state machine that the TCP session is up
            self.fsm.connection_made()
        except Exception as e:
            LOG.error(e)
            error_str = traceback.format_exc()
            LOG.debug(error_str)

    def connectionLost(self, reason):
        """Called when the associated connection was lost.
        :param reason: the reason of lost connection.
        """
        LOG.debug('Called connectionLost')
        # drop all routes learned/advertised over this session
        self.init_rib()
        self.handler.on_connection_lost(self)

        # Don't do anything if we closed the connection explicitly ourselves
        if self.disconnected:
            self.factory.connection_closed(self)
            LOG.info('Connection lost return and do nothing')
            return

        LOG.info("[%s]Connection lost:%s", self.factory.peer_addr,
                 reason.getErrorMessage())

        try:
            # tell FSM that TCP connection is lost.
            self.fsm.connection_failed()
        except Exception as e:
            LOG.error(e)
            error_str = traceback.format_exc()
            LOG.debug(error_str)

    def dataReceived(self, data):
        """Buffer incoming TCP data and drain every complete BGP message.

        :param data: the data received from TCP buffer.
        """
        # possibly-incomplete data is buffered first; parse_buffer() then
        # consumes one message per call until no full message remains
        self._receive_buffer += data
        while True:
            if not self.parse_buffer():
                break

    def parse_buffer(self):
        """
        Parse TCP buffer data.

        Tries to consume exactly one BGP message from the receive buffer.

        :return: True or False (False when no complete message is available
            or a header error aborted parsing)
        """
        buf = self._receive_buffer

        if len(buf) < bgp_cons.HDR_LEN:
            # Every BGP message is at least 19 octets. Maybe the rest
            # hasn't arrived yet.
            return False

        # Check whether the first 16 octets of the buffer consist of
        # the BGP marker (all bits one)
        if buf[:16] != 16 * b'\xff':
            self.fsm.header_error(bgp_cons.ERR_MSG_HDR_CONN_NOT_SYNC)
            return False
            # Parse the BGP header
        try:
            marker, length, msg_type = struct.unpack('!16sHB',
                                                     buf[:bgp_cons.HDR_LEN])
        except Exception as e:
            LOG.error(e)
            error_str = traceback.format_exc()
            LOG.debug(error_str)
            self.fsm.header_error(bgp_cons.ERR_MSG_HDR_CONN_NOT_SYNC)
            return False
            # Check the length of the message, must be less than 4096, bigger than 19
        if length < bgp_cons.HDR_LEN or length > bgp_cons.MAX_LEN:
            self.fsm.header_error(bgp_cons.ERR_MSG_HDR_BAD_MSG_LEN,
                                  struct.pack('!H', length))
            # NOTE(review): there is no `return False` here, so parsing
            # continues with an out-of-range length -- confirm whether
            # header_error() tears the session down before that matters.
            # Check whether the entire message is already available
        if len(buf) < length:
            return False
        msg = buf[bgp_cons.HDR_LEN:length]
        t = time.time()  # the time when received that packet.
        try:
            # dispatch on the message type from the common header
            if msg_type == bgp_cons.MSG_OPEN:
                try:
                    self._open_received(timestamp=t, msg=msg)
                except excep.MessageHeaderError as e:
                    LOG.error(e)
                    self.fsm.header_error(suberror=e.sub_error)
                    return False
                except excep.OpenMessageError as e:
                    LOG.error(e)
                    self.fsm.open_message_error(suberror=e.sub_error)
                    return False

            elif msg_type == bgp_cons.MSG_UPDATE:
                self._update_received(timestamp=t, msg=msg)

            elif msg_type == bgp_cons.MSG_NOTIFICATION:
                self._notification_received(Notification().parse(msg))

            elif msg_type == bgp_cons.MSG_KEEPALIVE:
                try:
                    self._keepalive_received(timestamp=t, msg=msg)
                except excep.MessageHeaderError as e:
                    LOG.error(e)
                    self.fsm.header_error(suberror=e.sub_error)
                    return False
            elif msg_type in (bgp_cons.MSG_ROUTEREFRESH,
                              bgp_cons.MSG_CISCOROUTEREFRESH):
                route_refresh_msg = RouteRefresh().parse(msg)
                self._route_refresh_received(msg=route_refresh_msg,
                                             msg_type=msg_type)
            else:
                # unknown message type
                self.fsm.header_error(bgp_cons.ERR_MSG_HDR_BAD_MSG_TYPE,
                                      struct.pack('!H', msg_type))
        except Exception as e:
            LOG.error(e)
            error_str = traceback.format_exc()
            LOG.debug(error_str)
        # consume exactly one message from the buffer and report progress
        self._receive_buffer = self._receive_buffer[length:]
        return True

    def closeConnection(self):
        """Close the connection and remember that we closed it ourselves."""
        if not self.transport.connected:
            return
        self.transport.loseConnection()
        # connectionLost() uses this flag to skip the failure path
        self.disconnected = True

    def _update_received(self, timestamp, msg):
        # if self.msg_recv_stat['Updates'] % 1000 == 0:
        #     LOG.info(self.msg_recv_stat['Updates'])
        #     LOG.info(time.time())
        """Called when a BGP Update message was received."""
        result = Update().parse(timestamp, msg, self.fourbytesas,
                                self.add_path_ipv4_receive,
                                self.add_path_ipv4_send)
        # malformed update: report it and count it, but touch no RIB state
        if result['sub_error']:
            msg = {
                'attr': result['attr'],
                'nlri': result['nlri'],
                'withdraw': result['withdraw'],
                'hex': repr(result['hex'])
            }

            self.handler.on_update_error(self, timestamp, msg)

            LOG.error('[%s] Update message error: sub error=%s',
                      self.factory.peer_addr, result['sub_error'])
            self.msg_recv_stat['Updates'] += 1
            self.fsm.update_received()
            return

        # work out the address family: plain IPv4 NLRI, or MP_REACH (attr 14)
        # / MP_UNREACH (attr 15) for the multiprotocol families
        afi_safi = None
        # process messages
        if result['nlri'] or result['withdraw']:
            afi_safi = 'ipv4'
        elif result['attr'].get(14):
            afi_safi = bgp_cons.AFI_SAFI_DICT[result['attr'][14]['afi_safi']]
        elif result['attr'].get(15):
            afi_safi = bgp_cons.AFI_SAFI_DICT[result['attr'][15]['afi_safi']]

        msg = {
            'attr': result['attr'],
            'nlri': result['nlri'],
            'withdraw': result['withdraw'],
            'afi_safi': afi_safi
        }

        # bump the per-family receive version counters
        self.update_receive_verion(result['attr'], result['nlri'],
                                   result['withdraw'])

        if CONF.bgp.rib:
            # try to update bgp rib in
            if msg.get('afi_safi') == 'ipv4':
                self.update_rib_in_ipv4(msg)
                # LOG.info(msg)
        self.handler.update_received(self, timestamp, msg)

        self.msg_recv_stat['Updates'] += 1
        self.fsm.update_received()

    def send_update(self, msg):
        """Construct one UPDATE message and hand it to the reactor to send.

        :param msg: message dictionary
        :return: True on success, False when construction or scheduling failed
        """
        try:
            payload = Update().construct(msg, self.fourbytesas,
                                         self.add_path_ipv4_send)
            # the actual socket write happens in the reactor thread
            reactor.callFromThread(self.write_tcp_thread, payload)
            self.msg_sent_stat['Updates'] += 1
        except Exception as e:
            LOG.error(e)
            return False
        return True

    def construct_update_to_bin(self, msg):
        """Encode an UPDATE message dictionary into wire format.

        :param msg: message dictionary
        :return: the binary message, or the string "construct failed" on error
        """
        try:
            return Update().construct(msg, self.fourbytesas,
                                      self.add_path_ipv4_send)
        except Exception as e:
            LOG.error(e)
            return "construct failed"

    def send_bin_update(self, msg):
        """Schedule an already-encoded UPDATE message for sending.

        :param msg: binary message payload
        :return: True when queued, False on error
        """
        try:
            reactor.callFromThread(self.write_tcp_thread, msg)
        except Exception as e:
            LOG.error(e)
            return False
        return True

    def write_tcp_thread(self, msg):
        # Runs in the reactor thread: push raw bytes onto the transport.
        self.transport.write(msg)

    def send_notification(self, error, sub_error, data=b''):
        """
        send BGP notification message

        :param error: error code
        :param sub_error: sub error code
        :param data: extra diagnostic payload
        :return:
        """
        # message statistic
        # (bug fix: this counter used to be incremented twice per call)
        self.msg_sent_stat['Notifications'] += 1
        LOG.info(
            "[%s]Send a BGP Notification message to the peer "
            "[Error: %s, Suberror: %s, Error data: %s ]",
            self.factory.peer_addr, error, sub_error, repr(data))
        # construct message
        msg_notification = Notification().construct(error, sub_error, data)
        # send message
        self.transport.write(msg_notification)

    def _notification_received(self, msg):
        """
        BGP notification message received.

        :param msg: tuple of (error code, sub error code, error data)
        """
        self.msg_recv_stat['Notifications'] += 1
        error_str = bgp_cons.NOTIFICATION_ERROR_CODES_DICT.get(msg[0])
        if error_str:
            # Robustness fix: default to an empty dict so an error code
            # missing from the sub-error table cannot raise AttributeError.
            sub_error_str = bgp_cons.NOTIFICATION_SUB_ERROR_CODES_DICT.get(
                msg[0], {}).get(msg[1])
        else:
            sub_error_str = None
        LOG.info(
            '[%s]Notification message received, error: %s, sub error: %s, data=%s',
            self.factory.peer_addr, error_str, sub_error_str, msg[2])

        nofi_msg = {
            'error': error_str,
            'sub_error': sub_error_str,
            'data': repr(msg[2])
        }

        self.handler.notification_received(self, nofi_msg)

        self.fsm.notification_received(msg[0], msg[1])

        LOG.debug('offline')

    def send_keepalive(self):
        """Construct and write one BGP KEEPALIVE message to the peer."""
        # message statistic
        self.msg_sent_stat['Keepalives'] += 1
        LOG.info("[%s]Send a BGP KeepAlive message to the peer.",
                 self.factory.peer_addr)
        # construct and send the message
        self.transport.write(KeepAlive().construct())

    def _keepalive_received(self, timestamp, msg):
        """
            process keepalive message

            :param timestamp: receive time of the message
            :param msg: raw keepalive body
            :return:
        """

        # deal with all request in internal message queue
        # until the queue is empty
        # (only after at least one keepalive has already been counted)
        while not self.handler.inter_mq.empty(
        ) and self.msg_recv_stat['Keepalives'] > 0:
            inter_msg = self.handler.inter_mq.get()
            LOG.debug('Get %s message %s from internal queue',
                      inter_msg['type'], inter_msg['msg'])
            if inter_msg['type'] == 'notification':
                self.send_notification(inter_msg['msg']['error'],
                                       inter_msg['msg']['sub_error'],
                                       inter_msg['msg']['data'])
            elif inter_msg['type'] == 'update':
                self.send_update(inter_msg['msg'])

        self.msg_recv_stat['Keepalives'] += 1

        self.handler.keepalive_received(self, timestamp)

        LOG.info("[%s]A BGP KeepAlive message was received from peer.",
                 self.factory.peer_addr)
        # validate the keepalive message body
        KeepAlive().parse(msg)

        self.fsm.keep_alive_received()

    def capability_negotiate(self):
        """
        Open message capability negotiation.

        Best-effort: once the peer's capabilities are known, drop every
        locally configured capability that the remote side did not offer.
        """
        capability_cfg = cfg.CONF.bgp.running_config['capability']
        if not capability_cfg['remote']:
            return
        # snapshot first -- the dict must not be mutated while iterating it
        unsupported = [cap for cap in capability_cfg['local']
                       if cap not in capability_cfg['remote']]
        for cap in unsupported:
            capability_cfg['local'].pop(cap)

    def send_open(self):
        """
        Construct and send a BGP OPEN message.

        Runs capability negotiation first, then enables ADD-PATH sending when
        local (send/both) and remote (receive/both) settings agree.

        :return:
        """
        # construct Open message
        self.capability_negotiate()
        open_msg = Open(
            version=bgp_cons.VERSION, asn=self.factory.my_asn, hold_time=self.fsm.hold_time,
            bgp_id=self.factory.bgp_id). \
            construct(cfg.CONF.bgp.running_config['capability']['local'])
        if 'add_path' in cfg.CONF.bgp.running_config['capability']['local']:
            # check add path feature, send add path condition:
            # local support send or both
            # remote support receive or both
            if cfg.CONF.bgp.running_config['capability']['local']['add_path'] in \
                    ['ipv4_send', 'ipv4_both']:
                if cfg.CONF.bgp.running_config['capability']['remote'].get('add_path') in \
                        ['ipv4_receive', 'ipv4_both']:
                    self.add_path_ipv4_send = True
        # send message
        self.transport.write(open_msg)
        self.msg_sent_stat['Opens'] += 1
        LOG.info("[%s]Send a BGP Open message to the peer.",
                 self.factory.peer_addr)
        LOG.info("[%s]Probe's Capabilities:", self.factory.peer_addr)
        for key in cfg.CONF.bgp.running_config['capability']['local']:
            LOG.info("--%s = %s", key,
                     cfg.CONF.bgp.running_config['capability']['local'][key])

    def _open_received(self, timestamp, msg):
        """
        Process a received BGP OPEN message.

        :param timestamp: timestamp that received this message
        :param msg: binary raw message data
        :raises excep.OpenMessageError: when the peer ASN does not match the
            configured peering
        :return:
        """

        self.msg_recv_stat['Opens'] += 1
        open_msg = Open()
        parse_result = open_msg.parse(msg)
        if self.fsm.bgp_peering.peer_asn != open_msg.asn:
            raise excep.OpenMessageError(
                sub_error=bgp_cons.ERR_MSG_OPEN_BAD_PEER_AS)

        # Open message Capabilities negotiation
        cfg.CONF.bgp.running_config['capability'][
            'remote'] = open_msg.capa_dict
        LOG.info("[%s]A BGP Open message was received", self.factory.peer_addr)
        LOG.info('--version = %s', open_msg.version)
        LOG.info('--ASN = %s', open_msg.asn)
        LOG.info('--hold time = %s', open_msg.hold_time)
        LOG.info('--id = %s', open_msg.bgp_id)
        LOG.info("[%s]Neighbor's Capabilities:", self.factory.peer_addr)
        for key in cfg.CONF.bgp.running_config['capability']['remote']:
            if key == 'four_bytes_as':
                self.fourbytesas = True
            elif key == 'add_path':
                # enable ADD-PATH receive only when the remote sends
                # (send/both) and we are configured to receive (receive/both)
                if cfg.CONF.bgp.running_config['capability']['remote']['add_path'] in \
                        ['ipv4_send', 'ipv4_both']:
                    if cfg.CONF.bgp.running_config['capability']['local']['add_path'] in \
                            ['ipv4_receive', 'ipv4_both']:
                        self.add_path_ipv4_receive = True

            LOG.info("--%s = %s", key,
                     cfg.CONF.bgp.running_config['capability']['remote'][key])

        self.peer_id = open_msg.bgp_id
        # NOTE(review): `self.bgp_peering` is not assigned in __init__ (the
        # FSM's peering is used via self.fsm.bgp_peering above) -- confirm
        # this attribute is injected elsewhere before an OPEN can arrive.
        self.bgp_peering.set_peer_id(open_msg.bgp_id)

        self.negotiate_hold_time(open_msg.hold_time)
        self.fsm.open_received()

        self.handler.open_received(self, timestamp, parse_result)

    def send_route_refresh(self, afi, safi, res=0):
        """
        Send bgp route refresh message
        :param afi: address family
        :param safi: sub address family
        :param res: reserve, default is 0
        """
        remote_capability = cfg.CONF.bgp.running_config['capability']['remote']
        # pick the message type the peer advertised; give up when neither
        # route-refresh flavour is supported
        if 'cisco_route_refresh' in remote_capability:
            type_code = bgp_cons.MSG_CISCOROUTEREFRESH
        elif 'route_refresh' in remote_capability:
            type_code = bgp_cons.MSG_ROUTEREFRESH
        else:
            return False
        # the peer must also support the requested address family
        if (afi, safi) not in remote_capability['afi_safi']:
            return False
        # construct and send the message
        self.transport.write(RouteRefresh(afi, safi, res).construct(type_code))
        self.msg_sent_stat['RouteRefresh'] += 1
        LOG.info("[%s]Send BGP RouteRefresh message to the peer.",
                 self.factory.peer_addr)
        return True

    def _route_refresh_received(self, msg, msg_type):
        """
        Route Refresh message received.

        :param msg: msg content
        :param msg_type: message type 5 or 128
        """
        self.msg_recv_stat['RouteRefresh'] += 1
        afi, res, safi = msg[0], msg[1], msg[2]
        LOG.info('[%s]Route Refresh message received, afi=%s, res=%s, safi=%s',
                 self.factory.peer_addr, afi, res, safi)
        self.handler.route_refresh_received(
            self, {'afi': afi, 'res': res, 'safi': safi}, msg_type)

    def negotiate_hold_time(self, hold_time):
        """Negotiates the hold time"""
        # use the smaller of our configured and the peer's offered hold time
        self.fsm.hold_time = min(self.fsm.hold_time, hold_time)
        # a non-zero hold time below 3 seconds is unacceptable
        if self.fsm.hold_time < 3 and self.fsm.hold_time != 0:
            self.fsm.open_message_error(
                bgp_cons.ERR_MSG_OPEN_UNACCPT_HOLD_TIME)
        # Derived times
        self.fsm.keep_alive_time = self.fsm.hold_time / 3
        LOG.info("[%s]Hold time:%s,Keepalive time:%s", self.factory.peer_addr,
                 self.fsm.hold_time, self.fsm.keep_alive_time)

    def update_rib_out_ipv4(self, msg):
        """Apply one outgoing IPv4 UPDATE to adj-RIB-out.

        Bumps ``send_version['ipv4']`` for every effective change: removed
        prefix, new prefix, or changed attributes.

        :param msg: update dict with 'withdraw', 'nlri' and 'attr'
        :return: True on success, False when anything went wrong
        """
        try:
            table = self.adj_rib_out['ipv4']
            for prefix in msg['withdraw']:
                if prefix in table:
                    self.send_version['ipv4'] += 1
                    del table[prefix]
            for prefix in msg['nlri']:
                if prefix not in table or table[prefix] != msg['attr']:
                    self.send_version['ipv4'] += 1
                table[prefix] = msg['attr']
            return True
        except Exception as e:
            LOG.error(e)
            return False

    def update_rib_in_ipv4(self, msg):
        """Apply one incoming IPv4 UPDATE to adj-RIB-in and its radix tree.

        Bumps ``receive_version['ipv4']`` for every effective change and
        keeps the radix lookup tree in sync with the table.

        :param msg: update dict with 'withdraw', 'nlri' and 'attr'
        :return: True on success, False when anything went wrong
        """
        try:
            table = self.adj_rib_in['ipv4']
            for prefix in msg['withdraw']:
                if prefix in table:
                    self.receive_version['ipv4'] += 1
                    del table[prefix]
                    # remove the matching radix node, if present
                    if self.adj_rib_in_ipv4_tree.search_exact(prefix):
                        self.adj_rib_in_ipv4_tree.delete(prefix)
            for prefix in msg['nlri']:
                if prefix not in table or table[prefix] != msg['attr']:
                    self.receive_version['ipv4'] += 1
                table[prefix] = msg['attr']
                self.adj_rib_in_ipv4_tree.add(prefix)
            return True
        except Exception as e:
            LOG.error(e)
            LOG.debug(traceback.format_exc())
            return False

    def ip_longest_match(self, prefix_ip):
        """Longest-prefix match against the IPv4 adj-RIB-in.

        :param prefix_ip: an IP address or a CIDR prefix string
        :return: dict with 'prefix' and 'attr' keys, or {} when nothing
            in the RIB covers the address
        """
        # exact-match fast path for CIDR input
        if '/' in prefix_ip:
            attr = self.adj_rib_in['ipv4'].get(prefix_ip)
            if attr:
                return {'prefix': prefix_ip, 'attr': attr}
        # Bug fix: the previous code guarded this with
        # `prefix_ip in self.adj_rib_in_ipv4_tree`, which iterates RadixNode
        # objects and never equals a string, so the longest-match branch was
        # unreachable. search_best() already returns None when no covering
        # prefix exists, so no membership pre-check is needed.
        prefix_node = self.adj_rib_in_ipv4_tree.search_best(prefix_ip)
        if prefix_node:
            attr = self.adj_rib_in['ipv4'].get(prefix_node.prefix)
            if attr:
                return {'prefix': prefix_node.prefix, 'attr': attr}
        return {}

    def update_send_version(self, peer_ip, attr, nlri, withdraw):
        """Track send-side table versions for flowspec / SR-policy / MPLS VPN.

        Bumps the matching ``send_version`` counter whenever an announced
        entry is new or changed, or a withdrawn entry is removed from the
        corresponding *_send_dict tracking table.

        :param peer_ip: peer address (unused in the body; kept for interface
            compatibility)
        :param attr: parsed path attributes (MP_REACH_NLRI=14,
            MP_UNREACH_NLRI=15)
        :param nlri: IPv4 NLRI list (unused here)
        :param withdraw: IPv4 withdraw list (unused here)
        """

        def _dict_key(d):
            # Serialize a dict into the historical pseudo-JSON key format
            # '{"k":"v",...}' with keys sorted (matches the keys already
            # stored in the tracking tables).
            key = "{"
            for k in sorted(d.keys()):
                key += '"%s":"%s",' % (k, str(d[k]))
            return key[:-1] + "}"

        def _announce(family, table, key, value):
            # New or changed entry -> bump the family's send version.
            if key not in table or value != table[key]:
                self.send_version[family] += 1
                table[key] = value

        if 14 in attr:
            afi_safi = attr[14]['afi_safi']
            if afi_safi == [1, 133]:
                LOG.info("send flowspec")
                for prefix in attr[14]['nlri']:
                    # store the attributes without the per-prefix NLRI copy
                    value = copy.deepcopy(attr)
                    del value[14]['nlri']
                    _announce('flowspec', self.flowspec_send_dict,
                              _dict_key(prefix), value)
            elif afi_safi == [1, 73]:
                LOG.info('send sr')
                _announce('sr_policy', self.sr_send_dict,
                          _dict_key(attr[14]['nlri']), attr)
            elif afi_safi == [1, 128]:
                LOG.info("send mpls_vpn")
                for prefix in attr[14]['nlri']:
                    value = copy.deepcopy(attr)
                    del value[14]['nlri']
                    _announce('mpls_vpn', self.mpls_vpn_send_dict,
                              _dict_key(prefix), value)
        # flowspec sr mpls_vpn withdraw
        if 15 in attr:
            afi_safi = attr[15]['afi_safi']
            if afi_safi == [1, 133]:
                LOG.info("withdraw flowspec")
                for prefix in attr[15]['withdraw']:
                    key = _dict_key(prefix)
                    if key in self.flowspec_send_dict:
                        self.send_version['flowspec'] += 1
                        del self.flowspec_send_dict[key]
                    else:
                        LOG.info("Do not have %s in send flowspec dict" % key)
            elif afi_safi == [1, 73]:
                LOG.info('withdraw sr')
                key = _dict_key(attr[15]['withdraw'])
                if key in self.sr_send_dict:
                    self.send_version['sr_policy'] += 1
                    del self.sr_send_dict[key]
                else:
                    # bug fix: this message previously said "flowspec"
                    LOG.info("Do not have %s in send sr_policy dict" % key)
            elif afi_safi == [1, 128]:
                LOG.info("withdraw mpls_vpn")
                for prefix in attr[15]['withdraw']:
                    key = _dict_key(prefix)
                    if key in self.mpls_vpn_send_dict:
                        self.send_version['mpls_vpn'] += 1
                        del self.mpls_vpn_send_dict[key]
                    else:
                        # bug fix: this message previously said "flowspec"
                        LOG.info("Do not have %s in send mpls_vpn dict" % key)

    def update_receive_verion(self, attr, nlri, withdraw):
        """Track receive-side table versions for flowspec / SR-policy / MPLS VPN.

        NOTE: the method name ("verion") is a historical typo kept for
        interface compatibility with existing callers.

        :param attr: parsed path attributes (MP_REACH_NLRI=14,
            MP_UNREACH_NLRI=15)
        :param nlri: IPv4 NLRI list (unused here)
        :param withdraw: IPv4 withdraw list (unused here)
        """

        def _dict_key(d):
            # Historical pseudo-JSON key format: '{"k":"v",...}', keys sorted.
            key = "{"
            for k in sorted(d.keys()):
                key += '"%s":"%s",' % (k, str(d[k]))
            return key[:-1] + "}"

        def _announce(family, table, key, value):
            # New or changed entry -> bump the family's receive version.
            if key not in table or value != table[key]:
                self.receive_version[family] += 1
                table[key] = value

        if 14 in attr:
            afi_safi = attr[14]['afi_safi']
            if afi_safi == [1, 133]:
                LOG.info("recieve flowspec send")
                for prefix in attr[14]['nlri']:
                    # store the attributes without the per-prefix NLRI copy
                    value = copy.deepcopy(attr)
                    del value[14]['nlri']
                    _announce('flowspec', self.flowspec_receive_dict,
                              _dict_key(prefix), value)
            elif afi_safi == [1, 73]:
                # SR-policy receive tracking is not implemented here
                LOG.info('recieve sr send')
            elif afi_safi == [1, 128]:
                LOG.info("receive send mpls_vpn")
                for prefix in attr[14]['nlri']:
                    value = copy.deepcopy(attr)
                    del value[14]['nlri']
                    _announce('mpls_vpn', self.mpls_vpn_receive_dict,
                              _dict_key(prefix), value)
        # receive flowspec sr mpls withdraw
        if 15 in attr:
            afi_safi = attr[15]['afi_safi']
            if afi_safi == [1, 133]:
                LOG.info("recieve flowspec withdraw")
                for prefix in attr[15]['withdraw']:
                    key = _dict_key(prefix)
                    if key in self.flowspec_receive_dict:
                        self.receive_version['flowspec'] += 1
                        del self.flowspec_receive_dict[key]
                    else:
                        # bug fix: log the canonical key (as every other
                        # branch does) rather than the raw prefix dict
                        LOG.info("Do not have %s in receive flowspec dict" %
                                 key)
            elif afi_safi == [1, 73]:
                LOG.info('recieve sr withdraw')
            elif afi_safi == [1, 128]:
                LOG.info("recieve withdraw mpls_vpn")
                for prefix in attr[15]['withdraw']:
                    key = _dict_key(prefix)
                    if key in self.mpls_vpn_receive_dict:
                        self.receive_version['mpls_vpn'] += 1
                        del self.mpls_vpn_receive_dict[key]
                    else:
                        LOG.info("Do not have %s in receive mpls_vpn dict" %
                                 key)
 def __init__(self):
     # NOTE(review): orphaned fragment -- the enclosing class statement is
     # missing and the indentation is off (1-space def). Appears to be a
     # duplicated RIB.__init__ variant that also builds a PathSelection.
     self.radixRib = Radix()
     self.pathselection = PathSelection()
    if nz_rt.search_best(network=n, masklen=int(m)):
        overlaps = True
        net_p = ipaddress.ip_network(unicode(p, 'utf-8'))
        num_slash_24 = int(net_p.num_addresses/256)
    else:
        overlaps = False
        num_slash_24 = 0

    return dict(is_nz=overlaps, count24=num_slash_24)


def is_nz_as(as_number):
    """Return True when *as_number* is in the NZ AS set loaded from RIR data."""
    found = as_number in nz_as_list
    return found

""" Read the list of NZ prefixes from the data obtained from APNIC using a Radix Tree"""
nz_rt = Radix()

with open("../data/nz-networks-from-rir.tsv", "rb") as rir_data:
    nz_networks = csv.reader(rir_data, delimiter='\t')

    for net in nz_networks:
        [p, m] = net[0].split('/')
        nz_rt.add(network=p, masklen=int(m))

""" Read the list of NZ ASes from the data obtained from APNIC"""
nz_as_list = set()

with open('../data/as-from-rir.tsv', 'rb') as nz_as:
    as_in = csv.reader(nz_as, delimiter='\t')

    for asn in as_in:
class RIB(object):
    def __init__(self):
        # Radix tree RIB: each node's .data maps path-hash -> path attributes
        # (populated by process_msg).
        self.radixRib = Radix()

    def serialize(self):
        '''To encode the py-radix object, we'll convert into a dictionary'''

        # The py-radix object contains all the prefixes and associated metadata for
        # each prefix in a single py-radix object. Convert this into a nested dict.
        rib = []

        # walk through the current list of nodes in the tree
        nodes = self.radixRib.nodes()
        for node in nodes:
            entry = {}
            entry["family"] = node.family
            entry["network"] = node.network
            entry["prefix"] = node.prefix
            entry["prefix_len"] = node.prefixlen
            entry["paths"] = node.data
            rib.append(entry)

        return rib

    def process_msg(self, route):
        """Insert or update one received route in the radix RIB.

        ``route`` must carry 'prefix', 'prefix_len', 'hash' and 'action'
        keys.  The prefix identifies the radix node and the hash becomes the
        key under node.data, so those keys are stripped and the remaining
        dict is stored as the path's attributes.  Mutates ``route`` in place.

        NOTE(review): the 'action' field is currently ignored — a "del" is
        treated exactly like an "add"; the withdraw logic is preserved below,
        commented out, as in the original.
        """
        addr = inet_aton(str(route['prefix']))
        logger.debug("Received route with prefix = " + str(route['prefix']) +
                     "/" + str(route['prefix_len']) + " and action=" +
                     str(route['action']))

        ribnode = self.radixRib.search_exact(packed=addr,
                                             masklen=int(route['prefix_len']))
        is_new_prefix = ribnode is None
        if is_new_prefix:
            # First path for this prefix/prefix-len: create the radix node.
            ribnode = self.radixRib.add(packed=addr,
                                        masklen=int(route['prefix_len']))

        # Keep only path attributes: prefix/prefix_len identify the node and
        # the hash becomes the dictionary key, so none belong in the value.
        for key in ['prefix', 'prefix_len']:
            del route[key]
        path_hash = route['hash']
        del route['hash']

        # Disabled withdraw handling, retained verbatim from the original:
        #            if route["action"] == "del":
        #                if path_hash in ribnode.data:
        #                    del ribnode.data[path_hash]
        #                    logger.debug("path deleted from tree, hash="+path_hash)
        #                else:
        #                    logger.debug("Delete for a path that did not exist already")
        #            elif route["action"] == "add":

        # Add (or overwrite) the path keyed by its hash.
        ribnode.data.update({str(path_hash): route})
        if is_new_prefix:
            logger.debug("Added the path to the route entry, path hash=" +
                         path_hash)
        else:
            logger.debug("Path updated in the tree, hash=" + path_hash)