def testInsertRemove(self):
    """Insert/remove cycles leave the trie empty; deleting absent keys raises."""
    pyt = pytricia.PyTricia()

    # Two rounds via explicit delete(), one via the del statement.
    for remover in (pyt.delete, pyt.delete, pyt.__delitem__):
        pyt['10.0.0.0/8'] = list(range(10))
        self.assertListEqual(['10.0.0.0/8'], pyt.keys())
        remover('10.0.0.0/8')
        self.assertListEqual([], pyt.keys())
        self.assertFalse(pyt.has_key('10.0.0.0/8'))

    # Deleting a prefix that was never inserted raises KeyError.
    with self.assertRaises(KeyError) as cm:
        t = pytricia.PyTricia()
        pyt['10.0.0.0/8'] = list(range(10))
        t.delete('10.0.0.0/9')
    self.assertIsInstance(cm.exception, KeyError)
def init_detection(self) -> None:
    """
    (Re)build the detection prefix tree from the configured rules.

    Updates rules every time it receives a new configuration.

    BUGFIX: the return annotation was ``NoReturn``, which claims the method
    never returns; it returns normally, so the correct annotation is ``None``.
    """
    log.info("Initiating detection...")
    log.info("Starting building detection prefix tree...")
    # One trie per address family: 32-bit IPv4 keys, 128-bit IPv6 keys.
    self.prefix_tree = {
        "v4": pytricia.PyTricia(32),
        "v6": pytricia.PyTricia(128),
    }
    raw_prefix_count = 0
    for rule in self.rules:
        try:
            # Expand ASN ranges into concrete ASNs and de-duplicate via
            # sets before writing the flattened lists back on the rule.
            rule_translated_origin_asn_set = set()
            for asn in rule["origin_asns"]:
                this_translated_asn_list = flatten(translate_asn_range(asn))
                rule_translated_origin_asn_set.update(
                    set(this_translated_asn_list)
                )
            rule["origin_asns"] = list(rule_translated_origin_asn_set)
            rule_translated_neighbor_set = set()
            for asn in rule["neighbors"]:
                this_translated_asn_list = flatten(translate_asn_range(asn))
                rule_translated_neighbor_set.update(
                    set(this_translated_asn_list)
                )
            rule["neighbors"] = list(rule_translated_neighbor_set)

            conf_obj = {
                "origin_asns": rule["origin_asns"],
                "neighbors": rule["neighbors"],
                "policies": set(rule["policies"]),
                "community_annotations": rule["community_annotations"],
            }
            for prefix in rule["prefixes"]:
                # An RFC2622 expression may expand to several prefixes.
                for translated_prefix in translate_rfc2622(prefix):
                    ip_version = get_ip_version(translated_prefix)
                    if self.prefix_tree[ip_version].has_key(translated_prefix):
                        node = self.prefix_tree[ip_version][translated_prefix]
                    else:
                        node = {
                            "prefix": translated_prefix,
                            "data": {"confs": []},
                        }
                        self.prefix_tree[ip_version].insert(
                            translated_prefix, node
                        )
                    # Several rules may cover the same prefix: collect all.
                    node["data"]["confs"].append(conf_obj)
                    raw_prefix_count += 1
        except Exception:
            # A malformed rule must not abort tree construction.
            log.exception("Exception")
    log.info(
        "{} prefixes integrated in detection prefix tree in total".format(
            raw_prefix_count
        )
    )
    log.info("Finished building detection prefix tree.")
    log.info("Detection initiated, configured and running.")
def patricia(mydict):
    """Build source/destination Patricia tries from a list of flow records.

    Each record in mydict must provide 'src_ip', 'dst_ip' and 'aasno' keys.
    Returns the (pyt_src, pyt_dst) trie pair after printing their contents
    and invoking finding_parent_children on them.
    """
    pyt_src = pytricia.PyTricia()
    pyt_dst = pytricia.PyTricia()
    # BUGFIX: the loop variable was named `dict`, shadowing the builtin
    # (and a dead `dict = {}` initialisation preceded the loop).
    for entry in mydict:
        check_exact_proceed(entry['src_ip'], entry['dst_ip'],
                            int(entry['aasno']), pyt_src, pyt_dst)
    # Listing Patricia Source Trie (parenthesized prints behave the same
    # under Python 2 for single arguments and are Python 3 compatible).
    print("Layer 3 --source")
    print(list(pyt_src))
    print("\n")
    # Listing Patricia Destination Trie
    print("Layer 3 --Destination")
    print(list(pyt_dst))
    print("\n")
    # Finding Length
    print(len(pyt_src))
    print(len(pyt_dst))
    print("Source-----")
    for item in pyt_src:
        print(item, pyt_src[item])
    print("Destination-")
    for item in pyt_dst:
        print(item, pyt_dst[item])
    print("Patricia tree formation completed")
    finding_parent_children(pyt_src, pyt_dst)
    return pyt_src, pyt_dst
def __importPrefix(cnx, _ip):
    """Look up _ip against prefixes fetched from the database.

    Loads "prefix_more" prefixes first; when no covering prefix matches,
    falls back to "prefix_less". Prints "<flag>|<matched_prefix>" on a
    match. Returns False on ValueError, otherwise None.
    """
    # Prepare a cursor object
    cur = cnx.cursor()
    # Create MYSQL database query string
    __dbQuery_m = __createQuery("prefix_more")
    __dbQuery_l = __createQuery("prefix_less")
    # Execute SQL query
    try:
        flag = "prefix_more"
        _pt_m = pytricia.PyTricia()
        cur.execute(__dbQuery_m)
        result = cur.fetchall()
        for row in result:
            # Store the prefix as its own value so get() returns it.
            _pt_m.insert(row[0], row[0])
        _net = _pt_m.get(_ip)
        if _net:
            print(flag + "|" + _net)
        else:
            _flag = "prefix_less"
            _pt_l = pytricia.PyTricia()
            cur.execute(__dbQuery_l)
            result = cur.fetchall()
            for row in result:
                _pt_l.insert(row[0], row[0])
            _net = _pt_l.get(_ip)
            if _net:
                print(_flag + "|" + _net)
    except ValueError:
        return False
    finally:
        # BUGFIX: the cursor was never closed (resource leak); release it
        # on every path, including errors.
        cur.close()
def init_mitigation(self):
    """Build the mitigation prefix tree from the configured rules."""
    log.info("Initiating mitigation...")
    log.info("Starting building mitigation prefix tree...")
    self.prefix_tree = {
        "v4": pytricia.PyTricia(32),
        "v6": pytricia.PyTricia(128),
    }
    raw_prefix_count = 0
    for rule in self.rules:
        try:
            for prefix in rule["prefixes"]:
                # Each RFC2622 expression may expand to several prefixes.
                for net in translate_rfc2622(prefix):
                    family = get_ip_version(net)
                    node = {
                        "prefix": net,
                        "data": {"mitigation": rule["mitigation"]},
                    }
                    self.prefix_tree[family].insert(net, node)
                    raw_prefix_count += 1
        except Exception:
            # One bad rule must not abort the whole build.
            log.exception("Exception")
    log.info(
        "{} prefixes integrated in mitigation prefix tree in total".format(
            raw_prefix_count
        )
    )
    log.info("Finished building mitigation prefix tree.")
    log.info("Mitigation initiated, configured and running.")
def build_prefix_tree(self):
    """Build the autoignore prefix tree from the autoignore rules."""
    log.info("Starting building autoignore prefix tree...")
    self.prefix_tree = {
        "v4": pytricia.PyTricia(32),
        "v6": pytricia.PyTricia(128),
    }
    raw_prefix_count = 0
    for key, rule in self.autoignore_rules.items():
        try:
            for prefix in rule["prefixes"]:
                for net in translate_rfc2622(prefix):
                    family = get_ip_version(net)
                    tree = self.prefix_tree[family]
                    # Insert only once per prefix; later rules keep the
                    # first rule_key, exactly as before.
                    if not tree.has_key(net):
                        tree.insert(net, {"prefix": net, "rule_key": key})
                    raw_prefix_count += 1
        except Exception:
            # One bad rule must not abort the whole build.
            log.exception("Exception")
    log.info(
        "{} prefixes integrated in autoignore prefix tree in total".
        format(raw_prefix_count))
    log.info("Finished building autoignore prefix tree.")
def _features_handler(self, ev):
    """ Handle Feature Events (e.g. datapath changes)

    Installs the table-miss and invalid-TTL flow entries on every joining
    datapath; for known routers, additionally loads routing, ARP and
    interface tables from the router data file into per-dpid structures.
    """
    datapath = ev.msg.datapath
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    dpid = dpid_lib.dpid_to_str(datapath.id)

    ## Install flow-table miss entry
    # Empty match = wildcard; unmatched packets go to the controller.
    match = parser.OFPMatch()
    actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                      ofproto.OFPCML_NO_BUFFER)]
    self.add_flow(datapath, 0, match, actions)  # table-miss entry

    # Install Invalid-TTL entry, so that controller will send an ICMP_TIME_EXCEEDED type message
    match = parser.OFPMatch(ofproto.OFPR_INVALID_TTL)
    actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                      ofproto.OFPCML_NO_BUFFER)]
    self.add_flow(datapath, 2, match, actions)  # invalid ttl entry

    ## Check if switch joined
    if dpid in self.switch_dpids:
        self.logger.info("!> Switch Joined Datapath | DPID: %s", dpid)

    ## Check if router joined and install routing table
    if dpid in self.router_dpids:
        self.logger.info("!> Router Joined Datapath | DPID: %s", dpid)
        with open(router_data_filepath, "r") as rdf:
            router_data = json.load(rdf)
        if "datapath" not in router_data:
            self.logger.info("ERROR: Router Data File Invalid")
            return
        router_data = router_data["datapath"]
        if dpid in router_data:
            # get data for the dpid of this switch/router
            router_data = router_data[dpid]
            if "routes" in router_data:
                self.routes_table[dpid] = pytricia.PyTricia()  # initialise as a pytricia tree
                # Keyed by destination prefix; the whole entry is the value.
                for entry in router_data["routes"]:
                    self.routes_table[dpid].insert(str(entry["destination"]), entry)
            else:
                print("No routing-table data found for Router[{}]".format(dpid))
            if "arp" in router_data:
                self.arp_table[dpid] = pytricia.PyTricia()  # initialise as a pytricia tree.
                # Keyed by IP address; the whole entry is the value.
                for entry in router_data["arp"]:
                    self.arp_table[dpid].insert(str(entry["ip"]), entry)
            else:
                print("No arp-table data found for Router[{}]".format(dpid))
            if "interfaces" in router_data:
                self.interfaces[dpid] = {}
                for entry in router_data["interfaces"]:
                    # Key interface records by their port number.
                    port = entry.pop("port")
                    self.interfaces[dpid][port] = entry
            else:
                print("No i/face data found for Router[{}]".format(dpid))
        else:
            print("No data found for Router[{0}] in {1}".format(dpid, router_data_filepath))
def start_monitors(self):
    """(Re)start all monitoring instances.

    Terminates any previously spawned monitor processes, rebuilds the
    prefix tree from the current rules, reduces it to the covering
    super-prefixes worth supervising, dumps those to the prefix file, and
    finally launches every configured monitor backend.
    """
    log.info("Initiating monitor...")
    # Tear down processes left over from a previous configuration round.
    for proc_id in self.process_ids:
        try:
            proc_id[1].terminate()
        except ProcessLookupError:
            log.exception("process terminate")
    self.process_ids.clear()
    self.prefixes.clear()
    log.info("Starting building monitor prefix tree...")
    self.prefix_tree = {
        "v4": pytricia.PyTricia(32),
        "v6": pytricia.PyTricia(128),
    }
    raw_prefix_count = 0
    for rule in self.rules:
        try:
            for prefix in rule["prefixes"]:
                for translated_prefix in translate_rfc2622(prefix):
                    ip_version = get_ip_version(translated_prefix)
                    # Only membership matters here, so store an empty value.
                    self.prefix_tree[ip_version].insert(translated_prefix, "")
                    raw_prefix_count += 1
        except Exception:
            # A bad rule must not prevent the remaining rules from loading.
            log.exception("Exception")
    log.info(
        "{} prefixes integrated in monitor prefix tree in total".format(
            raw_prefix_count
        )
    )
    log.info("Finished building monitor prefix tree.")

    # only keep super prefixes for monitors
    log.info("Calculating monitored prefixes for monitor to supervise...")
    for ip_version in self.prefix_tree:
        for prefix in self.prefix_tree[ip_version]:
            worst_prefix = search_worst_prefix(
                prefix, self.prefix_tree[ip_version]
            )
            if worst_prefix:
                self.prefixes.add(worst_prefix)
    # Persist the supervised set so monitor subprocesses can read it.
    dump_json(list(self.prefixes), self.prefix_file)
    log.info("Calculated monitored prefixes for monitor to supervise.")
    log.info("Initiating configured monitoring instances....")
    self.init_ris_instance()
    self.init_exabgp_instance()
    self.init_bgpstreamhist_instance()
    self.init_bgpstreamlive_instance()
    self.init_bgpstreamkafka_instance()
    log.info("All configured monitoring instances initiated.")
    log.info("Monitor initiated, configured and running.")
def __init__(self, pyt4=None, pyt6=None):
    """Create fresh IPv4/IPv6 tries, optionally restoring saved state.

    Any failure during construction/restore is reported and the process
    exits with status 2, exactly as before.
    """
    try:
        self.pyt6 = pytricia.PyTricia(128)
        self.pyt4 = pytricia.PyTricia()
        self.pyt = None
        # Restore v4 first, then v6, matching the original order.
        for tree, saved in ((self.pyt4, pyt4), (self.pyt6, pyt6)):
            if saved:
                self._restore(tree, saved)
    except Exception as err:
        print("Error in init: " + str(err))
        sys.exit(2)
def testInit(self):
    """Constructor accepts bit lengths 1..128 and rejects anything else."""
    # Invalid arguments: wrong type, negative, and above the 128-bit max.
    for bad_arg in ('a', -1, 129):
        with self.assertRaises(ValueError) as cm:
            pytricia.PyTricia(bad_arg)
        self.assertIsInstance(cm.exception, ValueError)
    # Boundary values that must succeed.
    self.assertIsInstance(pytricia.PyTricia(1), pytricia.PyTricia)
    self.assertIsInstance(pytricia.PyTricia(128), pytricia.PyTricia)
def testInsert3(self):
    """insert() on an IPv6 trie returns None and stores the value."""
    pyt = pytricia.PyTricia(128)
    prefix = "fe80::aebc:32ff:fec2:b659/64"
    self.assertIs(pyt.insert(prefix, "a"), None)
    self.assertEqual(len(pyt), 1)
    self.assertEqual(pyt[prefix], "a")
    # The bare address is covered by the /64 we just inserted.
    self.assertIn("fe80::aebc:32ff:fec2:b659", pyt)
def testInsert2(self):
    """insert() with separate address and prefix-length arguments works."""
    pyt = pytricia.PyTricia()
    self.assertIs(pyt.insert("10.0.0.0", 8, "a"), None)
    self.assertEqual(len(pyt), 1)
    self.assertEqual(pyt["10.0.0.0/8"], "a")
    # Any host address inside 10/8 is contained in the trie.
    self.assertIn("10.0.0.1", pyt)
def testIteration(self):
    """Iterating the trie yields every inserted prefix (order-independent)."""
    pyt = pytricia.PyTricia()
    entries = {
        "10.1.0.0/16": 'b',
        "10.0.0.0/8": 'a',
        "10.0.1.0/24": 'c',
        "0.0.0.0/0": 'default route',
    }
    for prefix, value in entries.items():
        pyt[prefix] = value
    # Compare sorted since trie iteration order is not guaranteed.
    self.assertListEqual(sorted(entries), sorted(iter(pyt)))
def testBasic(self):
    """Longest-prefix lookup, containment, has_key and keys() basics."""
    pyt = pytricia.PyTricia()
    pyt["10.0.0.0/8"] = 'a'
    pyt["10.1.0.0/16"] = 'b'
    # dumppyt(pyt)
    self.assertEqual(pyt["10.0.0.0/8"], 'a')
    self.assertEqual(pyt["10.1.0.0/16"], 'b')
    self.assertEqual(pyt["10.0.0.0"], 'a')
    self.assertEqual(pyt["10.1.0.0"], 'b')
    self.assertEqual(pyt["10.1.0.1"], 'b')
    self.assertEqual(pyt["10.0.0.1"], 'a')
    self.assertTrue('10.0.0.0' in pyt)
    self.assertTrue('10.1.0.0' in pyt)
    self.assertTrue('10.0.0.1' in pyt)
    self.assertFalse('9.0.0.0' in pyt)
    self.assertFalse('0.0.0.0' in pyt)
    self.assertTrue(pyt.has_key('10.0.0.0/8'))
    self.assertTrue(pyt.has_key('10.1.0.0/16'))
    self.assertFalse(pyt.has_key('10.2.0.0/16'))
    self.assertFalse(pyt.has_key('9.0.0.0/8'))
    self.assertFalse(pyt.has_key('10.0.0.1'))
    self.assertTrue(pyt.has_key('10.0.0.0/8'))
    self.assertTrue(pyt.has_key('10.1.0.0/16'))
    self.assertFalse(pyt.has_key('10.2.0.0/16'))
    self.assertFalse(pyt.has_key('9.0.0.0/8'))
    self.assertFalse(pyt.has_key('10.0.0.0'))
    # BUGFIX: assertItemsEqual exists only in Python 2's unittest
    # (renamed assertCountEqual in 3); use the portable sorted-list
    # comparison, consistent with the other testBasic in this suite.
    self.assertListEqual(sorted(['10.0.0.0/8', '10.1.0.0/16']),
                         sorted(pyt.keys()))
def process(self, data, whitelist=None):
    """Return the entries of *data* whose indicator is a valid IP network
    and is not covered by PERM_WHITELIST or *whitelist*.

    Entries with unparsable indicators are skipped with a message.
    """
    # BUGFIX: a mutable default argument ([]) is shared across calls;
    # default to None and create a fresh list per call.
    if whitelist is None:
        whitelist = []
    wl = pytricia.PyTricia()
    for x in PERM_WHITELIST:
        wl[x] = True
    for y in whitelist:
        y = str(_normalize(y['indicator']))
        if '/' not in y:
            # weird bug work-around it'll insert 172.16.1.60 with a /0 at the end??
            y = '{}/32'.format(y)
        wl[y] = True

    # this could be done with generators...
    rv = []
    for y in data:
        y['indicator'] = _normalize(y['indicator'])
        try:
            # ip_network() validates the indicator; Python 2 needs unicode.
            if sys.version_info.major < 3:
                ipaddress.ip_network(unicode(y['indicator']))
            else:
                ipaddress.ip_network(y['indicator'])
            if str(y['indicator']) not in wl:
                rv.append(y)
        except ValueError as e:
            print(e)
            print('skipping invalid address: %s' % y['indicator'])
    return rv
def testLongRawIP6(self):
    """Raw (int, prefixlen) keys work on an IPv6 trie with raw output mode."""
    pyt = pytricia.PyTricia(128, socket.AF_INET6, 2)
    # Express all prefixes as offsets from a common 128-bit base address.
    base = 226943873969804260003059691566684831744
    prefixes = [
        (base, 96 + 16),
        (base + 512, 96 + 24),
        (base + 768, 96 + 24),
        (base + 772, 96 + 32),
    ]
    for prefix, value in zip(prefixes, ["A", "B", "C", "D"]):
        pyt.insert(prefix, value)
    # Longest-prefix match returns the covering /120.
    self.assertEqual(pyt.get_key((base + 514, 96 + 30)),
                     (base + 512, 96 + 24))
    self.assertListEqual(sorted(pyt.keys()), sorted(prefixes))
    # parent/children navigate the containment hierarchy.
    self.assertEqual(pyt.parent((base + 772, 96 + 32)),
                     (base + 768, 96 + 24))
    self.assertListEqual(list(pyt.children((base + 768, 96 + 24))),
                         [(base + 772, 96 + 32)])
    self.assertListEqual(sorted(list(pyt)), sorted(prefixes))
def main():
    """Walk each device's IP table via SNMP and load interface prefixes
    into a Patricia trie seeded with the default route and RFC1918 roots.

    Interfaces in IGNORED_INTERFACE are skipped; unreachable devices are
    reported and skipped.
    """
    opts = parseArgs()
    pyt = pytricia.PyTricia()
    pyt.insert('0.0.0.0/0', "root")
    pyt.insert('10.0.0.0/8', "RFC1918 root")
    # BUGFIX: the RFC1918 class-B block is 172.16.0.0/12 (the previous
    # 172.12.0.0/12 is public address space).
    pyt.insert('172.16.0.0/12', "RFC1918 root")
    pyt.insert('192.168.0.0/16', "RFC1918 root")
    IGNORED_INTERFACE = ['bme0', 'bme0.32768', 'lo']
    for device in opts['hosts']:
        try:
            session = Session(hostname=device,
                              community=opts['community'], version=2)
            ipent = session.walk('IP-MIB::ipAdEntAddr')
            for item in ipent:
                ip = item.value
                ifIndex = session.get('IP-MIB::ipAdEntIfIndex.' + ip)
                mask = session.get('IP-MIB::ipAdEntNetMask.' + ip)
                ifName = session.get('IF-MIB::ifName.' + ifIndex.value)
                if ifName in IGNORED_INTERFACE:
                    print("Skipping %s" % ifName)
                    continue
                # Convert the dotted netmask into a prefix length.
                prefix = ip + "/" + str(
                    IPNetwork('0.0.0.0/' + mask.value).prefixlen)
                pyt.insert(str(prefix), device + "_" + ifName.value)
        # `except Exception as err` is valid in both Python 2.6+ and 3.
        except Exception as err:
            # BUGFIX: the handler referenced an undefined name `fw`
            # (NameError on any SNMP failure); report the device instead.
            print("\tTIMEOUT: " + device)
            print("\t" + str(err))
            continue
def get_as_prefs(input_file="../caida_pref2as_datasets/my_routeviews-rv2-20180328-0000.pfx2as"):
    """Parse a CAIDA pfx2as file.

    Returns a tuple (as2pref, pref2as_pyt): a dict mapping ASN -> list of
    prefixes, and a Patricia trie mapping prefix -> list of ASNs.
    On failure an error is printed and partial results are returned.
    """
    as2pref = {}
    pref2as_pyt = pytricia.PyTricia()
    # Raw string avoids invalid-escape-sequence warnings; compile once
    # instead of re-matching the pattern string on every line.
    line_re = re.compile(
        r'^\s*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+(\d+)\s+(\S+)$')
    try:
        with open(input_file, 'r') as f:
            # Iterate the file lazily instead of readlines(): avoids
            # loading the whole dataset into memory at once.
            for line in f:
                ipnet_match = line_re.match(line)
                if ipnet_match:
                    ipnet = ipnet_match.group(1)
                    ipmask = ipnet_match.group(2)
                    # MOAS entries list several ASNs joined with '_'.
                    asn_list = ipnet_match.group(3).strip().split('_')
                    prefix = "%s/%s" % (ipnet, ipmask)
                    pref2as_pyt[prefix] = asn_list
                    for asn in asn_list:
                        as2pref.setdefault(asn, set()).add(prefix)
    except Exception as e:
        # BUGFIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt and hid the actual error; report it instead.
        print("ERROR with processing asn and prefix data: %s" % e)
    # for json export (sets are not exported)
    for asn in as2pref:
        as2pref[asn] = list(as2pref[asn])
    return (as2pref, pref2as_pyt)
def worker_commands(a_device, mp_queue):
    """Collect MPLS/tunnel configuration from one device over SSH.

    Pushes {device_ip: parsed_json} onto mp_queue, or {device_ip: False}
    when the SSH connection times out or authentication fails.
    """
    # Default the SSH port when the caller did not provide one
    # (setdefault replaces the try/except KeyError dance).
    a_device.setdefault('port', 22)
    identifier = '{ip}'.format(**a_device)
    return_data = {}
    cmd = ''
    command_jnpr = 'show configuration protocols mpls | display set'
    command_csco = 'show running-config formal interface | i tunnel'
    SSHClass = netmiko.ssh_dispatcher(a_device['device_type'])
    try:
        net_connect = SSHClass(**a_device)
        if net_connect.device_type == 'juniper':
            cmd = net_connect.send_command(command_jnpr)
        elif net_connect.device_type == 'cisco_ios':
            cmd = net_connect.send_command(command_csco)
    except (NetMikoTimeoutException, NetMikoAuthenticationException):
        return_data[identifier] = False
        # Add data to the queue (for parent process)
        mp_queue.put(return_data)
        return None
    #print cmd
    # BUGFIX: removed the dead `return_data[identifier] = pytricia.PyTricia()`
    # assignment that was immediately overwritten on the next line.
    return_data[identifier] = generate_json(cmd, identifier,
                                            net_connect.device_type)
    mp_queue.put(return_data)
def load_all(self, ignore_lock: bool = False):
    """Load every available dated tree for this source into memory.

    For each address family, reads the published dates from storage,
    keeps those within [self.first_date, self.last_date], and loads any
    tree not already present. A cache lock marks the interval as being
    loaded so concurrent workers do not duplicate the work; pass
    ignore_lock=True to bypass both checking and taking the lock.
    """
    for address_family in ['v4', 'v6']:
        # Respect a concurrent loader's lock unless explicitly overridden.
        if not ignore_lock and self.locked(address_family):
            continue
        available_dates = self.storagedb.smembers(
            f'{self.source}|{address_family}|dates')
        to_load = [
            available_date for available_date in available_dates
            if available_date >= self.first_date
            and available_date <= self.last_date
        ]
        if not set(to_load).difference(
                set(self.trees[address_family][self.source].keys())):
            # Everything available has already been loaded
            continue
        if not ignore_lock:
            # Mark this interval as being loaded by us.
            self.cache.sadd(f'lock|{self.source}|{address_family}',
                            f'{self.first_date}_{self.last_date}')
        for d in to_load:
            if self.trees[address_family][self.source].get(d) is None:
                self.trees[address_family][
                    self.source][d] = pytricia.PyTricia()
            # An empty trie means the date was never actually populated.
            if not self.trees[address_family][self.source][d]:
                self.load_tree(d, address_family)
                self.loaded_dates[address_family].append(d)
        if not ignore_lock:
            # Release our interval lock.
            self.cache.srem(f'lock|{self.source}|{address_family}',
                            f'{self.first_date}_{self.last_date}')
def generate_local_policy(G, **args):
    """ Generate the local policy for each node in G.

    Args:
        G: graph.
        args: additional arguments to config the generation algorithm.

    Returns:
        The mapping from node id to a local_policy table.
        local_policy_table ::= Trie<prefix, Map<port, next_hop>>
    """
    global global_policy
    random.seed(seed)
    policies = dict()
    max_ports = 30
    for node in G.nodes():
        if node not in policies:
            policies[node] = pytricia.PyTricia()
        # FIXME: select prefix by following a distribution
        for prefix in G.node[node]["ip-prefix"]:
            # FIXME: select ports by following a distribution
            ports = set([
                random.randint(10000, 60000)
                for _ in range(random.randint(1, max_ports))
            ])
            # BUGFIX: the trie must be keyed by prefix with a port->next_hop
            # map as value (per the documented structure); previously the
            # port itself was used as the trie key and `prefix` was ignored.
            policies[node][prefix] = {
                port: random.choice([i for i in G.neighbors(node)] + [None])
                for port in ports
            }
    global_policy = policies
def process(data=None, whitelist=None):
    """Yield the IPv4 entries of *data* that are not whitelisted.

    Skips entries tagged 'whitelist', non-IPv4 indicators, reserved
    networks, and anything covered by a prefix in *whitelist*.
    """
    # BUGFIX: mutable default arguments ([]) are shared across calls;
    # default to None and substitute fresh empty lists per call.
    if data is None:
        data = []
    if whitelist is None:
        whitelist = []
    wl = pytricia.PyTricia()
    for y in whitelist:
        y = str(_normalize(y['indicator']))
        # weird bug work-around it'll insert 172.16.1.60 with a /0 at the end??
        if '/' not in y:
            y = '{}/32'.format(y)
        wl[y] = True

    for i in data:
        if 'whitelist' in set(i['tags']):
            continue
        i['indicator'] = _normalize(i['indicator'])
        if not is_ipv4(i['indicator']):
            continue
        if i['indicator'] in V4_RESERVED:
            continue
        # Trie containment = covered by some whitelisted prefix.
        if str(i['indicator']) not in wl:
            yield i
def build_blocklist():
    """Load the RKN dump (refreshing it first if stale).

    Returns a tuple (trie of blocked IPv4 addresses, list of blocked
    https domains); PROXY_IPS are always added to the trie.
    """
    if is_blocklist_outdated():
        logging.info("RKN block list outdated, downloading it")
        retrieve_blocklist()

    url_re = re.compile(b'^https://([^/]+)')
    logging.info("Loading RKN block list")
    pyt = pytricia.PyTricia()
    domains = []
    with open(DUMP_PATH, "rb") as dump:
        for raw_line in dump:
            fields = raw_line.split(b";")
            if len(fields) <= 1:
                continue
            # First field: " | "-separated addresses; keep IPv4 only.
            for ip in fields[0].split(b" | "):
                if not ip or b":" in ip:
                    continue
                try:
                    pyt[ip.decode()] = 1
                except ValueError:
                    continue
            # Third field: blocked URLs; collect hosts of https:// ones.
            if fields[2]:
                for url in fields[2].split(b" | "):
                    m = url_re.match(url)
                    if m:
                        domains.append(m[1])
    for ip in PROXY_IPS:
        pyt[ip] = 1
    logging.info(
        f"RKN block list loaded: {len(pyt)} addresses, {len(domains)} domains."
    )
    return pyt, domains
def __build_lookup_table(self):
    """Build the prefix -> PID lookup trie from the network map."""
    self.plt_ = pytricia.PyTricia(128)
    for pid, mapping in self.nmap_.items():
        # Insert both address families; missing keys default to empty.
        for family in ('ipv4', 'ipv6'):
            for prefix in mapping.get(family, []):
                self.plt_[prefix] = pid
def testBasic(self):
    """Longest-prefix lookup, containment, has_key and keys() basics."""
    pyt = pytricia.PyTricia()
    pyt["10.0.0.0/8"] = 'a'
    pyt["10.1.0.0/16"] = 'b'

    # Longest-prefix match for exact prefixes and plain addresses.
    for addr, expected in [("10.0.0.0/8", 'a'), ("10.1.0.0/16", 'b'),
                           ("10.0.0.0", 'a'), ("10.1.0.0", 'b'),
                           ("10.1.0.1", 'b'), ("10.0.0.1", 'a')]:
        self.assertEqual(pyt[addr], expected)

    for covered in ('10.0.0.0', '10.1.0.0', '10.0.0.1'):
        self.assertTrue(covered in pyt)
    for uncovered in ('9.0.0.0', '0.0.0.0'):
        self.assertFalse(uncovered in pyt)

    # has_key is exact-match only (checked twice, as in the original).
    for _ in range(2):
        self.assertTrue(pyt.has_key('10.0.0.0/8'))
        self.assertTrue(pyt.has_key('10.1.0.0/16'))
        self.assertFalse(pyt.has_key('10.2.0.0/16'))
        self.assertFalse(pyt.has_key('9.0.0.0/8'))
    self.assertFalse(pyt.has_key('10.0.0.1'))
    self.assertFalse(pyt.has_key('10.0.0.0'))

    self.assertListEqual(sorted(['10.0.0.0/8', '10.1.0.0/16']),
                         sorted(pyt.keys()))
def testExceptions(self):
    """Malformed prefixes raise ValueError; absent valid ones raise KeyError."""
    pyt = pytricia.PyTricia(32)

    # "1.2.3/24" is syntactically invalid -> ValueError from every API.
    with self.assertRaises(ValueError):
        pyt.insert("1.2.3/24", "a")
    with self.assertRaises(ValueError):
        pyt["1.2.3/24"]
    with self.assertRaises(ValueError):
        pyt.get("1.2.3/24")
    with self.assertRaises(ValueError):
        pyt.delete("1.2.3/24")
    with self.assertRaises(ValueError):
        pyt.has_key('1.2.3/24')

    # "1.2.3.0/24" is valid but absent -> KeyError on lookup/delete,
    # and False from the membership-style checks.
    with self.assertRaises(KeyError):
        pyt["1.2.3.0/24"]
    with self.assertRaises(KeyError):
        pyt.delete("1.2.3.0/24")
    self.assertFalse(pyt.has_key('1.2.3.0/24'))
    self.assertFalse('1.2.3.0/24' in pyt)
def testGetKey(self):
    """get_key returns the longest covering prefix, or None when absent."""
    trie = pytricia.PyTricia()
    trie.insert("10.0.0.0/8", "a")
    self.assertEqual("10.0.0.0/8", trie.get_key("10.0.0.0/8"))
    self.assertEqual("10.0.0.0/8", trie.get_key("10.42.42.42"))
    self.assertIsNone(trie.get_key("11.0.0.0/8"))
    # A more specific prefix wins the match once inserted.
    trie.insert("10.42.0.0/16", "b")
    self.assertEqual("10.42.0.0/16", trie.get_key("10.42.42.42"))
def __init__(self, address_family, fib, log, log_id):
    """Route table for one address family, backed by the given FIB."""
    # The FIB must serve the same address family as this table.
    assert fib.address_family == address_family
    self.address_family = address_family
    self.fib = fib
    self._log = log
    self._log_id = log_id
    # Patricia Trie of _Destination objects indexed by prefix
    self.destinations = pytricia.PyTricia()
def read_topo(filename, local_policy=None):
    """Load a YAML topology file into a networkx Graph.

    Each node gets 'routing'/'fine_grained' Patricia tries plus its
    declared ip-prefix list; the module-level ip_prefixes map is updated
    to point each prefix at its owning node id.
    """
    # BUGFIX: use a context manager so the file handle is closed (it was
    # leaked before), and safe_load so arbitrary YAML tags cannot execute
    # code. NOTE(review): if the topology files rely on custom YAML tags,
    # switch to yaml.load(f, Loader=yaml.FullLoader) instead.
    with open(filename) as f:
        data = yaml.safe_load(f)
    G = networkx.Graph()
    nodes = data["nodes"]
    links = data["links"]
    for n in nodes:
        nid = nodes[n]['id']
        G.add_node(nid, name=n, **nodes[n])
        G.node[nid]['type'] = nodes[n]['type']
        G.node[nid]['ip-prefix'] = nodes[n].get('ip-prefix', [])
        G.node[nid]['routing'] = pytricia.PyTricia()
        G.node[nid]['fine_grained'] = pytricia.PyTricia()
        # Record prefix ownership in the module-level reverse map.
        for prefix in nodes[n].get('ip-prefix', []):
            ip_prefixes[prefix] = nid
    for l in links:
        G.add_edge(*l)
    return G
def testGetKeyIP6(self):
    """get_key on an IPv6 trie returns the longest covering prefix."""
    trie = pytricia.PyTricia(128)
    trie.insert("2001:db8:10::/48", "a")
    self.assertEqual("2001:db8:10::/48", trie.get_key("2001:db8:10::/48"))
    self.assertEqual("2001:db8:10::/48", trie.get_key("2001:db8:10:42::1"))
    self.assertIsNone(trie.get_key("2001:db8:11::/48"))
    # A more specific /64 wins the match once inserted.
    trie.insert("2001:db8:10:42::/64", "b")
    self.assertEqual("2001:db8:10:42::/64",
                     trie.get_key("2001:db8:10:42::1"))