def __init__(self,
             data  # Data suitable for this class
             ):
    valid, message = data_is_valid(data)
    if not valid:
        raise ValueError("Invalid data: %s" % message)

    self.source = data['source']
    self.bind = data.get('bind', None)
    self.update = pscheduler.iso8601_as_timedelta(data['update'])
    self.retry = pscheduler.iso8601_as_timedelta(data['retry'])
    self.fail_state = data.get('fail-state', False)

    self.exclusions = radix.Radix()
    if 'exclude' in data:
        try:
            for excl in data['exclude']:
                self.exclusions.add(excl)
        except ValueError:
            raise ValueError("Invalid IP or CIDR '%s'" % excl)

    # TODO: Would be nice to support a timeout so the system
    # doesn't sit for too long.

    self.cidrs = radix.Radix()
    self.length = 0

    # Prime the timer with the epoch and do a first load of the list
    self.next_attempt = datetime.datetime.utcfromtimestamp(0)
    self.__populate_cidrs__()
def check_python_radix():
    """Check if py-radix is ok."""
    import radix

    # Check if search_best() is patched
    r = radix.Radix()
    r.add("10.0.0.0/8")
    r.add("10.0.0.0/16")
    if r.search_best("10.0.0.0/12").prefix != "10.0.0.0/8":
        # In buggy versions, r.search_best().prefix is equal to 10.0.0.0/16
        message = "search_best() is broken!\n"
        message += " Please upgrade py-radix."
        raise CriticalException(message)

    # Check if the search_covering() method exists
    r = radix.Radix()
    try:
        r.search_covering("192.168.0.0/24")
    except AttributeError:
        message = "search_covering() does not exist!\n"
        message += " Please upgrade py-radix."
        raise CriticalException(message)

    # Check if the search_covered() method exists
    r = radix.Radix()
    try:
        r.search_covered("192.168.0.0/24")
    except AttributeError:
        message = "search_covered() does not exist!\n"
        message += " Please upgrade py-radix."
        raise CriticalException(message)
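
# For reference, a minimal sketch of the behaviour the checks above assert on
# a healthy (patched, recent) py-radix; the trailing comments show expected
# values, not captured output.
import radix

r = radix.Radix()
r.add("10.0.0.0/8")
r.add("10.0.0.0/16")
print(r.search_best("10.0.0.0/12").prefix)  # '10.0.0.0/8' (most specific cover)
print(sorted(n.prefix for n in r.search_covering("10.0.0.0/24")))
# ['10.0.0.0/16', '10.0.0.0/8'] -- less specifics covering the query
print(sorted(n.prefix for n in r.search_covered("10.0.0.0/8")))
# ['10.0.0.0/16', '10.0.0.0/8'] -- prefixes contained within the query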
def test_06__deletes(self):
    tree = radix.Radix()
    node1 = tree.add("10.0.0.0/8")
    self.assertRaises(KeyError, tree.delete, "127.0.0.1")
    self.assertRaises(KeyError, tree.delete, "10.0.0.0/24")
    node = tree.search_best("10.0.0.10")
    self.assertEqual(node, node1)
def aggregate(tree):
    prefixes = list(tree.prefixes())
    if len(prefixes) == 1:
        return tree
    r_tree = radix.Radix()
    # test 1: can we join adjacent prefixes into larger prefixes?
    for prefix in prefixes[:-1]:
        # current prefix
        cp = ip_network(prefix)
        # bail out if we have ::/0 or 0.0.0.0/0
        if str(cp) in ("::/0", "0.0.0.0/0"):
            r_tree.add(str(cp))
            continue
        # fetch next prefix
        # FIXME
        np = ip_network(prefixes[prefixes.index(prefix) + 1])
        # join if the next prefix is the sibling of the current one
        # (address_exclude() yields the supernet minus the current prefix)
        if list(cp.supernet().address_exclude(cp)) == [np]:
            r_tree.add(str(cp.supernet()))
        # test 2: is the prefix already covered?
        elif tree.search_worst(prefix).prefix in [prefix, None]:
            r_tree.add(prefix)
    # test 2: is the prefix already covered? (for last item)
    if len(prefixes) > 1:
        last = r_tree.search_worst(prefixes[-1])
        if last:
            if last.prefix == prefixes[-1]:
                r_tree.add(prefixes[-1])
        else:
            r_tree.add(prefixes[-1])
    return r_tree
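
# A minimal demo of aggregate(), assuming the module-level `radix` and
# `ip_network` imports the function above relies on: two adjacent /25s
# join into their covering /24.
import radix
from ipaddress import ip_network

t = radix.Radix()
t.add("192.0.2.0/25")
t.add("192.0.2.128/25")  # sibling of the /25 above
print(aggregate(t).prefixes())  # expected: ['192.0.2.0/24']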
def test_31_parent(self):
    tree = radix.Radix()
    root = tree.add('0.0.0.0/0')
    self.assertEqual(root.parent, None)
    parent_node = tree.add('10.0.0.0/23')
    node1 = tree.add('10.0.0.0/24')
    self.assertEqual(node1.parent, parent_node)
def main():
    folder_num = int(input("Enter folder number: "))
    overwrite = int(input("Enter 0 to overwrite, 1 to read only: "))
    start_time = time.time()
    file_names, feature_file = init_files(folder_num, overwrite)
    alarm("short")
    Dict = []
    rtree = radix.Radix()
    count = 0
    for i in range(0, len(file_names)):
        ann, num_updates, num_withdrawals = announcement_as(file_names[i])
        if i == 0:
            announcement_saver_to_radix_tree(ann, file_names[i], rtree)
            old_peers, avgpeers = get_peers(ann)
            old_origins, avgorigins = get_origins(ann)
            old_prefixes = get_Unique_prefixes(ann)
            old_num_updates = num_updates
            old_num_withdrawals = num_withdrawals
        else:
            old_peers, old_origins, old_prefixes = Dict_Builder(
                Dict, ann, file_names[i - 1], file_names[i], rtree,
                num_updates, num_withdrawals, old_peers, old_origins,
                old_prefixes, old_num_updates, old_num_withdrawals)
            old_num_updates = num_updates
            old_num_withdrawals = num_withdrawals
    alarm("short")
    print_Dict(Dict, feature_file)
    printstats(Dict, get_folder_name_without_extension(folder_num))
    print("--- %s seconds ---" % (time.time() - start_time))
    print("end")
    alarm("short")
def __init__(self, name):
    global root_zone
    super(Inet6RootZone, self).__init__(name, None)
    root_zone.addDomain(self)
    self.rtree = radix.Radix()
def main():
    import fileinput

    args = parse_args(sys.argv[1:])

    if args.version:  # pragma: no cover
        print("aggregate6 %s" % aggregate6.__version__)
        sys.exit()

    p_tree = radix.Radix()

    for line in fileinput.input(args.args):
        if not line.strip():  # pragma: no cover
            continue
        for elem in line.strip().split():
            try:
                prefix_obj = ip_network(text(elem.strip()))
                prefix = text(prefix_obj)
            except ValueError:
                sys.stderr.write("ERROR: '%s' is not a valid IP network, "
                                 "ignoring.\n" % elem.strip())
                continue

            if args.ipv4_only and prefix_obj.version == 4:
                p_tree.add(prefix)
            elif args.ipv6_only and prefix_obj.version == 6:
                p_tree.add(prefix)
            elif not args.ipv4_only and not args.ipv6_only:
                p_tree.add(prefix)

    for prefix in aggregate_tree(p_tree).prefixes():
        print(prefix)
def __init__(self, debug_dir, debug_level, debug_name):
    self.rtree = radix.Radix()

    # Logger for the pipeline
    logger.setup_logger('fwtable', debug_dir + '/' + debug_name, debug_level)
    self.log = logging.getLogger('fwtable')
def createRadix(geoDate):
    rtree = radix.Radix()
    prefixASDict = getPrefixASDict(geoDate)
    prefixList = prefixASDict.keys()
    for prefix in prefixList:
        rtree.add(prefix)
    return rtree
def Create_Radix_Tree_From_RIBs(CURRENT_BGP_DATA_PATH, DATE):
    rtree = radix.Radix()
    for nfile, filename in enumerate(sorted(os.listdir(CURRENT_BGP_DATA_PATH))):
        print("Treating: %s (%s/12)" % (filename, 1 + nfile))
        Timestamp = DATE[0] + "." + DATE[1] + "." + DATE[2] + "." + \
            filename.split(".")[1]
        Timestamp = datetime.strptime(Timestamp, '%Y.%m.%d.%H%M')
        current_rib_file = os.path.join(CURRENT_BGP_DATA_PATH, filename)
        with gzip.open(current_rib_file, 'rb') as RIB_File:
            for line in RIB_File:
                Entry = line.decode()
                [Prefix, ASPath, DontCare] = Entry.split("|")
                if Prefix == "0.0.0.0/0" or "bird" in Prefix:
                    continue
                rnode = rtree.search_exact(Prefix)
                if not rnode:
                    rnode = rtree.add(Prefix)
                    rnode.data["ASPath"] = []
                rnode.data["ASPath"].append([ASPath, Timestamp])
    return rtree
def test_15__packed_addresses6(self):
    tree = radix.Radix()
    # 16 packed bytes for dead:beef:1234:5678:9abc:def0::
    p = b'\xde\xad\xbe\xef\x124Vx\x9a\xbc\xde\xf0\x00\x00\x00\x00'
    node = tree.add(packed=p, masklen=108)
    self.assertEqual(node.family, socket.AF_INET6)
    self.assertEqual(node.prefix, "dead:beef:1234:5678:9abc:def0::/108")
    self.assertEqual(node.packed, p)
def __init__(self, start):
    self.hijacks = dict()
    self.target_date = start
    # begin: added by liumin 2020-12-03
    self.aging_interval = datetime.timedelta(days=150)
    self.bgp_rtree = radix.Radix()
    # end
    self.target_datetime = datetime.datetime.strptime(
        user_args.startdate + " 00:00:00", "%Y%m%d %H:%M:%S")
    self.assigned_autsys = set()
    self.bogon_prefixes = set()
    self.FNULL = open(os.devnull, 'w')
    self.irr_rtree = radix.Radix()
    self.irr_data_dir = "irr_prefix_origins"
    self.rpki_rtree = radix.Radix()
    logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING"))
    self.initialize_data()
def test_14__packed_addresses4(self):
    tree = radix.Radix()
    # 4 packed bytes for 224.20.11.64
    p = b'\xe0\x14\x0b@'
    node = tree.add(packed=p, masklen=26)
    self.assertEqual(node.family, socket.AF_INET)
    self.assertEqual(node.prefix, "224.20.11.64/26")
    self.assertEqual(node.packed, p)
def _build_radix_tree(self, asn_isp_raw, ip_asn_raw):
    self._rtree = radix.Radix()

    # build the asn -> ISP lookup
    asn_isp_map = {}
    lines = asn_isp_raw.decode('utf-8', 'ignore').splitlines()
    for line in lines:
        try:
            asn = int(line[:6])
        except ValueError:
            # this occasionally fails, so skip the line if so
            continue
        isp = line[7:]
        asn_isp_map[asn] = isp

    # build the ipaddr -> ASN lookup
    lines = ip_asn_raw.decode('utf-8', 'ignore').splitlines()
    for line in lines:
        tokens = line.split()
        ipmask = tokens[0]
        asn = int(tokens[1])
        rnode = self._rtree.add(ipmask)
        rnode.data['asn'] = asn
        rnode.data['isp'] = asn_isp_map.get(asn, '')
def FormTree(self, prefixes):
    filePath = './data/' + str(date.today())
    rtree = radix.Radix()
    i = 0
    for prefix in prefixes[:1000]:
        i += 1
        rnode = rtree.add(prefix)
        rnode.data['visibility'] = None
        rnode.data['last_seen'] = None
        rnodeParent = rtree.search_worst(prefix)
        if rnodeParent.data['visibility'] is False:
            rnode.data['visibility'] = False
            print("Handling {}th prefix, Parent is already not visible".format(i))
        else:
            visibility = self.IsVisible(prefix)
            rnode.data['visibility'] = visibility['visibility']
            rnode.data['last_seen'] = visibility['last_seen']
            print("Handling {}th prefix, visibility: {}, last seen: {}".format(
                i, visibility['visibility'], visibility['last_seen']))
        if i % 100 == 0:
            print("Processed " + str(i) + " prefixes, saving intermediate data.")
            with open(filePath, 'wb+') as f:
                pickle.dump(rtree, f)
    print("Processed " + str(i) + " prefixes, saving intermediate data.")
    return rtree
def parse_config(config_path):
    network = dict()
    with open(config_path) as config_file:
        config_dict = yaml.safe_load(config_file)

    defaults = config_dict.get('defaults', {})
    prefixes = {
        netaddr.IPNetwork(prefix): info
        for prefix, info in config_dict['prefixes'].items()
    }

    for key in prefixes:
        if 'subnet' in prefixes[key]:
            subnet = list(key.subnet(prefixes[key]['subnet']))
            if subnet:
                for i in subnet:
                    network[i] = deepcopy(prefixes[key])
            else:
                network[key] = deepcopy(prefixes[key])
        else:
            network[key] = deepcopy(prefixes[key])

    for zone in network:
        if 'domain' not in network[zone]:
            network[zone]['domain'] = IP(str(zone.cidr)).reverseName()[:-1]

    rtree = radix.Radix()
    for prefix in network.keys():
        node = rtree.add(str(prefix))
        node.data['prefix'] = prefix

    return defaults, network, rtree
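
# A hypothetical config illustrating the shape parse_config() appears to
# expect; the 'defaults', 'prefixes', 'subnet', and 'domain' keys are taken
# from the parser above, the concrete values are invented for illustration.
SAMPLE_CONFIG = """\
defaults: {}
prefixes:
  192.0.2.0/24:
    domain: example.internal   # kept as-is; otherwise a reverse name is derived
  10.0.0.0/8:
    subnet: 16                 # expands the /8 into per-/16 entries
"""
# Written to disk, this would be consumed as:
#   defaults, network, rtree = parse_config("example.yml")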
def __init__(self, starttime, endtime, announceQueue, countQueue, ribQueue,
             spatialResolution=1, af=4, timeWindow=900, asnFilter=None,
             collectors=["route-views.linx", "route-views2", "rrc00", "rrc10"]):
    threading.Thread.__init__(self)

    self.__nbaddr = {
        4: {i: 2**(32 - i) for i in range(33)},
        6: {i: 2**(128 - i) for i in range(129)},
    }
    self.startts = int(dt2ts(starttime))
    self.endts = int(dt2ts(endtime))
    self.livemode = False
    if endtime > datetime.utcnow():
        self.livemode = True
    self.announceQueue = announceQueue
    self.countQueue = countQueue
    self.ribQueue = ribQueue
    self.spatialResolution = spatialResolution
    self.af = af
    self.asnFilter = asnFilter
    self.timeWindow = timeWindow
    self.rtree = radix.Radix()
    self.collectors = collectors

    self.ts = None
    self.peers = None
    self.peersASN = defaultdict(set)
    self.peersPerASN = defaultdict(list)

    self.counter = {
        "all": pathCountDict(),
        "origas": defaultdict(pathCountDict),
    }
def start_monitors(self):
    for proc_id in self.process_ids:
        try:
            proc_id[1].terminate()
        except ProcessLookupError:
            log.exception('process terminate')
    self.process_ids.clear()
    self.prefixes.clear()

    self.prefix_tree = radix.Radix()
    for rule in self.rules:
        try:
            for prefix in rule['prefixes']:
                node = self.prefix_tree.add(prefix)
                node.data['origin_asns'] = rule['origin_asns']
                node.data['neighbors'] = rule['neighbors']
                node.data['mitigation'] = rule['mitigation']
        except Exception:
            log.error('Exception', exc_info=True)

    # only keep super prefixes for monitors
    for prefix in self.prefix_tree.prefixes():
        self.prefixes.add(self.prefix_tree.search_worst(prefix).prefix)

    self.init_ris_instances()
    self.init_exabgp_instances()
    self.init_bgpstreamhist_instance()
    self.init_bgpstreamlive_instance()
    self.init_betabmp_instance()
def test_14__packed_addresses4(self):
    tree = radix.Radix()
    p = struct.pack('4B', 0xe0, 0x14, 0x0b, 0x40)
    node = tree.add(packed=p, masklen=26)
    self.assertEqual(node.family, socket.AF_INET)
    self.assertEqual(node.prefix, "224.20.11.64/26")
    self.assertEqual(node.packed, p)
def test_21__lots_of_prefixes(self):
    tree = radix.Radix()
    num_nodes_in = 0
    for i in range(0, 128):
        for j in range(0, 128):
            k = ((i + j) % 8) + 24
            node = tree.add("1.%d.%d.0" % (i, j), k)
            node.data["i"] = i
            node.data["j"] = j
            num_nodes_in += 1

    num_nodes_del = 0
    for i in range(0, 128, 5):
        for j in range(0, 128, 3):
            k = ((i + j) % 8) + 24
            tree.delete("1.%d.%d.0" % (i, j), k)
            num_nodes_del += 1

    num_nodes_out = 0
    for node in tree:
        i = node.data["i"]
        j = node.data["j"]
        k = ((i + j) % 8) + 24
        prefix = "1.%d.%d.0/%d" % (i, j, k)
        self.assertEqual(node.prefix, prefix)
        num_nodes_out += 1

    self.assertEqual(num_nodes_in - num_nodes_del, num_nodes_out)
    self.assertEqual(num_nodes_in - num_nodes_del, len(tree.nodes()))
def test_21__cpickle(self):
    if sys.version_info[0] >= 3:
        return
    tree = radix.Radix()
    num_nodes_in = 0
    for i in range(0, 128):
        for j in range(0, 128):
            k = ((i + j) % 8) + 24
            addr = "1.%d.%d.0" % (i, j)
            node = tree.add(addr, k)
            node.data["i"] = i
            node.data["j"] = j
            num_nodes_in += 1

    tree_pickled = cPickle.dumps(tree)
    del tree

    tree2 = cPickle.loads(tree_pickled)
    for i in range(0, 128):
        for j in range(0, 128):
            k = ((i + j) % 8) + 24
            addr = "1.%d.%d.0" % (i, j)
            node = tree2.search_exact(addr, k)
            self.assertNotEqual(node, None)
            self.assertEqual(node.data["i"], i)
            self.assertEqual(node.data["j"], j)
            node.data["j"] = j
    self.assertEqual(len(tree2.nodes()), num_nodes_in)
def _aggregate_phase1(tree):
    # check if prefix is already covered
    n_tree = radix.Radix()
    for prefix in tree.prefixes():
        if tree.search_worst(prefix).prefix == prefix:
            n_tree.add(prefix)
    return n_tree
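
# A small sketch of the phase-1 filter, assuming the module-level `radix`
# import used above: more specifics that already sit under a covering
# prefix are dropped.
import radix

t = radix.Radix()
t.add("10.0.0.0/8")
t.add("10.1.0.0/16")   # covered by 10.0.0.0/8, so filtered out
t.add("192.0.2.0/24")
print(_aggregate_phase1(t).prefixes())  # ['10.0.0.0/8', '192.0.2.0/24']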
def test_22_search_best(self):
    tree = radix.Radix()
    tree.add('10.0.0.0/8')
    tree.add('10.0.0.0/13')
    tree.add('10.0.0.0/16')
    self.assertEqual(
        tree.search_best('10.0.0.0/15').prefix, '10.0.0.0/13')
def store_subnet_event(self, event):
    """
    Store the subnet event in the database

    :param event: SubnetEvent instance
    :return: None
    """
    logging.debug('store_subnet_event for event: %s', event)
    assert isinstance(event, SubnetEvent)
    # Check if the l3out has been seen before and if not, set up the DB
    if not self.is_l3out_known(event.epg):
        if event.tenant not in self.db:
            self.db[event.tenant] = {}
        if event.l3out not in self.db[event.tenant]:
            self.db[event.tenant][event.l3out] = radix.Radix()
    # Store the subnet event
    if event.is_deleted():
        self.db[event.tenant][event.l3out].delete(event.subnet)
    else:
        # TODO new: should check to see if the node is already in the radix
        # tree and has the same l3instp
        # Add the subnet to the database
        logging.debug('Adding subnet %s to tenant %s epg %s',
                      event.subnet, event.tenant, event.epg)
        subnet_node = self.db[event.tenant][event.l3out].add(event.subnet)
        subnet_node.data['l3instp'] = event.l3instp
def test_26_search_covered(self):
    tree = radix.Radix()
    tree.add('10.0.0.0/8')
    tree.add('10.0.0.0/13')
    tree.add('10.0.0.0/31')
    tree.add('11.0.0.0/16')
    tree.add('10.30.2.1/32')
    tree.add('10.30.2.0/25')
    tree.add('0.0.0.0/0')

    self.assertEqual(
        [n.prefix for n in tree.search_covered('11.0.0.0/8')],
        ['11.0.0.0/16'])
    self.assertEqual(
        sorted([n.prefix for n in tree.search_covered('10.0.0.0/9')]),
        ['10.0.0.0/13', '10.0.0.0/31', '10.30.2.0/25', '10.30.2.1/32'])
    self.assertEqual(
        sorted([n.prefix for n in tree.search_covered('10.0.0.0/8')]),
        ['10.0.0.0/13', '10.0.0.0/31', '10.0.0.0/8',
         '10.30.2.0/25', '10.30.2.1/32'])
    self.assertEqual(
        [n.prefix for n in tree.search_covered('11.0.0.0/8')],
        ['11.0.0.0/16'])
    self.assertEqual(
        [n.prefix for n in tree.search_covered('10.30.2.64/32')], [])
    self.assertEqual(
        [n.prefix for n in tree.search_covered('21.0.0.0/8')], [])
    self.assertEqual(
        [n.prefix for n in tree.search_covered('10.0.0.1')], [])
    self.assertEqual(
        sorted([n.prefix for n in tree.search_covered('0.0.0.0/0')]),
        ['0.0.0.0/0', '10.0.0.0/13', '10.0.0.0/31', '10.0.0.0/8',
         '10.30.2.0/25', '10.30.2.1/32', '11.0.0.0/16'])
def __init__(self, nets, root_net=None):
    if not self._tree:
        self._tree = radix.Radix()
    self.root_net = root_net
    self.create_tree(nets)
def test_28_search_covered_super_node_error(self):
    tree = radix.Radix()
    tree.add('27.0.100.0/24')
    tree.add('27.0.101.0/24')
    self.assertEqual(
        [n.prefix for n in tree.search_covered('31.3.104.0/21')], [])
def load_ip_ranges():
    rtree = radix.Radix()
    with open(IP_RANGES) as f:
        data = json.load(f)
    prefixes = data['prefixes']
    for prefix in prefixes:
        rtree.add(prefix['ip_prefix']).data['prefix'] = prefix
    return rtree
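
# A hedged lookup sketch against the tree returned above; IP_RANGES and the
# per-entry dict layout ('ip_prefix' plus whatever else each entry carries)
# follow the AWS ip-ranges.json shape this loader appears to expect.
rtree = load_ip_ranges()
match = rtree.search_best("198.51.100.10")  # longest-prefix match, None if uncovered
if match:
    print(match.prefix, match.data['prefix'])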
def optimise_prefixes(*prefix_list):
    rt = radix.Radix()
    for prefix in prefix_list:
        rt.add(prefix)
    return {rt.search_worst(prefix).prefix for prefix in prefix_list}
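
# The behaviour follows from search_worst() returning the least specific
# covering prefix: more specifics collapse into their supernet.
print(optimise_prefixes("10.0.0.0/8", "10.1.0.0/16", "192.0.2.0/24"))
# expected: {'10.0.0.0/8', '192.0.2.0/24'}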