import os
import sys
from collections import Counter, defaultdict
from datetime import datetime, timezone
from time import sleep

from _pybgpstream import BGPStream, BGPRecord
from rtrlib import RTRManager

# Project-local helpers referenced below (get_push_timestamp,
# init_next_timestamp, split_prefix, check_ipv4, get_settings, DBConnector,
# Route, RouteCollectorMeta) are assumed to be importable from this package.


class BGPDataConsumer(object):
    """Consume BGP records and validate origins against an RPKI cache."""

    def __init__(self, route_collector="rrc00",
                 rpki_validator="rpki-validator.realmv6.org:8282"):
        self.rc = route_collector
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        # self._start_rtr_manager()
        self.stream = BGPStream()
        self.rec = BGPRecord()

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def _start_rtr_manager(self, timeout=30.0):
        # The original loop checked an undefined `status.error`; a plain
        # timeout is used here instead to detect a stalled connection.
        self.mgr.start()
        waited = 0.0
        while not self.mgr.is_synced():
            sleep(0.2)
            waited += 0.2
            if waited >= timeout:
                print("Connection error")
                sys.exit(1)
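# A minimal sketch of how the RTR manager is used for origin validation in the
# classes below (the ASN and prefix are illustrative values only):
#
#   mgr = RTRManager("rpki-validator.realmv6.org", 8282)
#   mgr.start()
#   result = mgr.validate(12654, "93.175.146.0", 24)
#   print(result.state)  # valid / unknown / invalid
#   mgr.stop()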
class BGPTest(object):
    """Smoke test: stream records from one collector and print them."""

    def __init__(self, route_collector="rrc00",
                 rpki_validator="rpki-validator.realmv6.org:8282"):
        self.rc = route_collector
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        # self._start_rtr_manager()
        self.stream = BGPStream()
        self.rec = BGPRecord()

    def _start_rtr_manager(self, timeout=30.0):
        # Same timeout-based check as in BGPDataConsumer; the original loop
        # referenced an undefined `status.error`.
        self.mgr.start()
        waited = 0.0
        while not self.mgr.is_synced():
            sleep(0.2)
            waited += 0.2
            if waited >= timeout:
                print("Connection error")
                sys.exit(1)

    def start_stream(self, start_time=None, end_time=0, route_collector=""):
        """Start streaming RIB records for the given collector and interval."""
        if route_collector == "":
            route_collector = self.rc
        self.stream.add_filter('collector', route_collector)
        self.stream.add_filter('record-type', 'ribs')
        if (start_time is None) or not isinstance(start_time, datetime):
            start_time = datetime.now(timezone.utc)
        if isinstance(end_time, datetime):
            end = int(end_time.timestamp())
        else:
            end = 0
        start = int(start_time.timestamp())
        print(start)
        # The original passed a fresh utcnow() and a hard-coded 0 here,
        # silently ignoring both arguments; use the computed interval instead.
        self.stream.add_interval_filter(start, end)
        self.stream.start()

    def get_records(self):
        while self.stream.get_next_record(self.rec):
            # Process only valid records.
            if self.rec.status != "valid":
                continue
            elem = self.rec.get_next_elem()
            while elem:
                # Print record and element information.
                print(self.rec.project, self.rec.collector, self.rec.type,
                      self.rec.time, self.rec.status)
                print(elem.type, elem.peer_address, elem.peer_asn, elem.fields)
                # prefix = elem.fields["prefix"].split('/')
                # result = self.mgr.validate(
                #     int(elem.fields["as-path"].split(" ")[-1]),
                #     prefix[0], prefix[1])
                elem = self.rec.get_next_elem()
        print("done.")
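def _demo_bgp_test():
    """Minimal usage sketch for BGPTest (not called anywhere in this module).

    Streams RIB records from the default collector starting now and prints
    every element; the function name and flow are illustrative only.
    """
    tester = BGPTest()
    tester.start_stream()
    tester.get_records()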
class BGPDataAggregator(object):
    """Aggregate live BGP data per collector and push metadata to the DB."""

    def __init__(self, filters=None,
                 rpki_validator="rpki-validator.realmv6.org:8282",
                 settings_file="../settings.json"):
        if filters is None:  # avoid a shared mutable default argument
            filters = {"collector": ["rrc00"]}
        self.stream = BGPStream()
        self.filters = filters
        self.route_table = dict()
        self.i = 0
        self.metadata_vp = dict()
        self.metadata_rc = dict()
        self.peers = Counter()
        self.prefix4 = Counter()
        self.prefix6 = Counter()
        start_timestamp = get_push_timestamp(datetime.now(timezone.utc))
        for filter_type, filter_array in filters.items():
            for filter_value in filter_array:
                self.stream.add_filter(filter_type, filter_value)
        for collector in filters["collector"]:
            self.route_table[collector] = defaultdict(dict)
            self.metadata_vp[collector] = defaultdict(list)
            self.metadata_rc[collector] = RouteCollectorMeta(None, 0, 0, 0, 0)
            self.peers[collector] = defaultdict(int)
            self.prefix4[collector] = defaultdict(int)
            self.prefix6[collector] = defaultdict(int)
        settings = get_settings(settings_file)
        settings["db"]["password"] = os.environ["PGPASS"]
        self.db = DBConnector(settings["db"])
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        self.mgr.start()
        self.start_collecting(start_timestamp)

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def push_data(self, timestamp):
        print("UPDATE:", timestamp)
        self.db.update_vp_meta(self.metadata_vp)
        for rc in self.metadata_rc.keys():
            self.metadata_rc[rc] = RouteCollectorMeta(
                rc,
                timestamp,
                len(self.peers[rc]),
                len(self.prefix4[rc]),
                len(self.prefix6[rc]),
            )
        self.db.update_rc_meta(self.metadata_rc)

    def start_collecting(self, start_timestamp, end_timestamp=0):
        self.stream.add_interval_filter(start_timestamp, end_timestamp)
        print("Start BGPStream:", start_timestamp, end_timestamp)
        next_timestamp = init_next_timestamp(start_timestamp, 5)
        print("Next Push to DB at:", next_timestamp)
        self.stream.start()
        rec = BGPRecord()
        while self.stream.get_next_record(rec):
            if rec.status != "valid":
                continue
            # Push the aggregated metadata every five minutes.
            if rec.time >= next_timestamp:
                self.push_data(next_timestamp)
                next_timestamp += 300
            elem = rec.get_next_elem()
            while elem:
                prefix = elem.fields["prefix"]
                ip, mask_len = split_prefix(prefix)
                # Check whether the prefix is IPv4 or IPv6.
                is_v4 = check_ipv4(ip)
                old_elem = self.route_table[rec.collector][
                    (elem.peer_asn, elem.peer_address)].get(prefix)
                if elem.type == "R" or elem.type == "A":
                    origin_asn = elem.fields["as-path"].split(" ")[-1]
                    try:
                        origin_asn = int(origin_asn)
                    except ValueError:
                        # Skip AS sets and other non-numeric origins.
                        elem = rec.get_next_elem()
                        continue
                    # Only announcements carry an origin to validate; the
                    # original also called validate() for withdrawals, with
                    # an empty origin ASN.
                    validated = self.mgr.validate(origin_asn, ip, mask_len)
                    self.route_table[rec.collector][
                        (elem.peer_asn, elem.peer_address)][prefix] = Route(
                            origin_asn,
                            rec.collector,
                            prefix,
                            is_v4,
                            validated.state.value,
                        )
                    if old_elem:
                        if old_elem.type != validated.state.value:
                            # The validation enum is 0: valid, 1: unknown,
                            # 2: invalid; the metadata list mirrors it at
                            # indices 4, 5 and 6, so valid sits at index 4.
                            self.metadata_vp[rec.collector][
                                (elem.peer_asn, elem.peer_address)][
                                    4 + old_elem.type] -= 1
                            self.metadata_vp[rec.collector][
                                (elem.peer_asn, elem.peer_address)][
                                    4 + validated.state.value] += 1
                    else:
                        if not self.metadata_vp[rec.collector].get(
                                (elem.peer_asn, elem.peer_address)):
                            # Initialize the metadata entry if it does not
                            # exist yet.
                            self.metadata_vp[rec.collector][
                                (elem.peer_asn, elem.peer_address)] = [
                                    elem.peer_asn,
                                    elem.peer_address,
                                    rec.collector,
                                    next_timestamp,
                                    0,
                                    0,
                                    0,
                                ]
                        # Update the vantage-point metadata the same way as
                        # above.
                        self.metadata_vp[rec.collector][
                            (elem.peer_asn, elem.peer_address)][
                                4 + validated.state.value] += 1
                        self.metadata_vp[rec.collector][
                            (elem.peer_asn, elem.peer_address)][
                                3] = next_timestamp
                        self.peers[rec.collector][elem.peer_asn] += 1
                        if is_v4:
                            self.prefix4[rec.collector][prefix] += 1
                        else:
                            self.prefix6[rec.collector][prefix] += 1
                elif elem.type == "W":
                    if old_elem:
                        # Reduce the prefix count for the right address family.
                        if is_v4:
                            self.prefix4[rec.collector][prefix] -= 1
                            if self.prefix4[rec.collector][prefix] == 0:
                                del self.prefix4[rec.collector][prefix]
                        else:
                            self.prefix6[rec.collector][prefix] -= 1
                            if self.prefix6[rec.collector][prefix] == 0:
                                del self.prefix6[rec.collector][prefix]
                        # Reduce the number of routes seen from this peer ASN.
                        self.peers[rec.collector][elem.peer_asn] -= 1
                        if self.peers[rec.collector][elem.peer_asn] == 0:
                            del self.peers[rec.collector][elem.peer_asn]
                        # Update the valid/unknown/invalid counts.
                        self.metadata_vp[rec.collector][
                            (elem.peer_asn, elem.peer_address)][
                                4 + old_elem.type] -= 1
                        # Update the metadata timestamp.
                        self.metadata_vp[rec.collector][
                            (elem.peer_asn, elem.peer_address)][
                                3] = next_timestamp
                        # Remove the entry from the route table.
                        self.route_table[rec.collector][
                            (elem.peer_asn, elem.peer_address)].pop(
                                prefix, None)
                    else:
                        # TODO: log withdrawals for prefixes that were never
                        # seen announced.
                        pass
                elem = rec.get_next_elem()
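# The helpers used above are project-local; minimal sketches of plausible
# implementations, inferred from the call sites (assumptions, not this repo's
# actual code):
#
#   def split_prefix(prefix):
#       ip, mask_len = prefix.split("/")
#       return ip, int(mask_len)
#
#   def check_ipv4(ip):
#       return ":" not in ip
#
#   def init_next_timestamp(start_timestamp, minutes):
#       interval = minutes * 60
#       return start_timestamp - (start_timestamp % interval) + interval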
class BGPLocalAggregator(object):
    """Like BGPDataAggregator, but reads BGP data from a local dump file."""

    def __init__(self, filters=None,
                 rpki_validator="rpki-validator.realmv6.org:8282",
                 db="metasnap.db",
                 dump_file='/Users/mx/Projects/Uni/bgp-group/bgp_dump.txt'):
        if filters is None:  # avoid a shared mutable default argument
            filters = {'collector': ['rrc00']}
        # This BGPStream variant reads from a local dump file; the previously
        # hard-coded path is now a parameter with the same default.
        self.stream = BGPStream(dump_file)
        self.filters = filters
        self.route_table = dict()
        self.i = 0
        self.metadata_vp = dict()
        self.metadata_rc = dict()
        self.peers = Counter()
        self.prefix4 = Counter()
        self.prefix6 = Counter()
        start_timestamp = get_push_timestamp(datetime.now(timezone.utc))
        # for filter_type, filter_array in filters.items():
        #     for filter_value in filter_array:
        #         self.stream.add_filter(filter_type, filter_value)
        for collector in filters['collector']:
            self.route_table[collector] = defaultdict(dict)
            self.metadata_vp[collector] = defaultdict(list)
            self.metadata_rc[collector] = defaultdict(int)
            self.peers[collector] = defaultdict(int)
            self.prefix4[collector] = defaultdict(int)
            self.prefix6[collector] = defaultdict(int)
        # self.db = DBConnector(db, read_only=False)
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        self.mgr.start()
        self.start_collecting(start_timestamp, start_timestamp)

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def start_collecting(self, start_timestamp, end_timestamp=0):
        # self.stream.add_interval_filter(start_timestamp, end_timestamp)
        print("Start BGPStream:", start_timestamp, end_timestamp)
        self.stream.start()
        rec = self.stream.get_next_record()
        while rec:
            if rec.status == "valid":
                elem = rec.get_next_elem()
                while elem:
                    prefix = elem.fields['prefix']
                    ip, mask_len = split_prefix(prefix)
                    # Check whether the prefix is IPv4 or IPv6.
                    is_v4 = check_ipv4(ip)
                    old_elem = self.route_table[rec.collector][
                        (elem.peer_asn, elem.peer_address)].get(prefix)
                    if elem.type == 'R' or elem.type == 'A':
                        origin_asn = elem.fields['as-path'].split(' ')[-1]
                        try:
                            origin_asn = int(origin_asn)
                        except ValueError:
                            # Skip AS sets and other non-numeric origins.
                            elem = rec.get_next_elem()
                            continue
                        validated = self.mgr.validate(origin_asn, ip, mask_len)
                        self.route_table[rec.collector][
                            (elem.peer_asn, elem.peer_address)][prefix] = Route(
                                origin_asn, rec.collector, prefix, is_v4,
                                validated.state.value)
                        if old_elem:
                            if old_elem.type != validated.state.value:
                                # The validation enum is 0: valid, 1: unknown,
                                # 2: invalid; here the metadata list mirrors it
                                # at indices 3, 4 and 5, so valid sits at
                                # index 3.
                                self.metadata_vp[rec.collector][elem.peer_asn][
                                    3 + old_elem.type] -= 1
                                # The original added the raw enum here; use its
                                # value, as everywhere else.
                                self.metadata_vp[rec.collector][elem.peer_asn][
                                    3 + validated.state.value] += 1
                        else:
                            if not self.metadata_vp[rec.collector].get(
                                    elem.peer_asn):
                                # Initialize the metadata entry if it does not
                                # exist yet.
                                self.metadata_vp[rec.collector][elem.peer_asn] = \
                                    [elem.peer_asn, rec.collector, rec.time,
                                     0, 0, 0]
                            # Update the vantage-point metadata the same way
                            # as above.
                            self.metadata_vp[rec.collector][elem.peer_asn][
                                3 + validated.state.value] += 1
                            self.metadata_vp[rec.collector][
                                elem.peer_asn][2] = rec.time
                            self.peers[rec.collector][elem.peer_asn] += 1
                            if is_v4:
                                self.prefix4[rec.collector][prefix] += 1
                            else:
                                self.prefix6[rec.collector][prefix] += 1
                    elif elem.type == 'W':
                        if old_elem:
                            # Reduce the prefix count for the right address
                            # family.
                            if is_v4:
                                self.prefix4[rec.collector][prefix] -= 1
                                if self.prefix4[rec.collector][prefix] == 0:
                                    del self.prefix4[rec.collector][prefix]
                            else:
                                self.prefix6[rec.collector][prefix] -= 1
                                if self.prefix6[rec.collector][prefix] == 0:
                                    # The original deleted from prefix4 here,
                                    # a copy-paste bug.
                                    del self.prefix6[rec.collector][prefix]
                            # Reduce the number of routes seen from this peer
                            # ASN (the original deleted a prefix4 entry here,
                            # another copy-paste bug).
                            self.peers[rec.collector][elem.peer_asn] -= 1
                            if self.peers[rec.collector][elem.peer_asn] == 0:
                                del self.peers[rec.collector][elem.peer_asn]
                            # Update the valid/unknown/invalid counts.
                            self.metadata_vp[rec.collector][elem.peer_asn][
                                3 + old_elem.type] -= 1
                            # Update the metadata timestamp.
                            self.metadata_vp[rec.collector][
                                elem.peer_asn][2] = rec.time
                            # Remove the entry from the route table.
                            self.route_table[rec.collector][
                                (elem.peer_asn, elem.peer_address)].pop(
                                    prefix, None)
                        else:
                            # TODO: log withdrawals for prefixes that were
                            # never seen announced.
                            pass
                    elem = rec.get_next_elem()
            rec = self.stream.get_next_record()
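# Offline usage sketch (instantiating starts processing immediately; the
# dump_file default is the hard-coded path above):
#
#   BGPLocalAggregator(filters={'collector': ['rrc00']},
#                      dump_file='bgp_dump.txt')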
class BGPCounter(object):
    """Count BGP element types (announcements, withdrawals, ...) in a stream."""

    def __init__(self, filters=None,
                 rpki_validator="rpki-validator.realmv6.org:8282",
                 db="metasnap.db"):
        if filters is None:  # avoid a shared mutable default argument
            filters = {'collector': ['rrc00']}
        self.stream = BGPStream()
        self.filters = filters
        self.route_table = dict()
        self.i = 0
        for filter_type, filter_array in filters.items():
            for filter_value in filter_array:
                self.stream.add_filter(filter_type, filter_value)
        for collector in filters['collector']:
            self.route_table[collector] = defaultdict(dict)
        # self.db = DBConnector(db, read_only=False)
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        self.mgr.start()
        self.counter = Counter()
        start_timestamp = self.get_push_timestamp(datetime.now(timezone.utc))
        # self.start_collecting(start_timestamp,
        #                       int(datetime.now(timezone.utc).strftime("%s")))
        self.start_collecting(start_timestamp, start_timestamp)

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def get_push_timestamp(self, start_time):
        """Round down to the most recent push hour (00:00, 08:00 or 16:00)."""
        hours = [0, 8, 16, 24]
        # Find the closest preceding push hour; stopping at len - 1 keeps
        # hours[i + 1] in range.
        for i in range(len(hours) - 1):
            if hours[i + 1] > start_time.hour:
                break
        start_time = start_time.replace(hour=hours[i], minute=0, second=0,
                                        microsecond=0)
        return int(start_time.timestamp())

    def start_collecting(self, start_timestamp, end_timestamp=0):
        self.stream.add_interval_filter(start_timestamp, end_timestamp)
        print("Start BGPStream:", start_timestamp, end_timestamp)
        self.stream.start()
        rec = BGPRecord()
        act_dump = "unknown"
        while self.stream.get_next_record(rec):
            self.i += 1
            if self.i % 10000 == 0:
                print(self.i)
            if rec.status == "valid":
                if act_dump != rec.dump_position:
                    act_dump = rec.dump_position
                    print('Dump Position:', rec.dump_position)
                elem = rec.get_next_elem()
                while elem:
                    self.counter[elem.type] += 1
                    elem = rec.get_next_elem()
        print(self.counter)
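if __name__ == "__main__":
    # Minimal sketch of driving one of the classes above; running BGPCounter
    # (and the collector choice) is illustrative, not part of the module.
    BGPCounter(filters={'collector': ['rrc00']})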