Code example #1
File: bgpstream_test.py Project: CAIDA/bgp-hackathon
    def calculate_effective_prefixes(self, start_interval=1451692800):
        '''
        This method calculates the number of effective /24
        prefixes that lie underneath a specified route prefix
        :param start_interval: the start interval of the data
        stream
        :return: number of effective prefixes under the route's
        root prefix
        '''

        stream = BGPStream()
        rec = BGPRecord()

        stream.add_filter('collector', self.collector_name)
        stream.add_filter('record-type', 'ribs')

        if isinstance(start_interval, datetime.datetime):
            interval_start_utc = self.convert_interval_to_utc(start_interval)
            stream.add_interval_filter(interval_start_utc - 300, interval_start_utc + 300)
        else:
            stream.add_interval_filter(start_interval - 300, start_interval + 300)

        stream.start()
        print "Starting routing table parsing"
        while (stream.get_next_record(rec)):
            elem = rec.get_next_elem()
            while elem:
                # Get the peer ASN and IP. We then construct a peer_id, since a collector
                # can establish multiple connections with the same ASN.
                peer_asn = elem.peer_asn
                peer_asn_ip = elem.peer_address

                # make this an unmodifiable tuple
                peer_id = (peer_asn, peer_asn_ip)
                peer_route_trie = self.peer_trie_handles[peer_id]

                # Do a single level search of the route prefix to
                # find the left and right prefix advertisements
                single_level_prefixes = \
                    peer_route_trie.single_level_search(elem.fields['prefix'])
                print single_level_prefixes

                prefix_count = len(single_level_prefixes)
                root_24_prefix_count = 0
                lr_24_prefix_count1 = lr_24_prefix_count2 = 0

                # The /24 prefixes below the advertised prefix are calculated
                # as all the /24 prefixes served by the root - sum of the /24
                # prefixes served by root's children
                if prefix_count == 1:
                    root_24_prefix_count = \
                        2 ** (24 - int(str(single_level_prefixes[0]).lstrip('<')
                                       .rstrip('>').split('/')[1]))
                elif prefix_count == 2:
                    root_24_prefix_count = \
                        2 ** (24 - int(str(single_level_prefixes[0]).lstrip('<')
                                       .rstrip('>').split('/')[1]))
                    lr_24_prefix_count1 = \
                        2 ** (24 - int(str(single_level_prefixes[1]).lstrip('<')
                                        .rstrip('>').split('/')[1]))
                else:
                    root_24_prefix_count = \
                        2 ** (24 - int(str(single_level_prefixes[0]).lstrip('<')
                                       .rstrip('>').split('/')[1]))
                    lr_24_prefix_count1 = \
                        2 ** (24 - int(str(single_level_prefixes[1]).lstrip('<')
                                       .rstrip('>').split('/')[1]))
                    lr_24_prefix_count2 = \
                        2 ** (24 - int(str(single_level_prefixes[2]).lstrip('<')
                                       .rstrip('>').split('/')[1]))

                effective_24_prefix_count = \
                    root_24_prefix_count - (lr_24_prefix_count1 +
                                            lr_24_prefix_count2)

                print "Effective Prefix Count : ", \
                    effective_24_prefix_count

                for prefix in single_level_prefixes:
                    trie_node = peer_route_trie.search_exact(str(prefix))
                    as_path = trie_node.data['as-path'].split(" ")
                    as_path_headless = as_path[1:-1]
                    print "AS-Path : ", as_path
                    as_headless_len = len(as_path_headless)
                    if as_headless_len > 1:
                        for i in range(0, as_headless_len - 1):
                            print "Headless nodes : ", as_path_headless[i], \
                                as_path_headless[i + 1]
                            if as_path_headless[i] in self.aslink_datastore:
                                self.aslink_datastore[as_path_headless[i]] += 1
                            else:
                                self.aslink_datastore[as_path_headless[i]] = 1

                elem = rec.get_next_elem()
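
The branchy arithmetic above reduces to a single formula: a prefix of length L covers 2**(24 - L) /24 networks, and the effective count for the root is that figure minus the /24s already covered by the (at most two) children returned by the single-level search. A minimal sketch of that calculation, with illustrative helper names that are not part of the original code:

def count_slash24s(prefix_len):
    # Number of /24 prefixes covered by a prefix of the given length (assumes prefix_len <= 24).
    return 2 ** (24 - prefix_len)

def effective_slash24s(root_len, child_lens):
    # /24s under the root minus those already served by its children.
    return count_slash24s(root_len) - sum(count_slash24s(l) for l in child_lens)

# e.g. a /22 root with one /23 child leaves 4 - 2 = 2 effective /24s
assert effective_slash24s(22, [23]) == 2
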
Code example #2
    def get_data_graph(self):

        stream = BGPStream()
        rec = BGPRecord()
        stream.add_filter('record-type', 'ribs')
        stream.add_interval_filter(self.start, self.end)
        stream.start()

        as_graph = nx.Graph()

        while stream.get_next_record(rec):
            if rec.status == "valid":
                elem = rec.get_next_elem()
                while elem:
                    # collapse consecutive duplicate ASNs (prepending) into a clean AS-path list
                    as_path = [
                        k
                        for k, g in groupby(elem.fields['as-path'].split(" "))
                    ]
                    peer = str(elem.peer_asn)
                    # check that the AS path is usable: longer than one hop and starting at the peer ASN
                    if len(as_path) > 1 and as_path[0] == peer:
                        if re.match(self.pattern, elem.fields['prefix']):
                            self.as_prefix[as_path[-1]].add(
                                elem.fields['prefix'])
                            # add edges to the graph
                            for i in range(0, len(as_path) - 1):
                                as_graph.add_edge(as_path[i], as_path[i + 1])
                    elem = rec.get_next_elem()

        as_graph = self.add_geo_loc(as_graph)

        return as_graph
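
The groupby expression above collapses consecutive duplicate ASNs, which is how AS-path prepending is removed before the path is turned into graph edges. A standalone illustration of that idiom:

from itertools import groupby

raw_path = "3356 1299 1299 1299 64500"
as_path = [k for k, g in groupby(raw_path.split(" "))]
print(as_path)  # ['3356', '1299', '64500'] -- the prepended hops collapse to one entry
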
Code example #3
    def sendMessageToKafka(self, col_name, col_data):
        print "lalalala"
        stream = BGPStream()
        record = BGPRecord()

        time_start = int(col_data.get('ribs').get('latestDumpTime'))
        time_end = time_start + int(col_data.get('ribs').get('dumpPeriod'))

        stream.add_filter('collector', col_name)
        stream.add_filter('record-type', 'ribs')
        stream.add_interval_filter(time_start, time_end)
        print "Before Start"
        stream.start()
        print "After Start"

        while stream.get_next_record(record):
            if record.status == "valid":
                elem = record.get_next_elem()
                while elem:
                    # print "Record:{}".format(elem)
                    producer = Kafka_producer()
                    producer.send_data(col_name, json.dumps(elem))
                    elem = record.get_next_elem()
            else:
                print "## Current record not valid!"
        print "One Collector Finished"
Code example #4
    def create_trie_from_bgpstream_info(self, interval_start=1451692800):

        stream = BGPStream()
        rec = BGPRecord()

        stream.add_filter('collector', self.collector_name)
        stream.add_filter('record-type', 'ribs')

        if isinstance(interval_start, datetime.datetime):
            interval_start_utc = self.convert_interval_to_utc(interval_start)
            stream.add_interval_filter(interval_start_utc - 300, interval_start_utc + 300)
        else:
            stream.add_interval_filter(interval_start - 300, interval_start + 300)

        stream.start()

        while (stream.get_next_record(rec)):
            elem = rec.get_next_elem()
            while elem:
                # Get the peer ASN and IP. We then construct a peer_id, since a collector
                # can establish multiple connections with the same ASN.
                peer_asn = elem.peer_asn
                peer_asn_ip = elem.peer_address

                # make this an unmodifiable tuple
                peer_id = (peer_asn, peer_asn_ip)

                peer_route_trie = self.peer_trie_handles[peer_id]
                trie_node = peer_route_trie.add(elem.fields['prefix'])
                trie_node.data['as-path'] = elem.fields['as-path']
                elem = rec.get_next_elem()
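
The per-peer tries are keyed by the (peer ASN, peer address) tuple so that multiple sessions from the same ASN stay separate. The trie class and single_level_search are specific to this project, but the add/search_exact/data usage matches the py-radix interface; a purely illustrative sketch of the same bookkeeping with py-radix (the peer and prefix values are made up):

import radix
from collections import defaultdict

peer_trie_handles = defaultdict(radix.Radix)     # one radix trie per (peer_asn, peer_address)

peer_id = (64500, '198.51.100.1')                # hypothetical peer
node = peer_trie_handles[peer_id].add('192.0.2.0/24')
node.data['as-path'] = '64500 64501 64502'

print(peer_trie_handles[peer_id].search_exact('192.0.2.0/24').data['as-path'])
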
Code example #5
def downloader(start_date, duration):
    """Download BGP paths from Routeviews and RIPE NCC from a start date for a certain duration."""

    # Start of UNIX time
    base = int(datetime.datetime.strptime(start_date, '%m/%d/%Y').strftime('%s'))
    # Create a new bgpstream instance
    stream = BGPStream()
    stream.add_interval_filter(base, base + int(duration))
    stream.add_filter('record-type', 'ribs')
    stream.start()
    path_set = set()
    f = open('rib.txt', 'w')
    while True:
        rec = stream.get_next_record()
        if rec is None:
            break
        if rec.status != "valid":
            continue
        else:
            elem = rec.get_next_elem()
            while(elem):
                path = elem.fields['as-path']
                if '{' in path or '(' in path:
                    elem = rec.get_next_elem()
                    continue
                prefix = elem.fields['prefix']
                # Focus on IPv4 prefixes
                if ":" not in prefix and path not in path_set:
                    f.write(path.replace(' ', '|') + '\n')
                    path_set.add(path)
                elem = rec.get_next_elem()
    f.close()
Code example #6
def recv_bgpstream_rib(begin, until, collector):
    """
    Receive and parse BGP RIB records from a given bgpstream collector.
    """
    logging.info("CALL recv_bgpstream_rib")
    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector', collector)
    stream.add_filter('record-type', 'updates')
    stream.add_interval_filter(begin, until)
    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping.")
            continue
        bgp_message = None
        while (elem):
            if (elem.type.upper() == 'A') or (elem.type.upper() == 'R'):
                bgp_message = BGPmessage(elem.time, 'update')
                aspath = elem.fields['as-path'].split()
                for a in aspath:
                    if not '{' in a:  # ignore AS-SETs
                        bgp_message.add_as_to_path(a)
                bgp_message.add_announce(elem.fields['prefix'])
                output(bgp_message)
            elem = rec.get_next_elem()
        # end while (elem)
    # end while (stream...)
    output('FLUSH')
Code example #7
    def _get_data(self, prefix, datetime):
        """
        output example: [['15547', '8220', '1853', '1205'],[..another AS path..]]
        :param prefix:
        :param datetime: end interval
        :return: list of AS paths
        """
        print('[*] ris.py: _get_data() called')
        print('[*] ris.py: _get_data() prefix: {}'.format(prefix))
        start = int(datetime) - 20000  # 20000 seconds seems to be the shortest interval to get data from BGPstream
        stop = int(datetime)
        result = []

        stream = BGPStream()
        rec = BGPRecord()

        stream.add_filter('prefix', prefix)
        stream.add_filter('record-type', 'ribs')
        stream.add_filter('project', 'ris')
        stream.add_interval_filter(start, stop)

        stream.start()

        while stream.get_next_record(rec):
            if rec.status == "valid":
                elem = rec.get_next_elem()
                while elem:
                    as_path = elem.fields['as-path'].split()
                    as_path.append(' ')  # for tree creation
                    result.append(as_path)
                    elem = rec.get_next_elem()
        print('[*] ris.py: _get_data() finished.')
        return result
Code example #8
def run_bgpstream(args):
    (collector, start_time, end_time, data_type) = args

    # initialize and configure BGPStream
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    # NB: BGPStream uses inclusive/inclusive intervals, so subtract one off the
    # end time since we are using inclusive/exclusive intervals
    stream.add_interval_filter(start_time, end_time-1)
    stream.add_filter('record-type', data_type)
    stream.start()

    # per-peer data
    peers_data = {}

    # loop over all records in the stream
    while stream.get_next_record(rec):
        elem = rec.get_next_elem()
        # to track the peers that have elems in this record
        peer_signatures = set()
        # loop over all elems in the record
        while elem:
            # create a peer signature for this elem
            sig = peer_signature(rec, elem)
            peer_signatures.add(sig)
            # if this is the first time we have ever seen this peer, create
            # an empty result: (Pfxs_v4_set, Pfxs_v6_set, ASNs_v4_set, ASNs_v6_set)
            if sig not in peers_data:
                peers_data[sig] = [set(), set(), set(), set()]

            if 'prefix' in elem.fields:
                pfx = elem.fields['prefix']
                if ":" in pfx:
                    peers_data[sig][1].add(pfx)
                else:
                    peers_data[sig][0].add(pfx)

            if 'as-path' in elem.fields:
                path_split = elem.fields['as-path'].split()
                if ":" in pfx:
                    for ASN in path_split:
                        peers_data[sig][3].add(ASN)
                else:
                    for ASN in path_split:
                        peers_data[sig][2].add(ASN)

            elem = rec.get_next_elem()

        # peer_signatures now holds every peer that appeared in this record,
        # though this variant does not use it further

    # the time in the output row is truncated down to a multiple of
    # RESULT_GRANULARITY so that slices can be merged correctly
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)

    # for each peer that we processed data for, create an output row
    return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
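
The final truncation simply floors the slice's start time to a multiple of RESULT_GRANULARITY so that rows produced by different slices land on the same bin boundary and can be merged. For example, assuming a granularity of 3600 seconds:

import math

RESULT_GRANULARITY = 3600           # assumed value, for illustration only
start_time = 1451692950
binned = int(math.floor(start_time / RESULT_GRANULARITY) * RESULT_GRANULARITY)
print(binned)                       # 1451692800 -- truncated down to the hour boundary
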
Code example #9
def build_sql_db():
    conn = sqlite3.connect('bgp_stage.db')
    c = conn.cursor()

    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    stream.add_filter('collector','rrc11')
    stream.add_interval_filter(1438417216,1438417316)
    stream.start()

    while(stream.get_next_record(rec)):
        if rec.status != "valid":
            continue
        else:
            elem = rec.get_next_elem()
            while(elem):
                if elem.type == "AB":
                    prefix = elem.fields["prefix"]
                    as_path = elem.fields["as-path"].split(" ")
                    origin = as_path[-1]
                    time = elem.time


                    #IP Prefix database
                    ip_min, ip_max = calculate_min_max(prefix)
                    c.execute("SELECT ip_min FROM prefix_as WHERE ip_min = (?) AND ip_max = (?) AND as_o = (?)", (ip_min, ip_max, origin))
                    row = c.fetchone()
                    if row is not None:
                        c.execute("UPDATE prefix_as SET count = count + 1  WHERE ip_min = (?) AND ip_max = (?) AND as_o = (?)", (ip_min, ip_max, origin))
                    else:
                        c.execute("INSERT INTO prefix_as VALUES(?,?,?,?,?)", (ip_min, ip_max, origin, 1, time))


                    #AS link database
                    for as1,as2 in zip(as_path, as_path[1:]) :
                        c.execute("SELECT as_o FROM as_link WHERE as_o = (?) AND as_n = (?)",(as1,as2))
                        row = c.fetchone()
                        if row is not None:
                            c.execute("UPDATE as_link SET count = count + 1 WHERE as_o = (?) AND as_n = (?)",
                                      (as1, as2))
                        else:
                            c.execute("INSERT INTO as_link VALUES(?,?,?,?)", (as1, as2, 1, 0))

                elif elem.type == "WA":
                    prefix = elem.fields["prefix"]
                    time = elem.time
                    #Needs research

                    print(rec.project, rec.collector, rec.type, rec.time, rec.status,
                        elem.type, elem.peer_address, elem.peer_asn, elem.fields)
                    print(prefix,elem.time, "W")

                print(rec.project, rec.collector, rec.type, rec.time, rec.status,
                      elem.type, elem.peer_address, elem.peer_asn, elem.fields)
                elem = rec.get_next_elem()
            conn.commit()
    conn.close()
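
calculate_min_max is not shown in this excerpt; it presumably maps a prefix onto its first and last address so the prefix_as table can be range-queried by integer. A possible stand-in (an assumption, not the original helper) using the ipaddress module:

import ipaddress

def calculate_min_max(prefix):
    # Illustrative stand-in: first and last address of the prefix as integers.
    net = ipaddress.ip_network(prefix)
    return int(net.network_address), int(net.broadcast_address)

print(calculate_min_max(u'192.0.2.0/24'))  # (3221225984, 3221226239)
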
Code example #10
def dump2file(type, name, latestDumpTime, dumpDuration, dumpPeriod):
    print type + ' of ' + name + ': '
    csv_header = ['type', 'addr', 'as', 'prefix', 'next_hop', 'as_path']
    _file = None
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', c_name)
    if type == 'ribs':
        stream.add_filter('record-type', 'ribs')
        _file = open(c_name + '_ribs.csv', 'w+')
    elif type == 'updates':
        stream.add_filter('record-type', 'updates')
        _file = open(c_name + '_updates.csv', 'w+')
    stream.add_interval_filter(latestDumpTime, latestDumpTime + dumpPeriod)

    stream.start()

    count = 0
    useless_c = 0

    writer = csv.writer(_file)
    writer.writerow(csv_header)

    # Get next record
    while (stream.get_next_record(rec)):
        # Print the record information only if it is not a valid record
        if rec.status != "valid":
            # print rec.project, rec.collector, rec.type, rec.time, rec.status
            print 'current rec not valid.'
        else:
            elem = rec.get_next_elem()
            while (elem):
                useless_c += 1
                if useless_c % 1000 == 0:
                    print 'Got ' + str(useless_c) + ' elem totally.'
                # Print record and elem information
                if isIPV6(elem):  # ipv6 packet ignored
                    elem = rec.get_next_elem()
                    continue
                count += 1
                # print rec.project, rec.collector, rec.type, rec.time, rec.status,
                # print elem.type, elem.peer_address, elem.peer_asn
                # elem.fields contains four columns: communities, next-hop, prefix, as-path
                field = elem.fields
                prefix = field['prefix'] if 'prefix' in field.keys() else ''
                next_hop = field['next-hop'] if 'next-hop' in field.keys(
                ) else ''
                as_path = field['as-path'] if 'as-path' in field.keys() else ''
                as_path = as_path.replace(' ', '|')

                writer.writerow([
                    elem.type, elem.peer_address, elem.peer_asn, prefix,
                    next_hop, as_path
                ])

                elem = rec.get_next_elem()
    _file.close()
    print 'count: ' + str(count)
    return count
Code example #11
def main():
    (options, args) = getopts()
    start = options.start_time
    end = options.end_time

    target_prefs = Set()
    with open('./../../atlas/anchor_prefix.txt', 'rb') as br:
        for l in br:
            target_prefs.add(l.strip())

    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    with open('./data/stream_{0}'.format(start), 'wb') as bw:
        #for pref in target_prefs:
        for ptmp in target_prefs:
            stream.add_filter('prefix', ptmp)
        # stream.add_filter('prefix','0.0.0.0/0')

        # Consider RIPE RRC 10 only
        stream.add_filter('record-type', 'updates')
        stream.add_filter('collector', 'rrc00')

        # Consider this time interval:
        # Sat Aug  1 08:20:11 UTC 2015
        # stream.add_interval_filter(1438417216,1438417216)
        # stream.add_interval_filter(1451606400,1454785264
        stream.add_interval_filter(start, end)

        # Start the stream
        stream.start()

        # Get next record
        cnt = 0

        while stream.get_next_record(rec):
            # Print the record information only if it is not a valid record
            if rec.status != "valid":
                pass
                # print '*', rec.project, rec.collector, rec.type, rec.time, rec.status
            else:
                cnt += 1
                elem = rec.get_next_elem()
                while elem:
                    if elem.type == 'S':
                        elem = rec.get_next_elem()
                        continue
                    # Print record and elem information
                    # print rec.project, rec.collector, rec.type, rec.time, rec.status,
                    # print elem.type, elem.peer_address, elem.peer_asn, elem.fields, elem.pref
                    bw.write(
                        '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\n'.
                        format(rec.project, rec.collector, rec.type, rec.time,
                               rec.status, elem.type, elem.fields['prefix'],
                               elem.peer_address, elem.peer_asn, elem.fields))
                    bw.flush()
                    elem = rec.get_next_elem()

    print 'Successful termination; Start time: {0}'.format(start)
Code example #12
def run_bgpstream(args):
    (collector, start_time, end_time, data_type) = args

    # initialize and configure BGPStream
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    # NB: BGPStream uses inclusive/inclusive intervals, so subtract one off the
    # end time since we are using inclusive/exclusive intervals
    stream.add_interval_filter(start_time, end_time-1)
    stream.add_filter('record-type', data_type)
    stream.start()

    # per-peer data
    peers_data = {}

    # loop over all records in the stream
    while stream.get_next_record(rec):
        elem = rec.get_next_elem()
        # loop over all elems in the record
        while elem:
            # create a peer signature for this elem
            sig = peer_signature(rec, elem)
            # if this is the first time we have ever seen this peer, create
            # an empty result: (dict(Pfx_v4)=Pfx_origins,  dict(Pfx_v6)=Pfx_origins))
            if sig not in peers_data:
                peers_data[sig] = [{}, {}]

            if 'prefix' in elem.fields:
                pfx = elem.fields['prefix']
                origin = ""
                if 'as-path' in elem.fields:
                    path_split = elem.fields['as-path'].split()
                    if len(path_split) != 0:
                        origin = path_split[-1]

                if ":" in pfx:
                    if pfx not in peers_data[sig][1]:
                        peers_data[sig][1][pfx] = set()
                    # discard as origin: AS sets, and ASN=23456 [AS_TRANS]
                    if origin != "" and origin != "23456" and "{" not in origin:
                        peers_data[sig][1][pfx].add(origin)
                else:
                    if pfx not in peers_data[sig][0]:
                        peers_data[sig][0][pfx] = set()
                    # discard as origin: AS sets, and ASN=23456 [AS_TRANS]
                    if origin != "" and origin != "23456" and "{" not in origin:
                        peers_data[sig][0][pfx].add(origin)


            elem = rec.get_next_elem()

    # the time in the output row is truncated down to a multiple of
    # RESULT_GRANULARITY so that slices can be merged correctly
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)

    # for each peer that we processed data for, create an output row
    return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
Code example #13
def initUpdates(seedingCheckOverride, lastBGPUpdateProcessed): # Consider passing in last BGP update processed because of new system.
	global cursor, startTime, endTime, seeded, stream, rec
	startTime = lastBGPUpdateProcessed
	#cursor.execute("""SELECT intVal AS lastBGPUpdateProcessed FROM metadata WHERE name = 'lastBGPUpdateProcessed'""")
	#result = cursor.fetchone()
	#if result == None:
	#	print "ERROR: NO Start Time Found in DB (aka lastBGPUpdateProcessed). Recommended snd time is 1489224750 - int(2592000 * 4) (which is 1478856750) because this is a 4 month backlog."
	#	exit()
		#cursor.execute("""INSERT INTO metadata (name, intVal) VALUES ('lastBGPUpdateProcessed', {0})""".format(1489224750 - int(2592000 * 4)))
		#conn.commit()
	#else:
	#	(startTime,) = result
	
	
	cursor.execute("""SELECT intVal AS endTime FROM metadata WHERE name = 'endTime'""")
	result = cursor.fetchone()
	if result == None:
		print "ERROR: NO End Time Found in DB. Recommended end time is 1489224749 because this is the timestamp of the first cert."
		print "ERROR: Other recommended end time is 1491775893 which is when all certs have been covered."
		exit()
	else:
		(endTime,) = result
	
	if not seedingCheckOverride:
		cursor.execute("""SELECT stringVal AS seeded FROM metadata WHERE name = 'seeded'""")
		result = cursor.fetchone()
		if result == None:
			# We will assume that the DB is not seeded if there is no entry and not throw any errors in this case.
			seeded = False
			print "line 59 DB not seeded upon call to initUpdates. DB should be seeded with RIBs before updates can be processed. Call initSeeding first. Exiting."
			exit()
		else:
			strValSeeded = ""
			(strValSeeded,) = result
			if strValSeeded == "false":
				seeded = False
				print "line 66 DB not seeded upon call to initUpdates. DB should be seeded with RIBs before updates can be processed. Call initSeeding first. Exiting."
				exit()
			elif strValSeeded == "true":
				seeded = True
			else:
				print "Invalid value for seeded metadata entry. Exiting."
				exit()
	else:
		seeded = True

	# create a new bgpstream instance
	stream = BGPStream()
	# create a reusable bgprecord instance
	rec = BGPRecord()
	stream.add_filter('record-type','updates') # here we collect updates. This could be changed to ribs to instead acquire periodic snapshots of the RIBs.
	# configure the stream to retrieve Updates from the route-views listener.
	stream.add_filter('collector', 'route-views2')
	# getting updates only from one peer gives us only the preferred route of this peer and no rejected routes.
	# only get updates from level3. Level3 is a provider to ViaWest so their choice is a likely choice of ViaWest.
	stream.add_filter('peer-asn', '3356')
	stream.add_interval_filter(startTime, endTime)
	stream.start()
Code example #14
def main():
    (options, args) = getopts()
    start = options.start_time
    end = options.end_time

    target_prefs = Set()
    with open('./../../atlas/anchor_prefix.txt', 'rb') as br:
        for l in br:
            target_prefs.add(l.strip())
        
    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    with open('./data/stream_{0}'.format(start), 'wb') as bw:
        #for pref in target_prefs:
        for ptmp in target_prefs:
            stream.add_filter('prefix', ptmp)
        # stream.add_filter('prefix','0.0.0.0/0')

        # Consider RIPE RRC 10 only
        stream.add_filter('record-type', 'updates')
        stream.add_filter('collector', 'rrc00')

        # Consider this time interval:
        # Sat Aug  1 08:20:11 UTC 2015
        # stream.add_interval_filter(1438417216,1438417216)
        # stream.add_interval_filter(1451606400,1454785264
        stream.add_interval_filter(start, end)

        # Start the stream
        stream.start()

        # Get next record
        cnt = 0

        while stream.get_next_record(rec):
            # Print the record information only if it is not a valid record
            if rec.status != "valid":
                pass
                # print '*', rec.project, rec.collector, rec.type, rec.time, rec.status
            else:
                cnt += 1
                elem = rec.get_next_elem()
                while elem:
                    if elem.type == 'S':
                        elem = rec.get_next_elem()
                        continue
                    # Print record and elem information
                    # print rec.project, rec.collector, rec.type, rec.time, rec.status,
                    # print elem.type, elem.peer_address, elem.peer_asn, elem.fields, elem.pref
                    bw.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\n'.format(
                        rec.project, rec.collector, rec.type, rec.time, rec.status,
                        elem.type, elem.fields['prefix'], elem.peer_address, elem.peer_asn, elem.fields))
                    bw.flush()
                    elem = rec.get_next_elem()

    print 'Successful termination; Start time: {0}'.format(start)
Code example #15
def main():
    (options, args) = getopts()
    limit = options.limit
    start = options.start_time
    end = options.end_time

    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    # Consider RIPE RRC 10 only
    stream.add_filter('record-type', 'updates')
    stream.add_filter('collector', 'rrc00')
    stream.add_filter('prefix', '0.0.0.0/0')

    # Consider this time interval:
    # Sat Aug  1 08:20:11 UTC 2015
    # stream.add_interval_filter(1438417216,1438417216)
    # stream.add_interval_filter(1451606400,1454785264)
    #stream.add_interval_filter(1454630400, 1454716800)
    # 1 hour
    #1454284800 - 1454288400

    stream.add_interval_filter(start, end)

    # Start the stream
    stream.start()

    # Get next record
    prefixes_update = defaultdict(int)
    prefixes_withdraw = defaultdict(int)

    while stream.get_next_record(rec):
        # Print the record information only if it is not a valid record
        if rec.status != "valid":
            pass
            # print '*', rec.project, rec.collector, rec.type, rec.time, rec.status
        else:
            elem = rec.get_next_elem()
            while elem:
                if elem.type == 'A':
                    #print elem.fields['as-path']
                    prefixes_update[elem.fields['prefix']] += 1

                if elem.type == 'W':
                    prefixes_withdraw[elem.fields['prefix']] += 1

                #print rec.project, rec.collector, rec.type, rec.time, rec.status,
                #print elem.type, elem.peer_address, elem.peer_asn, elem.fields
                elem = rec.get_next_elem()

    for k in prefixes_update:
        if prefixes_update[k] >= limit:
            print k + "\t" + str(prefixes_update[k]) + "\t" + str(
                prefixes_withdraw[k])
Code example #16
File: bgpstream.py Project: copley/bgp-gen
 def run(self):
     stream = BGPStream()
     rec = BGPRecord()
     if self.upd_file is None:
         stream.add_filter('collector', self.collector)
         stream.add_filter('record-type', self.record_type)
         stream.add_interval_filter(self.from_date, self.to_date)
         stream.set_live_mode()
     else:
         stream.set_data_interface('singlefile')
         if self.upd_file:
             stream.set_data_interface_option('singlefile', 'upd-file',
                                              self.upd_file)
         if self.rib_file:
             stream.set_data_interface_option('singlefile', 'rib-file',
                                              self.rib_file)
     if self.prefix_filter is not None:
         for prefix in self.prefix_filter:
             stream.add_filter('prefix', prefix)
     if self.peer_as_filter:
         for asn in self.peer_as_filter:
             stream.add_filter('peer-asn', str(asn))
     if self.communities_filter:
         for community in self.communities_filter:
             stream.add_filter('community', community)
     stream.start()
     stream.get_next_record(rec)
     prev = rec.time
     while (stream.get_next_record(rec)):
         now = rec.time
         if rec.status == 'valid':
             elem = rec.get_next_elem()
             while (elem):
                 statement = None
                 peer_address = elem.peer_address
                 peer_asn = elem.peer_asn
                 if peer_asn in self.asn_to_nexthop:
                     if elem.type == 'A' or elem.type == 'R':
                         prefix = elem.fields['prefix']
                         as_path = elem.fields['as-path']
                         nexthop = elem.fields['next-hop']
                         if peer_asn in self.asn_to_nexthop:
                             nexthop = self.asn_to_nexthop[peer_asn]
                             statement = 'announce route %s next-hop %s as-path' \
                                 ' [ %s ]' % (prefix, nexthop, as_path)
                     elif elem.type == 'W':
                         prefix = elem.fields['prefix']
                         statement = 'withdraw route %s' % prefix
                 if statement:
                     sys.stdout.write("%s\n" % statement)
                     sys.stdout.flush()
                 elem = rec.get_next_elem()
         time.sleep(self.delay + now - prev)
         prev = now
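
The loop above replays a BGPStream feed as textual announce/withdraw control statements on stdout (the syntax matches what ExaBGP accepts on its API pipe), pacing itself by the gap between consecutive record timestamps plus a fixed delay. The statement formatting in isolation, with made-up values:

prefix, nexthop, as_path = '192.0.2.0/24', '198.51.100.1', '64500 64501'
print('announce route %s next-hop %s as-path [ %s ]' % (prefix, nexthop, as_path))
print('withdraw route %s' % prefix)
# announce route 192.0.2.0/24 next-hop 198.51.100.1 as-path [ 64500 64501 ]
# withdraw route 192.0.2.0/24
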
Code example #17
File: most_req_prefix.py Project: KTrel/DATAPLANE_1
def main():
    (options, args) = getopts()
    limit = options.limit
    start = options.start_time
    end = options.end_time

    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    # Consider RIPE RRC 10 only
    stream.add_filter('record-type', 'updates')
    stream.add_filter('collector', 'rrc00')
    stream.add_filter('prefix', '0.0.0.0/0')

    # Consider this time interval:
    # Sat Aug  1 08:20:11 UTC 2015
    # stream.add_interval_filter(1438417216,1438417216)
    # stream.add_interval_filter(1451606400,1454785264)
    #stream.add_interval_filter(1454630400, 1454716800)
    # 1 hour
    #1454284800 - 1454288400

    stream.add_interval_filter(start, end)

    # Start the stream
    stream.start()

    # Get next record
    prefixes_update = defaultdict(int)
    prefixes_withdraw = defaultdict(int)

    while stream.get_next_record(rec):
        # Print the record information only if it is not a valid record
        if rec.status != "valid":
            pass
            # print '*', rec.project, rec.collector, rec.type, rec.time, rec.status
        else:
            elem = rec.get_next_elem()
            while elem:
                if elem.type == 'A':
                    #print elem.fields['as-path']
                    prefixes_update[elem.fields['prefix']] += 1

                if elem.type == 'W':
                    prefixes_withdraw[elem.fields['prefix']] += 1

                #print rec.project, rec.collector, rec.type, rec.time, rec.status,
                #print elem.type, elem.peer_address, elem.peer_asn, elem.fields
                elem = rec.get_next_elem()

    for k in prefixes_update:
        if prefixes_update[k] >= limit:
            print k + "\t" + str(prefixes_update[k]) + "\t" + str(prefixes_withdraw[k])
Code example #18
def download_data():
    peer_state = defaultdict(dict)
    results = defaultdict(defaultdict_list)
    current_bin = 0

    # create a new bgpstream instance
    stream = BGPStream()
    # create a reusable bgprecord instance
    rec = BGPRecord()
    bgprFilter = "type updates"

    bgprFilter += " and project ris "
    for prefix in prefixes:
        bgprFilter += " and prefix more %s " % prefix

    logging.info("Connecting to BGPstream... (%s)" % bgprFilter)
    logging.info("Timestamps: %s, %s" % (startts, endts))
    stream.parse_filter_string(bgprFilter)
    stream.add_interval_filter(startts, endts)

    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status != "valid":
            print(rec.project, rec.collector, rec.type, rec.time, rec.status)
            # from IPython import embed
            # embed()

        if current_bin == 0:
            current_bin = rec.time

        # slide the time window:
        if current_bin + bin_size < rec.time:
            timebins = range(current_bin, rec.time, bin_size)
            for i in timebins[:-1]:
                results["other"]["timebin"].append(i)
                for pfx, p_s in peer_state.items():
                    for peeras, state in p_s.items():
                        results[pfx][peeras].append(state)

            current_bin = timebins[-1]

        elem = rec.get_next_elem()
        while (elem):
            # peerip = elem.peer_address
            peeras = elem.peer_asn
            prefix = elem.fields["prefix"]

            peer_state[prefix][peeras] = elem.type

            elem = rec.get_next_elem()

    return results
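
The bookkeeping around current_bin advances the window in bin_size steps and, for every completed bin, re-emits the last observed announce/withdraw state of each (prefix, peer) pair, so gaps in the update stream still produce one row per bin. A stripped-down sketch of just that sliding step (a bin_size of 300 is assumed here purely for illustration):

bin_size = 300
current_bin = 1451692800
rec_time = 1451693800              # a record arriving ~1000 s after the current bin

if current_bin + bin_size < rec_time:
    timebins = list(range(current_bin, rec_time, bin_size))
    for i in timebins[:-1]:
        print("emit state snapshot for bin %d" % i)
    current_bin = timebins[-1]
print(current_bin)                 # 1451693700 -- window advanced to the last started bin
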
Code example #19
def main():
    parser = argparse.ArgumentParser()
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    parser.description = textwrap.dedent('''\
        a proof-of-concept utility for watching updates from BGPstream
        and then printing out if an unexpected update is heard
        ''')
    parser.epilog = textwrap.dedent('''\
        Example: watch these route announcements
            %(prog)s -f routes.yaml ''')
    required = parser.add_argument_group('required arguments')
    required.add_argument("-f",
                          "--file",
                          required=True,
                          help="yaml file of prefixes to origin asn")
    parser.add_argument("-d",
                        "--debug",
                        action='store_true',
                        help="print out all updates containing these prefixes")
    args = parser.parse_args()

    routes = pytricia.PyTricia(48)  # longest reasonable pfx in dfz

    with open(args.file, 'r') as f:
        routesfile = yaml.safe_load(f)
    for pfx in routesfile:
        routes[pfx] = routesfile[pfx]

    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('record-type', 'updates')
    stream.add_interval_filter(int(time.time()), 0)
    stream.set_live_mode()
    stream.start()

    while (stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
            while (elem):
                if 'as-path' in elem.fields:
                    path = elem.fields['as-path'].split()
                    prefix = elem.fields['prefix']
                    if prefix in routes and (routes[prefix] != path[-1]
                                             or args.debug):
                        print('Heard prefix:', elem.fields['prefix'],
                              'AS-PATH:', elem.fields['as-path'],
                              '  Found by project:', rec.project, 'collector:',
                              rec.collector, 'type:', rec.type, 'at time:',
                              rec.time, 'Type:', elem.type, 'Peer:',
                              elem.peer_address, 'AS', elem.peer_asn)

                elem = rec.get_next_elem()
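
The watcher leans on pytricia's longest-prefix-match semantics: both the membership test and the lookup resolve against the most specific covering entry, so an announcement for a more-specific of a watched block is still compared against the expected origin. A small standalone illustration using a default (IPv4) PyTricia; the prefix and ASN values are made up:

import pytricia

routes = pytricia.PyTricia()
routes['192.0.2.0/24'] = '64500'        # expected origin ASN for the watched block

print('192.0.2.128/25' in routes)       # True  -- matched via the covering /24
print(routes['192.0.2.128/25'])         # '64500'
print('198.51.100.0/24' in routes)      # False -- no watched prefix covers it
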
Code example #20
def run_bgpstream(args):
    (collector, start_time, end_time, data_type) = args

    # initialize and configure BGPStream
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    # NB: BGPStream uses inclusive/inclusive intervals, so subtract one off the
    # end time since we are using inclusive/exclusive intervals
    stream.add_interval_filter(start_time, end_time - 1)
    stream.add_filter('record-type', data_type)
    stream.start()

    # per-peer data
    peers_data = {}

    # loop over all records in the stream
    while stream.get_next_record(rec):
        elem = rec.get_next_elem()
        # to track the peers that have elems in this record
        peer_signatures = set()
        # loop over all elems in the record
        while elem:
            # create a peer signature for this elem
            sig = peer_signature(rec, elem)
            peer_signatures.add(sig)
            # if this is the first time we have ever seen this peer, create
            # an empty result: (elem_cnt, peer_record_cnt, coll_record_cnt)
            if sig not in peers_data:
                peers_data[sig] = [0, 0, 0]
            peers_data[sig][0] += 1  # increment elem cnt for this peer
            elem = rec.get_next_elem()

        # done with elems, increment the 'coll_record_cnt' field for just
        # one peer that was present in this record (allows a true, per-collector
        # count of records since each record can contain elems for many peers)
        if len(peer_signatures):
            first = True
            for sig in peer_signatures:  # increment peer_record_cnt for all
                if first:
                    peers_data[sig][2] += 1  # increment the coll_record_cnt
                    first = False
                peers_data[sig][1] += 1

    # the time in the output row is truncated down to a multiple of
    # RESULT_GRANULARITY so that slices can be merged correctly
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)

    # for each peer that we processed data for, create an output row
    return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
Code example #21
File: RIPE_database.py Project: walberjose/COOL
def MOAS_prefixes():
    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    # Consider Route Views Singapore only
    stream.add_filter('collector', 'route-views.sg')

    # Consider RIBs dumps only
    stream.add_filter('record-type', 'ribs')

    # Consider this time interval:
    # Sat, 01 Aug 2015 7:50:00 GMT -  08:10:00 GMT
    stream.add_interval_filter(1438415400, 1438416600)

    # Start the stream
    stream.start()

    # <prefix, origin-ASns-set > dictionary
    prefix_origin = defaultdict(set)
    prefix_origin_dict = defaultdict(list)

    # Get next record
    while (stream.get_next_record(rec)):
        elem = rec.get_next_elem()
        while (elem):
            # Get the prefix
            pfx = elem.fields['prefix']
            # Get the list of ASes in the AS path
            ases = elem.fields['as-path'].split(" ")
            if len(ases) > 0:
                # Get the origin ASn (rightmost)
                origin = ases[-1]
                if origin == '262857':  # AS262857 - UFRN: 177.20.128.0/19 UNIVERSIDADE FEDERAL DO RIO GRANDE DO NORTE
                    '''
                    BI 	177.20.128.0/19 	198.32.125.84 	280 	100 	0 	1916, 262857 	IGP
                    E 	177.20.128.0/19 	213.248.67.117 	0 	70 	0 	1299, 2914, 1916, 262857 	IGP
                    E 	177.20.128.0/19 	213.248.98.93 	0 	70 	0 	1299, 2914, 1916, 262857 	IGP
                    '''
                    print "Achou UFRN"
                # Insert the origin ASn in the set of
                # origins for the prefix
                prefix_origin[pfx].add(origin)
                prefix_origin_dict[pfx].append(ases)

            elem = rec.get_next_elem()

    # Print the list of MOAS prefix and their origin ASns
    for pfx in prefix_origin:
        if len(prefix_origin[pfx]) > 1:
            pass  #print pfx, ",".join(prefix_origin[pfx])
Code example #22
def initSeeding(lastBGPUpdateProcessed):
	global cursor, startTime, seeded, stream, rec, endTime
	#cursor.execute("""SELECT intVal AS lastBGPUpdateProcessed FROM metadata WHERE name = 'lastBGPUpdateProcessed'""")
	#result = cursor.fetchone()
	#if result == None:
	#	print "ERROR: NO Start Time Found in DB (aka lastBGPUpdateProcessed). Recommended snd time is 1489224750 - int(2592000 * 4) (which is 1478856750) because this is a 4 month backlog."
	#	exit()
		#cursor.execute("""INSERT INTO metadata (name, intVal) VALUES ('lastBGPUpdateProcessed', {0})""".format(1489224750 - int(2592000 * 4)))
		#conn.commit()
	#else:
	#	(startTime,) = result
	startTime = lastBGPUpdateProcessed

	cursor.execute("""SELECT stringVal AS seeded FROM metadata WHERE name = 'seeded'""")
	result = cursor.fetchone()
	if result == None:
		# We will assume that the DB is not seeded if there is no entry and not throw any errors in this case.
		cursor.execute("""INSERT INTO metadata (name, stringVal) VALUES ('seeded', 'false')""")
		conn.commit()
		seeded = False
	else:
		strValSeeded = ""
		(strValSeeded,) = result
		if strValSeeded == "false":
			seeded = False
		elif strValSeeded == "true":
			seeded = True
			print "DB seeded already. Calling initUpdates(True)"
			initUpdates(True, lastBGPUpdateProcessed)
			return True
		else:
			print "Invalid value for seeded metadata entry. Exiting."
			exit()

	# create a new bgpstream instance
	stream = BGPStream()
	# create a reusable bgprecord instance
	rec = BGPRecord()
	stream.add_filter('record-type','ribs') # here we collect updates. This could be changed to ribs to instead acquire periodic snapshots of the RIBs.
	# configure the stream to retrieve Updates from the route-views listener.
	stream.add_filter('collector', 'route-views2')
	# getting updates only from one peer gives us only the preferred route of this peer and no rejected routes.
	# only get updates from level3
	stream.add_filter('peer-asn', '3356')
	stream.add_rib_period_filter(604800)
	stream.add_interval_filter(startTime, startTime + 104800)
	endTime = startTime + 1604801
	stream.start()
	print "bgp stream started from init seeding"
	return False
Code example #23
File: RIPE_database.py Project: walberjose/COOL
def getting_BGP_update():
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', 'rrc11')
    stream.add_interval_filter(1438417216, 1438417216)
    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status != "valid":
            print rec.project, rec.collector, rec.type, rec.time, rec.status
        else:
            elem = rec.get_next_elem()
            while (elem):
                print rec.project, rec.collector, rec.type, rec.time, rec.status,
                print elem.type, elem.peer_address, elem.peer_asn, elem.fields
                elem = rec.get_next_elem()
Code example #24
def generate_stream():
    bs = BGPStream()
    rec = BGPRecord()
    #initialize MySql
    a = MySqlDAL()
    a.start()

    #initialize the publisher in port number 12345
    publisher = ZmqPublisher(12345)

    bs.add_interval_filter(calendar.timegm(time.gmtime()) - delay, 0)
    # bs.add_filter('collector', 'route-views.sg')
    bs.add_filter('record-type', 'updates')
    bs.start()

    print('Beginning to read from stream')
    input_id = 0
    while bs.get_next_record(rec):
        elem = rec.get_next_elem()
        while elem is not None:
            # sleep until it is time to send this record
            '''
            now = calendar.timegm(time.gmtime())
            sim_time = now - delay
            if elem.time > sim_time:
                time.sleep(elem.time - sim_time)
            '''
            if elem.type not in ['A', 'W']:
                elem = rec.get_next_elem()
                continue

            input_id += 1
            msg = elem2bgplay(rec, elem)
            msg['type'] = 'A'
            msg['id'] = input_id
            print(msg)

            # Publish the message
            publisher.publish(msg)

            # Write it to DB
            if elem.type == 'A':
                a.add(msg)
            elif elem.type == 'W':
                a.remove(msg)
            else:
                print "Error: Unknown type: " + elem.type
            elem = rec.get_next_elem()
Code example #25
def generate_stream():
    bs = BGPStream()
    rec = BGPRecord()
    #initialize MySql
    db_writer = MysqlWriter()
    db_writer.start()

    #initialize the publisher in port number 12345
    publisher = ZmqPublisher(12345)

    bs.add_interval_filter(calendar.timegm(time.gmtime()) - delay, 0)
    # bs.add_filter('collector', 'route-views.sg')
    bs.add_filter('record-type', 'updates')
    bs.start()

    print('Beginning to read from stream')
    input_id = 0
    while bs.get_next_record(rec):
        elem = rec.get_next_elem()
        while elem is not None:
            # sleep until it is time to send this record
            '''
            now = calendar.timegm(time.gmtime())
            sim_time = now - delay
            if elem.time > sim_time:
                time.sleep(elem.time - sim_time)
            '''
            if elem.type not in ['A', 'W']:
                elem = rec.get_next_elem()
                continue

            input_id += 1
            msg = elem2bgplay(rec, elem)
            msg['type'] = 'A'
            msg['id'] = input_id
            print(msg)

            # Publish the message
            publisher.publish(msg)

            # Write it to DB
            if elem.type == 'A':
                db_writer.add(msg)
            elif elem.type == 'W':
                db_writer.remove(msg)
            else:
                print "Error: Unknown type: " + elem.type
            elem = rec.get_next_elem()
Code example #26
def get_ribs(collector, start, period):
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    stream.add_interval_filter(
        int(start.timestamp()) - period, int(start.timestamp()))
    stream.add_filter('record-type', 'ribs')
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == "valid":
            elem = rec.get_next_elem()
            while (elem):
                yield (rec.project, rec.collector,
                       datetime.utcfromtimestamp(rec.time).isoformat(),
                       elem.peer_address, elem.peer_asn, elem.fields['prefix'],
                       elem.fields['as-path'])
                elem = rec.get_next_elem()
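
Because get_ribs is a generator, a caller can stream the yielded rows straight into a CSV file (or a database) without holding the whole RIB in memory. A hypothetical usage sketch:

import csv
from datetime import datetime

start = datetime(2016, 1, 2)
with open('rrc00_rib.csv', 'w') as f:
    writer = csv.writer(f)
    for row in get_ribs('rrc00', start, 3600):   # RIB data from the hour ending at `start`
        writer.writerow(row)
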
Code example #27
def recv_bgpstream_updates(begin, until, collector, output_queue):
    logging.info ("CALL recv_bgpstream_updates")
    # wait for first RIB table dump to complete
    while (rib_ts < 0):
        time.sleep(RIB_TS_WAIT/10)
    time.sleep(RIB_TS_WAIT)
    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector', collector)
    stream.add_filter('record-type','updates')
    stream.add_interval_filter(rib_ts,until)
    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping ...")
            continue
        logging.info("Record TS: "+str(rec.time))
        while (elem):
            logging.info(" -- Record Element Type: " + elem.type + ", TS: " + str(elem.time))
            bgp_message = BGPmessage(elem.time, 'update')
            src_peer = dict()
            src_addr = elem.peer_address
            src_asn = elem.peer_asn
            src_peer['addr'] = src_addr
            src_peer['port'] = 0
            src_peer['asn'] = src_asn
            bgp_message.set_source(src_peer)
            if elem.type.upper() == 'A':
                bgp_message.add_announce(elem.fields['prefix'])
                bgp_message.set_nexthop(elem.fields['next-hop'])
                aspath = elem.fields['as-path'].split()
                for a in aspath:
                    if not '{' in a: # ignore AS-SETs
                        bgp_message.add_as_to_path(a)
                output_queue.put(bgp_message)
            elif elem.type.upper() == 'W':
                bgp_message.add_withdraw(elem.fields['prefix'])
                output_queue.put(bgp_message)
            elem = rec.get_next_elem()
Code example #28
File: CreateRib.py Project: kelemeto76/bgp-hackathon
def main(rib, target_prefs):

        
    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    with open('./data/stream_{0}'.format(start), 'wb') as bw:

        for ptmp in target_prefs:
            stream.add_filter('prefix', ptmp)

        # Consider RIPE RRC 10 only
        stream.add_filter('record-type', 'updates')
        stream.add_filter('record-type', 'ribs')
        #stream.add_filter('collector', 'rrc04')
        stream.add_filter('project', 'ris')
        stream.add_filter('project', 'routeviews')

        stream.add_interval_filter(start-60*60*8, start)
        stream.add_rib_period_filter(10000000000000)        

        # Start the stream
        stream.start()

        while stream.get_next_record(rec):
            # Print the record information only if it is not a valid record
            if rec.status != "valid":
                continue

            #if rec.time < start:
            elem = rec.get_next_elem()
            while elem:

                if elem.type == 'A' or elem.type == 'R':
                    rib.add_to_rib(rec.collector, elem.peer_address, elem.fields['prefix'], elem.time, elem.fields['as-path'])

                elem = rec.get_next_elem()

            #else:

        rib.flush()

    print 'Successful termination; Start time: {0}'.format(start)
Code example #29
def recv_bgpstream_rib(begin, until, collector, output_queue):
    logging.info ("CALL recv_bgpstream_rib")
    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector', collector)
    stream.add_filter('record-type','updates')
    stream.add_interval_filter(begin,until)

    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        global rib_ts
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping.")
            continue
        if (rib_ts > 0) and (rec.time > (rib_ts + RIB_TS_INTERVAL/2)):
            logging.info("received full RIB table dump.")
            break
        bgp_message = None
        while (elem):
            if (elem.type.upper() == 'A') or (elem.type.upper() == 'R'):
                rib_ts = elem.time
                bgp_message = BGPmessage(elem.time, 'update')
                bgp_message.set_nexthop(elem.fields['next-hop'])
                src_peer = dict()
                src_addr = elem.peer_address
                src_asn = elem.peer_asn
                src_peer['addr'] = src_addr
                src_peer['port'] = 0
                src_peer['asn'] = src_asn
                bgp_message.set_source(src_peer)
                aspath = elem.fields['as-path'].split()
                for a in aspath:
                    if not '{' in a: # ignore AS-SETs
                        bgp_message.add_as_to_path(a)
                bgp_message.add_announce(elem.fields['prefix'])
                output_queue.put(bgp_message)
            elem = rec.get_next_elem()
Code example #30
def count_valid_withdrawals_per_second(t0,num_seconds):
    # div0 in progress tracking if <100 seconds
    assert num_seconds>100;

    # Create a new bgpstream instance
    stream = BGPStream();
    # use Routeviews Saopaulo records
    stream.add_filter('project','routeviews');
    stream.add_filter('collector','route-views.saopaulo');
    stream.add_filter('record-type','updates');
    # time interval
    stream.add_interval_filter(t0, t0+num_seconds-1);

    # progress tracking
    prog = t0;    

    # Create a reusable bgprecord instance
    rec = BGPRecord();
    # list of zeroes
    ctr = [0]*num_seconds;
    # Start the stream
    stream.start();

    # iterate over records
    while(stream.get_next_record(rec)):
        if rec.status == "valid":
            elem = rec.get_next_elem();
            while(elem):
                if elem.type == 'W':
                    ctr[rec.time-t0] += 1;
                elem = rec.get_next_elem();
        # el-cheapo progress indication with dots. comment out if you don't want the dots.
        # """
        if rec.time > prog and (rec.time - t0) % floor(num_seconds/100) == 0:
            sys.stdout.write('.');
            sys.stdout.flush();
            prog = rec.time;
        # """
    # print newline after the dots
    print "";
    return ctr;
Code example #31
    def _start_stream(self, **filters):
        """Initializes, starts, and returns bgp stream with filters"""

        # Create a new bgpstream instance
        stream = BGPStream()

        # Add filters if params exist
        for key, value in filters.items():
            if (key in ['peer-asn', 'prefix', 'collector']
                    and value is not None):
                stream.add_filter(key, value)

        # Time params must be in epoch
        start_epoch = calendar.timegm(filters.get('start').timetuple())
        end_epoch = calendar.timegm(filters.get('end').timetuple())
        stream.add_interval_filter(start_epoch, end_epoch)

        # Start the stream
        stream.start()
        self.logger.debug("Started stream for caida announcements")
        return stream
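
Both interval bounds must be UNIX timestamps, so the datetime filters are converted with calendar.timegm, which treats the time tuple as UTC (unlike time.mktime, which assumes local time). A quick illustration:

import calendar
import datetime

start = datetime.datetime(2016, 1, 2, 0, 0, 0)        # interpreted as UTC
print(calendar.timegm(start.timetuple()))             # 1451692800
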
Code Example #32
    def readUpdates(self):
        # create a new bgpstream instance
        stream = BGPStream()
        # create a reusable bgprecord instance
        rec = BGPRecord()
        bgprFilter = "type updates"

        bgprFilter += " and project ris "
        for prefix in self.prefixes:
            bgprFilter += " and prefix more %s " % prefix

        logging.info("Connecting to BGPstream... (%s)" % bgprFilter)
        logging.info("Timestamps: %s, %s" % (self.startts, self.endts))
        stream.parse_filter_string(bgprFilter)
        stream.add_interval_filter(self.startts, self.endts)

        stream.start()
        while (stream.get_next_record(rec)):
            if (rec.status != "valid" and rec.status != "filtered-source"
                    and rec.status != "empty-source"):
                raise Exception(rec.project, rec.collector, rec.type, rec.time,
                                rec.status)

            zDt = rec.time
            elem = rec.get_next_elem()
            while (elem):
                # peerip = elem.peer_address
                peeras = elem.peer_asn
                sPrefix = elem.fields["prefix"]

                if elem.type == "W":
                    self.withdraws[sPrefix][peeras] = True

                else:
                    sPath = elem.fields["as-path"]
                    self.paths[sPrefix][peeras] = sPath
                    self.withdraws[sPrefix][peeras] = False

                elem = rec.get_next_elem()
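The loop indexes self.paths and self.withdraws by prefix and then by peer ASN; one plausible setup, purely an assumption about a constructor and class name (UpdateReader) that are not part of this listing, is:

from collections import defaultdict

class UpdateReader(object):
    # hypothetical surrounding class; only the pieces readUpdates() relies on are shown
    def __init__(self, prefixes, startts, endts):
        self.prefixes = prefixes
        self.startts = startts
        self.endts = endts
        self.paths = defaultdict(dict)      # prefix -> peer ASN -> AS path
        self.withdraws = defaultdict(dict)  # prefix -> peer ASN -> bool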
Code Example #33
def get_updates(collector, start, end):
    print("getting updates for {} from {} to {}".format(collector, start, end))
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    stream.add_interval_filter(int(start.timestamp()), int(end.timestamp()))
    stream.add_filter('record-type', 'updates')
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == "valid":
            elem = rec.get_next_elem()
            while (elem):
                if elem.type == 'A':
                    yield ('A', rec.project, rec.collector,
                           datetime.utcfromtimestamp(rec.time).isoformat(),
                           elem.peer_address, elem.peer_asn,
                           elem.fields['prefix'], elem.fields['as-path'])
                elif elem.type == 'W':
                    yield ('W', rec.project, rec.collector,
                           datetime.utcfromtimestamp(rec.time).isoformat(),
                           elem.peer_address, elem.peer_asn,
                           elem.fields['prefix'], None)
                elem = rec.get_next_elem()
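Since get_updates is a generator, a hypothetical driver can stream its tuples straight into a CSV file (the collector name and dates below are only examples):

import csv
from datetime import datetime

with open('updates.csv', 'w') as f:
    writer = csv.writer(f)
    for row in get_updates('route-views2',
                           datetime(2016, 1, 1, 0, 0),
                           datetime(2016, 1, 1, 1, 0)):
        writer.writerow(row)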
Code Example #34
def recv_bgpstream_updates(begin, until, collector):
    """
    Receive and parse BGP update records from a given bgpstream collector
    """
    logging.info("CALL recv_bgpstream_updates")
    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector', collector)
    stream.add_filter('record-type', 'updates')
    stream.add_interval_filter(begin, until)
    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping ...")
            continue
        logging.info("Record TS: " + str(rec.time))
        while (elem):
            logging.info(" -- Record Element Type: " + elem.type + ", TS: " +
                         str(elem.time))
            bgp_message = BGPmessage(elem.time, 'update')
            if elem.type.upper() == 'A':
                bgp_message.add_announce(elem.fields['prefix'])
                aspath = elem.fields['as-path'].split()
                for a in aspath:
                    if not '{' in a:  # ignore AS-SETs
                        bgp_message.add_as_to_path(a)
                output(bgp_message)
            elif elem.type.upper() == 'W':
                bgp_message.add_withdraw(elem.fields['prefix'])
                output(bgp_message)
            elem = rec.get_next_elem()
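BGPmessage and output() come from elsewhere in this project; a minimal hypothetical stand-in for output() that simply dumps each parsed message as a JSON line could be:

import json

def output(bgp_message):
    # hypothetical sink: serialize the message object's attributes and print one JSON line
    print(json.dumps(vars(bgp_message), default=str))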
Code Example #35
            last_ts = int(float(f.read().strip()))
        logger.info("loading timestamp from file: %s",
                    datetime.utcfromtimestamp(last_ts))
    else:
        # Consider RIBs dumps only
        now = time.time()
        last_ts = int(now - now % 3600)
        logger.info("loading from: %s", datetime.utcfromtimestamp(last_ts))

    stream.add_filter('record-type', 'ribs')
    stream.add_filter('record-type', 'updates')

    stream.add_interval_filter(last_ts, 0)

    # Start the stream
    stream.start()

    client = KafkaClient(args.our_servers.split(","))
    count = 0
    for batch in group_by_n(
            messages_from_internal(iterate_stream(stream, args.collector)),
            1000):
        req = ProduceRequest("rib-{}".format(args.collector), 0, batch)
        for msg in reversed(req.messages):
            if msg.value is None:
                continue
            last_timestamp = json.loads(msg.value)["timestamp"]
            break

        count += len(batch)
        logger.info("sending %i", count)
Code Example #36
File: rib-origins.py Project: smlng/bgp-origins
def main():
    parser = argparse.ArgumentParser(description='', epilog='')
    parser.add_argument('-b', '--begin',
                        help='Begin date (inclusive), format: yyyy-mm-dd HH:MM',
                        type=valid_date, required=True)
    parser.add_argument('-u', '--until',
                        help='Until date (exclusive), format: yyyy-mm-dd HH:MM',
                        type=valid_date, required=True)
    parser.add_argument('-c', '--collector',
                        help='Route collector from RIPE RIS or Route-Views project.',
                        type=str, required=True)
    parser.add_argument('-m', '--mongodb',
                        help='MongoDB connection parameters.',
                        type=str, default=None)
    parser.add_argument('-l', '--loglevel',
                        help='Set loglevel [DEBUG,INFO,WARNING,ERROR,CRITICAL].',
                        type=str, default='WARNING')

    args = vars(parser.parse_args())

    numeric_level = getattr(logging, args['loglevel'].upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args['loglevel'])
    logging.basicConfig(level=numeric_level,
                        format='%(asctime)s : %(levelname)s : %(message)s')

    ts_begin = int((args['begin'] - datetime(1970, 1, 1)).total_seconds())
    ts_until = int((args['until'] - datetime(1970, 1, 1)).total_seconds())

    mongodbstr = None
    if args['mongodb']:
        mongodbstr = args['mongodb'].strip()
    # BEGIN
    logging.info("START")

    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector',args['collector'])
    stream.add_filter('record-type','ribs')
    stream.add_interval_filter(ts_begin,ts_until)

    # Start the stream
    stream.start()

    rib_ts = 0
    rib_origins = dict()
    while(stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping ...")
            continue
        if rec.time > (rib_ts + RIB_TS_INTERVAL):
            rib_ts = rec.time
            if mongodbstr:
                store_rib_origins(rib_ts, rib_origins, mongodbstr)
            else:
                print_rib_origins(rib_ts, rib_origins)
            rib_origins = dict()
        while(elem):
            prefix = elem.fields['prefix']
            aspath = elem.fields['as-path'].split()
            aspath = [a for a in aspath if '{' not in a]  # remove AS-SETs
            origin = aspath[-1]
            if prefix not in rib_origins:
                rib_origins[prefix] = list()
            if origin not in rib_origins[prefix]:
                rib_origins[prefix].append(origin)
            elem = rec.get_next_elem()
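store_rib_origins and print_rib_origins are project helpers that are not part of this listing; a minimal hypothetical print_rib_origins might be:

def print_rib_origins(rib_ts, rib_origins):
    # hypothetical helper: one line per prefix with the RIB timestamp and its origin ASNs
    for prefix in sorted(rib_origins):
        print("%d %s %s" % (rib_ts, prefix, ",".join(rib_origins[prefix])))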
Code Example #37
import time
from _pybgpstream import BGPStream, BGPRecord, BGPElem

start_time = time.time()

stream = BGPStream()
rec = BGPRecord()

stream.add_filter('prefix','140.78.0.0/16')
stream.add_filter('record-type','ribs')
stream.add_filter('collector','rrc11')
# stream.add_filter('project','ris')
stream.add_interval_filter(1464681000, 1464682200)  # 1464682200 ==> 05/31/2016 @ 8:10am (UTC)

stream.start()
#
# while(stream.get_next_record(rec)):
#     # Print the record information only if it is not a valid record
#     if rec.status == "valid":
#         elem = rec.get_next_elem()
#         while elem:
#             # Print record and elem information
#             print rec.collector, elem.peer_address, elem.peer_asn, elem.fields['as-path']
#             elem = rec.get_next_elem()

result = []
while stream.get_next_record(rec):
    if rec.status == "valid":
        elem = rec.get_next_elem()
        while elem:
            print rec.collector, elem.type, elem.peer_address, elem.peer_asn, elem.fields
            # store the entry and move on to the next element
            result.append((rec.collector, elem.type, elem.peer_address, elem.peer_asn, elem.fields))
            elem = rec.get_next_elem()
Code Example #38
    def readrib(self):
        stream = None
        rec = None
        if self.txtFile is None:
            # create a new bgpstream instance
            stream = BGPStream()

            # create a reusable bgprecord instance
            rec = BGPRecord()
            bgprFilter = "type ribs"

            if self.af == 6:
                bgprFilter += " and ipversion 6"
            else:
                bgprFilter += " and ipversion 4"

            for c in self.collectors:
                bgprFilter += " and collector %s " % c

            # if not self.asnFilter is None:
            # bgprFilter += ' and path %s$' % self.asnFilter
            for p in self.includedPeers:
                bgprFilter += " and peer %s " % p

            for p in self.includedPrefix:
                bgprFilter += " and prefix more %s " % p

            logging.info("Connecting to BGPstream... (%s)" % bgprFilter)
            logging.info("Timestamps: %s, %s" %
                         (self.startts - 3600, self.startts + 3600))
            stream.parse_filter_string(bgprFilter)
            stream.add_interval_filter(self.startts - 3600,
                                       self.startts + 3600)
            if self.livemode:
                stream.set_live_mode()

            stream.start()

        else:
            rec = txtReader.txtReader(self.txtFile)

        # for line in p1.stdout:
        while (self.txtFile
               and not rec.running) or (stream
                                        and stream.get_next_record(rec)):
            if rec.status != "valid":
                print rec.project, rec.collector, rec.type, rec.time, rec.status
            zDt = rec.time
            elem = rec.get_next_elem()

            while (elem):
                zOrig = elem.peer_address
                zAS = elem.peer_asn
                if zAS in self.excludedPeers or (len(
                        self.includedPeers) and zAS not in self.includedPeers):
                    elem = rec.get_next_elem()
                    continue
                zPfx = elem.fields["prefix"]
                sPath = elem.fields["as-path"]
                # print("%s: %s, %s, %s" % (zDt, zAS, zPfx, elem.fields))

                if zPfx == "0.0.0.0/0" or zPfx in self.excludedPrefix or (
                        len(self.includedPrefix)
                        and zPfx not in self.includedPrefix):
                    elem = rec.get_next_elem()
                    continue

                path = sPath.split(" ")
                origAS = path[-1]
                if origAS in self.excludedOriginASN or (
                        len(self.includedOriginASN)
                        and origAS not in self.includedOriginASN):
                    elem = rec.get_next_elem()
                    continue
                    # FIXME: this is not going to work in the case of
                    # delegated prefixes (and using IP addresses as spatial
                    # resolution)

                self.peersASN[zOrig].add(zAS)

                if len(path) < 2:
                    # Ignore paths with only one AS
                    elem = rec.get_next_elem()
                    continue

                node = self.rtree.add(zPfx)
                if zOrig in node.data:
                    # Already read this entry, we should read only one RIB per peer
                    elem = rec.get_next_elem()
                    continue

                if self.ribQueue is not None:
                    self.ribQueue.put((zDt, zOrig, zAS, zPfx, path))

                node.data[zOrig] = {
                    "path": set(path),
                    "count": 0,
                    "origAS": origAS
                }

                # print "%s, %s, %s, %s, %s" % (elem.time, elem.type, elem.peer_address, elem.peer_asn, elem.fields)

                if self.spatialResolution:
                    # compute weight for this path
                    count = self.nbIPs(node.prefixlen)
                    countBelow = sum([
                        n.data[zOrig]["count"]
                        for n in self.rtree.search_covered(zPfx)
                        if zOrig in n.data and n != node
                    ])
                    count -= countBelow
                    # assert count >= 0
                    node.data[zOrig]["count"] = count

                    # Update above nodes
                    parent = self.findParent(node, zOrig)
                    if not parent is None:
                        # pcountBelow = sum([n.data[zOrig]["count"] for n in self.rtree.search_covered(parent.prefix) if n.parent == parent and zOrig in n.data])
                        pcountBelow = sum([
                            n.data[zOrig]["count"]
                            for n in self.rtree.search_covered(parent.prefix)
                            if zOrig in n.data and n != parent
                        ])
                        oldpCount = parent.data[zOrig]["count"]
                        pCount = self.nbIPs(parent.prefixlen) - pcountBelow
                        pdiff = pCount - oldpCount
                        parent.data[zOrig]["count"] = pCount
                        pOrigAS = parent.data[zOrig]["origAS"]
                        asns = parent.data[zOrig]["path"]
                        self.incCount(pdiff, zOrig, pOrigAS, zAS, asns)
                        self.incTotalCount(pdiff, zOrig, pOrigAS, zAS)
                else:
                    count = 1
                    node.data[zOrig]["count"] = count

                asns = node.data[zOrig]["path"]
                self.incTotalCount(count, zOrig, origAS, zAS)
                self.incCount(count, zOrig, origAS, zAS, asns)

                elem = rec.get_next_elem()
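nbIPs() is defined elsewhere in this class; for IPv4 prefixes one plausible definition (an assumption, the real project may weight prefixes differently) is:

def nb_ips(prefixlen):
    # hypothetical stand-in for self.nbIPs(): number of IPv4 addresses in a prefix of this length
    return 2 ** (32 - prefixlen)

print(nb_ips(24))  # 256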
Code Example #39
def run_bgpstream(args):
    (collector, start_time, end_time, data_type) = args

    # initialize and configure BGPStream
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    # NB: BGPStream uses inclusive/inclusive intervals, so subtract one off the
    # end time since we are using inclusive/exclusive intervals
    stream.add_interval_filter(start_time, end_time-1)
    stream.add_filter('record-type', data_type)
    stream.start()

    # per-peer data
    peers_data = {}

    # loop over all records in the stream
    while stream.get_next_record(rec):
        elem = rec.get_next_elem()
        # loop over all elems in the record
        while elem:
            # create a peer signature for this elem
            sig = peer_signature(rec, elem)
            # if this is the first time we have ever seen this peer, create
            # an empty result: (Pfxs_v4_set, Pfxs_v6_set,
            #                   dict(Transit_ASN) = Pfxs_v4_set, dict(Transit_ASN) = Pfxs_v6_set)
            if sig not in peers_data:
                peers_data[sig] =[set(),set(),{},{}]


            pfx=""
            if('prefix' in elem.fields):            
                pfx=elem.fields['prefix'];                                    
                if(":" in pfx):
                    peers_data[sig][1].add(pfx)

                else:
                    peers_data[sig][0].add(pfx)
            
            if('as-path' in elem.fields):
                #Squash the AS Path to keep only distinct ASNs, i.e., remove prepending 
                path_split = [k for k, g in groupby(elem.fields['as-path'].split(" "))]    
                if(len(path_split)!=0): 
                    for i in range(1,len(path_split)-1):
                        transit=path_split[i]
                        if(":" in pfx):
                            if(transit not in peers_data[sig][3]):
                                peers_data[sig][3][transit]=set()
                            peers_data[sig][3][transit].add(pfx)                                
                        elif(pfx!=""):
                            if(transit not in peers_data[sig][2]):
                                peers_data[sig][2][transit]=set()
                            peers_data[sig][2][transit].add(pfx)  

            elem = rec.get_next_elem()

    # the time in the output row is truncated down to a multiple of
    # RESULT_GRANULARITY so that slices can be merged correctly
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)

    # for each peer that we processed data for, create an output row
    return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
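run_bgpstream takes its parameters as a single tuple so it can be mapped over a worker pool; a hypothetical single-process invocation (the collector, timestamps, and supporting definitions such as peer_signature and RESULT_GRANULARITY are assumed to exist as in the rest of the project) might be:

# one two-hour RIB window from an example collector
rows = run_bgpstream(('route-views.sg', 1503475200, 1503475200 + 7200, 'ribs'))
for key, (v4_pfxs, v6_pfxs, v4_transit, v6_transit) in rows:
    print(key, len(v4_pfxs), len(v6_pfxs))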
Code Example #40
def load_data(start, stop, collectors, window, threshold):
    peers = {}

    # collectors is a list of the collectors we want to include
    # Start and stop define the interval we are looking in the data

    # Create a new BGPStream instance and a reusable BGPRecord instance
    stream = BGPStream()
    rec = BGPRecord()

    # Add filter for each collector.
    # If no collector is mentioned, it will consider 16 of them
    if collectors:
        for collector in collectors:
            print collector
            stream.add_filter('collector', collector)
    else:
        for i in range(0, 10):
            stream.add_filter('collector', 'rrc0' + str(i))
        for i in range(10, 16):
            stream.add_filter('collector', 'rrc' + str(i))

    stream.add_filter('record-type', 'updates')

    # Consider the interval from "start" to "stop" in seconds since epoch
    stream.add_interval_filter(start, stop)

    # Start the stream
    stream.start()

    # For each record (one record = one second, can have multiple elements for the same second) we handle its updates
    while stream.get_next_record(rec):
        timestamp = rec.time
        if rec.status != "valid":
            print rec.project, rec.collector, rec.type, timestamp, rec.status
        else:
            # Go through all elements of the record
            elem = rec.get_next_elem()
            while elem:
                # Consider only the A and W updates
                if elem.type not in ['A', 'W']:
                    elem = rec.get_next_elem()
                    continue

                peer = elem.peer_address
                updatetype = elem.type
                prefix = elem.fields['prefix']
                if peer not in peers:
                    peers[peer] = {
                        'A': [],
                        'W': []
                    }
                update = {'tst': timestamp, 'prefix': prefix}
                if updatetype == 'A':
                    handleUpdate(peers[peer]['A'], burst2writeA, update, peer, updatetype, timestamp, window, threshold)
                    saveGraphPoint(peers[peer]['A'], updatetype, peer, timestamp, collectors, threshold)
                else:
                    handleUpdate(peers[peer]['W'], burst2writeW, update, peer, updatetype, timestamp, window, threshold)
                    saveGraphPoint(peers[peer]['W'], updatetype, peer, timestamp, collectors, threshold)
                elem = rec.get_next_elem()

    # After processing all records, we write the graph json files with the graph points recorded for each peer
    for peer in graph_points:
        peer_file_name = peer.replace(':', '_')
        if not os.path.exists(peer_file_name):
            os.makedirs(peer_file_name)
        with open(peer_file_name+'/'+peer_file_name + '-graph.json', 'w') as outfile:
            json.dump(graph_points[peer], outfile, indent=2)

    # Write the last burst of A updates if there is one left
    if burst2writeA:
        for peer in burst2writeA:
            if burst2writeA[peer]:
                for timestamp in burst2writeA[peer]:
                    writeBurst(peer, burst2writeA, 'A', timestamp)

    # Write the last burst of W updates if there is one left
    if burst2writeW:
        for peer in burst2writeW:
            if burst2writeW[peer]:
                for timestamp in burst2writeW[peer]:
                    writeBurst(peer, burst2writeW, 'W', timestamp)

    # convert the CSV of peer file names into a JSON file so getJSON can be used in plotGrap
    # the intermediate CSV step avoids appending to a JSON file directly, since appending
    # would mean rewriting the whole file
    jsonlist = []
    with open('csv_peernames-'+'-'.join(collectors)+'.csv', 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            jsonlist.append(row[0])

    jsondata = json.dumps(jsonlist, indent=2)
    fd = open('json_file_names-' + '-'.join(collectors) + '.json', 'w')
    fd.write(jsondata)
    fd.close()
Code Example #41
def get_data(timestamp):
    graph = Graph(password="******")

    stream = BGPStream()
    rec = BGPRecord()
    rec_time = None

    # stream.add_filter('prefix', '198.41.0.0/24')  # A-root
    # stream.add_filter('prefix', '192.228.79.0/24')  # B-root, only 1 site
    # stream.add_filter('prefix', '192.33.4.0/24')  # C-root
    # stream.add_filter('prefix', '199.7.91.0/24')  # D-root
    # stream.add_filter('prefix', '192.203.230.0/24')  # E-root, IPv4 only
    # stream.add_filter('prefix', '192.5.5.0/24')  # F-root
    # stream.add_filter('prefix', '192.112.36.0/24')  # G-root, IPv4 only
    # stream.add_filter('prefix', '198.97.190.0/24')  # H-root
    # stream.add_filter('prefix', '192.36.148.0/24')  # I-root
    # stream.add_filter('prefix', '192.58.128.0/24')  # J-root
    stream.add_filter('prefix', '193.0.14.0/24')  # K-root
    # stream.add_filter('prefix', '199.7.83.0/24')  # L-root
    # stream.add_filter('prefix', '202.12.27.0/24')  # M-root

    # IPv6
    # stream.add_filter('prefix', '2001:503:ba3e::/48')  # A
    ## stream.add_filter('prefix', '2001:500:84::/48')  # B, only 1 site
    # stream.add_filter('prefix', '2001:500:2::/48')  # C
    # stream.add_filter('prefix', '2001:500:2d::/48')  # D
    # stream.add_filter('prefix', '2001:500:2f::/48')  # F
    # stream.add_filter('prefix', '2001:500:1::/48')  # H
    # stream.add_filter('prefix', '2001:7fe::/33')  # I
    # stream.add_filter('prefix', '2001:503:c27::/48')  # J
    # stream.add_filter('prefix', '2001:7fd::/48')  # K
    # stream.add_filter('prefix', '2001:500:9f::/48')  # L
    # stream.add_filter('prefix', '2001:dc3::/32')  # M

    stream.add_filter('record-type', 'ribs')
    # stream.add_filter('collector', 'rrc01')
    stream.add_filter('project', 'routeviews')
    stream.add_interval_filter(timestamp, timestamp)

    stream.start()

    result = {}
    while stream.get_next_record(rec):
        rec_time = rec.time
        if rec.status == "valid":
            elem = rec.get_next_elem()
            while elem:
                print rec.collector, elem.type, elem.peer_address, elem.peer_asn, elem.fields
                as_path = elem.fields['as-path'].split()
                as_path.reverse()
                prefix = elem.fields['prefix']
                if prefix not in result:
                    result[prefix] = []
                result[prefix].append(as_path)
                elem = rec.get_next_elem()

    # get only unique lists in result
    for prefix in result:
        result[prefix] = [list(x) for x in set(tuple(x) for x in result[prefix])]
    print('timestamp {} ==> result: {}'.format(rec_time, result))

    for prefix in result:
        for path in result[prefix]:
            print('path: {}'.format(path))
            cur_node = None
            prev_node = None
            counter_as_prepend = 0
            for index, asn in enumerate(path):
                searched_node = graph.find('asn', property_key='label', property_value=asn)
                try:
                    cur_node = searched_node.next()  # see if the AS node is already in the db or not. If yes, cur_node == prev_node
                except StopIteration:
                    cur_node = Node('asn', label=str(asn))  # if not exists, then create a new one
                if index > 0:
                    if index == len(path) - 1:
                        cur_node['path'] = path  # attach AS path to the last ASN
                    if cur_node != prev_node:
                        if counter_as_prepend > 0:
                            cur_node['prepended'] = counter_as_prepend
                            counter_as_prepend = 0  # reset
                        peering = Relationship(cur_node, 'TO', prev_node, time=rec_time, prefix=prefix)
                        # peering['time'] = rec_time
                        # peering['prefix'] = prefix
                        graph.create(peering)
                    else:  # AS prepending
                        counter_as_prepend += 1
                prev_node = cur_node
Code Example #42
    def readupdates(self):
        #TODO implement txt file for update messages?
        if self.txtFile:
            return

        # create a new bgpstream instance
        stream = BGPStream()
        bgprFilter = "type updates"

        if self.af == 6:
            bgprFilter += " and ipversion 6"
        else:
            bgprFilter += " and ipversion 4"

        # bgprFilter += " and collector rrc10 "
        for c in self.collectors:
            bgprFilter += " and collector %s " % c

        # if self.asnFilter is not None:
        # # TOFIX filter is now deprecated, we need to have both
        # # announcements and withdrawals
        # bgprFilter += ' and (path %s$ or elemtype withdrawals)' % self.asnFilter

        logging.info("Connecting to BGPstream... (%s)" % bgprFilter)
        logging.info("Timestamps: %s, %s" % (self.startts, self.endts))
        stream.parse_filter_string(bgprFilter)
        stream.add_interval_filter(self.startts, self.endts)
        if self.livemode:
            stream.set_live_mode()

        stream.start()
        # for line in p1.stdout:
        # create a reusable bgprecord instance
        rec = BGPRecord()
        while (stream.get_next_record(rec)):
            if rec.status != "valid":
                logging.warn("Invalid BGP record: %s, %s, %s, %s, %s" %
                             (rec.project, rec.collector, rec.type, rec.time,
                              rec.status))
            zDt = rec.time
            elem = rec.get_next_elem()
            while (elem):
                zOrig = elem.peer_address
                if zOrig not in self.peers:
                    # no need to update the counts for non-full feed peers
                    elem = rec.get_next_elem()
                    continue

                zAS = elem.peer_asn
                if zAS in self.excludedPeers or (len(
                        self.includedPeers) and zAS not in self.includedPeers):
                    elem = rec.get_next_elem()
                    continue
                zPfx = elem.fields["prefix"]
                if zPfx == "0.0.0.0/0" or zPfx in self.excludedPrefix or (
                        len(self.includedPrefix)
                        and zPfx not in self.includedPrefix):
                    elem = rec.get_next_elem()
                    continue

                msgTs = zDt
                # set first time bin!
                if self.ts is None:
                    self.slideTimeWindow(msgTs)

                elif self.ts + self.timeWindow <= msgTs:
                    self.slideTimeWindow(msgTs)

                elif self.ts > msgTs:
                    #Old update, ignore this to update the graph
                    logging.warn(
                        "Ignoring old update (peer IP: %s, timestamp: %s, current time bin: %s): %s"
                        % (zOrig, zDt, self.ts, (elem.type, zAS, elem.fields)))
                    elem = rec.get_next_elem()
                    continue

                node = self.rtree.search_exact(zPfx)

                if elem.type == "W":
                    # Withdraw: remove the corresponding node
                    if not node is None and zOrig in node.data:
                        origAS = node.data[zOrig]["origAS"]

                        if self.spatialResolution:
                            count = node.data[zOrig]["count"]
                            # Update count for above node
                            parent = self.findParent(node, zOrig)
                            if parent is None:
                                self.incTotalCount(-count, zOrig, origAS, zAS)
                                asns = node.data[zOrig]["path"]
                                self.incCount(-count, zOrig, origAS, zAS, asns)
                            else:
                                node.data[zOrig]["count"] = 0
                                # Add ips to above node and corresponding ASes
                                # pcountBelow = sum([n.data[zOrig]["count"] for n in self.rtree.search_covered(parent.prefix) if zOrig in n.data and n!=parent])
                                # pcountBelow = sum([n.data[zOrig]["count"] for n in self.rtree.search_covered(parent.prefix) if n.parent == parent and zOrig in n.data])
                                # oldpCount = parent.data[zOrig]["count"]
                                # pCount = self.nbIPs(parent.prefixlen) - pcountBelow
                                # parent.data[zOrig]["count"] = pCount
                                # pdiff = pCount - oldpCount
                                # assert pdiff==count

                                # Update count for origAS and path from the
                                # parent node
                                porigAS = parent.data[zOrig]["origAS"]
                                pasns = parent.data[zOrig]["path"]
                                self.incCount(count, zOrig, porigAS, zAS,
                                              pasns)
                                self.incTotalCount(count, zOrig, porigAS, zAS)

                                # Update count for withdrawn origAS and path
                                asns = node.data[zOrig]["path"]
                                self.incCount(-count, zOrig, origAS, zAS, asns)
                                self.incTotalCount(-count, zOrig, origAS, zAS)

                        else:
                            asns = node.data[zOrig]["path"]
                            self.incCount(-1, zOrig, origAS, zAS, asns)
                            self.incTotalCount(-1, zOrig, origAS, zAS)

                        del node.data[zOrig]

                else:
                    # Announce: update counters
                    sPath = elem.fields["as-path"]
                    path = sPath.split(" ")
                    origAS = path[-1]

                    if origAS in self.excludedOriginASN or (
                            len(self.includedOriginASN)
                            and origAS not in self.includedOriginASN):
                        elem = rec.get_next_elem()
                        continue
                        # FIXME: this is not going to work in the case of
                        # delegated prefixes or implicit withdraws

                    if len(path) < 2:
                        # Ignoring paths with only one AS
                        elem = rec.get_next_elem()
                        continue

                    if self.announceQueue is not None:
                        self.announceQueue.put((zDt, zOrig, zAS, zPfx, path))

                    # Announce:
                    if node is None or not zOrig in node.data:
                        # Add a new node

                        node = self.rtree.add(zPfx)
                        if self.spatialResolution:
                            # Compute the exact number of IPs
                            count = self.nbIPs(node.prefixlen)
                            countBelow = sum([
                                n.data[zOrig]["count"]
                                for n in self.rtree.search_covered(zPfx)
                                if zOrig in n.data and n != node
                            ])
                            count -= countBelow
                            # Update the ASes counts
                            node.data[zOrig] = {
                                "path": set(path),
                                "count": count,
                                "origAS": origAS
                            }
                            asns = node.data[zOrig]["path"]
                            self.incCount(count, zOrig, origAS, zAS, asns)
                            self.incTotalCount(count, zOrig, origAS, zAS)

                            parent = self.findParent(node, zOrig)
                            if not parent is None:
                                # Update above nodes
                                # print("%s: (%s) %s, %s, %s" % (zDt, elem.type, zAS, zPfx, count))
                                pcountBelow = sum([
                                    n.data[zOrig]["count"] for n in
                                    self.rtree.search_covered(parent.prefix)
                                    if zOrig in n.data and n != parent
                                ])
                                # pcountBelow = sum([n.data[zOrig]["count"] for n in self.rtree.search_covered(parent.prefix) if n.parent == parent and zOrig in n.data])
                                oldpCount = parent.data[zOrig]["count"]
                                pCount = self.nbIPs(
                                    parent.prefixlen) - pcountBelow
                                pdiff = pCount - oldpCount
                                parent.data[zOrig]["count"] = pCount
                                # print("parent %s: (%s) %s, %s, %s" % (zDt, zAS, parent.prefix, oldpCount, pCount))
                                # print [(n.prefix,n.data[zOrig]["count"]) for n in self.rtree.search_covered(parent.prefix) if zOrig in n.data and n!=parent ]
                                porigAS = parent.data[zOrig]["origAS"]
                                pasns = parent.data[zOrig]["path"]
                                self.incCount(pdiff, zOrig, porigAS, zAS,
                                              pasns)
                                self.incTotalCount(pdiff, zOrig, porigAS, zAS)

                        else:
                            self.incTotalCount(1, zOrig, origAS, zAS)
                            count = 1
                            # Update the ASes counts
                            node.data[zOrig] = {
                                "path": set(path),
                                "count": count,
                                "origAS": origAS
                            }
                            asns = node.data[zOrig]["path"]
                            self.incCount(count, zOrig, origAS, zAS, asns)

                    else:
                        #Update node path and counts
                        if self.spatialResolution:
                            count = node.data[zOrig]["count"]
                        else:
                            count = 1

                        porigAS = node.data[zOrig]["origAS"]
                        asns = node.data[zOrig]["path"]
                        self.incCount(-count, zOrig, porigAS, zAS, asns)
                        self.incTotalCount(-count, zOrig, porigAS, zAS)

                        node.data[zOrig]["path"] = set(path)
                        node.data[zOrig]["origAS"] = origAS
                        asns = node.data[zOrig]["path"]
                        self.incCount(count, zOrig, origAS, zAS, asns)
                        self.incTotalCount(count, zOrig, origAS, zAS)

                elem = rec.get_next_elem()
Code Example #43
def sendMessageToKafka(col_name, col_data):
    # print "Collector-{} Sending Message...".format(col_name)
    size_total = 0
    num_total = 0
    num_ipv4 = 0
    stream = BGPStream()
    record = BGPRecord()

    time_start = int(col_data.get('updates').get('latestDumpTime'))
    time_end = time_start + int(col_data.get('updates').get('dumpPeriod'))
    # print "Start Time:{}, End Time:{}".format(time_start, time_end)

    stream.add_filter('collector', col_name)
    stream.add_filter('record-type', 'ribs')
    # stream.add_interval_filter(time_start, time_end)
    # stream.add_interval_filter(time_start, time_start+300)
    stream.add_interval_filter(1503475200, 1503475200 + 7200)

    # print "Before Start>>>>>"
    stream.start()
    print col_name
    # print "After Start>>>>>>"
    producer = Kafka_producer()
    while stream.get_next_record(record):
        if record.status == "valid":
            elem = record.get_next_elem()
            while elem:
                if ':' in elem.peer_address:  # skip IPv6 peers
                    num_total += 1
                    elem = record.get_next_elem()
                    continue
                #print "Element:{},{},{}".format(elem.type, elem.peer_address, elem.peer_asn)
                field = elem.fields
                #print type(field)
                prefix = field['prefix'] if field.has_key('prefix') else ''
                next_hop = field['next-hop'] if field.has_key(
                    'next-hop') else ''
                as_path = field['as-path'] if field.has_key('as-path') else ''
                as_path = as_path.replace(' ', '|')
                text = [
                    elem.type, elem.peer_address,
                    str(elem.peer_asn), prefix, next_hop, as_path,
                    str(record.time)
                ]
                text = ','.join(text)

                # producer = Kafka_producer()
                producer.send_data(col_name, text)
                num_total += 1
                num_ipv4 += 1
                # print "[{}]-{}".format(col_name, num_total)
                # size_total += len(text)
                #NUM_TOTAL += 1
                #print "[{}]-{}-{}-{}-{}".format(col_name, num_total, num_ipv4, size_total, time.ctime(record.time))
                #print "No.{} Message Send Success-[{}]".format(num_total, text)
                elem = record.get_next_elem()
        else:
            pass
            # print "## Current record not valid!"
            # break
        # print "One Collector Finished"
    else:
        # print "-----------------------------"
        # print "Collector[{}] And Records Send Finished\nTotal Num:{}, IPv4 Num:{}, Total Size:{}".format(col_name, num_total, num_ipv4, size_total)
        # print "-----------------------------"
        print "Collector:[{}]".format(col_name)
        print "Total Num:{}, IPv4 Num:{}, Total Size:{}".format(
            num_total, num_ipv4, size_total)
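Kafka_producer is a project-specific wrapper that is not shown here; a minimal hypothetical equivalent built on kafka-python (broker address is only an example) might look like:

from kafka import KafkaProducer

class Kafka_producer(object):
    # hypothetical wrapper: one topic per collector, comma-separated text payloads
    def __init__(self, bootstrap_servers='localhost:9092'):
        self.producer = KafkaProducer(bootstrap_servers=bootstrap_servers)

    def send_data(self, topic, text):
        self.producer.send(topic, text.encode('utf-8'))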
Code Example #44
File: rib-origins-lt.py Project: smlng/bgp-origins
def main():
    parser = argparse.ArgumentParser(description='', epilog='')
    parser.add_argument('-b', '--begin',
                        help='Begin date (inclusive), format: yyyy-mm-dd HH:MM',
                        type=valid_date, required=True)
    parser.add_argument('-u', '--until',
                        help='Until date (exclusive), format: yyyy-mm-dd HH:MM',
                        type=valid_date, required=True)
    parser.add_argument('-c', '--collector',
                        help='Route collector from RIPE RIS or Route-Views project.',
                        type=str, required=True)
    parser.add_argument('-m', '--mongodb',
                        help='MongoDB connection parameters.',
                        type=str, default=None)
    parser.add_argument('-k', '--keepsnapshots',
                        help='Keep all snapshots, works only with -s.',
                        action='store_true')
    parser.add_argument('-s', '--snapshot',
                        help='Enable snapshoting.',
                        action='store_true')
    parser.add_argument('-l', '--loglevel',
                        help='Set loglevel [DEBUG,INFO,WARNING,ERROR,CRITICAL].',
                        type=str, default='WARNING')

    args = vars(parser.parse_args())

    numeric_level = getattr(logging, args['loglevel'].upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args['loglevel'])
    logging.basicConfig(level=numeric_level,
                        format='#> %(asctime)s : %(levelname)s : %(message)s')

    ts_begin = int((args['begin'] - datetime(1970, 1, 1)).total_seconds())
    ts_until = int((args['until'] - datetime(1970, 1, 1)).total_seconds())

    mongodbstr = None
    if args['mongodb']:
        mongodbstr = args['mongodb'].strip()

    rib_ts = 0
    rib_origins = dict()
    origins_lt = list()
    if args['snapshot']:
        rib_ts, rib_origins = load_snapshot(mongodbstr)
    if rib_ts > ts_begin:
        logging.info ("SKIP, found snapshot with newer ts")
        ts_begin = rib_ts - RIB_TS_THRESHOLD

    # BEGIN
    logging.info("START")

    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector',args['collector'])
    stream.add_filter('record-type','ribs')
    stream.add_interval_filter(ts_begin,ts_until)

    # Start the stream
    stream.start()

    while(stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping ...")
            continue
        #end if
        if rec.time > (rib_ts + RIB_TS_THRESHOLD):
            for p in rib_origins:
                for o in rib_origins[p]:
                    if rib_origins[p][o][1] < (rib_ts - RIB_TS_THRESHOLD):
                        origins_lt.append( (p,o,rib_origins[p][o][0],rib_origins[p][o][1]) )
                    #end if
                #end for
            #end for
            if args['snapshot'] and (len(rib_origins.keys()) > 0):
                store_snapshot(rec.time, rib_origins, mongodbstr)
                if not args['keepsnapshots']:
                    remove_snapshot(rib_ts, mongodbstr)
                # end if keepsnapshots
            # end if snapshot
            rib_ts = rec.time
            logging.info("ts: "+str(rib_ts))
            if len(origins_lt) > 0:
                if mongodbstr:
                    store_origins_lt(rib_ts,origins_lt, mongodbstr)
                else:
                    print_origins_lt(rib_ts,origins_lt)
                #end if
                for l in origins_lt:
                    del rib_origins[l[0]][l[1]]
                #end for
                origins_lt = list()
            # end if
        #end if
        while(elem):
            prefix = elem.fields['prefix']
            aspath = elem.fields['as-path'].split()
            aspath = [a for a in aspath if '{' not in a]  # remove AS-SETs
            origin = aspath[-1]
            if prefix not in rib_origins:
                rib_origins[prefix] = dict()
            #end if
            if origin not in rib_origins[prefix]:
                rib_origins[prefix][origin] = (rib_ts,rib_ts)
            else:
                rib_origins[prefix][origin] = (rib_origins[prefix][origin][0],rib_ts)
            #end if
            elem = rec.get_next_elem()
        #end while
    #end while
    if args['snapshot']:
        print "NOTE: remaining origin lifetimes are stored in latest snapshot (%d)!\n" % rib_ts
        if (len(rib_origins.keys()) > 0):
            store_snapshot(rib_ts, rib_origins, mongodbstr)
        # end if
    else:
        print "NOTE: output remaining origin lifetimes with current ts (%d)\n" % rib_ts
        origins_lt = list()
        for p in rib_origins:
            for o in rib_origins[p]:
                origins_lt.append( (p,o,rib_origins[p][o][0],rib_ts) )
        if mongodbstr:
            store_origins_lt(rib_ts,origins_lt, mongodbstr)
        else:
            print_origins_lt(rib_ts,origins_lt)
Code Example #45
def get_data(timestamp):
    graph = Graph(password="******")

    stream = BGPStream()
    rec = BGPRecord()
    rec_time = None

    # IPv4
    stream.add_filter('prefix', '198.41.0.0/24')  # A-root
    stream.add_filter('prefix', '192.33.4.0/24')  # C-root
    stream.add_filter('prefix', '199.7.91.0/24')  # D-root
    stream.add_filter('prefix', '192.203.230.0/24')  # E-root, IPv4 only
    stream.add_filter('prefix', '192.5.5.0/24')  # F-root
    stream.add_filter('prefix', '192.112.36.0/24')  # G-root, IPv4 only
    stream.add_filter('prefix', '198.97.190.0/24')  # H-root
    stream.add_filter('prefix', '192.36.148.0/24')  # I-root
    stream.add_filter('prefix', '192.58.128.0/24')  # J-root
    stream.add_filter('prefix', '193.0.14.0/24')  # K-root
    stream.add_filter('prefix', '199.7.83.0/24')  # L-root
    stream.add_filter('prefix', '202.12.27.0/24')  # M-root

    # IPv6
    stream.add_filter('prefix', '2001:503:ba3e::/48')  # A
    stream.add_filter('prefix', '2001:500:2::/48')  # C
    stream.add_filter('prefix', '2001:500:2d::/48')  # D
    stream.add_filter('prefix', '2001:500:2f::/48')  # F
    stream.add_filter('prefix', '2001:500:1::/48')  # H
    stream.add_filter('prefix', '2001:7fe::/33')  # I
    stream.add_filter('prefix', '2001:503:c27::/48')  # J
    stream.add_filter('prefix', '2001:7fd::/48')  # K
    stream.add_filter('prefix', '2001:500:9f::/48')  # L
    stream.add_filter('prefix', '2001:dc3::/32')  # M

    stream.add_filter('record-type', 'ribs')
    # stream.add_filter('collector', 'route-views.soxrs')
    stream.add_filter('project', 'routeviews')
    stream.add_interval_filter(timestamp, timestamp)

    stream.start()

    result = {}
    while stream.get_next_record(rec):
        rec_time = rec.time
        if rec.status == "valid":
            elem = rec.get_next_elem()
            while elem:
                print('{} {} {} {} {}'.format(rec.collector, elem.type, elem.peer_address, elem.peer_asn, elem.fields))
                as_path = elem.fields['as-path'].split()
                as_path.reverse()
                prefix = elem.fields['prefix']
                if prefix not in result:
                    result[prefix] = []
                result[prefix].append(as_path)
                elem = rec.get_next_elem()

    # get only unique lists in result
    for prefix in result:
        result[prefix] = [list(x) for x in set(tuple(x) for x in result[prefix])]
    print('timestamp {} ==> result: {}'.format(rec_time, result))

    for prefix in result:
        for path in result[prefix]:
            print('path: {}'.format(path))
            cur_node = None
            prev_node = None
            counter_as_prepend = 0
            for index, asn in enumerate(path):
                cur_node = asn
                graph.run('MERGE(s:asn{{name:"{0}", label:"{0}"}})'.format(asn))  # create new node if not exist.
                if index > 0:
                    if cur_node != prev_node:
                        query = 'MATCH (s:asn),(d:asn) ' \
                                'WHERE s.name="{0}" AND d.name="{1}" ' \
                                'MERGE (s)-[r:TO {{prefix: "{3}", time: {2}, prepended: {4}}}]->(d)'\
                            .format(cur_node, prev_node, rec_time, prefix, counter_as_prepend)
                        graph.run(query)
                        if counter_as_prepend > 0:
                            counter_as_prepend = 0  # reset
                    else:  # AS prepending
                        counter_as_prepend += 1
                # else:  # origin AS
                #     graph.run('MATCH (s:asn)'
                #               'WHERE s.name="{0}"'
                #               'SET s.origin="{1}_{2}"'
                #               'RETURN s'.format(asn, prefix, rec_time))
                prev_node = cur_node
Code Example #46
File: BGPTest.py Project: fu-ilab-swp18/bgp-group
class BGPTest(object):

    def __init__(self, route_collector="rrc00", rpki_validator="rpki-validator.realmv6.org:8282"):
        self.rc = route_collector

        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])

        # self._start_rtr_manager()

        self.stream = BGPStream()
        self.rec = BGPRecord()

    def _start_rtr_manager(self):
        self.mgr.start()
        while not self.mgr.is_synced():
            sleep(0.2)
            if status.error:
                print("Connection error")
                exit()

    def start_stream(self, start_time=None, end_time=0, route_collector=""):
        """ Starts the
        """

        if route_collector == "":
            route_collector = self.rc
        self.stream.add_filter('collector', route_collector)
        self.stream.add_filter('record-type', 'ribs')

        if (start_time is None) or not isinstance(start_time, datetime):
            start_time = datetime.utcnow()

        if isinstance(end_time, datetime):
            end = int(end_time.strftime("%s"))
        else:
            end = 0

        start = int(start_time.strftime("%s"))
        print(start)

        self.stream.add_interval_filter(start, end)
        # print('Start stream with', start_time, end_time)
        self.stream.start()

    def get_records(self):
        while(self.stream.get_next_record(self.rec)):

            # Process the record only if it is valid
            if self.rec.status != "valid":
                pass
            else:
                elem = self.rec.get_next_elem()
                while(elem):
                    # Print self.record and elem information
                    print(self.rec.project, self.rec.collector,
                          self.rec.type, self.rec.time, self.rec.status)
                    print(elem.type, elem.peer_address,
                          elem.peer_asn, elem.fields)
                    # prefix = elem.fields["prefix"].split('/')
                    # result = mgr.validate((int) elem.fields["as-path"].split(" ")[-1], prefix[0], prefix[1])
                    elem = self.rec.get_next_elem()

        print("done.")