Code Example #1
File: bgpstream_test.py  Project: CAIDA/bgp-hackathon
    def calculate_effective_prefixes(self, start_interval=1451692800):
        '''
        This method calculates the number of effective /24
        prefixes that lie underneath a specified route prefix
        :param start_interval: the start interval of the data
        stream
        :return: number of effective prefixes under the route's
        root prefix
        '''

        stream = BGPStream()
        rec = BGPRecord()

        stream.add_filter('collector', self.collector_name)
        stream.add_filter('record-type', 'ribs')

        if isinstance(start_interval, datetime.datetime):
            interval_start_utc = self.convert_interval_to_utc(start_interval)
            stream.add_interval_filter(interval_start_utc - 300, interval_start_utc + 300)
        else:
            stream.add_interval_filter(start_interval - 300, start_interval + 300)

        stream.start()
        print "Starting routing table parsing"
        while (stream.get_next_record(rec)):
            elem = rec.get_next_elem()
            while elem:
                # Get the peer ASN and IP. We then construct a peer_id, since a collector
                # can establish multiple connections with the same ASN.
                peer_asn = elem.peer_asn
                peer_asn_ip = elem.peer_address

                # make this an unmodifiable tuple
                peer_id = (peer_asn, peer_asn_ip)
                peer_route_trie = self.peer_trie_handles[peer_id]

                # Do a single level search of the route prefix to
                # find the left and right prefix advertisements
                single_level_prefixes = \
                    peer_route_trie.single_level_search(elem.fields['prefix'])
                print single_level_prefixes

                prefix_count = len(single_level_prefixes)
                root_24_prefix_count = 0
                lr_24_prefix_count1 = lr_24_prefix_count2 = 0

                # The /24 prefixes below the advertised prefix are calculated
                # as all the /24 prefixes served by the root - sum of the /24
                # prefixes served by root's children
                # Prefix objects stringify like '<10.0.0.0/16>': strip the
                # angle brackets and read the mask length after the '/'.
                # A /n prefix (n <= 24) covers 2 ** (24 - n) /24 prefixes.
                counts = [2 ** (24 - int(str(p).strip('<>').split('/')[1]))
                          for p in single_level_prefixes[:3]]
                if prefix_count >= 1:
                    root_24_prefix_count = counts[0]
                if prefix_count >= 2:
                    lr_24_prefix_count1 = counts[1]
                if prefix_count >= 3:
                    lr_24_prefix_count2 = counts[2]

                effective_24_prefix_count = \
                    root_24_prefix_count - (lr_24_prefix_count1 +
                                            lr_24_prefix_count2)

                print "Effective Prefix Count : ", \
                    effective_24_prefix_count

                for prefix in single_level_prefixes:
                    trie_node = peer_route_trie.search_exact(str(prefix))
                    as_path = trie_node.data['as-path'].split(" ")
                    as_path_headless = as_path[1:-1]
                    print "AS-Path : ", as_path
                    as_headless_len = len(as_path_headless)
                    if as_headless_len > 1:
                        for i in range(0, as_headless_len - 1):
                            print "Headless nodes : ", as_path_headless[i], \
                                as_path_headless[i + 1]
                            # NB: keyed by the left-hand AS of each adjacent
                            # pair, so this tallies per-AS link endpoints
                            # rather than distinct AS links
                            if as_path_headless[i] in self.aslink_datastore:
                                self.aslink_datastore[as_path_headless[i]] += 1
                            else:
                                self.aslink_datastore[as_path_headless[i]] = 1

                elem = rec.get_next_elem()
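The /24 arithmetic above reduces to a small helper: a /n prefix (n <= 24) covers 2 ** (24 - n) /24 prefixes. A minimal standalone sketch, assuming the same '<10.0.0.0/16>'-style prefix stringification used above:

def count_24s(prefix_str):
    """Number of /24s covered by e.g. '<10.0.0.0/16>' or '10.0.0.0/16'."""
    plen = int(str(prefix_str).strip('<>').split('/')[1])
    return 2 ** (24 - plen) if plen <= 24 else 0

# a /16 root with a /17 and a /18 child leaves 256 - (128 + 64) = 64 /24s
effective = count_24s('<10.0.0.0/16>') - (count_24s('<10.0.0.0/17>') +
                                          count_24s('<10.128.0.0/18>'))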
Code Example #2
    def get_data_graph(self):

        stream = BGPStream()
        rec = BGPRecord()
        stream.add_filter('record-type', 'ribs')
        stream.add_interval_filter(self.start, self.end)
        stream.start()

        as_graph = nx.Graph()

        while stream.get_next_record(rec):
            if rec.status == "valid":
                elem = rec.get_next_elem()
                while elem:
                    # collapse consecutive duplicate ASNs (AS-path prepending) into a clean as-path list
                    as_path = [
                        k
                        for k, g in groupby(elem.fields['as-path'].split(" "))
                    ]
                    peer = str(elem.peer_asn)
                    # sanity-check the AS path: it must start at the peer ASN
                    if len(as_path) > 1 and as_path[0] == peer:
                        if re.match(self.pattern, elem.fields['prefix']):
                            self.as_prefix[as_path[-1]].add(
                                elem.fields['prefix'])
                            # add edges to the graph
                            for i in range(0, len(as_path) - 1):
                                as_graph.add_edge(as_path[i], as_path[i + 1])
                    elem = rec.get_next_elem()

        as_graph = self.add_geo_loc(as_graph)

        return as_graph
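The groupby trick above collapses AS-path prepending (consecutive repeats of the same ASN) while preserving path order. A quick standalone illustration:

from itertools import groupby

raw_path = "3356 3356 3356 1299 2914 2914 262857"
as_path = [k for k, g in groupby(raw_path.split(" "))]
print(as_path)  # ['3356', '1299', '2914', '262857']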
Code Example #3
    def create_trie_from_bgpstream_info(self, interval_start=1451692800):

        stream = BGPStream()
        rec = BGPRecord()

        stream.add_filter('collector', self.collector_name)
        stream.add_filter('record-type', 'ribs')

        if isinstance(interval_start, datetime.datetime):
            interval_start_utc = self.convert_interval_to_utc(interval_start)
            stream.add_interval_filter(interval_start_utc - 300, interval_start_utc + 300)
        else:
            stream.add_interval_filter(interval_start - 300, interval_start + 300)

        stream.start()

        while (stream.get_next_record(rec)):
            elem = rec.get_next_elem()
            while elem:
                # Get the peer ASN and IP. We then construct a peer_id, since a collector
                # can establish multiple connections with the same ASN.
                peer_asn = elem.peer_asn
                peer_asn_ip = elem.peer_address

                # make this an unmodifiable tuple
                peer_id = (peer_asn, peer_asn_ip)

                peer_route_trie = self.peer_trie_handles[peer_id]
                trie_node = peer_route_trie.add(elem.fields['prefix'])
                trie_node.data['as-path'] = elem.fields['as-path']
                elem = rec.get_next_elem()
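This method assumes self.peer_trie_handles maps each (peer_asn, peer_address) tuple to a prefix trie whose add() returns a node carrying a data dict. A minimal sketch of such a structure, assuming the py-radix package (whose Radix.add() does return a node exposing .data):

from collections import defaultdict
import radix  # py-radix

# one radix trie per (peer_asn, peer_address) pair, created on first access
peer_trie_handles = defaultdict(radix.Radix)

node = peer_trie_handles[(3356, '4.68.0.1')].add('10.0.0.0/16')
node.data['as-path'] = '3356 1299 262857'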
Code Example #4
    def sendMessageToKafka(self, col_name, col_data):
        print "lalalala"
        stream = BGPStream()
        record = BGPRecord()

        time_start = int(col_data.get('ribs').get('latestDumpTime'))
        time_end = time_start + int(col_data.get('ribs').get('dumpPeriod'))

        stream.add_filter('collector', col_name)
        stream.add_filter('record-type', 'ribs')
        stream.add_interval_filter(time_start, time_end)
        print "Before Start"
        stream.start()
        print "After Start"

        # create the producer once instead of once per element
        producer = Kafka_producer()
        while stream.get_next_record(record):
            if record.status == "valid":
                elem = record.get_next_elem()
                while elem:
                    # print "Record:{}".format(elem)
                    # NB: a BGPElem is not directly JSON-serializable;
                    # see the serialization sketch after this example
                    producer.send_data(col_name, json.dumps(elem))
                    elem = record.get_next_elem()
            else:
                print "## Current record not valid!"
        print "One Collector Finished"
Code Example #5
def recv_bgpstream_rib(begin, until, collector):
    """
    Receive and parse BGP RIB records from a given bgpstream collector.
    """
    logging.info("CALL recv_bgpstream_rib")
    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector', collector)
    stream.add_filter('record-type', 'updates')
    stream.add_interval_filter(begin, until)
    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping.")
            continue
        bgp_message = None
        while (elem):
            if (elem.type.upper() == 'A') or (elem.type.upper() == 'R'):
                bgp_message = BGPmessage(elem.time, 'update')
                aspath = elem.fields['as-path'].split()
                for a in aspath:
                    if not '{' in a:  # ignore AS-SETs
                        bgp_message.add_as_to_path(a)
                bgp_message.add_announce(elem.fields['prefix'])
                output(bgp_message)
            elem = rec.get_next_elem()
        # end while (elem)
    # end while (stream...)
    output('FLUSH')
Code Example #6
def run_bgpstream(args):
    (collector, start_time, end_time, data_type) = args

    # initialize and configure BGPStream
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    # NB: BGPStream uses inclusive/inclusive intervals, so subtract one off the
    # end time since we are using inclusive/exclusive intervals
    stream.add_interval_filter(start_time, end_time-1)
    stream.add_filter('record-type', data_type)
    stream.start()

    # per-peer data
    peers_data = {}

    # loop over all records in the stream
    while stream.get_next_record(rec):
        elem = rec.get_next_elem()
        # to track the peers that have elems in this record
        peer_signatures = set()
        # loop over all elems in the record
        while elem:
            # create a peer signature for this elem
            sig = peer_signature(rec, elem)
            peer_signatures.add(sig)
            # if this is the first time we have ever seen this peer, create
            # an empty result: (Pfxs_v4_set, Pfxs_v6_set, ASNs_v4_set, ASNs_v6_set)
            if sig not in peers_data:
                peers_data[sig] = [set(), set(), set(), set()]

            if 'prefix' in elem.fields:
                pfx = elem.fields['prefix']
                if ":" in pfx:
                    peers_data[sig][1].add(pfx)
                else:
                    peers_data[sig][0].add(pfx)

                # nested under the prefix check so pfx is always bound
                if 'as-path' in elem.fields:
                    path_split = elem.fields['as-path'].split()
                    if ":" in pfx:
                        for ASN in path_split:
                            peers_data[sig][3].add(ASN)
                    else:
                        for ASN in path_split:
                            peers_data[sig][2].add(ASN)

            elem = rec.get_next_elem()

    # the time in the output row is truncated down to a multiple of
    # RESULT_GRANULARITY so that slices can be merged correctly
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)

    # for each peer that we processed data for, create an output row
    return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
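peer_signature() is referenced here (and in Code Examples #9 and #13) but never shown. A plausible, hypothetical implementation, given that per-peer state is keyed by collector and peer identity:

def peer_signature(rec, elem):
    # hypothetical: uniquely identify a peer within a collector
    return (rec.project, rec.collector, elem.peer_asn, elem.peer_address)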
Code Example #7
def downloader(start_date, duration):
    """Download BGP paths from Routeviews and RIPE NCC from a start date for a certain duration."""

    # Start of UNIX time
    base = int(datetime.datetime.strptime(start_date, '%m/%d/%Y').strftime('%s'))
    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    stream.add_interval_filter(base, base + int(duration))
    stream.add_filter('record-type', 'ribs')
    stream.start()
    path_set = set()
    f = open('rib.txt', 'w')
    while True:
        rec = stream.get_next_record()
        if rec is None:
            break  # exit the loop so f.close() below is reached
        if rec.status != "valid":
            continue
        else:
            elem = rec.get_next_elem()
            while(elem):
                path = elem.fields['as-path']
                if '{' in path or '(' in path:
                    elem = rec.get_next_elem()
                    continue
                prefix = elem.fields['prefix']
                # Focus on IPv4 prefixes
                if ":" not in prefix and path not in path_set:
                    f.write(path.replace(' ', '|') + '\n')
                    path_set.add(path)
                elem = rec.get_next_elem()
    f.close()
Code Example #8
def build_sql_db():
    conn = sqlite3.connect('bgp_stage.db')
    c = conn.cursor()

    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    stream.add_filter('collector','rrc11')
    stream.add_interval_filter(1438417216,1438417316)
    stream.start()

    while(stream.get_next_record(rec)):
        if rec.status != "valid":
            continue
        else:
            elem = rec.get_next_elem()
            while(elem):
                if elem.type == "AB":
                    prefix = elem.fields["prefix"]
                    as_path = elem.fields["as-path"].split(" ")
                    origin = as_path[-1]
                    time = elem.time


                    #IP Prefix database
                    ip_min, ip_max = calculate_min_max(prefix)
                    c.execute("SELECT ip_min FROM prefix_as WHERE ip_min = (?) AND ip_max = (?) AND as_o = (?)", (ip_min, ip_max, origin))
                    row = c.fetchone()
                    if row is not None:  # fetchone() returns None, not an empty row
                        c.execute("UPDATE prefix_as SET count = count + 1  WHERE ip_min = (?) AND ip_max = (?) AND as_o = (?)", (ip_min, ip_max, origin))
                    else:
                        c.execute("INSERT INTO prefix_as VALUES(?,?,?,?,?)", (ip_min, ip_max, origin, 1, time))


                    #AS link database
                    for as1,as2 in zip(as_path, as_path[1:]) :
                        c.execute("SELECT as_o FROM as_link WHERE as_o = (?) AND as_n = (?)",(as1,as2))
                        row = c.fetchone()
                        if row is not None:
                            c.execute("UPDATE as_link SET count = count + 1 WHERE as_o = (?) AND as_n = (?)",
                                      (as1, as2))
                        else:
                            c.execute("INSERT INTO as_link VALUES(?,?,?,?)", (as1, as2, 1, 0))

                elif elem.type == "WA":
                    prefix = elem.fields["prefix"]
                    time = elem.time
                    #Needs research

                    print(rec.project, rec.collector, rec.type, rec.time, rec.status,
                        elem.type, elem.peer_address, elem.peer_asn, elem.fields)
                    print(prefix,elem.time, "W")

                print(rec.project, rec.collector, rec.type, rec.time, rec.status,
                      elem.type, elem.peer_address, elem.peer_asn, elem.fields)
                elem = rec.get_next_elem()
            conn.commit()
    conn.close()
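calculate_min_max() is not shown; it evidently maps a CIDR prefix to the integer address bounds stored in prefix_as. A sketch using the standard-library ipaddress module (Python 3 syntax, so treat it as illustrative next to this Python 2 code):

import ipaddress

def calculate_min_max(prefix):
    # '10.0.0.0/24' -> (167772160, 167772415)
    net = ipaddress.ip_network(str(prefix), strict=False)
    return int(net.network_address), int(net.broadcast_address)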
Code Example #9
def run_bgpstream(args):
    (collector, start_time, end_time, data_type) = args

    # initialize and configure BGPStream
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    # NB: BGPStream uses inclusive/inclusive intervals, so subtract one off the
    # end time since we are using inclusive/exclusive intervals
    stream.add_interval_filter(start_time, end_time-1)
    stream.add_filter('record-type', data_type)
    stream.start()

    # per-peer data
    peers_data = {}

    # loop over all records in the stream
    while stream.get_next_record(rec):
        elem = rec.get_next_elem()
        # loop over all elems in the record
        while elem:
            # create a peer signature for this elem
            sig = peer_signature(rec, elem)
            # if this is the first time we have ever seen this peer, create
            # an empty result: (dict(Pfx_v4)=Pfx_origins,  dict(Pfx_v6)=Pfx_origins))
            if sig not in peers_data:
                peers_data[sig] = [{}, {}]


            if 'prefix' in elem.fields:
                pfx = elem.fields['prefix']
                origin = ""
                if 'as-path' in elem.fields:
                    path_split = elem.fields['as-path'].split()
                    if path_split:
                        origin = path_split[-1]

                if ":" in pfx:
                    if pfx not in peers_data[sig][1]:
                        peers_data[sig][1][pfx] = set()
                    # discard as origin: AS-SETs and ASN 23456 (AS_TRANS)
                    if origin and origin != "23456" and "{" not in origin:
                        peers_data[sig][1][pfx].add(origin)
                else:
                    if pfx not in peers_data[sig][0]:
                        peers_data[sig][0][pfx] = set()
                    # discard as origin: AS-SETs and ASN 23456 (AS_TRANS)
                    if origin and origin != "23456" and "{" not in origin:
                        peers_data[sig][0][pfx].add(origin)


            elem = rec.get_next_elem()

    # the time in the output row is truncated down to a multiple of
    # RESULT_GRANULARITY so that slices can be merged correctly
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)

    # for each peer that we processed data for, create an output row
    return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
Code Example #10
File: bgpstream.py  Project: copley/bgp-gen
    def run(self):
        stream = BGPStream()
        rec = BGPRecord()
        if self.upd_file is None:
            stream.add_filter('collector', self.collector)
            stream.add_filter('record-type', self.record_type)
            stream.add_interval_filter(self.from_date, self.to_date)
            stream.set_live_mode()
        else:
            stream.set_data_interface('singlefile')
            if self.upd_file:
                stream.set_data_interface_option('singlefile', 'upd-file',
                                                 self.upd_file)
            if self.rib_file:
                stream.set_data_interface_option('singlefile', 'rib-file',
                                                 self.rib_file)
        if self.prefix_filter is not None:
            for prefix in self.prefix_filter:
                stream.add_filter('prefix', prefix)
        if self.peer_as_filter:
            for asn in self.peer_as_filter:
                stream.add_filter('peer-asn', str(asn))
        if self.communities_filter:
            for community in self.communities_filter:
                stream.add_filter('community', community)
        stream.start()
        stream.get_next_record(rec)
        prev = rec.time
        while stream.get_next_record(rec):
            now = rec.time
            if rec.status == 'valid':
                elem = rec.get_next_elem()
                while elem:
                    statement = None
                    peer_address = elem.peer_address
                    peer_asn = elem.peer_asn
                    if peer_asn in self.asn_to_nexthop:
                        if elem.type == 'A' or elem.type == 'R':
                            prefix = elem.fields['prefix']
                            as_path = elem.fields['as-path']
                            # rewrite the next-hop to the one configured
                            # for this peer ASN
                            nexthop = self.asn_to_nexthop[peer_asn]
                            statement = 'announce route %s next-hop %s as-path' \
                                ' [ %s ]' % (prefix, nexthop, as_path)
                        elif elem.type == 'W':
                            prefix = elem.fields['prefix']
                            statement = 'withdraw route %s' % prefix
                    if statement:
                        sys.stdout.write("%s\n" % statement)
                        sys.stdout.flush()
                    elem = rec.get_next_elem()
            # pace replay according to inter-record spacing
            time.sleep(self.delay + now - prev)
            prev = now
Code Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    parser.description = textwrap.dedent('''\
        a proof-of-concept utility for watching updates from BGPstream
        and then printing out if an unexpected update is heard
        ''')
    parser.epilog = textwrap.dedent('''\
        Example: watch these route announcements
            %(prog)s -f routes.yaml ''')
    required = parser.add_argument_group('required arguments')
    required.add_argument("-f",
                          "--file",
                          required=True,
                          help="yaml file of prefixes to origin asn")
    parser.add_argument("-d",
                        "--debug",
                        action='store_true',
                        help="print out all updates containing these prefixes")
    args = parser.parse_args()

    routes = pytricia.PyTricia(48)  # longest reasonable pfx in dfz

    with open(args.file, 'r') as f:
        routesfile = yaml.safe_load(f)
    for pfx in routesfile:
        routes[pfx] = routesfile[pfx]

    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('record-type', 'updates')
    stream.add_interval_filter(int(time.time()), 0)
    stream.set_live_mode()
    stream.start()

    while (stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
            while (elem):
                if 'as-path' in elem.fields:
                    path = elem.fields['as-path'].split()
                    prefix = elem.fields['prefix']
                    if prefix in routes and (routes[prefix] != path[-1]
                                             or args.debug):
                        print('Heard prefix:', elem.fields['prefix'],
                              'AS-PATH:', elem.fields['as-path'],
                              '  Found by project:', rec.project, 'collector:',
                              rec.collector, 'type:', rec.type, 'at time:',
                              rec.time, 'Type:', elem.type, 'Peer:',
                              elem.peer_address, 'AS', elem.peer_asn)

                elem = rec.get_next_elem()
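The watcher expects the YAML file to map each watched prefix to its expected origin ASN; since the comparison is against path[-1] (a string), the ASNs should be quoted. A minimal, illustrative routes.yaml with placeholder prefixes and ASNs:

# routes.yaml
192.0.2.0/24: '64496'
198.51.100.0/24: '64497'
2001:db8::/32: '64498'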
Code Example #12
    def get_stream(self):
        logging.debug(
            f"[ZombieRecordFinder-{self.collector}] try to create BGPstream")

        _start = datetime.datetime(self.year, self.month, 10)
        _end = datetime.datetime(self.year, self.month, 20)

        stream = BGPStream()
        stream.add_interval_filter(dt2ts(_start), dt2ts(_end))
        stream.add_filter('collector', self.collector)
        for _, p in self.zombies:
            stream.add_filter('prefix-exact', p)
        return stream
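dt2ts() converts a datetime into the epoch seconds that add_interval_filter() expects. A common implementation, assuming naive UTC datetimes:

import calendar

def dt2ts(dt):
    # naive UTC datetime -> epoch seconds
    return calendar.timegm(dt.timetuple())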
Code Example #13
def run_bgpstream(args):
    (collector, start_time, end_time, data_type) = args

    # initialize and configure BGPStream
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    # NB: BGPStream uses inclusive/inclusive intervals, so subtract one off the
    # end time since we are using inclusive/exclusive intervals
    stream.add_interval_filter(start_time, end_time - 1)
    stream.add_filter('record-type', data_type)
    stream.start()

    # per-peer data
    peers_data = {}

    # loop over all records in the stream
    while stream.get_next_record(rec):
        elem = rec.get_next_elem()
        # to track the peers that have elems in this record
        peer_signatures = set()
        # loop over all elems in the record
        while elem:
            # create a peer signature for this elem
            sig = peer_signature(rec, elem)
            peer_signatures.add(sig)
            # if this is the first time we have ever seen this peer, create
            # an empty result: (elem_cnt, peer_record_cnt, coll_record_cnt)
            if sig not in peers_data:
                peers_data[sig] = [0, 0, 0]
            peers_data[sig][0] += 1  # increment elem cnt for this peer
            elem = rec.get_next_elem()

        # done with elems, increment the 'coll_record_cnt' field for just
        # one peer that was present in this record (allows a true, per-collector
        # count of records since each record can contain elems for many peers)
        if len(peer_signatures):
            first = True
            for sig in peer_signatures:  # increment peer_record_cnt for all
                if first:
                    peers_data[sig][2] += 1  # increment the coll_record_cnt
                    first = False
                peers_data[sig][1] += 1

    # the time in the output row is truncated down to a multiple of
    # RESULT_GRANULARITY so that slices can be merged correctly
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)

    # for each peer that we processed data for, create an output row
    return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
Code Example #14
File: RIPE_database.py  Project: walberjose/COOL
def MOAS_prefixes():
    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    # Consider Route Views Singapore only
    stream.add_filter('collector', 'route-views.sg')

    # Consider RIBs dumps only
    stream.add_filter('record-type', 'ribs')

    # Consider this time interval:
    # Sat, 01 Aug 2015 7:50:00 GMT -  08:10:00 GMT
    stream.add_interval_filter(1438415400, 1438416600)

    # Start the stream
    stream.start()

    # <prefix, origin-ASns-set > dictionary
    prefix_origin = defaultdict(set)
    prefix_origin_dict = defaultdict(list)

    # Get next record
    while (stream.get_next_record(rec)):
        elem = rec.get_next_elem()
        while (elem):
            # Get the prefix
            pfx = elem.fields['prefix']
            # Get the list of ASes in the AS path
            ases = elem.fields['as-path'].split(" ")
            if len(ases) > 0:
                # Get the origin ASn (rightmost)
                origin = ases[-1]
                if origin == '262857':  # origin is a string; AS262857 - UFRN: 177.20.128.0/19 UNIVERSIDADE FEDERAL DO RIO GRANDE DO NORTE
                    '''
                    BI 	177.20.128.0/19 	198.32.125.84 	280 	100 	0 	1916, 262857 	IGP
                    E 	177.20.128.0/19 	213.248.67.117 	0 	70 	0 	1299, 2914, 1916, 262857 	IGP
                    E 	177.20.128.0/19 	213.248.98.93 	0 	70 	0 	1299, 2914, 1916, 262857 	IGP
                    '''
                    print "Achou UFRN"
                # Insert the origin ASn in the set of
                # origins for the prefix
                prefix_origin[pfx].add(origin)
                prefix_origin_dict[pfx].append(ases)

            elem = rec.get_next_elem()

    # Print the list of MOAS prefix and their origin ASns
    for pfx in prefix_origin:
        if len(prefix_origin[pfx]) > 1:
            pass  #print pfx, ",".join(prefix_origin[pfx])
Code Example #15
    def getBGPStream(self):
        logging.info(
            f"creating BGPstream {self.collector} {self.record_type} {self.start.year}-{self.start.month} "
        )

        stream = BGPStream()
        stream.add_filter('collector', self.collector)
        stream.add_filter('record-type', self.record_type)
        if self.record_type == "updates":
            stream.add_interval_filter(dt2ts(self.start), dt2ts(self.end))
        elif self.record_type == "ribs":
            _start = dt2ts(self.start - timedelta(hours=2))
            _end = dt2ts(self.start + timedelta(hours=2))
            stream.add_interval_filter(_start, _end)

        return stream
Code Example #16
File: RIPE_database.py  Project: walberjose/COOL
def getting_BGP_update():
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', 'rrc11')
    stream.add_interval_filter(1438417216, 1438417216)
    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status != "valid":
            print rec.project, rec.collector, rec.type, rec.time, rec.status
        else:
            elem = rec.get_next_elem()
            while (elem):
                print rec.project, rec.collector, rec.type, rec.time, rec.status,
                print elem.type, elem.peer_address, elem.peer_asn, elem.fields
                elem = rec.get_next_elem()
Code Example #17
def generate_stream():
    bs = BGPStream()
    rec = BGPRecord()
    #initialize MySql
    a = MySqlDAL()
    a.start()

    #initialize the publisher in port number 12345
    publisher = ZmqPublisher(12345)

    bs.add_interval_filter(calendar.timegm(time.gmtime()) - delay, 0)
    # bs.add_filter('collector', 'route-views.sg')
    bs.add_filter('record-type', 'updates')
    bs.start()

    print('Beginning to read from stream')
    input_id = 0
    while bs.get_next_record(rec):
        elem = rec.get_next_elem()
        while elem is not None:
            # sleep until it is time to send this record
            '''
            now = calendar.timegm(time.gmtime())
            sim_time = now - delay
            if elem.time > sim_time:
                time.sleep(elem.time - sim_time)
            '''
            if elem.type not in ['A', 'W']:
                # advance before continuing; a bare 'continue' here
                # would spin forever on the same element
                elem = rec.get_next_elem()
                continue

            input_id += 1
            msg = elem2bgplay(rec, elem)
            msg['type'] = elem.type  # 'A' or 'W'; a hard-coded 'A' would mislabel withdrawals
            msg['id'] = input_id
            print(msg)

            # Publish the message
            publisher.publish(msg)

            # Write it to DB
            if elem.type == 'A':
                a.add(msg)
            elif elem.type == 'W':
                a.remove(msg)
            else:
                print "Error: Unknown type: " + elem.type
            elem = rec.get_next_elem()
Code Example #18
def generate_stream():
    bs = BGPStream()
    rec = BGPRecord()
    #initialize MySql
    db_writer = MysqlWriter()
    db_writer.start()

    #initialize the publisher in port number 12345
    publisher = ZmqPublisher(12345)

    bs.add_interval_filter(calendar.timegm(time.gmtime()) - delay, 0)
    # bs.add_filter('collector', 'route-views.sg')
    bs.add_filter('record-type', 'updates')
    bs.start()

    print('Beginning to read from stream')
    input_id = 0
    while bs.get_next_record(rec):
        elem = rec.get_next_elem()
        while elem is not None:
            # sleep until it is time to send this record
            '''
            now = calendar.timegm(time.gmtime())
            sim_time = now - delay
            if elem.time > sim_time:
                time.sleep(elem.time - sim_time)
            '''
            if elem.type not in ['A', 'W']:
                # advance before continuing; a bare 'continue' here
                # would spin forever on the same element
                elem = rec.get_next_elem()
                continue

            input_id += 1
            msg = elem2bgplay(rec, elem)
            msg['type'] = elem.type  # 'A' or 'W'; a hard-coded 'A' would mislabel withdrawals
            msg['id'] = input_id
            print(msg)

            # Publish the message
            publisher.publish(msg)

            # Write it to DB
            if elem.type == 'A':
                db_writer.add(msg)
            elif elem.type == 'W':
                db_writer.remove(msg)
            else:
                print "Error: Unknown type: " + elem.type
            elem = rec.get_next_elem()
Code Example #19
def get_ribs(collector, start, period):
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    stream.add_interval_filter(
        int(start.timestamp()) - period, int(start.timestamp()))
    stream.add_filter('record-type', 'ribs')
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == "valid":
            elem = rec.get_next_elem()
            while (elem):
                yield (rec.project, rec.collector,
                       datetime.utcfromtimestamp(rec.time).isoformat(),
                       elem.peer_address, elem.peer_asn, elem.fields['prefix'],
                       elem.fields['as-path'])
                elem = rec.get_next_elem()
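Because get_ribs() is a generator, rows can be streamed straight into a CSV writer. A short usage sketch (the collector name and time window are placeholders):

import csv
from datetime import datetime

with open('ribs.csv', 'w') as f:
    writer = csv.writer(f)
    # dump the RIB window ending now, looking back one hour
    for row in get_ribs('rrc00', datetime.utcnow(), 3600):
        writer.writerow(row)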
Code Example #20
def recv_bgpstream_updates(begin, until, collector, output_queue):
    logging.info ("CALL recv_bgpstream_updates")
    # wait for first RIB table dump to complete
    while (rib_ts < 0):
        time.sleep(RIB_TS_WAIT/10)
    time.sleep(RIB_TS_WAIT)
    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector', collector)
    stream.add_filter('record-type','updates')
    stream.add_interval_filter(rib_ts,until)
    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping ...")
            continue
        logging.info("Record TS: "+str(rec.time))
        while (elem):
            logging.info(" -- Record Element Type: " + elem.type + ", TS: " + str(elem.time))
            bgp_message = BGPmessage(elem.time, 'update')
            src_peer = dict()
            src_addr = elem.peer_address
            src_asn = elem.peer_asn
            src_peer['addr'] = src_addr
            src_peer['port'] = 0
            src_peer['asn'] = src_asn
            bgp_message.set_source(src_peer)
            if elem.type.upper() == 'A':
                bgp_message.add_announce(elem.fields['prefix'])
                bgp_message.set_nexthop(elem.fields['next-hop'])
                aspath = elem.fields['as-path'].split()
                for a in aspath:
                    if not '{' in a: # ignore AS-SETs
                        bgp_message.add_as_to_path(a)
                output_queue.put(bgp_message)
            elif elem.type.upper() == 'W':
                bgp_message.add_withdraw(elem.fields['prefix'])
                output_queue.put(bgp_message)
            elem = rec.get_next_elem()
Code Example #21
File: CreateRib.py  Project: kelemeto76/bgp-hackathon
def main(rib, target_prefs):

        
    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    with open('./data/stream_{0}'.format(start), 'wb') as bw:

        for ptmp in target_prefs:
            stream.add_filter('prefix', ptmp)

        # Consider RIPE RRC 10 only
        stream.add_filter('record-type', 'updates')
        stream.add_filter('record-type', 'ribs')
        #stream.add_filter('collector', 'rrc04')
        stream.add_filter('project', 'ris')
        stream.add_filter('project', 'routeviews')

        stream.add_interval_filter(start-60*60*8, start)
        stream.add_rib_period_filter(10000000000000)        

        # Start the stream
        stream.start()

        while stream.get_next_record(rec):
            # Print the record information only if it is not a valid record
            if rec.status != "valid":
                continue

            #if rec.time < start:
            elem = rec.get_next_elem()
            while elem:

                if elem.type == 'A' or elem.type == 'R':
                    rib.add_to_rib(rec.collector, elem.peer_address, elem.fields['prefix'], elem.time, elem.fields['as-path'])

                elem = rec.get_next_elem()

            #else:

        rib.flush()

    print 'Successful termination; Start time: {0}'.format(start)
Code Example #22
def init_stream(config_files, start_time, end_time):
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('project', 'ris')
    stream.add_filter('project', 'routeviews')
    stream.add_filter('record-type', 'ribs')
    for exp_id in config_files:
        config_file = config_files[exp_id]
        stream.add_filter('prefix', config_file['superprefix'])
    stream.add_interval_filter(start_time, end_time)
    return stream, rec
Code Example #23
def recv_bgpstream_rib(begin, until, collector, output_queue):
    logging.info ("CALL recv_bgpstream_rib")
    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector', collector)
    stream.add_filter('record-type','updates')
    stream.add_interval_filter(begin,until)

    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        global rib_ts
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping.")
            continue
        if (rib_ts > 0) and (rec.time > (rib_ts + RIB_TS_INTERVAL/2)):
            logging.info("received full RIB table dump.")
            break
        bgp_message = None
        while (elem):
            if (elem.type.upper() == 'A') or (elem.type.upper() == 'R'):
                rib_ts = elem.time
                bgp_message = BGPmessage(elem.time, 'update')
                bgp_message.set_nexthop(elem.fields['next-hop'])
                src_peer = dict()
                src_addr = elem.peer_address
                src_asn = elem.peer_asn
                src_peer['addr'] = src_addr
                src_peer['port'] = 0
                src_peer['asn'] = src_asn
                bgp_message.set_source(src_peer)
                aspath = elem.fields['as-path'].split()
                for a in aspath:
                    if not '{' in a: # ignore AS-SETs
                        bgp_message.add_as_to_path(a)
                bgp_message.add_announce(elem.fields['prefix'])
                output_queue.put(bgp_message)
            elem = rec.get_next_elem()
Code Example #24
    def _start_stream(self, **filters):
        """Initializes, starts, and returns bgp stream with filters"""

        # Create a new bgpstream instance
        stream = BGPStream()

        # Add filters if params exist
        for key, value in filters.items():
            if (key in ['peer-asn', 'prefix', 'collector']
                    and value is not None):
                stream.add_filter(key, value)

        # Time params must be in epoch
        start_epoch = calendar.timegm(filters.get('start').timetuple())
        end_epoch = calendar.timegm(filters.get('end').timetuple())
        stream.add_interval_filter(start_epoch, end_epoch)

        # Start the stream
        stream.start()
        self.logger.debug("Started stream for caida announcements")
        return stream
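Because one accepted filter key ('peer-asn') contains a hyphen, callers cannot pass it as a literal keyword argument; the filters go in as an unpacked dict. An illustrative call with placeholder values ('monitor' is a hypothetical instance of the enclosing class):

from datetime import datetime

filters = {
    'collector': 'route-views2',
    'prefix': '192.0.2.0/24',
    'peer-asn': '3356',
    'start': datetime(2015, 8, 1),
    'end': datetime(2015, 8, 2),
}
stream = monitor._start_stream(**filters)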
Code Example #25
def filter_play_updates(collectors, start_time_ts, stop_time_ts):
    stream = BGPStream()

    is_ripe = False

    for c in collectors:
        if 'rrc' in c:
            is_ripe = True
        stream.add_filter('collector', c)

    stream.set_data_interface_option('broker', 'url',
                                     'https://bgpstream-dev.caida.org/broker')

    stream.add_filter('prefix', '0.0.0.0/0')  # Only focus on IPV4
    stream.add_filter('record-type', 'ribs')
    stream.add_filter('record-type', 'updates')

    # Find the time when the last RIB was available (every 2 hours with RouteViews)
    # 28800 if RIPE RIS, 7200 if RouteViews
    offset = 7200
    if is_ripe:
        offset = 28800

    # snap down to the most recent RIB dump boundary, then back off 10 minutes
    rib_timestamp = start_time_ts - (start_time_ts % offset) - 600

    stream.add_interval_filter(rib_timestamp, stop_time_ts)
    stream.add_rib_period_filter(100000000000000000)

    return stream
Code Example #26
    def _get_data(self, prefix, datetime):
        """
        output example: [['15547', '8220', '1853', '1205'],[..another AS path..]]
        :param prefix: CIDR prefix to query
        :param datetime: end of the interval, in epoch seconds (NB: the name shadows the datetime module)
        :return: list of AS paths
        """
        print('[*] ris.py: _get_data() called')
        print('[*] ris.py: _get_data() prefix: {}'.format(prefix))
        start = int(datetime) - 20000  # 20000 seconds seems to be the shortest interval that returns data from BGPstream
        stop = int(datetime)
        result = []

        stream = BGPStream()
        rec = BGPRecord()

        stream.add_filter('prefix', prefix)
        stream.add_filter('record-type', 'ribs')
        stream.add_filter('project', 'ris')
        stream.add_interval_filter(start, stop)

        stream.start()

        while stream.get_next_record(rec):
            if rec.status == "valid":
                elem = rec.get_next_elem()
                while elem:
                    as_path = elem.fields['as-path'].split()
                    as_path.append(' ')  # for tree creation
                    result.append(as_path)
                    elem = rec.get_next_elem()
        print('[*] ris.py: _get_data() finished.')
        return result
Code Example #27
def main():
    (options, args) = getopts()
    start = options.start_time
    end = options.end_time

    target_prefs = Set()
    with open('./../../atlas/anchor_prefix.txt', 'rb') as br:
        for l in br:
            target_prefs.add(l.strip())

    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    with open('./data/stream_{0}'.format(start), 'wb') as bw:
        #for pref in target_prefs:
        for ptmp in target_prefs:
            stream.add_filter('prefix', ptmp)
        # stream.add_filter('prefix','0.0.0.0/0')

        # Consider RIPE RRC 10 only
        stream.add_filter('record-type', 'updates')
        stream.add_filter('collector', 'rrc00')

        # Consider this time interval:
        # Sat Aug  1 08:20:11 UTC 2015
        # stream.add_interval_filter(1438417216,1438417216)
        # stream.add_interval_filter(1451606400,1454785264
        stream.add_interval_filter(start, end)

        # Start the stream
        stream.start()

        # Get next record
        cnt = 0

        while stream.get_next_record(rec):
            # Print the record information only if it is not a valid record
            if rec.status != "valid":
                pass
                # print '*', rec.project, rec.collector, rec.type, rec.time, rec.status
            else:
                cnt += 1
                elem = rec.get_next_elem()
                while elem:
                    if elem.type == 'S':
                        # advance before continuing to avoid an
                        # infinite loop on the same element
                        elem = rec.get_next_elem()
                        continue
                    # Print record and elem information
                    # print rec.project, rec.collector, rec.type, rec.time, rec.status,
                    # print elem.type, elem.peer_address, elem.peer_asn, elem.fields, elem.pref
                    bw.write(
                        '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\n'.
                        format(rec.project, rec.collector, rec.type, rec.time,
                               rec.status, elem.type, elem.fields['prefix'],
                               elem.peer_address, elem.peer_asn, elem.fields))
                    bw.flush()
                    elem = rec.get_next_elem()

    print 'Successful termination; Start time: {0}'.format(start)
Code Example #28
def dump2file(type, name, latestDumpTime, dumpDuration, dumpPeriod):
    print type + ' of ' + name + ': '
    csv_header = ['type', 'addr', 'as', 'prefix', 'next_hop', 'as_path']
    _file = None
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', c_name)
    if type == 'ribs':
        stream.add_filter('record-type', 'ribs')
        _file = open(c_name + '_ribs.csv', 'w+')
    elif type == 'updates':
        stream.add_filter('record-type', 'updates')
        _file = open(c_name + '_updates.csv', 'w+')
    stream.add_interval_filter(latestDumpTime, latestDumpTime + dumpPeriod)

    stream.start()

    count = 0
    useless_c = 0

    writer = csv.writer(_file)
    writer.writerow(csv_header)

    # Get next record
    while (stream.get_next_record(rec)):
        # Print the record information only if it is not a valid record
        if rec.status != "valid":
            # print rec.project, rec.collector, rec.type, rec.time, rec.status
            print 'current rec not valid.'
        else:
            elem = rec.get_next_elem()
            while (elem):
                useless_c += 1
                if useless_c % 1000 == 0:
                    print 'Got ' + str(useless_c) + ' elem totally.'
                # Print record and elem information
                if isIPV6(elem):  # ipv6 packet ignored
                    elem = rec.get_next_elem()
                    continue
                count += 1
                # print rec.project, rec.collector, rec.type, rec.time, rec.status,
                # print elem.type, elem.peer_address, elem.peer_asn
                # elem.fields contains four column: communities, next-hop, prefix, as-path
                field = elem.fields
                prefix = field['prefix'] if 'prefix' in field.keys() else ''
                next_hop = field['next-hop'] if 'next-hop' in field.keys(
                ) else ''
                as_path = field['as-path'] if 'as-path' in field.keys() else ''
                as_path = as_path.replace(' ', '|')

                writer.writerow([
                    elem.type, elem.peer_address, elem.peer_asn, prefix,
                    next_hop, as_path
                ])

                elem = rec.get_next_elem()
    _file.close()
    print 'count: ' + str(count)
    return count
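isIPV6() is referenced but not defined here; since every IPv6 prefix contains a colon, a plausible (hypothetical) helper is simply:

def isIPV6(elem):
    # hypothetical: v6 prefixes always contain ':'
    return ':' in elem.fields.get('prefix', '')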
Code Example #29
def initUpdates(seedingCheckOverride, lastBGPUpdateProcessed): # Consider passing in last BGP update processed because of new system.
	global cursor, startTime, endTime, seeded, stream, rec
	startTime = lastBGPUpdateProcessed
	#cursor.execute("""SELECT intVal AS lastBGPUpdateProcessed FROM metadata WHERE name = 'lastBGPUpdateProcessed'""")
	#result = cursor.fetchone()
	#if result == None:
	#	print "ERROR: NO Start Time Found in DB (aka lastBGPUpdateProcessed). Recommended start time is 1489224750 - int(2592000 * 4) (which is 1478856750) because this is a 4 month backlog."
	#	exit()
		#cursor.execute("""INSERT INTO metadata (name, intVal) VALUES ('lastBGPUpdateProcessed', {0})""".format(1489224750 - int(2592000 * 4)))
		#conn.commit()
	#else:
	#	(startTime,) = result
	
	
	cursor.execute("""SELECT intVal AS endTime FROM metadata WHERE name = 'endTime'""")
	result = cursor.fetchone()
	if result == None:
		print "ERROR: NO End Time Found in DB. Recommended end time is 1489224749 because this is the timestamp of the first cert."
		print "ERROR: Other recommended end time is 1491775893 which is when all certs have been covered."
		exit()
	else:
		(endTime,) = result
	
	if not seedingCheckOverride:
		cursor.execute("""SELECT stringVal AS seeded FROM metadata WHERE name = 'seeded'""")
		result = cursor.fetchone()
		if result == None:
			# We will assume that the DB is not seeded if there is no entry and not throw any errors in this case.
			seeded = False
			print "line 59 DB not seeded upon call to initUpdates. DB should be seeded with RIBs before updates can be processed. Call initSeeding first. Exiting."
			exit()
		else:
			strValSeeded = ""
			(strValSeeded,) = result
			if strValSeeded == "false":
				seeded = False
				print "line 66 DB not seeded upon call to initUpdates. DB should be seeded with RIBs before updates can be processed. Call initSeeding first. Exiting."
				exit()
			elif strValSeeded == "true":
				seeded = True
			else:
				print "Invalid value for seeded metadata entry. Exiting."
				exit()
	else:
		seeded = True

	# create a new bgpstream instance
	stream = BGPStream()
	# create a reusable bgprecord instance
	rec = BGPRecord()
	stream.add_filter('record-type','updates') # here we collect updates. This could be changed to ribs to instead acquire periodic snapshots of the RIBs.
	# configure the stream to retrieve Updates from the route-views listener.
	stream.add_filter('collector', 'route-views2')
	# getting updates only from one peer gives us only the preferred route of this peer and no rejected routes.
	# only get updates from level3. Level3 is a provider to ViaWest so their choice is a likely choice of ViaWest.
	stream.add_filter('peer-asn', '3356')
	stream.add_interval_filter(startTime, endTime)
	stream.start()
Code Example #30
def get_updates(collector, start, end):
    print("getting updates for {} from {} to {}".format(collector, start, end))
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    stream.add_interval_filter(int(start.timestamp()), int(end.timestamp()))
    stream.add_filter('record-type', 'updates')
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == "valid":
            elem = rec.get_next_elem()
            while (elem):
                if elem.type == 'A':
                    yield ('A', rec.project, rec.collector,
                           datetime.utcfromtimestamp(rec.time).isoformat(),
                           elem.peer_address, elem.peer_asn,
                           elem.fields['prefix'], elem.fields['as-path'])
                elif elem.type == 'W':
                    yield ('W', rec.project, rec.collector,
                           datetime.utcfromtimestamp(rec.time).isoformat(),
                           elem.peer_address, elem.peer_asn,
                           elem.fields['prefix'], None)
                elem = rec.get_next_elem()
Code Example #31
def main():
    (options, args) = getopts()
    start = options.start_time
    end = options.end_time

    target_prefs = Set()
    with open('./../../atlas/anchor_prefix.txt', 'rb') as br:
        for l in br:
            target_prefs.add(l.strip())
        
    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    with open('./data/stream_{0}'.format(start), 'wb') as bw:
        #for pref in target_prefs:
        for ptmp in target_prefs:
            stream.add_filter('prefix', ptmp)
        # stream.add_filter('prefix','0.0.0.0/0')

        # Consider RIPE RRC 10 only
        stream.add_filter('record-type', 'updates')
        stream.add_filter('collector', 'rrc00')

        # Consider this time interval:
        # Sat Aug  1 08:20:11 UTC 2015
        # stream.add_interval_filter(1438417216,1438417216)
        # stream.add_interval_filter(1451606400,1454785264
        stream.add_interval_filter(start, end)

        # Start the stream
        stream.start()

        # Get next record
        cnt = 0

        while stream.get_next_record(rec):
            # Print the record information only if it is not a valid record
            if rec.status != "valid":
                pass
                # print '*', rec.project, rec.collector, rec.type, rec.time, rec.status
            else:
                cnt += 1
                elem = rec.get_next_elem()
                while elem:
                    if elem.type == 'S':
                        # advance before continuing to avoid an
                        # infinite loop on the same element
                        elem = rec.get_next_elem()
                        continue
                    # Print record and elem information
                    # print rec.project, rec.collector, rec.type, rec.time, rec.status,
                    # print elem.type, elem.peer_address, elem.peer_asn, elem.fields, elem.pref
                    bw.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\n'.format(
                        rec.project, rec.collector, rec.type, rec.time, rec.status,
                        elem.type, elem.fields['prefix'], elem.peer_address, elem.peer_asn, elem.fields))
                    bw.flush()
                    elem = rec.get_next_elem()

    print 'Successful termination; Start time: {0}'.format(start)
Code Example #32
def recv_bgpstream_updates(begin, until, collector):
    """
    Receive and parse BGP update records from a given bgpstream collector
    """
    logging.info("CALL recv_bgpstream_updates")
    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector', collector)
    stream.add_filter('record-type', 'updates')
    stream.add_interval_filter(begin, until)
    # Start the stream
    stream.start()
    while (stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping ...")
            continue
        logging.info("Record TS: " + str(rec.time))
        while (elem):
            logging.info(" -- Record Element Type: " + elem.type + ", TS: " +
                         str(elem.time))
            bgp_message = BGPmessage(elem.time, 'update')
            if elem.type.upper() == 'A':
                bgp_message.add_announce(elem.fields['prefix'])
                aspath = elem.fields['as-path'].split()
                for a in aspath:
                    if not '{' in a:  # ignore AS-SETs
                        bgp_message.add_as_to_path(a)
                output(bgp_message)
            elif elem.type.upper() == 'W':
                bgp_message.add_withdraw(elem.fields['prefix'])
                output(bgp_message)
            elem = rec.get_next_elem()
Code Example #33
def main():
    (options, args) = getopts()
    limit = options.limit
    start = options.start_time
    end = options.end_time

    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    # Consider RIPE RRC 10 only
    stream.add_filter('record-type', 'updates')
    stream.add_filter('collector', 'rrc00')
    stream.add_filter('prefix', '0.0.0.0/0')

    # Consider this time interval:
    # Sat Aug  1 08:20:11 UTC 2015
    # stream.add_interval_filter(1438417216,1438417216)
    # stream.add_interval_filter(1451606400,1454785264)
    #stream.add_interval_filter(1454630400, 1454716800)
    # 1 hour
    #1454284800 - 1454288400

    stream.add_interval_filter(start, end)

    # Start the stream
    stream.start()

    # Get next record
    prefixes_update = defaultdict(int)
    prefixes_withdraw = defaultdict(int)

    while stream.get_next_record(rec):
        # Print the record information only if it is not a valid record
        if rec.status != "valid":
            pass
            # print '*', rec.project, rec.collector, rec.type, rec.time, rec.status
        else:
            elem = rec.get_next_elem()
            while elem:
                if elem.type == 'A':
                    #print elem.fields['as-path']
                    prefixes_update[elem.fields['prefix']] += 1

                if elem.type == 'W':
                    prefixes_withdraw[elem.fields['prefix']] += 1

                #print rec.project, rec.collector, rec.type, rec.time, rec.status,
                #print elem.type, elem.peer_address, elem.peer_asn, elem.fields
                elem = rec.get_next_elem()

    for k in prefixes_update:
        if prefixes_update[k] >= limit:
            print k + "\t" + str(prefixes_update[k]) + "\t" + str(
                prefixes_withdraw[k])
Code Example #34
File: most_req_prefix.py  Project: KTrel/DATAPLANE_1
def main():
    (options, args) = getopts()
    limit = options.limit
    start = options.start_time
    end = options.end_time

    # Create a new bgpstream instance and a reusable bgprecord instance
    stream = BGPStream()
    rec = BGPRecord()

    # Consider RIPE RRC 10 only
    stream.add_filter('record-type', 'updates')
    stream.add_filter('collector', 'rrc00')
    stream.add_filter('prefix', '0.0.0.0/0')

    # Consider this time interval:
    # Sat Aug  1 08:20:11 UTC 2015
    # stream.add_interval_filter(1438417216,1438417216)
    # stream.add_interval_filter(1451606400,1454785264)
    #stream.add_interval_filter(1454630400, 1454716800)
    # 1 hour
    #1454284800 - 1454288400

    stream.add_interval_filter(start, end)

    # Start the stream
    stream.start()

    # Get next record
    prefixes_update = defaultdict(int)
    prefixes_withdraw = defaultdict(int)

    while stream.get_next_record(rec):
        # Print the record information only if it is not a valid record
        if rec.status != "valid":
            pass
            # print '*', rec.project, rec.collector, rec.type, rec.time, rec.status
        else:
            elem = rec.get_next_elem()
            while elem:
                if elem.type == 'A':
                    #print elem.fields['as-path']
                    prefixes_update[elem.fields['prefix']] += 1

                if elem.type == 'W':
                    prefixes_withdraw[elem.fields['prefix']] += 1

                #print rec.project, rec.collector, rec.type, rec.time, rec.status,
                #print elem.type, elem.peer_address, elem.peer_asn, elem.fields
                elem = rec.get_next_elem()

    for k in prefixes_update:
        if prefixes_update[k] >= limit:
            print k + "\t" + str(prefixes_update[k]) + "\t" + str(prefixes_withdraw[k])
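getopts() is defined elsewhere in most_req_prefix.py and is not shown; a hypothetical sketch of an optparse-based version (the option names mirror the attributes used above, everything else is an assumption) could be:

# Hypothetical helper sketch -- not the original file's getopts().
from optparse import OptionParser

def getopts():
    parser = OptionParser()
    parser.add_option('-l', '--limit', dest='limit', type='int', default=100)
    parser.add_option('-s', '--start-time', dest='start_time', type='int')
    parser.add_option('-e', '--end-time', dest='end_time', type='int')
    return parser.parse_args()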
Code example #35
0
def initSeeding(lastBGPUpdateProcessed):
	global cursor, startTime, seeded, stream, rec, endTime
	#cursor.execute("""SELECT intVal AS lastBGPUpdateProcessed FROM metadata WHERE name = 'lastBGPUpdateProcessed'""")
	#result = cursor.fetchone()
	#if result == None:
	#	print "ERROR: NO Start Time Found in DB (aka lastBGPUpdateProcessed). Recommended snd time is 1489224750 - int(2592000 * 4) (which is 1478856750) because this is a 4 month backlog."
	#	exit()
		#cursor.execute("""INSERT INTO metadata (name, intVal) VALUES ('lastBGPUpdateProcessed', {0})""".format(1489224750 - int(2592000 * 4)))
		#conn.commit()
	#else:
	#	(startTime,) = result
	startTime = lastBGPUpdateProcessed

	cursor.execute("""SELECT stringVal AS seeded FROM metadata WHERE name = 'seeded'""")
	result = cursor.fetchone()
	if result == None:
		# We will assume that the DB is not seeded if there is no entry and not throw any errors in this case.
		cursor.execute("""INSERT INTO metadata (name, stringVal) VALUES ('seeded', 'false')""")
		conn.commit()
		seeded = False
	else:
		strValSeeded = ""
		(strValSeeded,) = result
		if strValSeeded == "false":
			seeded = False
		elif strValSeeded == "true":
			seeded = True
			print "DB seeded already. Calling initUpdates(True)"
			initUpdates(True, lastBGPUpdateProcessed)
			return True
		else:
			print "Invalid value for seeded metadata entry. Exiting."
			exit()

	# create a new bgpstream instance
	stream = BGPStream()
	# create a reusable bgprecord instance
	rec = BGPRecord()
	stream.add_filter('record-type','ribs') # collect periodic RIB snapshots; change 'ribs' to 'updates' to process update messages instead.
	# configure the stream to retrieve records from the route-views2 collector.
	stream.add_filter('collector', 'route-views2')
	# getting updates only from one peer gives us only the preferred route of this peer and no rejected routes.
	# only get updates from Level 3
	stream.add_filter('peer-asn', '3356')
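	# at most one RIB dump per 604800-second (7-day) window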
	stream.add_rib_period_filter(604800)
	stream.add_interval_filter(startTime, startTime + 104800)
	endTime = startTime + 1604801
	stream.start()
	print "bgp stream started from init seeding"
	return False
Code example #36
0
def setup_bgpstream(prefix):
    # Current time in seconds since the epoch
    epoch_time = int(time.time())
    current_utc_time = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')

    # Create a new bgpstream instance
    stream = BGPStream()

    # Only get updates
    stream.add_filter('record-type', 'updates')

    # Only get updates from our prefix
    #stream.add_filter('prefix', '208.45.214.0/23')
    stream.add_filter('prefix', prefix)

    # Add data providers
    stream.add_filter('project', 'routeviews')
    stream.add_filter('project', 'ris')

    # Start pulling live
    stream.add_interval_filter(epoch_time, 0)

    return stream
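setup_bgpstream() only configures the stream; a minimal consumption sketch (the prefix value and the printed fields are assumptions in the style of the other examples on this page):

# Hypothetical usage sketch -- not part of the original file.
stream = setup_bgpstream('192.0.2.0/24')
rec = BGPRecord()
stream.start()
while stream.get_next_record(rec):
    elem = rec.get_next_elem()
    while elem:
        print elem.type, elem.peer_asn, elem.fields.get('prefix')
        elem = rec.get_next_elem()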
Code example #37
0
import sys
from math import floor
from _pybgpstream import BGPStream, BGPRecord

def count_valid_withdrawals_per_second(t0, num_seconds):
    # progress tracking divides by floor(num_seconds/100), so require >100 seconds
    assert num_seconds > 100

    # Create a new bgpstream instance
    stream = BGPStream()
    # use Routeviews Saopaulo records
    stream.add_filter('project', 'routeviews')
    stream.add_filter('collector', 'route-views.saopaulo')
    stream.add_filter('record-type', 'updates')
    # time interval (inclusive on both ends)
    stream.add_interval_filter(t0, t0 + num_seconds - 1)

    # progress tracking
    prog = t0

    # Create a reusable bgprecord instance
    rec = BGPRecord()
    # one counter per second of the interval
    ctr = [0] * num_seconds
    # Start the stream
    stream.start()

    # iterate over records
    while stream.get_next_record(rec):
        if rec.status == "valid":
            elem = rec.get_next_elem()
            while elem:
                if elem.type == 'W':
                    ctr[rec.time - t0] += 1
                elem = rec.get_next_elem()
        # el-cheapo progress indication with dots; comment out if unwanted
        if rec.time > prog and (rec.time - t0) % floor(num_seconds / 100) == 0:
            sys.stdout.write('.')
            sys.stdout.flush()
            prog = rec.time
    # print newline after the dots
    print ""
    return ctr
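A hedged usage sketch; the timestamp and window length below are illustrative, not values from the original file:

# Count withdrawals over a 10-minute window and report the busiest second.
t0 = 1438415400  # hypothetical start time
counts = count_valid_withdrawals_per_second(t0, 600)
peak = max(counts)
print "peak: %d withdrawals/s at t0+%ds" % (peak, counts.index(peak))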
def get_data(timestamp):
    graph = Graph(password="******")

    stream = BGPStream()
    rec = BGPRecord()
    rec_time = None

    # IPv4
    stream.add_filter('prefix', '198.41.0.0/24')  # A-root
    stream.add_filter('prefix', '192.33.4.0/24')  # C-root
    stream.add_filter('prefix', '199.7.91.0/24')  # D-root
    stream.add_filter('prefix', '192.203.230.0/24')  # E-root, IPv4 only
    stream.add_filter('prefix', '192.5.5.0/24')  # F-root
    stream.add_filter('prefix', '192.112.36.0/24')  # G-root, IPv4 only
    stream.add_filter('prefix', '198.97.190.0/24')  # H-root
    stream.add_filter('prefix', '192.36.148.0/24')  # I-root
    stream.add_filter('prefix', '192.58.128.0/24')  # J-root
    stream.add_filter('prefix', '193.0.14.0/24')  # K-root
    stream.add_filter('prefix', '199.7.83.0/24')  # L-root
    stream.add_filter('prefix', '202.12.27.0/24')  # M-root

    # IPv6
    stream.add_filter('prefix', '2001:503:ba3e::/48')  # A
    stream.add_filter('prefix', '2001:500:2::/48')  # C
    stream.add_filter('prefix', '2001:500:2d::/48')  # D
    stream.add_filter('prefix', '2001:500:2f::/48')  # F
    stream.add_filter('prefix', '2001:500:1::/48')  # H
    stream.add_filter('prefix', '2001:7fe::/33')  # I
    stream.add_filter('prefix', '2001:503:c27::/48')  # J
    stream.add_filter('prefix', '2001:7fd::/48')  # K
    stream.add_filter('prefix', '2001:500:9f::/48')  # L
    stream.add_filter('prefix', '2001:dc3::/32')  # M

    stream.add_filter('record-type', 'ribs')
    # stream.add_filter('collector', 'route-views.soxrs')
    stream.add_filter('project', 'routeviews')
    stream.add_interval_filter(timestamp, timestamp)

    stream.start()

    result = {}
    while stream.get_next_record(rec):
        rec_time = rec.time
        if rec.status == "valid":
            elem = rec.get_next_elem()
            while elem:
                print('{} {} {} {} {}'.format(rec.collector, elem.type, elem.peer_address, elem.peer_asn, elem.fields))
                as_path = elem.fields['as-path'].split()
                as_path.reverse()
                prefix = elem.fields['prefix']
                if prefix not in result:
                    result[prefix] = []
                result[prefix].append(as_path)
                elem = rec.get_next_elem()

    # get only unique lists in result
    for prefix in result:
        result[prefix] = [list(x) for x in set(tuple(x) for x in result[prefix])]
    print('timestamp {} ==> result: {}'.format(rec_time, result))

    for prefix in result:
        for path in result[prefix]:
            print('path: {}'.format(path))
            cur_node = None
            prev_node = None
            counter_as_prepend = 0
            for index, asn in enumerate(path):
                cur_node = asn
                graph.run('MERGE(s:asn{{name:"{0}", label:"{0}"}})'.format(asn))  # create new node if not exist.
                if index > 0:
                    if cur_node != prev_node:
                        query = 'MATCH (s:asn),(d:asn) ' \
                                'WHERE s.name="{0}" AND d.name="{1}" ' \
                                'MERGE (s)-[r:TO {{prefix: "{3}", time: {2}, prepended: {4}}}]->(d)'\
                            .format(cur_node, prev_node, rec_time, prefix, counter_as_prepend)
                        graph.run(query)
                        if counter_as_prepend > 0:
                            counter_as_prepend = 0  # reset
                    else:  # AS prepending
                        counter_as_prepend += 1
                # else:  # origin AS
                #     graph.run('MATCH (s:asn)'
                #               'WHERE s.name="{0}"'
                #               'SET s.origin="{1}_{2}"'
                #               'RETURN s'.format(asn, prefix, rec_time))
                prev_node = cur_node
import time
from _pybgpstream import BGPStream, BGPRecord, BGPElem
# read here: http://bgpstream.caida.org/docs/api/pybgpstream/_pybgpstream.html

start_time = time.time()

stream = BGPStream()
rec = BGPRecord()

stream.add_filter('prefix','140.78.0.0/16')
stream.add_filter('record-type','updates')
# stream.add_filter('collector','rrc11')
stream.add_filter('project','ris')
stream.add_interval_filter(1462942850, 1462962850)

stream.start()

while stream.get_next_record(rec):
    # Print record and elem information for valid records only
    if rec.status == "valid":
        elem = rec.get_next_elem()
        while elem:
            # Print record and elem information
            print rec.collector, elem.peer_address, elem.peer_asn, elem.fields
            elem = rec.get_next_elem()

# result = []
# while stream.get_next_record(rec):
#     if rec.status == "valid":
#         elem = rec.get_next_elem()
#         while elem:
graph = Graph(password="******")

stream = BGPStream()
rec = BGPRecord()

# stream.add_filter('prefix', '198.41.0.0/24')  # A-root
# stream.add_filter('prefix', '192.33.4.0/24')  # C-root
# stream.add_filter('prefix', '199.7.91.0/24')  # D-root
# stream.add_filter('prefix', '192.203.230.0/24')  # E-root, IPv4 only
# stream.add_filter('prefix', '192.5.5.0/24')  # F-root
# stream.add_filter('prefix', '192.112.36.0/24')  # G-root, IPv4 only
# stream.add_filter('prefix', '198.97.190.0/24')  # H-root
# stream.add_filter('prefix', '192.36.148.0/24')  # I-root
# stream.add_filter('prefix', '192.58.128.0/24')  # J-root
stream.add_filter('prefix', '193.0.14.0/24')  # K-root
stream.add_filter('prefix', '199.7.83.0/24')  # L-root
stream.add_filter('prefix', '202.12.27.0/24')  # M-root

stream.add_filter('record-type','ribs')
# stream.add_filter('collector','route-views.sg')
stream.add_filter('project','routeviews')
timestamp = 1464739200  # 2016/6/1 00:00
stream.add_interval_filter(timestamp, timestamp)  # 1464682200 ==> 05/31/2016 @ 8:10am (UTC); 1464682200

stream.start()

result = {}
while stream.get_next_record(rec):
    if rec.status == "valid":
        elem = rec.get_next_elem()
Code example #41
0
        out_file_name = sys.argv[3]

    except:
        print "Usage: %s [start time] [end time] [output file name]" %(sys.argv[0])
        exit()

    #stream_start = 1454284800
    #stream_end = 1454288400
    buckets = create_time_buckets(stream_start, stream_end)
    
    prefixList = []
    raw_bgp_stream_data = {}
    
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', 'rrc06')
    stream.add_filter('record-type', 'updates')
    stream.add_interval_filter(stream_start, stream_end)
    stream.start()
    
    while(stream.get_next_record(rec)):
    
        elem = rec.get_next_elem()
    
        while(elem):
    
            prefix = elem.fields.get("prefix", "")
            time_stamp = rec.time  # unix epoch timestamp, e.g. 1427846670

            if prefix != "":
                deal_with_time_bucket_junk(prefix, time_stamp)
Code example #42
0
list_linkset = []

dictup = {}
dictdown = {}
nevents = {}
history = {}

current = start
current_time_short = strftime("%Y/%m/%d-%H:%M:%S", gmtime(current))
current_time_long = strftime("%a, %d %b %Y %H:%M:%S", gmtime(current))

while current <= end:
    stream = BGPStream()
    rec = BGPRecord()

    stream.add_filter('collector', collector_filter)
    stream.add_filter('record-type','ribs')
    stream.add_interval_filter(current - 10 * 60, current + 10 * 60)
    stream.add_filter('peer-asn','3356')
    stream.add_filter('peer-asn','174')
    stream.add_filter('peer-asn','3257')
    stream.add_filter('peer-asn','1299')
    stream.add_filter('peer-asn','2914')
    stream.add_filter('peer-asn','6453')
    stream.add_filter('peer-asn','6762')
    stream.add_filter('peer-asn','6939')
    stream.add_filter('peer-asn','2828')
    stream.add_filter('peer-asn','3549')

    stream.start()
Code example #43
0
def load_data(start, stop, collectors, window, threshold):
    peers = {}

    # collectors is a list of the collectors we want to include
    # Start and stop define the interval we are looking in the data

    # Create a new BGPStream instance and a reusable BGPRecord instance
    stream = BGPStream()
    rec = BGPRecord()

    # Add a filter for each collector.
    # If no collectors are given, fall back to the 16 RIPE RIS collectors rrc00-rrc15
    if collectors:
        for collector in collectors:
            print collector
            stream.add_filter('collector', collector)
    else:
        for i in range(0, 10):
            stream.add_filter('collector', 'rrc0' + str(i))
        for i in range(10, 16):
            stream.add_filter('collector', 'rrc' + str(i))

    stream.add_filter('record-type', 'updates')

    # Consider the interval from "start" to "stop" in seconds since epoch
    stream.add_interval_filter(start, stop)

    # Start the stream
    stream.start()

    # For each record (a record can carry several elements for the same second) we handle its updates
    while stream.get_next_record(rec):
        timestamp = rec.time
        if rec.status != "valid":
            print rec.project, rec.collector, rec.type, timestamp, rec.status
        else:
            # Go through all elements of the record
            elem = rec.get_next_elem()
            while elem:
                # Consider only the A and W updates
                if elem.type not in ['A', 'W']:
                    elem = rec.get_next_elem()
                    continue

                peer = elem.peer_address
                updatetype = elem.type
                prefix = elem.fields['prefix']
                if peer not in peers:
                    peers[peer] = {
                        'A': [],
                        'W': []
                    }
                update = {'tst': timestamp, 'prefix': prefix}
                if updatetype == 'A':
                    handleUpdate(peers[peer]['A'], burst2writeA, update, peer, updatetype, timestamp, window, threshold)
                    saveGraphPoint(peers[peer]['A'], updatetype, peer, timestamp, collectors, threshold)
                else:
                    handleUpdate(peers[peer]['W'], burst2writeW, update, peer, updatetype, timestamp, window, threshold)
                    saveGraphPoint(peers[peer]['W'], updatetype, peer, timestamp, collectors, threshold)
                elem = rec.get_next_elem()

    # After processing all records, we write the graph json files with the graph points recorded for each peer
    for peer in graph_points:
        peer_file_name = peer.replace(':', '_')
        if not os.path.exists(peer_file_name):
            os.makedirs(peer_file_name)
        with open(peer_file_name+'/'+peer_file_name + '-graph.json', 'w') as outfile:
            json.dump(graph_points[peer], outfile, indent=2)

    # Write the last burst of A updates if there is one left
    if burst2writeA:
        for peer in burst2writeA:
            if burst2writeA[peer]:
                for timestamp in burst2writeA[peer]:
                    writeBurst(peer, burst2writeA, 'A', timestamp)

    # Write the last burst of W updates if there is one left
    if burst2writeW:
        for peer in burst2writeW:
            if burst2writeW[peer]:
                for timestamp in burst2writeW[peer]:
                    writeBurst(peer, burst2writeW, 'W', timestamp)

    # transform the CSV of peer names into a json file so getJSON can be used in plotGraph
    # the CSV step avoids appending to a json file directly, since appending
    # to a json file would require rewriting the whole file
    jsonlist = []
    with open('csv_peernames-'+'-'.join(collectors)+'.csv', 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            jsonlist.append(row[0])

    jsondata = json.dumps(jsonlist, indent=2)
    fd = open('json_file_names-' + '-'.join(collectors) + '.json', 'w')
    fd.write(jsondata)
    fd.close()
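handleUpdate, saveGraphPoint and writeBurst come from elsewhere in the original project and are not shown; purely as an illustration of the sliding-window idea the calls above suggest, a hypothetical minimal window check might look like:

# Hypothetical sketch of a sliding-window burst test -- not the project's code.
def in_burst(window_updates, update, window, threshold):
    # keep only updates within `window` seconds of the newest one,
    # then flag a burst once the window holds at least `threshold` updates
    window_updates.append(update)
    window_updates[:] = [u for u in window_updates
                         if update['tst'] - u['tst'] <= window]
    return len(window_updates) >= threshold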
def get_data(timestamp):
    graph = Graph(password="******")

    stream = BGPStream()
    rec = BGPRecord()
    rec_time = None

    # stream.add_filter('prefix', '198.41.0.0/24')  # A-root
    # stream.add_filter('prefix', '192.228.79.0/24')  # B-root, only 1 site
    # stream.add_filter('prefix', '192.33.4.0/24')  # C-root
    # stream.add_filter('prefix', '199.7.91.0/24')  # D-root
    # stream.add_filter('prefix', '192.203.230.0/24')  # E-root, IPv4 only
    # stream.add_filter('prefix', '192.5.5.0/24')  # F-root
    # stream.add_filter('prefix', '192.112.36.0/24')  # G-root, IPv4 only
    # stream.add_filter('prefix', '198.97.190.0/24')  # H-root
    # stream.add_filter('prefix', '192.36.148.0/24')  # I-root
    # stream.add_filter('prefix', '192.58.128.0/24')  # J-root
    stream.add_filter('prefix', '193.0.14.0/24')  # K-root
    # stream.add_filter('prefix', '199.7.83.0/24')  # L-root
    # stream.add_filter('prefix', '202.12.27.0/24')  # M-root

    # IPv6
    # stream.add_filter('prefix', '2001:503:ba3e::/48')  # A
    ## stream.add_filter('prefix', '2001:500:84::/48')  # B, only 1 site
    # stream.add_filter('prefix', '2001:500:2::/48')  # C
    # stream.add_filter('prefix', '2001:500:2d::/48')  # D
    # stream.add_filter('prefix', '2001:500:2f::/48')  # F
    # stream.add_filter('prefix', '2001:500:1::/48')  # H
    # stream.add_filter('prefix', '2001:7fe::/33')  # I
    # stream.add_filter('prefix', '2001:503:c27::/48')  # J
    # stream.add_filter('prefix', '2001:7fd::/48')  # K
    # stream.add_filter('prefix', '2001:500:9f::/48')  # L
    # stream.add_filter('prefix', '2001:dc3::/32')  # M

    stream.add_filter('record-type', 'ribs')
    # stream.add_filter('collector', 'rrc01')
    stream.add_filter('project', 'routeviews')
    stream.add_interval_filter(timestamp, timestamp)

    stream.start()

    result = {}
    while stream.get_next_record(rec):
        rec_time = rec.time
        if rec.status == "valid":
            elem = rec.get_next_elem()
            while elem:
                print rec.collector, elem.type, elem.peer_address, elem.peer_asn, elem.fields
                as_path = elem.fields['as-path'].split()
                as_path.reverse()
                prefix = elem.fields['prefix']
                if prefix not in result:
                    result[prefix] = []
                result[prefix].append(as_path)
                elem = rec.get_next_elem()

    # get only unique lists in result
    for prefix in result:
        result[prefix] = [list(x) for x in set(tuple(x) for x in result[prefix])]
    print('timestamp {} ==> result: {}'.format(rec_time, result))

    for prefix in result:
        for path in result[prefix]:
            print('path: {}'.format(path))
            cur_node = None
            prev_node = None
            counter_as_prepend = 0
            for index, asn in enumerate(path):
                searched_node = graph.find('asn', property_key='label', property_value=asn)
                try:
                    cur_node = searched_node.next()  # reuse the AS node if it already exists (so prepending yields cur_node == prev_node)
                except StopIteration:
                    cur_node = Node('asn', label=str(asn))  # otherwise create a new one
                if index > 0:
                    if index == len(path) - 1:
                        cur_node['path'] = path  # attach AS path to the last ASN
                    if cur_node != prev_node:
                        if counter_as_prepend > 0:
                            cur_node['prepended'] = counter_as_prepend
                            counter_as_prepend = 0  # reset
                        peering = Relationship(cur_node, 'TO', prev_node, time=rec_time, prefix=prefix)
                        # peering['time'] = rec_time
                        # peering['prefix'] = prefix
                        graph.create(peering)
                    else:  # AS prepending
                        counter_as_prepend += 1
                prev_node = cur_node
Code example #45
0
File: bgpstream_to_kafka.py Project: nizox/hijacks-2
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("collector")
    parser.add_argument("--our-servers", default="localhost:9092")

    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)

    save_file = "ts-{}".format(args.collector)

    stream = BGPStream()

    stream.add_filter("project", args.collector)

    if os.path.exists(save_file):
        with open(save_file, "r") as f:
            last_ts = int(float(f.read().strip()))
        logger.info("loading timestamp from file: %s", datetime.utcfromtimestamp(last_ts))
    else:
        # no saved timestamp: fall back to the top of the current hour
        now = time.time()
        last_ts = int(now - now % 3600)
        logger.info("loading from: %s", datetime.utcfromtimestamp(last_ts))

    stream.add_filter("record-type", "ribs")
    stream.add_filter("record-type", "updates")

    stream.add_interval_filter(last_ts, 0)
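The example stops before the stream is started; a plausible continuation, sketched here as an assumption (the original consumption loop is not shown), would start the stream and checkpoint rec.time back into save_file:

# Hypothetical continuation sketch -- not the original file's code.
rec = BGPRecord()
stream.start()
while stream.get_next_record(rec):
    # ... produce the record to Kafka here ...
    last_ts = rec.time
    with open(save_file, "w") as f:  # a real loop would checkpoint less often
        f.write(str(last_ts))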
Code example #46
0
File: rib-origins-lt.py Project: smlng/bgp-origins
def main():
    parser = argparse.ArgumentParser(description='', epilog='')
    parser.add_argument('-b', '--begin',
                        help='Begin date (inclusive), format: yyyy-mm-dd HH:MM',
                        type=valid_date, required=True)
    parser.add_argument('-u', '--until',
                        help='Until date (exclusive), format: yyyy-mm-dd HH:MM',
                        type=valid_date, required=True)
    parser.add_argument('-c', '--collector',
                        help='Route collector from RIPE RIS or Route-Views project.',
                        type=str, required=True)
    parser.add_argument('-m', '--mongodb',
                        help='MongoDB connection parameters.',
                        type=str, default=None)
    parser.add_argument('-k', '--keepsnapshots',
                        help='Keep all snapshots, works only with -s.',
                        action='store_true')
    parser.add_argument('-s', '--snapshot',
                        help='Enable snapshoting.',
                        action='store_true')
    parser.add_argument('-l', '--loglevel',
                        help='Set loglevel [DEBUG,INFO,WARNING,ERROR,CRITICAL].',
                        type=str, default='WARNING')

    args = vars(parser.parse_args())

    numeric_level = getattr(logging, args['loglevel'].upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args['loglevel'])
    logging.basicConfig(level=numeric_level,
                        format='#> %(asctime)s : %(levelname)s : %(message)s')

    ts_begin = int((args['begin'] - datetime(1970, 1, 1)).total_seconds())
    ts_until = int((args['until'] - datetime(1970, 1, 1)).total_seconds())

    mongodbstr = None
    if args['mongodb']:
        mongodbstr = args['mongodb'].strip()

    rib_ts = 0
    rib_origins = dict()
    origins_lt = list()
    if args['snapshot']:
        rib_ts, rib_origins = load_snapshot(mongodbstr)
    if rib_ts > ts_begin:
        logging.info ("SKIP, found snapshot with newer ts")
        ts_begin = rib_ts - RIB_TS_THRESHOLD

    # BEGIN
    logging.info("START")

    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector',args['collector'])
    stream.add_filter('record-type','ribs')
    stream.add_interval_filter(ts_begin,ts_until)

    # Start the stream
    stream.start()

    while(stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping ...")
            continue
        #end if
        if rec.time > (rib_ts + RIB_TS_THRESHOLD):
            for p in rib_origins:
                for o in rib_origins[p]:
                    if rib_origins[p][o][1] < (rib_ts - RIB_TS_THRESHOLD):
                        origins_lt.append( (p,o,rib_origins[p][o][0],rib_origins[p][o][1]) )
                    #end if
                #end for
            #end for
            if args['snapshot'] and (len(rib_origins.keys()) > 0):
                store_snapshot(rec.time, rib_origins, mongodbstr)
                if not args['keepsnapshots']:
                    remove_snapshot(rib_ts, mongodbstr)
                # end if keepsnapshots
            # end if snapshot
            rib_ts = rec.time
            logging.info("ts: "+str(rib_ts))
            if len(origins_lt) > 0:
                if mongodbstr:
                    store_origins_lt(rib_ts,origins_lt, mongodbstr)
                else:
                    print_origins_lt(rib_ts,origins_lt)
                #end if
                for l in origins_lt:
                    del rib_origins[l[0]][l[1]]
                #end for
                origins_lt = list()
            # end if
        #end if
        while(elem):
            prefix = elem.fields['prefix']
            aspath = elem.fields['as-path'].split()
            # remove AS-SETs; filter into a new list, since removing while iterating skips entries
            aspath = [a for a in aspath if '{' not in a]
            origin = aspath[-1]
            if prefix not in rib_origins:
                rib_origins[prefix] = dict()
            #end if
            if origin not in rib_origins[prefix]:
                rib_origins[prefix][origin] = (rib_ts,rib_ts)
            else:
                rib_origins[prefix][origin] = (rib_origins[prefix][origin][0],rib_ts)
            #end if
            elem = rec.get_next_elem()
        #end while
    #end while
    if args['snapshot']:
        print "NOTE: remaining origin lifetimes are stored in latest snapshot (%d)!\n" % rib_ts
        if (len(rib_origins.keys()) > 0):
            store_snapshot(rib_ts, rib_origins, mongodbstr)
        # end if
    else:
        print "NOTE: output remaining origin lifetimes with current ts (%d)\n" % rib_ts
        origins_lt = list()
        for p in rib_origins:
            for o in rib_origins[p]:
                origins_lt.append( (p,o,rib_origins[p][o][0],rib_ts) )
        if mongodbstr:
            store_origins_lt(rib_ts,origins_lt, mongodbstr)
        else:
            print_origins_lt(rib_ts,origins_lt)
from _pybgpstream import BGPRecord, BGPStream


stop = 1422778200
start = stop - 1200  # the interval must be wide enough for BGPStream to return a RIB dump

result = []

stream = BGPStream()
rec = BGPRecord()

stream.add_filter('prefix', '192.33.4.0/24')
stream.add_filter('record-type', 'ribs')
stream.add_filter('project', 'ris')
# stream.add_filter('collector', 'router-route-views.routeviews.org.peer-IPV4_route-spews.cbbtier3.att.net')
stream.add_interval_filter(start, stop)

stream.start()
print('start')
# test = stream.get_data_interfaces()
# print('test: {}'.format(test))

while stream.get_next_record(rec):
    if rec.status == "valid":
        elem = rec.get_next_elem()
        while elem:
            as_path = elem.fields['as-path'].split()
            as_path.append(' ')  # for tree creation
            result.append(as_path)
            elem = rec.get_next_elem()
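The trailing ' ' sentinel suggests the paths feed a tree builder elsewhere; as an assumption about what that step might look like, a minimal nested-dict tree over the collected paths:

# Hypothetical sketch: build a nested-dict tree from the collected AS paths.
tree = {}
for path in result:
    node = tree
    for asn in path:
        node = node.setdefault(asn, {})  # the ' ' sentinel marks a complete path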
Code example #48
0
#!/usr/bin/env python

# docs at bgpstream.caida.org
from _pybgpstream import BGPStream, BGPRecord, BGPElem
from collections import defaultdict

# export LD_LIBRARY_PATH="/usr/local/lib"

# Create a new bgpstream instance and a reusable bgprecord instance
stream = BGPStream()
rec = BGPRecord()

# Consider RRC12 
stream.add_filter('collector','rrc12')

# Consider RIBs dumps only
# stream.add_filter('record-type','ribs')

# Consider this time interval:
# Sat, 01 Aug 2015 7:50:00 GMT -  08:10:00 GMT
stream.add_interval_filter(1438415400,1438416600)

# Start the stream
stream.start()

# Get next record
while(stream.get_next_record(rec)):
    elem = rec.get_next_elem()
    while(elem):
        # only consider RIBs entries and Announcement messages
        if elem.type in ["R", "A"]:
import time
from _pybgpstream import BGPStream, BGPRecord, BGPElem

start_time = time.time()

stream = BGPStream()
rec = BGPRecord()

stream.add_filter('prefix','140.78.0.0/16')
stream.add_filter('record-type','ribs')
stream.add_filter('collector','rrc11')
# stream.add_filter('project','ris')
stream.add_interval_filter(1464681000, 1464682200)  # 1464682200 ==> 05/31/2016 @ 8:10am (UTC)

stream.start()
#
# while(stream.get_next_record(rec)):
#     # Print the record information only if it is not a valid record
#     if rec.status == "valid":
#         elem = rec.get_next_elem()
#         while elem:
#             # Print record and elem information
#             print rec.collector, elem.peer_address, elem.peer_asn, elem.fields['as-path']
#             elem = rec.get_next_elem()

result = []
while stream.get_next_record(rec):
    if rec.status == "valid":
        elem = rec.get_next_elem()
        while elem:
            print rec.collector, elem.type, elem.peer_address, elem.peer_asn, elem.fields
Code example #50
0
File: link_pfx_cnt.py Project: CAIDA/bgp-hackathon
from _pybgpstream import BGPStream, BGPRecord, BGPElem
from collections import defaultdict
from itertools import groupby

# Create a new bgpstream instance and a reusable bgprecord instance
stream = BGPStream()
rec = BGPRecord()

# Consider the Route Views SFMIX collector only
stream.add_filter('collector','route-views.sfmix')

# Consider RIBs dumps only
stream.add_filter('record-type','ribs')

# Consider this time interval:
jan_02_2016 = 1451692800
stream.add_interval_filter(jan_02_2016 - 300, jan_02_2016 + 300)

stream.start()

tier1_str = "174 209 286 701 1239 1299 2828 2914 3257 3320 3356 5511 6453 6461 6762 7018 12956"
tier1s = tier1_str.split()

edge_pfx = dict()
transit = set()



while(stream.get_next_record(rec)):
    elem = rec.get_next_elem()
    while(elem):
Code example #51
0
File: rib-origins.py Project: smlng/bgp-origins
def main():
    parser = argparse.ArgumentParser(description='', epilog='')
    parser.add_argument('-b', '--begin',
                        help='Begin date (inclusive), format: yyyy-mm-dd HH:MM',
                        type=valid_date, required=True)
    parser.add_argument('-u', '--until',
                        help='Until date (exclusive), format: yyyy-mm-dd HH:MM',
                        type=valid_date, required=True)
    parser.add_argument('-c', '--collector',
                        help='Route collector from RIPE RIS or Route-Views project.',
                        type=str, required=True)
    parser.add_argument('-m', '--mongodb',
                        help='MongoDB connection parameters.',
                        type=str, default=None)
    parser.add_argument('-l', '--loglevel',
                        help='Set loglevel [DEBUG,INFO,WARNING,ERROR,CRITICAL].',
                        type=str, default='WARNING')

    args = vars(parser.parse_args())

    numeric_level = getattr(logging, args['loglevel'].upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args['loglevel'])
    logging.basicConfig(level=numeric_level,
                        format='%(asctime)s : %(levelname)s : %(message)s')

    ts_begin = int((args['begin'] - datetime(1970, 1, 1)).total_seconds())
    ts_until = int((args['until'] - datetime(1970, 1, 1)).total_seconds())

    mongodbstr = None
    if args['mongodb']:
        mongodbstr = args['mongodb'].strip()
    # BEGIN
    logging.info("START")

    # Create bgpstream
    stream = BGPStream()
    rec = BGPRecord()
    # set filtering
    stream.add_filter('collector',args['collector'])
    stream.add_filter('record-type','ribs')
    stream.add_interval_filter(ts_begin,ts_until)

    # Start the stream
    stream.start()

    rib_ts = 0
    rib_origins = dict()
    while(stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
        else:
            logging.warn("stream record invalid, skipping ...")
            continue
        if rec.time > (rib_ts + RIB_TS_INTERVAL):
            rib_ts = rec.time
            if mongodbstr:
                store_rib_origins(rib_ts, rib_origins, mongodbstr)
            else:
                print_rib_origins(rib_ts, rib_origins)
            rib_origins = dict()
        while(elem):
            prefix = elem.fields['prefix']
            aspath = elem.fields['as-path'].split()
            # remove AS-SETs; filter into a new list, since removing while iterating skips entries
            aspath = [a for a in aspath if '{' not in a]
            origin = aspath[-1]
            if prefix not in rib_origins:
                rib_origins[prefix] = list()
            if origin not in rib_origins[prefix]:
                rib_origins[prefix].append(origin)
            elem = rec.get_next_elem()
Code example #52
0
# refer to this page for a complete documentation
#
# https://staff.caida.org/~alistair/pybgpstream/index.html


from _pybgpstream import BGPStream, BGPRecord, BGPElem
import radix
import sys
import calendar
import time

stream = BGPStream()
rec = BGPRecord()
start_interval = 1401623715
##stream.add_filter('project', 'ris')
stream.add_filter('collector', 'route-views2')
#stream.add_filter('collector', 'rrc04')
stream.add_filter('record-type', 'ribs')
stream.add_filter('record-type', 'updates')
stream.add_interval_filter(start_interval,1406894115 )
stream.add_rib_period_filter(172800)
stream.set_data_interface('broker')
#stream.set_data_interface_option('broker', 'db-host', 'loki-ge')
#stream.set_data_interface_option('broker', 'db-port', '3306')
#stream.set_data_interface_option('broker', 'db-user', 'bgpstream')
print "start bgpstream"
stream.start()

# signature {collector}{ip}{asn} -> id
info_id = {}
# id peer ASN (for right analysis)
Code example #53
0
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("collector")
    parser.add_argument("--our-servers", default="localhost:9092")

    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)

    save_file = "ts-{}".format(args.collector)

    stream = BGPStream()

    stream.add_filter('project', args.collector)

    if os.path.exists(save_file):
        with open(save_file, "r") as f:
            last_ts = int(float(f.read().strip()))
        logger.info("loading timestamp from file: %s",
                    datetime.utcfromtimestamp(last_ts))
    else:
        # no saved timestamp: fall back to the top of the current hour
        now = time.time()
        last_ts = int(now - now % 3600)
        logger.info("loading from: %s", datetime.utcfromtimestamp(last_ts))

    stream.add_filter('record-type', 'ribs')
    stream.add_filter('record-type', 'updates')
Code example #54
0

data_source = "DE-CIX"
sys.stderr.write("Inserting data from DE-CIX\n")
# get connection to database

bh_db = BlackHoleDb()

# export LD_LIBRARY_PATH="/usr/local/lib"

# Create a new bgpstream instance and a reusable bgprecord instance
stream = BGPStream()
rec = BGPRecord()

# Consider RRC12 
stream.add_filter('collector','rrc12')

# Consider RIBs dumps only
stream.add_filter('record-type','ribs')

# One rib per day
stream.add_rib_period_filter(3600*24)

# Consider this time interval: May 2016
stream.add_interval_filter(1462060800,1464739200)

# Start the stream
stream.start()

sys.stderr.write("Reading BGP Data\n")
Code example #55
0
def sendMessageToKafka(col_name, col_data):
    # print "Collector-{} Sending Message...".format(col_name)
    size_total = 0
    num_total = 0
    num_ipv4 = 0
    stream = BGPStream()
    record = BGPRecord()

    time_start = int(col_data.get('updates').get('latestDumpTime'))
    time_end = time_start + int(col_data.get('updates').get('dumpPeriod'))
    # print "Start Time:{}, End Time:{}".format(time_start, time_end)

    stream.add_filter('collector', col_name)
    stream.add_filter('record-type', 'ribs')
    # stream.add_interval_filter(time_start, time_end)
    # stream.add_interval_filter(time_start, time_start+300)
    stream.add_interval_filter(1503475200, 1503475200 + 7200)

    # print "Before Start>>>>>"
    stream.start()
    print col_name
    # print "After Start>>>>>>"
    producer = Kafka_producer()
    while stream.get_next_record(record):
        if record.status == "valid":
            elem = record.get_next_elem()
            while elem:
                if ':' in elem.peer_address:  # skip elements from IPv6 peers
                    num_total += 1
                    elem = record.get_next_elem()
                    continue
                #print "Element:{},{},{}".format(elem.type, elem.peer_address, elem.peer_asn)
                field = elem.fields
                #print type(field)
                prefix = field.get('prefix', '')
                next_hop = field.get('next-hop', '')
                as_path = field.get('as-path', '')
                as_path = as_path.replace(' ', '|')
                text = [
                    elem.type, elem.peer_address,
                    str(elem.peer_asn), prefix, next_hop, as_path,
                    str(record.time)
                ]
                text = ','.join(text)

                # producer = Kafka_producer()
                producer.send_data(col_name, text)
                num_total += 1
                num_ipv4 += 1
                # print "[{}]-{}".format(col_name, num_total)
                # size_total += len(text)
                #NUM_TOTAL += 1
                #print "[{}]-{}-{}-{}-{}".format(col_name, num_total, num_ipv4, size_total, time.ctime(record.time))
                #print "No.{} Message Send Success-[{}]".format(num_total, text)
                elem = record.get_next_elem()
        else:
            pass
            # print "## Current record not valid!"
            # break
        # print "One Collector Finished"
    else:
        # print "-----------------------------"
        # print "Collector[{}] And Records Send Finished\nTotal Num:{}, IPv4 Num:{}, Total Size:{}".format(col_name, num_total, num_ipv4, size_total)
        # print "-----------------------------"
        print "Collector:[{}]".format(col_name)
        print "Total Num:{}, IPv4 Num:{}, Total Size:{}".format(
            num_total, num_ipv4, size_total)
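A hypothetical invocation; col_data mirrors the shape the function reads (an 'updates' dict carrying latestDumpTime and dumpPeriod), and the values are assumptions:

# Hypothetical call sketch -- not from the original file.
col_data = {'updates': {'latestDumpTime': '1503475200', 'dumpPeriod': '300'}}
sendMessageToKafka('rrc00', col_data)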
Code example #56
0
def run_bgpstream(args):
    (collector, start_time, end_time, data_type) = args

    # initialize and configure BGPStream
    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('collector', collector)
    # NB: BGPStream uses inclusive/inclusive intervals, so subtract one off the
    # end time since we are using inclusive/exclusive intervals
    stream.add_interval_filter(start_time, end_time-1)
    stream.add_filter('record-type', data_type)
    stream.start()

    # per-peer data
    peers_data = {}

    # loop over all records in the stream
    while stream.get_next_record(rec):
        elem = rec.get_next_elem()
        # loop over all elems in the record
        while elem:
            # create a peer signature for this elem
            sig = peer_signature(rec, elem)
            # if this is the first time we have ever seen this peer, create
            # an empty result: (pfxs_v4_set, pfxs_v6_set,
            #                   dict[transit_asn] -> pfxs_v4_set,
            #                   dict[transit_asn] -> pfxs_v6_set)
            if sig not in peers_data:
                peers_data[sig] = [set(), set(), {}, {}]

            pfx = ""
            if 'prefix' in elem.fields:
                pfx = elem.fields['prefix']
                if ":" in pfx:
                    peers_data[sig][1].add(pfx)
                else:
                    peers_data[sig][0].add(pfx)

            if 'as-path' in elem.fields:
                # squash the AS path to keep only distinct ASNs, i.e., remove prepending
                path_split = [k for k, g in groupby(elem.fields['as-path'].split(" "))]
                # every ASN between the peer and the origin transits pfx
                for transit in path_split[1:-1]:
                    if ":" in pfx:
                        peers_data[sig][3].setdefault(transit, set()).add(pfx)
                    elif pfx != "":
                        peers_data[sig][2].setdefault(transit, set()).add(pfx)

            elem = rec.get_next_elem()

    # the time in the output row is truncated down to a multiple of
    # RESULT_GRANULARITY so that slices can be merged correctly
    start_time = \
        int(math.floor(start_time/RESULT_GRANULARITY) * RESULT_GRANULARITY)

    # for each peer that we processed data for, create an output row
    return [((start_time, collector, p), (peers_data[p])) for p in peers_data]
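peer_signature() and RESULT_GRANULARITY come from elsewhere in the original file; a hedged usage sketch with assumed values:

# Hypothetical invocation over one hour of RIB data (values assumed).
rows = run_bgpstream(('route-views2', 1503475200, 1503478800, 'ribs'))
for (ts, collector, peer), (v4, v6, transit_v4, transit_v6) in rows:
    print peer, len(v4), len(v6)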
Code example #57
0
File: link_peercount.py Project: CAIDA/bgp-hackathon
        peer_id_dict[collector_name] = dict()
    if peer_asn not in peer_id_dict[collector_name]:
        peer_id_dict[collector_name][peer_asn] = dict()
    if peer_address not in peer_id_dict[collector_name][peer_asn]:
        peer_id_dict[collector_name][peer_asn][peer_address] = next_peer_id
        next_peer_id += 1
    return peer_id_dict[collector_name][peer_asn][peer_address]



# Create a new bgpstream instance and a reusable bgprecord instance
stream = BGPStream()
rec = BGPRecord()

stream.add_filter('project','ris')
stream.add_filter('project','routeviews')

# Consider RIBs dumps only
stream.add_filter('record-type','ribs')

# Consider this time interval:
jan_02_2016 = 1451692800
stream.add_interval_filter(jan_02_2016 - 300, jan_02_2016 + 300)

stream.start()

edge_peer_asn = dict()

while(stream.get_next_record(rec)):
    elem = rec.get_next_elem()
Code example #58
0
File: TestStream.py Project: KTrel/DATAPLANE_1
from _pybgpstream import BGPStream, BGPRecord, BGPElem

# Create a new bgpstream instance and a reusable bgprecord instance
stream = BGPStream()
rec = BGPRecord()

start = 1454800000
end = 1454802000
target_pref = '200.7.6.0/24'

print start, end, target_pref

stream.add_filter('prefix', target_pref)

# Consider RIPE RRC 00 only
# stream.add_filter('record-type', 'updates')
stream.add_filter('collector', 'rrc00')
stream.add_interval_filter(start, end)

# Consider this time interval:
# Sat Aug  1 08:20:11 UTC 2015
# """ Very short period for test """
# stream.add_interval_filter(start, end)
# """ Jan till now """
#     stream.add_interval_filter(1451606400,1454785264)
# """ yesterday """
#     stream.add_interval_filter(1454630400, 1454716800)

# Start the stream
stream.start()
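The test file ends right after starting the stream; a minimal consumption loop, added here as an assumption about the intended next step:

# Hypothetical continuation sketch -- not part of the original test file.
while stream.get_next_record(rec):
    if rec.status == "valid":
        elem = rec.get_next_elem()
        while elem:
            print rec.collector, elem.type, elem.peer_address, elem.fields
            elem = rec.get_next_elem()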