Example #1
0
def getBGPStream(recordType, AF, collectors, startts, endts):
    """Build and configure a BGPStream instance for the given parameters.

    recordType -- "ribs" or "updates"
    AF         -- 6 selects IPv6; any other value selects IPv4
    collectors -- iterable of collector names to read from
    startts/endts -- datetime objects, or "%Y-%m-%dT%H:%M:%S" strings
                     interpreted as UTC

    Returns the configured (but not yet started) stream.
    """
    stream = BGPStream()

    # Assemble the BGPStream filter expression piece by piece.
    parts = ["type " + recordType]
    parts.append(" and ipversion 6" if AF == 6 else " and ipversion 4")
    parts.extend(" and collector %s " % coll for coll in collectors)
    bgprFilter = "".join(parts)

    # Accept either datetime objects or ISO-style timestamp strings.
    if isinstance(startts, str):
        startts = datetime.strptime(startts + "UTC", "%Y-%m-%dT%H:%M:%S%Z")
    if isinstance(endts, str):
        endts = datetime.strptime(endts + "UTC", "%Y-%m-%dT%H:%M:%S%Z")
    startts = dt2ts(startts)
    endts = dt2ts(endts)

    # An end time in the future means we want data as it arrives.
    if endts > dt2ts(datetime.now()):
        stream.set_live_mode()

    stream.parse_filter_string(bgprFilter)
    stream.add_interval_filter(startts, endts)

    return stream
Example #2
0
 def run(self):
     """Stream BGP records and write ExaBGP-style 'announce'/'withdraw'
     statements to stdout, replaying records with their original
     inter-record spacing plus ``self.delay`` seconds.

     Input is either a live collector (when ``self.upd_file`` is None)
     or local RIB/update dump files via the 'singlefile' interface.
     Only elements whose peer ASN appears in ``self.asn_to_nexthop``
     are emitted; announcements get their next-hop rewritten to the
     configured one.
     """
     stream = BGPStream()
     rec = BGPRecord()
     if self.upd_file is None:
         # No local files given: stream live data from the collector.
         stream.add_filter('collector', self.collector)
         stream.add_filter('record-type', self.record_type)
         stream.add_interval_filter(self.from_date, self.to_date)
         stream.set_live_mode()
     else:
         # Read from local dump files instead of a live collector.
         stream.set_data_interface('singlefile')
         if self.upd_file:
             stream.set_data_interface_option('singlefile', 'upd-file',
                                              self.upd_file)
         if self.rib_file:
             stream.set_data_interface_option('singlefile', 'rib-file',
                                              self.rib_file)
     # Optional prefix / peer-ASN / community filters.
     if self.prefix_filter is not None:
         for prefix in self.prefix_filter:
             stream.add_filter('prefix', prefix)
     if self.peer_as_filter:
         for asn in self.peer_as_filter:
             stream.add_filter('peer-asn', str(asn))
     if self.communities_filter:
         for community in self.communities_filter:
             stream.add_filter('community', community)
     stream.start()
     # Prime the loop with the first record so `prev` has a timestamp.
     stream.get_next_record(rec)
     prev = rec.time
     while (stream.get_next_record(rec)):
         now = rec.time
         if rec.status == 'valid':
             elem = rec.get_next_elem()
             while (elem):
                 statement = None
                 peer_address = elem.peer_address  # NOTE(review): unused
                 peer_asn = elem.peer_asn
                 if peer_asn in self.asn_to_nexthop:
                     if elem.type == 'A' or elem.type == 'R':
                         # Announcement or RIB entry: rewrite the
                         # next-hop with the one configured for this
                         # peer ASN.
                         prefix = elem.fields['prefix']
                         as_path = elem.fields['as-path']
                         nexthop = elem.fields['next-hop']
                         # NOTE(review): this inner membership test is
                         # redundant — the enclosing `if` already
                         # checked it.
                         if peer_asn in self.asn_to_nexthop:
                             nexthop = self.asn_to_nexthop[peer_asn]
                             statement = 'announce route %s next-hop %s as-path' \
                                 ' [ %s ]' % (prefix, nexthop, as_path)
                     elif elem.type == 'W':
                         # Withdrawal carries only the prefix.
                         prefix = elem.fields['prefix']
                         statement = 'withdraw route %s' % prefix
                 if statement:
                     sys.stdout.write("%s\n" % statement)
                     sys.stdout.flush()
                 elem = rec.get_next_elem()
         # Pace the output: original record spacing plus a fixed delay.
         time.sleep(self.delay + now - prev)
         prev = now
Example #3
0
def main():
    """Watch live BGP updates and print any announcement whose origin AS
    differs from the expected origin configured in a YAML file.

    The YAML file maps prefixes to their expected origin ASN; lookups use
    a Patricia trie so covered (more-specific) announcements also match.
    With --debug, every update covering a watched prefix is printed.
    """
    parser = argparse.ArgumentParser()
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    parser.description = textwrap.dedent('''\
        a proof-of-concept utility for watching updates from BGPstream
        and then printing out if an unexpected update is heard
        ''')
    parser.epilog = textwrap.dedent('''\
        Example: watch these route announcements
            %(prog)s -f routes.yaml ''')
    required = parser.add_argument_group('required arguments')
    required.add_argument("-f",
                          "--file",
                          required=True,
                          help="yaml file of prefixes to origin asn")
    parser.add_argument("-d",
                        "--debug",
                        action='store_true',
                        help="print out all updates containing these prefixes")
    args = parser.parse_args()

    routes = pytricia.PyTricia(48)  # longest reasonable pfx in dfz

    with open(args.file, 'r') as f:
        routesfile = yaml.safe_load(f)
    for pfx in routesfile:
        routes[pfx] = routesfile[pfx]

    stream = BGPStream()
    rec = BGPRecord()
    stream.add_filter('record-type', 'updates')
    # Start now; an end time of 0 leaves the interval open-ended.
    stream.add_interval_filter(int(time.time()), 0)
    stream.set_live_mode()
    stream.start()

    while (stream.get_next_record(rec)):
        if rec.status == 'valid':
            elem = rec.get_next_elem()
            while (elem):
                if 'as-path' in elem.fields:
                    path = elem.fields['as-path'].split()
                    prefix = elem.fields['prefix']
                    # Compare as strings: YAML typically parses the
                    # configured origin ASN as an int, while AS-path
                    # elements are strings — a direct != would always
                    # be True and raise false alarms on every update.
                    if prefix in routes and (str(routes[prefix]) != path[-1]
                                             or args.debug):
                        print('Heard prefix:', elem.fields['prefix'],
                              'AS-PATH:', elem.fields['as-path'],
                              '  Found by project:', rec.project, 'collector:',
                              rec.collector, 'type:', rec.type, 'at time:',
                              rec.time, 'Type:', elem.type, 'Peer:',
                              elem.peer_address, 'AS', elem.peer_asn)

                elem = rec.get_next_elem()
Example #4
0
    def readupdates(self):
        """Consume BGP update messages from BGPStream and keep the prefix
        tree (``self.rtree``) and the per-peer/origin-AS counters in sync
        with announcements and withdrawals.

        Counts are weighted by covered address space when
        ``self.spatialResolution`` is set (IP counts are shifted between a
        node and its covering parent as routes appear and disappear),
        otherwise every prefix counts as 1.  Updates older than the
        current time bin are discarded; updates past the end of the bin
        advance it via ``slideTimeWindow``.
        """
        #TODO implement txt file for update messages?
        if self.txtFile:
            return

        # create a new bgpstream instance
        stream = BGPStream()
        bgprFilter = "type updates"

        if self.af == 6:
            bgprFilter += " and ipversion 6"
        else:
            bgprFilter += " and ipversion 4"

        # bgprFilter += " and collector rrc10 "
        for c in self.collectors:
            bgprFilter += " and collector %s " % c

        # if self.asnFilter is not None:
        # # TOFIX filter is now deprecated, we need to have both
        # # announcements and withdrawals
        # bgprFilter += ' and (path %s$ or elemtype withdrawals)' % self.asnFilter

        logging.info("Connecting to BGPstream... (%s)" % bgprFilter)
        logging.info("Timestamps: %s, %s" % (self.startts, self.endts))
        stream.parse_filter_string(bgprFilter)
        stream.add_interval_filter(self.startts, self.endts)
        if self.livemode:
            stream.set_live_mode()

        stream.start()
        # for line in p1.stdout:
        # create a reusable bgprecord instance
        rec = BGPRecord()
        while (stream.get_next_record(rec)):
            if rec.status != "valid":
                # NOTE(review): invalid records are logged but still
                # processed below — confirm this is intended.
                logging.warn("Invalid BGP record: %s, %s, %s, %s, %s" %
                             (rec.project, rec.collector, rec.type, rec.time,
                              rec.status))
            zDt = rec.time
            elem = rec.get_next_elem()
            while (elem):
                zOrig = elem.peer_address
                if zOrig not in self.peers:
                    # no need to update the counts for non-full feed peers
                    elem = rec.get_next_elem()
                    continue

                zAS = elem.peer_asn
                if zAS in self.excludedPeers or (len(
                        self.includedPeers) and zAS not in self.includedPeers):
                    elem = rec.get_next_elem()
                    continue
                zPfx = elem.fields["prefix"]
                if zPfx == "0.0.0.0/0" or zPfx in self.excludedPrefix or (
                        len(self.includedPrefix)
                        and zPfx not in self.includedPrefix):
                    elem = rec.get_next_elem()
                    continue

                msgTs = zDt
                # set first time bin!
                if self.ts is None:
                    self.slideTimeWindow(msgTs)

                elif self.ts + self.timeWindow <= msgTs:
                    self.slideTimeWindow(msgTs)

                elif self.ts > msgTs:
                    #Old update, ignore this to update the graph
                    logging.warn(
                        "Ignoring old update (peer IP: %s, timestamp: %s, current time bin: %s): %s"
                        % (zOrig, zDt, self.ts, (elem.type, zAS, elem.fields)))
                    elem = rec.get_next_elem()
                    continue

                node = self.rtree.search_exact(zPfx)

                if elem.type == "W":
                    # Withdraw: remove the corresponding node
                    if not node is None and zOrig in node.data:
                        origAS = node.data[zOrig]["origAS"]

                        if self.spatialResolution:
                            count = node.data[zOrig]["count"]
                            # Update count for above node
                            parent = self.findParent(node, zOrig)
                            if parent is None:
                                self.incTotalCount(-count, zOrig, origAS, zAS)
                                asns = node.data[zOrig]["path"]
                                self.incCount(-count, zOrig, origAS, zAS, asns)
                            else:
                                node.data[zOrig]["count"] = 0
                                # Add ips to above node and corresponding ASes
                                # pcountBelow = sum([n.data[zOrig]["count"] for n in self.rtree.search_covered(parent.prefix) if zOrig in n.data and n!=parent])
                                # pcountBelow = sum([n.data[zOrig]["count"] for n in self.rtree.search_covered(parent.prefix) if n.parent == parent and zOrig in n.data])
                                # oldpCount = parent.data[zOrig]["count"]
                                # pCount = self.nbIPs(parent.prefixlen) - pcountBelow
                                # parent.data[zOrig]["count"] = pCount
                                # pdiff = pCount - oldpCount
                                # assert pdiff==count

                                # Update count for origAS and path from the
                                # parent node
                                porigAS = parent.data[zOrig]["origAS"]
                                pasns = parent.data[zOrig]["path"]
                                self.incCount(count, zOrig, porigAS, zAS,
                                              pasns)
                                self.incTotalCount(count, zOrig, porigAS, zAS)

                                # Update count for withdrawn origAS and path
                                asns = node.data[zOrig]["path"]
                                self.incCount(-count, zOrig, origAS, zAS, asns)
                                self.incTotalCount(-count, zOrig, origAS, zAS)

                        else:
                            asns = node.data[zOrig]["path"]
                            self.incCount(-1, zOrig, origAS, zAS, asns)
                            self.incTotalCount(-1, zOrig, origAS, zAS)

                        del node.data[zOrig]

                else:
                    # Announce: update counters
                    sPath = elem.fields["as-path"]
                    path = sPath.split(" ")
                    origAS = path[-1]

                    if origAS in self.excludedOriginASN or (
                            len(self.includedOriginASN)
                            and origAS not in self.includedOriginASN):
                        elem = rec.get_next_elem()
                        continue
                        # FIXME: this is not going to work in the case of
                        # delegated prefixes or implicit withdraws

                    if len(path) < 2:
                        # Ignoring paths with only one AS
                        elem = rec.get_next_elem()
                        continue

                    if self.announceQueue is not None:
                        self.announceQueue.put((zDt, zOrig, zAS, zPfx, path))

                    # Announce:
                    if node is None or not zOrig in node.data:
                        # Add a new node

                        node = self.rtree.add(zPfx)
                        if self.spatialResolution:
                            # Compute the exact number of IPs
                            count = self.nbIPs(node.prefixlen)
                            countBelow = sum([
                                n.data[zOrig]["count"]
                                for n in self.rtree.search_covered(zPfx)
                                if zOrig in n.data and n != node
                            ])
                            count -= countBelow
                            # Update the ASes counts
                            node.data[zOrig] = {
                                "path": set(path),
                                "count": count,
                                "origAS": origAS
                            }
                            asns = node.data[zOrig]["path"]
                            self.incCount(count, zOrig, origAS, zAS, asns)
                            self.incTotalCount(count, zOrig, origAS, zAS)

                            parent = self.findParent(node, zOrig)
                            if not parent is None:
                                # Update above nodes
                                # print("%s: (%s) %s, %s, %s" % (zDt, elem.type, zAS, zPfx, count))
                                pcountBelow = sum([
                                    n.data[zOrig]["count"] for n in
                                    self.rtree.search_covered(parent.prefix)
                                    if zOrig in n.data and n != parent
                                ])
                                # pcountBelow = sum([n.data[zOrig]["count"] for n in self.rtree.search_covered(parent.prefix) if n.parent == parent and zOrig in n.data])
                                oldpCount = parent.data[zOrig]["count"]
                                pCount = self.nbIPs(
                                    parent.prefixlen) - pcountBelow
                                pdiff = pCount - oldpCount
                                parent.data[zOrig]["count"] = pCount
                                # print("parent %s: (%s) %s, %s, %s" % (zDt, zAS, parent.prefix, oldpCount, pCount))
                                # print [(n.prefix,n.data[zOrig]["count"]) for n in self.rtree.search_covered(parent.prefix) if zOrig in n.data and n!=parent ]
                                porigAS = parent.data[zOrig]["origAS"]
                                pasns = parent.data[zOrig]["path"]
                                self.incCount(pdiff, zOrig, porigAS, zAS,
                                              pasns)
                                self.incTotalCount(pdiff, zOrig, porigAS, zAS)

                        else:
                            self.incTotalCount(1, zOrig, origAS, zAS)
                            count = 1
                            # Update the ASes counts
                            node.data[zOrig] = {
                                "path": set(path),
                                "count": count,
                                "origAS": origAS
                            }
                            # Fixed: this was `asn = node.data[...]["path"]`,
                            # which left `asns` holding a stale value from an
                            # earlier iteration (or undefined on the first),
                            # so incCount was fed the wrong AS set.
                            asns = node.data[zOrig]["path"]
                            self.incCount(count, zOrig, origAS, zAS, asns)

                    else:
                        #Update node path and counts
                        if self.spatialResolution:
                            count = node.data[zOrig]["count"]
                        else:
                            count = 1

                        porigAS = node.data[zOrig]["origAS"]
                        asns = node.data[zOrig]["path"]
                        self.incCount(-count, zOrig, porigAS, zAS, asns)
                        self.incTotalCount(-count, zOrig, porigAS, zAS)

                        node.data[zOrig]["path"] = set(path)
                        node.data[zOrig]["origAS"] = origAS
                        asns = node.data[zOrig]["path"]
                        self.incCount(count, zOrig, origAS, zAS, asns)
                        self.incTotalCount(count, zOrig, origAS, zAS)

                elem = rec.get_next_elem()
Example #5
0
    def readrib(self):
        """Load an initial RIB snapshot into the prefix tree.

        Reads RIB records either from BGPStream (using the collectors,
        peers, and prefixes configured on the instance, over a two-hour
        window centred on ``self.startts``) or from a local text file via
        ``txtReader``.  Each (prefix, peer) pair seen seeds
        ``self.rtree`` and the per-peer/origin-AS counters that the
        update-processing code keeps incremental afterwards.

        Note: this body uses the Python 2 ``print`` statement.
        """
        stream = None
        rec = None
        if self.txtFile is None:
            # create a new bgpstream instance
            stream = BGPStream()

            # create a reusable bgprecord instance
            rec = BGPRecord()
            bgprFilter = "type ribs"

            if self.af == 6:
                bgprFilter += " and ipversion 6"
            else:
                bgprFilter += " and ipversion 4"

            for c in self.collectors:
                bgprFilter += " and collector %s " % c

            # if not self.asnFilter is None:
            # bgprFilter += ' and path %s$' % self.asnFilter
            for p in self.includedPeers:
                bgprFilter += " and peer %s " % p

            for p in self.includedPrefix:
                bgprFilter += " and prefix more %s " % p

            logging.info("Connecting to BGPstream... (%s)" % bgprFilter)
            logging.info("Timestamps: %s, %s" %
                         (self.startts - 3600, self.startts + 3600))
            stream.parse_filter_string(bgprFilter)
            # One hour on either side of startts, to be sure a RIB dump
            # falls inside the interval.
            stream.add_interval_filter(self.startts - 3600,
                                       self.startts + 3600)
            if self.livemode:
                stream.set_live_mode()

            stream.start()

        else:
            rec = txtReader.txtReader(self.txtFile)

        # for line in p1.stdout:
        # NOTE(review): the txtFile branch loops while `not rec.running` —
        # presumably txtReader flips `running` when the file is exhausted;
        # confirm against the txtReader implementation.
        while (self.txtFile
               and not rec.running) or (stream
                                        and stream.get_next_record(rec)):
            if rec.status != "valid":
                # NOTE(review): invalid records are printed but still
                # processed below — confirm this is intended.
                print rec.project, rec.collector, rec.type, rec.time, rec.status
            zDt = rec.time
            elem = rec.get_next_elem()

            while (elem):
                zOrig = elem.peer_address
                zAS = elem.peer_asn
                if zAS in self.excludedPeers or (len(
                        self.includedPeers) and zAS not in self.includedPeers):
                    elem = rec.get_next_elem()
                    continue
                zPfx = elem.fields["prefix"]
                sPath = elem.fields["as-path"]
                # print("%s: %s, %s, %s" % (zDt, zAS, zPfx, elem.fields))

                if zPfx == "0.0.0.0/0" or zPfx in self.excludedPrefix or (
                        len(self.includedPrefix)
                        and zPfx not in self.includedPrefix):
                    elem = rec.get_next_elem()
                    continue

                path = sPath.split(" ")
                origAS = path[-1]
                if origAS in self.excludedOriginASN or (
                        len(self.includedOriginASN)
                        and origAS not in self.includedOriginASN):
                    elem = rec.get_next_elem()
                    continue
                    # FIXME: this is not going to work in the case of
                    # delegated prefixes (and using IP addresses as spatial
                    # resolution)

                self.peersASN[zOrig].add(zAS)

                if len(path) < 2:
                    # Ignore paths with only one AS
                    elem = rec.get_next_elem()
                    continue

                node = self.rtree.add(zPfx)
                if zOrig in node.data:
                    # Already read this entry, we should read only one RIB per peer
                    elem = rec.get_next_elem()
                    continue

                if self.ribQueue is not None:
                    self.ribQueue.put((zDt, zOrig, zAS, zPfx, path))

                node.data[zOrig] = {
                    "path": set(path),
                    "count": 0,
                    "origAS": origAS
                }

                # print "%s, %s, %s, %s, %s" % (elem.time, elem.type, elem.peer_address, elem.peer_asn, elem.fields)

                if self.spatialResolution:
                    # compute weight for this path
                    count = self.nbIPs(node.prefixlen)
                    countBelow = sum([
                        n.data[zOrig]["count"]
                        for n in self.rtree.search_covered(zPfx)
                        if zOrig in n.data and n != node
                    ])
                    count -= countBelow
                    # assert count >= 0
                    node.data[zOrig]["count"] = count

                    # Update above nodes
                    parent = self.findParent(node, zOrig)
                    if not parent is None:
                        # pcountBelow = sum([n.data[zOrig]["count"] for n in self.rtree.search_covered(parent.prefix) if n.parent == parent and zOrig in n.data])
                        pcountBelow = sum([
                            n.data[zOrig]["count"]
                            for n in self.rtree.search_covered(parent.prefix)
                            if zOrig in n.data and n != parent
                        ])
                        oldpCount = parent.data[zOrig]["count"]
                        pCount = self.nbIPs(parent.prefixlen) - pcountBelow
                        pdiff = pCount - oldpCount
                        parent.data[zOrig]["count"] = pCount
                        pOrigAS = parent.data[zOrig]["origAS"]
                        asns = parent.data[zOrig]["path"]
                        self.incCount(pdiff, zOrig, pOrigAS, zAS, asns)
                        self.incTotalCount(pdiff, zOrig, pOrigAS, zAS)
                else:
                    count = 1
                    node.data[zOrig]["count"] = count

                asns = node.data[zOrig]["path"]
                self.incTotalCount(count, zOrig, origAS, zAS)
                self.incCount(count, zOrig, origAS, zAS, asns)

                elem = rec.get_next_elem()