def __init__(self, filters=None,
             rpki_validator="rpki-validator.realmv6.org:8282",
             db="metasnap.db"):
    # avoid a mutable default argument
    if filters is None:
        filters = {'collector': ['rrc00']}
    self.stream = BGPStream()
    self.filters = filters
    self.route_table = dict()
    self.i = 0
    for filter_type, filter_array in filters.items():
        for filter_value in filter_array:
            self.stream.add_filter(filter_type, filter_value)
    for collector in filters['collector']:
        self.route_table[collector] = defaultdict(dict)
    # self.db = DBConnector(db, read_only=False)
    rpki_host, rpki_port = rpki_validator.split(":")
    self.mgr = RTRManager(rpki_host, rpki_port)
    self.mgr.start()
    self.counter = Counter()
    start_timestamp = self.get_push_timestamp(datetime.now(timezone.utc))
    # self.start_collecting(start_timestamp, int(datetime.now(timezone.utc).strftime("%s")))
    self.start_collecting(start_timestamp, start_timestamp)
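# Note (illustrative, not from the original code): the commented-out call above
# relies on datetime.strftime("%s"), which is a non-portable glibc extension.
# A portable way to obtain a Unix timestamp from an aware datetime is:
#
#     from datetime import datetime, timezone
#     now_ts = int(datetime.now(timezone.utc).timestamp())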
def __init__( self, filters={"collector": ["rrc00"]}, rpki_validator="rpki-validator.realmv6.org:8282", settings_file="../settings.json", ): self.stream = BGPStream() self.filters = filters self.route_table = dict() self.i = 0 self.metadata_vp = dict() self.metadata_rc = dict() self.peers = Counter() self.prefix4 = Counter() self.prefix6 = Counter() start_timestamp = get_push_timestamp(datetime.now(timezone.utc)) for filter_type, filter_array in filters.items(): for filter_value in filter_array: self.stream.add_filter(filter_type, filter_value) for collector in filters["collector"]: self.route_table[collector] = defaultdict(dict) self.metadata_vp[collector] = defaultdict(list) self.metadata_rc[collector] = RouteCollectorMeta(None, 0, 0, 0, 0) self.peers[collector] = defaultdict(int) self.prefix4[collector] = defaultdict(int) self.prefix6[collector] = defaultdict(int) settings = get_settings(settings_file) settings["db"]["password"] = os.environ["PGPASS"] self.db = DBConnector(settings["db"]) rpki = rpki_validator.split(":") self.mgr = RTRManager(rpki[0], rpki[1]) self.mgr.start() self.start_collecting(start_timestamp)
# assumed imports for this snippet (pybgpstream v1 low-level API and rtrlib)
from time import sleep

from _pybgpstream import BGPStream, BGPRecord
from rtrlib import RTRManager


class BGPDataConsumer(object):
    """Consumes BGP records and validates origins against an RPKI RTR cache."""

    def __init__(
        self,
        route_collector="rrc00",
        rpki_validator="rpki-validator.realmv6.org:8282",
    ):
        self.rc = route_collector
        rpki_host, rpki_port = rpki_validator.split(":")
        self.mgr = RTRManager(rpki_host, rpki_port)
        # self._start_rtr_manager()
        self.stream = BGPStream()
        self.rec = BGPRecord()

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def _start_rtr_manager(self):
        self.mgr.start()
        # wait until the RTR manager has synced its first dataset; the
        # original checked an undefined `status` object here, so the wait
        # is bounded with a timeout instead
        waited = 0.0
        while not self.mgr.is_synced():
            sleep(0.2)
            waited += 0.2
            if waited > 30:
                print("Connection error")
                exit()
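# Minimal usage sketch for the rtrlib manager wrapped above, following the
# documented rtrlib Python API; the validator endpoint and the (asn, prefix,
# length) triple are illustrative placeholders, not from the original code.
from rtrlib import RTRManager, PfxvState

mgr = RTRManager("rpki-validator.realmv6.org", 8282)
mgr.start()
result = mgr.validate(12345, "10.10.0.0", 24)  # origin ASN, prefix, prefix length
if result == PfxvState.valid:
    print("announcement is RPKI-valid")
elif result == PfxvState.invalid:
    print("announcement is RPKI-invalid")
else:  # PfxvState.not_found
    print("no covering ROA found")
mgr.stop()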
def __init__(self, filters=None,
             rpki_validator="rpki-validator.realmv6.org:8282",
             db="metasnap.db"):
    # avoid a mutable default argument
    if filters is None:
        filters = {'collector': ['rrc00']}
    self.stream = BGPStream(
        '/Users/mx/Projects/Uni/bgp-group/bgp_dump.txt')
    self.filters = filters
    self.route_table = dict()
    self.i = 0
    self.metadata_vp = dict()
    self.metadata_rc = dict()
    self.peers = Counter()
    self.prefix4 = Counter()
    self.prefix6 = Counter()
    start_timestamp = get_push_timestamp(datetime.now(timezone.utc))
    # for filter_type, filter_array in filters.items():
    #     for filter_value in filter_array:
    #         self.stream.add_filter(filter_type, filter_value)
    for collector in filters['collector']:
        self.route_table[collector] = defaultdict(dict)
        self.metadata_vp[collector] = defaultdict(list)
        self.metadata_rc[collector] = defaultdict(int)
        self.peers[collector] = defaultdict(int)
        self.prefix4[collector] = defaultdict(int)
        self.prefix6[collector] = defaultdict(int)
    # self.db = DBConnector(db, read_only=False)
    rpki_host, rpki_port = rpki_validator.split(":")
    self.mgr = RTRManager(rpki_host, rpki_port)
    self.mgr.start()
    self.start_collecting(start_timestamp, start_timestamp)
class Worker(ConsumerProducerMixin):
    """
    RabbitMQ Consumer/Producer for this Service.
    """

    def __init__(self, connection: Connection) -> NoReturn:
        self.connection = connection
        self.timestamp = -1
        self.rules = None
        self.prefix_tree = None
        self.mon_num = 1

        setattr(self, "publish_hijack_fun", self.publish_hijack_result_production)
        if TEST_ENV == "true":
            setattr(self, "publish_hijack_fun", self.publish_hijack_result_test)

        self.redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
        ping_redis(self.redis)

        self.rtrmanager = None
        if RPKI_VALIDATOR_ENABLED == "true":
            while True:
                try:
                    self.rtrmanager = RTRManager(
                        RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                    )
                    self.rtrmanager.start()
                    log.info(
                        "Connected to RPKI VALIDATOR '{}:{}'".format(
                            RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                        )
                    )
                    break
                except Exception:
                    log.info(
                        "Could not connect to RPKI VALIDATOR '{}:{}'".format(
                            RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                        )
                    )
                    log.info("Retrying RTR connection in 5 seconds...")
                    time.sleep(5)

        # EXCHANGES
        self.update_exchange = Exchange(
            "bgp-update",
            channel=connection,
            type="direct",
            durable=False,
            delivery_mode=1,
        )
        self.update_exchange.declare()
        self.hijack_exchange = Exchange(
            "hijack-update",
            channel=connection,
            type="direct",
            durable=False,
            delivery_mode=1,
        )
        self.hijack_exchange.declare()
        self.hijack_hashing = Exchange(
            "hijack-hashing",
            channel=connection,
            type="x-consistent-hash",
            durable=False,
            delivery_mode=1,
        )
        self.hijack_hashing.declare()
        self.handled_exchange = Exchange(
            "handled-update",
            channel=connection,
            type="direct",
            durable=False,
            delivery_mode=1,
        )
        self.config_exchange = Exchange(
            "config",
            channel=connection,
            type="direct",
            durable=False,
            delivery_mode=1,
        )
        self.pg_amq_bridge = Exchange(
            "amq.direct", type="direct", durable=True, delivery_mode=1
        )

        # QUEUES
        self.update_queue = Queue(
            "detection-update-update",
            exchange=self.pg_amq_bridge,
            routing_key="update-insert",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )
        self.hijack_ongoing_queue = Queue(
            "detection-hijack-ongoing",
            exchange=self.hijack_exchange,
            routing_key="ongoing",
            durable=False,
            auto_delete=True,
            max_priority=1,
            consumer_arguments={"x-priority": 1},
        )
        self.config_queue = Queue(
            "detection-config-notify-{}".format(uuid()),
            exchange=self.config_exchange,
            routing_key="notify",
            durable=False,
            auto_delete=True,
            max_priority=3,
            consumer_arguments={"x-priority": 3},
        )

        self.config_request_rpc()
        log.info("started")

    def get_consumers(
        self, Consumer: Consumer, channel: Connection
    ) -> List[Consumer]:
        return [
            Consumer(
                queues=[self.config_queue],
                on_message=self.handle_config_notify,
                prefetch_count=1,
                accept=["ujson"],
            ),
            Consumer(
                queues=[self.update_queue],
                on_message=self.handle_bgp_update,
                prefetch_count=100,
                accept=["ujson", "txtjson"],
            ),
            Consumer(
                queues=[self.hijack_ongoing_queue],
                on_message=self.handle_ongoing_hijacks,
                prefetch_count=10,
                accept=["ujson"],
            ),
        ]

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        self.producer.publish(
            self.timestamp,
            exchange=self.hijack_exchange,
            routing_key="ongoing-request",
            priority=1,
            serializer="ujson",
        )

    def handle_config_notify(self, message: Dict) -> NoReturn:
        """
        Consumer for Config-Notify messages that come from the configuration
        service. Upon arrival, this service updates its running configuration.
        """
        message.ack()
        log.debug("message: {}\npayload: {}".format(message, message.payload))
        raw = message.payload
        if raw["timestamp"] > self.timestamp:
            self.timestamp = raw["timestamp"]
            self.rules = raw.get("rules", [])
            self.init_detection()
            # Request ongoing hijacks from DB
            self.producer.publish(
                self.timestamp,
                exchange=self.hijack_exchange,
                routing_key="ongoing-request",
                priority=1,
                serializer="ujson",
            )

    def config_request_rpc(self) -> NoReturn:
        """
        Initial RPC of this service to request the configuration.
        The RPC is blocked until the configuration service replies back.
        """
        self.correlation_id = uuid()
        callback_queue = Queue(
            uuid(),
            durable=False,
            auto_delete=True,
            max_priority=4,
            consumer_arguments={"x-priority": 4},
        )
        self.producer.publish(
            "",
            exchange="",
            routing_key="config-request-queue",
            reply_to=callback_queue.name,
            correlation_id=self.correlation_id,
            retry=True,
            declare=[
                Queue(
                    "config-request-queue",
                    durable=False,
                    max_priority=4,
                    consumer_arguments={"x-priority": 4},
                ),
                callback_queue,
            ],
            priority=4,
            serializer="ujson",
        )
        with Consumer(
            self.connection,
            on_message=self.handle_config_request_reply,
            queues=[callback_queue],
            accept=["ujson"],
        ):
            while self.rules is None:
                self.connection.drain_events()
        log.debug("{}".format(self.rules))

    def handle_config_request_reply(self, message: Dict):
        """
        Callback function for the config request RPC.
        Updates running configuration upon receiving a new configuration.
        """
        message.ack()
        log.debug("message: {}\npayload: {}".format(message, message.payload))
        if self.correlation_id == message.properties["correlation_id"]:
            raw = message.payload
            if raw["timestamp"] > self.timestamp:
                self.timestamp = raw["timestamp"]
                self.rules = raw.get("rules", [])
                self.init_detection()

    def init_detection(self) -> NoReturn:
        """
        Updates rules every time it receives a new configuration.
        """
        log.info("Initiating detection...")
        log.info("Starting building detection prefix tree...")
        self.prefix_tree = {
            "v4": pytricia.PyTricia(32),
            "v6": pytricia.PyTricia(128),
        }
        raw_prefix_count = 0
        for rule in self.rules:
            try:
                rule_translated_origin_asn_set = set()
                for asn in rule["origin_asns"]:
                    this_translated_asn_list = flatten(translate_asn_range(asn))
                    rule_translated_origin_asn_set.update(
                        set(this_translated_asn_list)
                    )
                rule["origin_asns"] = list(rule_translated_origin_asn_set)
                rule_translated_neighbor_set = set()
                for asn in rule["neighbors"]:
                    this_translated_asn_list = flatten(translate_asn_range(asn))
                    rule_translated_neighbor_set.update(
                        set(this_translated_asn_list)
                    )
                rule["neighbors"] = list(rule_translated_neighbor_set)

                conf_obj = {
                    "origin_asns": rule["origin_asns"],
                    "neighbors": rule["neighbors"],
                    "policies": set(rule["policies"]),
                    "community_annotations": rule["community_annotations"],
                }
                for prefix in rule["prefixes"]:
                    for translated_prefix in translate_rfc2622(prefix):
                        ip_version = get_ip_version(translated_prefix)
                        if self.prefix_tree[ip_version].has_key(translated_prefix):
                            node = self.prefix_tree[ip_version][translated_prefix]
                        else:
                            node = {
                                "prefix": translated_prefix,
                                "data": {"confs": []},
                            }
                            self.prefix_tree[ip_version].insert(
                                translated_prefix, node
                            )
                        node["data"]["confs"].append(conf_obj)
                        raw_prefix_count += 1
            except Exception:
                log.exception("Exception")
        log.info(
            "{} prefixes integrated in detection prefix tree in total".format(
                raw_prefix_count
            )
        )
        log.info("Finished building detection prefix tree.")
        log.info("Detection initiated, configured and running.")

    def handle_ongoing_hijacks(self, message: Dict) -> NoReturn:
        """
        Handles ongoing hijacks from the database.
        """
        log.debug("{} ongoing hijack events".format(len(message.payload)))
        message.ack()
        for update in message.payload:
            self.handle_bgp_update(update)

    def handle_bgp_update(self, message: Dict) -> NoReturn:
        """
        Callback function that runs the main logic of
        detecting hijacks for every bgp update.
        """
        # log.debug('{}'.format(message))
        if isinstance(message, dict):
            monitor_event = message
        else:
            message.ack()
            monitor_event = message.payload
            monitor_event["path"] = monitor_event["as_path"]
            monitor_event["timestamp"] = datetime(
                *map(int, re.findall(r"\d+", monitor_event["timestamp"]))
            ).timestamp()

        raw = monitor_event.copy()

        # mark the initial redis hijack key since it may change upon
        # outdated checks
        if "hij_key" in monitor_event:
            monitor_event["initial_redis_hijack_key"] = redis_key(
                monitor_event["prefix"],
                monitor_event["hijack_as"],
                monitor_event["hij_type"],
            )

        is_hijack = False

        if monitor_event["type"] == "A":
            monitor_event["path"] = Detection.Worker.__clean_as_path(
                monitor_event["path"]
            )
            ip_version = get_ip_version(monitor_event["prefix"])
            if monitor_event["prefix"] in self.prefix_tree[ip_version]:
                prefix_node = self.prefix_tree[ip_version][monitor_event["prefix"]]
                monitor_event["matched_prefix"] = prefix_node["prefix"]

                try:
                    path_hijacker = -1
                    pol_hijacker = -1
                    hij_dimensions = [
                        "-",
                        "-",
                        "-",
                        "-",
                    ]  # prefix, path, dplane, policy
                    hij_dimension_index = 0
                    for func_dim in self.__hijack_dimension_checker_gen():
                        if hij_dimension_index == 0:
                            # prefix dimension
                            for func_pref in func_dim():
                                hij_dimensions[hij_dimension_index] = func_pref(
                                    monitor_event, prefix_node
                                )
                                if hij_dimensions[hij_dimension_index] != "-":
                                    break
                        elif hij_dimension_index == 1:
                            # path type dimension
                            for func_path in func_dim(len(monitor_event["path"])):
                                (
                                    path_hijacker,
                                    hij_dimensions[hij_dimension_index],
                                ) = func_path(monitor_event, prefix_node)
                                if hij_dimensions[hij_dimension_index] != "-":
                                    break
                        elif hij_dimension_index == 2:
                            # data plane dimension
                            for func_dplane in func_dim():
                                hij_dimensions[hij_dimension_index] = func_dplane(
                                    monitor_event, prefix_node
                                )
                                if hij_dimensions[hij_dimension_index] != "-":
                                    break
                        elif hij_dimension_index == 3:
                            # policy dimension
                            for func_pol in func_dim(len(monitor_event["path"])):
                                (
                                    pol_hijacker,
                                    hij_dimensions[hij_dimension_index],
                                ) = func_pol(monitor_event, prefix_node)
                                if hij_dimensions[hij_dimension_index] != "-":
                                    break
                        hij_dimension_index += 1
                    # check if dimension combination in hijack combinations
                    # and commit hijack
                    if hij_dimensions in HIJACK_DIM_COMBINATIONS:
                        is_hijack = True
                        # show pol hijacker only if the path hijacker is uncertain
                        hijacker = path_hijacker
                        if path_hijacker == -1 and pol_hijacker != -1:
                            hijacker = pol_hijacker
                        self.commit_hijack(monitor_event, hijacker, hij_dimensions)
                except Exception:
                    log.exception("exception")

            outdated_hijack = None
            if not is_hijack and "hij_key" in monitor_event:
                try:
                    # outdated hijack, benign from now on
                    redis_hijack_key = redis_key(
                        monitor_event["prefix"],
                        monitor_event["hijack_as"],
                        monitor_event["hij_type"],
                    )
                    outdated_hijack = self.redis.get(redis_hijack_key)
                    purge_redis_eph_pers_keys(
                        self.redis, redis_hijack_key, monitor_event["hij_key"]
                    )
                    # mark in DB only if it is the first time this hijack
                    # was purged (pre-existent in redis)
                    if outdated_hijack:
                        self.mark_outdated(
                            monitor_event["hij_key"], redis_hijack_key
                        )
                except Exception:
                    log.exception("exception")
            elif (
                is_hijack
                and "hij_key" in monitor_event
                and monitor_event["initial_redis_hijack_key"]
                != monitor_event["final_redis_hijack_key"]
            ):
                try:
                    outdated_hijack = self.redis.get(
                        monitor_event["initial_redis_hijack_key"]
                    )
                    # outdated hijack, but still a hijack; need key change
                    purge_redis_eph_pers_keys(
                        self.redis,
                        monitor_event["initial_redis_hijack_key"],
                        monitor_event["hij_key"],
                    )
                    # mark in DB only if it is the first time this hijack
                    # was purged (pre-existent in redis)
                    if outdated_hijack:
                        self.mark_outdated(
                            monitor_event["hij_key"],
                            monitor_event["initial_redis_hijack_key"],
                        )
                except Exception:
                    log.exception("exception")
            elif not is_hijack:
                self.gen_implicit_withdrawal(monitor_event)

            self.mark_handled(raw)

            if outdated_hijack:
                try:
                    outdated_hijack = json.loads(outdated_hijack)
                    outdated_hijack["end_tag"] = "outdated"
                    mail_log.info(
                        "{}".format(
                            json.dumps(
                                hijack_log_field_formatter(outdated_hijack),
                                indent=4,
                            )
                        ),
                        extra={
                            "community_annotation": outdated_hijack.get(
                                "community_annotation", "NA"
                            )
                        },
                    )
                    hij_log.info(
                        "{}".format(
                            json.dumps(hijack_log_field_formatter(outdated_hijack))
                        ),
                        extra={
                            "community_annotation": outdated_hijack.get(
                                "community_annotation", "NA"
                            )
                        },
                    )
                except Exception:
                    log.exception("exception")

        elif monitor_event["type"] == "W":
            self.producer.publish(
                {
                    "prefix": monitor_event["prefix"],
                    "peer_asn": monitor_event["peer_asn"],
                    "timestamp": monitor_event["timestamp"],
                    "key": monitor_event["key"],
                },
                exchange=self.update_exchange,
                routing_key="withdraw",
                priority=0,
                serializer="ujson",
            )

    @staticmethod
    def __remove_prepending(seq: List[int]) -> Tuple[List[int], bool]:
        """
        Static method to remove prepending ASes from an AS path.
        """
        last_add = None
        new_seq = []
        for x in seq:
            if last_add != x:
                last_add = x
                new_seq.append(x)

        is_loopy = False
        if len(set(seq)) != len(new_seq):
            is_loopy = True
            # raise Exception('Routing Loop: {}'.format(seq))
        return (new_seq, is_loopy)

    @staticmethod
    def __clean_loops(seq: List[int]) -> List[int]:
        """
        Static method that removes loops from an AS path.
        """
        # use inverse direction to clean loops in the path of the traffic
        seq_inv = seq[::-1]
        new_seq_inv = []
        for x in seq_inv:
            if x not in new_seq_inv:
                new_seq_inv.append(x)
            else:
                x_index = new_seq_inv.index(x)
                new_seq_inv = new_seq_inv[: x_index + 1]
        return new_seq_inv[::-1]

    @staticmethod
    def __clean_as_path(path: List[int]) -> List[int]:
        """
        Static wrapper method for loop and prepending removal.
        """
        (clean_as_path, is_loopy) = Detection.Worker.__remove_prepending(path)
        if is_loopy:
            clean_as_path = Detection.Worker.__clean_loops(clean_as_path)
        return clean_as_path

    def __hijack_dimension_checker_gen(self) -> Callable:
        """
        Generator that returns hijack dimension checking functions.
        """
        yield self.__hijack_prefix_checker_gen
        yield self.__hijack_path_checker_gen
        yield self.__hijack_dplane_checker_gen
        yield self.__hijack_pol_checker_gen

    def __hijack_prefix_checker_gen(self) -> Callable:
        """
        Generator that returns prefix dimension detection functions.
        """
        yield self.detect_prefix_squatting_hijack
        yield self.detect_prefix_subprefix_hijack

    def __hijack_path_checker_gen(self, path_len: int) -> Callable:
        """
        Generator that returns path dimension detection functions.
        """
        if path_len > 0:
            yield self.detect_path_type_0_hijack
        if path_len > 1:
            yield self.detect_path_type_1_hijack
        if path_len > 2:
            yield self.detect_path_type_N_hijack
        yield self.detect_path_type_U_hijack

    def __hijack_dplane_checker_gen(self) -> Callable:
        """
        Generator that returns data plane dimension detection functions.
        """
        yield self.detect_dplane_blackholing_hijack
        yield self.detect_dplane_imposture_hijack
        yield self.detect_dplane_mitm_hijack

    def __hijack_pol_checker_gen(self, path_len: int) -> Callable:
        """
        Generator that returns policy dimension detection functions.
        """
        if path_len > 3:
            yield self.detect_pol_leak_hijack
        yield self.detect_pol_other_hijack

    @exception_handler(log)
    def detect_prefix_squatting_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> str:
        """
        Squatting hijack detection.
        """
        for item in prefix_node["data"]["confs"]:
            # check if there are origin_asns defined (even wildcards)
            if item["origin_asns"]:
                return "-"
        return "Q"

    @exception_handler(log)
    def detect_prefix_subprefix_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> str:
        """
        Subprefix or exact prefix hijack detection.
        """
        mon_prefix = ipaddress.ip_network(monitor_event["prefix"])
        if (
            ipaddress.ip_network(prefix_node["prefix"]).prefixlen
            < mon_prefix.prefixlen
        ):
            return "S"
        return "E"

    @exception_handler(log)
    def detect_path_type_0_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> Tuple[int, str]:
        """
        Origin hijack detection.
        """
        origin_asn = monitor_event["path"][-1]
        for item in prefix_node["data"]["confs"]:
            if origin_asn in item["origin_asns"] or item["origin_asns"] == [-1]:
                return (-1, "-")
        return (origin_asn, "0")

    @exception_handler(log)
    def detect_path_type_1_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> Tuple[int, str]:
        """
        Type-1 hijack detection.
        """
        origin_asn = monitor_event["path"][-1]
        first_neighbor_asn = monitor_event["path"][-2]
        for item in prefix_node["data"]["confs"]:
            # [] or [-1] neighbors means "allow everything"
            if (
                origin_asn in item["origin_asns"] or item["origin_asns"] == [-1]
            ) and (
                (not item["neighbors"])
                or item["neighbors"] == [-1]
                or first_neighbor_asn in item["neighbors"]
            ):
                return (-1, "-")
        return (first_neighbor_asn, "1")

    @exception_handler(log)
    def detect_path_type_N_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> Tuple[int, str]:
        # Placeholder for type-N detection (not supported)
        return (-1, "-")

    @exception_handler(log)
    def detect_path_type_U_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> Tuple[int, str]:
        # Placeholder for type-U detection (not supported)
        return (-1, "-")

    @exception_handler(log)
    def detect_dplane_blackholing_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> str:
        # Placeholder for blackholing detection (not supported)
        return "-"

    @exception_handler(log)
    def detect_dplane_imposture_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> str:
        # Placeholder for imposture detection (not supported)
        return "-"

    @exception_handler(log)
    def detect_dplane_mitm_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> str:
        # Placeholder for mitm detection (not supported)
        return "-"

    @exception_handler(log)
    def detect_pol_leak_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> Tuple[int, str]:
        """
        Route leak hijack detection.
        """
        for item in prefix_node["data"]["confs"]:
            if "no-export" in item["policies"]:
                return (monitor_event["path"][-2], "L")
        return (-1, "-")

    @exception_handler(log)
    def detect_pol_other_hijack(
        self, monitor_event: Dict, prefix_node: Dict, *args, **kwargs
    ) -> Tuple[int, str]:
        # Placeholder for policy violation detection (not supported)
        return (-1, "-")

    def commit_hijack(
        self, monitor_event: Dict, hijacker: int, hij_dimensions: List[str]
    ) -> NoReturn:
        """
        Commits a new hijack or updates an existing one in the database.
        Uses the redis server to store ongoing hijack information
        so as not to stress the DB.
        """
        hij_type = "|".join(hij_dimensions)
        redis_hijack_key = redis_key(monitor_event["prefix"], hijacker, hij_type)

        if "hij_key" in monitor_event:
            monitor_event["final_redis_hijack_key"] = redis_hijack_key

        hijack_value = {
            "prefix": monitor_event["prefix"],
            "hijack_as": hijacker,
            "type": hij_type,
            "time_started": monitor_event["timestamp"],
            "time_last": monitor_event["timestamp"],
            "peers_seen": {monitor_event["peer_asn"]},
            "monitor_keys": {monitor_event["key"]},
            "configured_prefix": monitor_event["matched_prefix"],
            "timestamp_of_config": self.timestamp,
            "end_tag": None,
            "outdated_parent": None,
            "rpki_status": "NA",
        }

        if (
            RPKI_VALIDATOR_ENABLED == "true"
            and self.rtrmanager
            and monitor_event["path"]
        ):
            try:
                asn = monitor_event["path"][-1]
                if "/" in monitor_event["prefix"]:
                    network, netmask = monitor_event["prefix"].split("/")
                # /32 or /128
                else:
                    ip_version = get_ip_version(monitor_event["prefix"])
                    network = monitor_event["prefix"]
                    netmask = 32
                    if ip_version == "v6":
                        netmask = 128
                redis_rpki_asn_prefix_key = "rpki_as{}_p{}".format(
                    asn, monitor_event["prefix"]
                )
                redis_rpki_status = self.redis.get(redis_rpki_asn_prefix_key)
                if not redis_rpki_status:
                    rpki_status = get_rpki_val_result(
                        self.rtrmanager, asn, network, int(netmask)
                    )
                else:
                    rpki_status = redis_rpki_status.decode()
                hijack_value["rpki_status"] = rpki_status
                # the default refresh interval for the RPKI RTR manager
                # is 3600 seconds
                self.redis.set(redis_rpki_asn_prefix_key, rpki_status, ex=3600)
            except Exception:
                log.exception("exception")

        if (
            "hij_key" in monitor_event
            and monitor_event["initial_redis_hijack_key"]
            != monitor_event["final_redis_hijack_key"]
        ):
            hijack_value["outdated_parent"] = monitor_event["hij_key"]

        # identify the number of infected ases
        hijack_value["asns_inf"] = set()
        if hij_dimensions[1] in {"0", "1"}:
            hijack_value["asns_inf"] = set(
                monitor_event["path"][: -(int(hij_dimensions[1]) + 1)]
            )
        elif hij_dimensions[3] == "L":
            hijack_value["asns_inf"] = set(monitor_event["path"][:-2])
        # assume the worst-case scenario of a type-2 hijack
        elif len(monitor_event["path"]) > 2:
            hijack_value["asns_inf"] = set(monitor_event["path"][:-3])

        # make the following operation atomic using blpop (blocking)
        # first, make sure that the semaphore is initialized
        if self.redis.getset("{}token_active".format(redis_hijack_key), 1) != b"1":
            redis_pipeline = self.redis.pipeline()
            redis_pipeline.lpush("{}token".format(redis_hijack_key), "token")
            # lock, by extracting the token (other processes that access
            # it at the same time will be blocked)
            # attention: it is important that this command is batched in the
            # pipeline since the db may async delete the token
            redis_pipeline.blpop("{}token".format(redis_hijack_key))
            redis_pipeline.execute()
        else:
            # lock, by extracting the token (other processes that access it
            # at the same time will be blocked)
            token = self.redis.blpop("{}token".format(redis_hijack_key), timeout=60)
            # if timeout after 60 seconds, return without hijack alert
            # since this means that sth has been purged in the meanwhile
            # (e.g., due to an outdated hijack in another instance; a detector
            # cannot be stuck for a whole minute on a single hijack BGP update)
            if not token:
                log.info(
                    "Monitor event {} encountered redis token timeout and will "
                    "be cleared as benign for hijack {}".format(
                        str(monitor_event), redis_hijack_key
                    )
                )
                return

        # proceed now that we have clearance
        redis_pipeline = self.redis.pipeline()
        try:
            result = self.redis.get(redis_hijack_key)
            if result:
                result = json.loads(result)
                result["time_started"] = min(
                    result["time_started"], hijack_value["time_started"]
                )
                result["time_last"] = max(
                    result["time_last"], hijack_value["time_last"]
                )
                result["peers_seen"] = set(result["peers_seen"])
                result["peers_seen"].update(hijack_value["peers_seen"])
                result["asns_inf"] = set(result["asns_inf"])
                result["asns_inf"].update(hijack_value["asns_inf"])
                # no update since db already knows!
                result["monitor_keys"] = hijack_value["monitor_keys"]
                self.comm_annotate_hijack(monitor_event, result)
                result["outdated_parent"] = hijack_value["outdated_parent"]
                result["bgpupdate_keys"] = set(result["bgpupdate_keys"])
                result["bgpupdate_keys"].add(monitor_event["key"])
                result["rpki_status"] = hijack_value["rpki_status"]
            else:
                hijack_value["time_detected"] = time.time()
                hijack_value["key"] = get_hash(
                    [
                        monitor_event["prefix"],
                        hijacker,
                        hij_type,
                        "{0:.6f}".format(hijack_value["time_detected"]),
                    ]
                )
                hijack_value["bgpupdate_keys"] = {monitor_event["key"]}
                redis_pipeline.sadd("persistent-keys", hijack_value["key"])
                result = hijack_value
                self.comm_annotate_hijack(monitor_event, result)
                mail_log.info(
                    "{}".format(
                        json.dumps(hijack_log_field_formatter(result), indent=4)
                    ),
                    extra={
                        "community_annotation": result.get(
                            "community_annotation", "NA"
                        )
                    },
                )
            redis_pipeline.set(redis_hijack_key, json.dumps(result))

            # store the origin, neighbor combination for this hijack BGP update
            origin = None
            neighbor = None
            if monitor_event["path"]:
                origin = monitor_event["path"][-1]
            if len(monitor_event["path"]) > 1:
                neighbor = monitor_event["path"][-2]
            redis_pipeline.sadd(
                "hij_orig_neighb_{}".format(redis_hijack_key),
                "{}_{}".format(origin, neighbor),
            )

            # store the prefix and peer ASN for this hijack BGP update
            redis_pipeline.sadd(
                "prefix_{}_peer_{}_hijacks".format(
                    monitor_event["prefix"], monitor_event["peer_asn"]
                ),
                redis_hijack_key,
            )
            redis_pipeline.sadd(
                "hijack_{}_prefixes_peers".format(redis_hijack_key),
                "{}_{}".format(monitor_event["prefix"], monitor_event["peer_asn"]),
            )
        except Exception:
            log.exception("exception")
        finally:
            # execute whatever has been accumulated in redis till now
            redis_pipeline.execute()

            # publish hijack
            self.publish_hijack_fun(result, redis_hijack_key)

            hij_log.info(
                "{}".format(json.dumps(hijack_log_field_formatter(result))),
                extra={
                    "community_annotation": result.get("community_annotation", "NA")
                },
            )

            # unlock, by pushing back the token (at most one other process
            # waiting will be unlocked)
            redis_pipeline = self.redis.pipeline()
            redis_pipeline.set("{}token_active".format(redis_hijack_key), 1)
            redis_pipeline.lpush("{}token".format(redis_hijack_key), "token")
            redis_pipeline.execute()

    def mark_handled(self, monitor_event: Dict) -> NoReturn:
        """
        Marks a bgp update as handled on the database.
        """
        # log.debug('{}'.format(monitor_event['key']))
        self.producer.publish(
            monitor_event["key"],
            exchange=self.handled_exchange,
            routing_key="update",
            priority=1,
            serializer="ujson",
        )

    def mark_outdated(self, hij_key: str, redis_hij_key: str) -> NoReturn:
        """
        Marks a hijack as outdated on the database.
        """
        # log.debug('{}'.format(hij_key))
        msg = {"persistent_hijack_key": hij_key, "redis_hijack_key": redis_hij_key}
        self.producer.publish(
            msg,
            exchange=self.hijack_exchange,
            routing_key="outdate",
            priority=1,
            serializer="ujson",
        )

    def publish_hijack_result_production(self, result, redis_hijack_key):
        self.producer.publish(
            result,
            exchange=self.hijack_hashing,
            routing_key=redis_hijack_key,
            priority=0,
            serializer="ujson",
        )

    def publish_hijack_result_test(self, result, redis_hijack_key):
        self.producer.publish(
            result,
            exchange=self.hijack_exchange,
            routing_key="update",
            priority=0,
            serializer="ujson",
        )
        self.producer.publish(
            result,
            exchange=self.hijack_hashing,
            routing_key=redis_hijack_key,
            priority=0,
            serializer="ujson",
        )

    def gen_implicit_withdrawal(self, monitor_event: Dict) -> NoReturn:
        """
        Checks if a benign BGP update should trigger an implicit withdrawal.
        """
        # log.debug('{}'.format(monitor_event['key']))
        prefix = monitor_event["prefix"]
        peer_asn = monitor_event["peer_asn"]
        if self.redis.exists("prefix_{}_peer_{}_hijacks".format(prefix, peer_asn)):
            # generate implicit withdrawal
            withdraw_msg = {
                "service": "implicit-withdrawal",
                "type": "W",
                "prefix": prefix,
                "path": [],
                "orig_path": {"triggering_bgp_update": monitor_event},
                "communities": [],
                "timestamp": monitor_event["timestamp"] + 1,
                "peer_asn": peer_asn,
            }
            key_generator(withdraw_msg)
            self.producer.publish(
                withdraw_msg,
                exchange=self.update_exchange,
                routing_key="update",
                serializer="ujson",
            )

    def comm_annotate_hijack(self, monitor_event: Dict, hijack: Dict) -> NoReturn:
        """
        Annotates a hijack based on community checks
        (modifies the "community_annotation" field in-place).
        """
        try:
            if hijack.get("community_annotation", "NA") in [None, "", "NA"]:
                hijack["community_annotation"] = "NA"
            bgp_update_communities = set()
            for comm_as_value in monitor_event["communities"]:
                community = "{}:{}".format(comm_as_value[0], comm_as_value[1])
                bgp_update_communities.add(community)

            ip_version = get_ip_version(monitor_event["prefix"])
            if monitor_event["prefix"] in self.prefix_tree[ip_version]:
                prefix_node = self.prefix_tree[ip_version][monitor_event["prefix"]]
                for item in prefix_node["data"]["confs"]:
                    annotations = []
                    for annotation_element in item.get("community_annotations", []):
                        for annotation in annotation_element:
                            annotations.append(annotation)
                    for annotation_element in item.get("community_annotations", []):
                        for annotation in annotation_element:
                            for community_rule in annotation_element[annotation]:
                                in_communities = set(community_rule.get("in", []))
                                out_communities = set(community_rule.get("out", []))
                                if (
                                    in_communities <= bgp_update_communities
                                    and out_communities.isdisjoint(
                                        bgp_update_communities
                                    )
                                ):
                                    if hijack["community_annotation"] == "NA":
                                        hijack["community_annotation"] = annotation
                                    elif annotations.index(
                                        annotation
                                    ) < annotations.index(
                                        hijack["community_annotation"]
                                    ):
                                        hijack["community_annotation"] = annotation
        except Exception:
            log.exception("exception")
def test(self):
    """
    Loads a test file that includes crafted bgp updates
    as input and expected messages as output.
    """
    RABBITMQ_USER = os.getenv("RABBITMQ_USER", "guest")
    RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "guest")
    RABBITMQ_HOST = os.getenv("RABBITMQ_HOST", "rabbitmq")
    RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672)
    RABBITMQ_URI = "amqp://{}:{}@{}:{}//".format(
        RABBITMQ_USER, RABBITMQ_PASS, RABBITMQ_HOST, RABBITMQ_PORT
    )
    RPKI_VALIDATOR_HOST = os.getenv("RPKI_VALIDATOR_HOST", "routinator")
    RPKI_VALIDATOR_PORT = os.getenv("RPKI_VALIDATOR_PORT", 3323)

    # check RPKI RTR manager connectivity
    while True:
        try:
            rtrmanager = RTRManager(RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT)
            rtrmanager.start()
            print(
                "Connected to RPKI VALIDATOR '{}:{}'".format(
                    RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                )
            )
            rtrmanager.stop()
            break
        except Exception:
            print(
                "Could not connect to RPKI VALIDATOR '{}:{}'".format(
                    RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                )
            )
            print("Retrying in 30 seconds...")
            time.sleep(30)

    # exchanges
    self.update_exchange = Exchange(
        "bgp-update", type="direct", durable=False, delivery_mode=1
    )
    self.hijack_exchange = Exchange(
        "hijack-update", type="direct", durable=False, delivery_mode=1
    )
    self.pg_amq_bridge = Exchange(
        "amq.direct", type="direct", durable=True, delivery_mode=1
    )

    # queues
    self.update_queue = Queue(
        "detection-testing",
        exchange=self.pg_amq_bridge,
        routing_key="update-update",
        durable=False,
        auto_delete=True,
        max_priority=1,
        consumer_arguments={"x-priority": 1},
    )
    self.hijack_queue = Queue(
        "hijack-testing",
        exchange=self.hijack_exchange,
        routing_key="update",
        durable=False,
        auto_delete=True,
        max_priority=1,
        consumer_arguments={"x-priority": 1},
    )
    self.hijack_db_queue = Queue(
        "hijack-db-testing",
        exchange=self.pg_amq_bridge,
        routing_key="hijack-update",
        durable=False,
        auto_delete=True,
        max_priority=1,
        consumer_arguments={"x-priority": 1},
    )

    with Connection(RABBITMQ_URI) as connection:
        # print("Waiting for pg_amq exchange..")
        # Tester.waitExchange(self.pg_amq_bridge, connection.default_channel)
        # print("Waiting for hijack exchange..")
        # Tester.waitExchange(self.hijack_exchange, connection.default_channel)
        # print("Waiting for update exchange..")
        # Tester.waitExchange(self.update_exchange, connection.default_channel)

        # wait for dependency data workers to start
        wait_data_worker_dependencies(DATA_WORKER_DEPENDENCIES)

        while True:
            try:
                r = requests.get(
                    "http://{}:{}/config".format(CONFIGURATION_HOST, REST_PORT)
                )
                result = r.json()
                assert len(result) > 0
                break
            except Exception:
                print("exception")
                time.sleep(1)
        time.sleep(1)

        for testfile in os.listdir("testfiles/"):
            self.clear()
            self.curr_test = testfile
            self.messages = {}

            # load test
            with open("testfiles/{}".format(testfile), "r") as f:
                self.messages = json.load(f)

            send_len = len(self.messages)

            with nested(
                connection.Consumer(
                    self.hijack_queue,
                    callbacks=[self.validate_message],
                    accept=["ujson"],
                ),
                connection.Consumer(
                    self.update_queue,
                    callbacks=[self.validate_message],
                    accept=["ujson", "txtjson"],
                ),
                connection.Consumer(
                    self.hijack_db_queue,
                    callbacks=[self.validate_message],
                    accept=["ujson", "txtjson"],
                ),
            ):
                send_cnt = 0
                # send and validate all messages in the messages.json file
                while send_cnt < send_len:
                    self.curr_idx = send_cnt
                    self.send_next_message(connection)
                    send_cnt += 1
                    # sleep until we receive all expected messages
                    while self.curr_idx != send_cnt:
                        time.sleep(0.1)
                        try:
                            connection.drain_events(timeout=10)
                        except socket.timeout:
                            # avoid infinite loop by timeout
                            assert False, "Consumer timeout"
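# The test above enters several consumers via a nested(...) helper. Python 3
# removed contextlib.nested, so a common stand-in (and a plausible reading of
# the helper assumed here) builds on contextlib.ExitStack -- a hedged sketch:
from contextlib import ExitStack, contextmanager

@contextmanager
def nested(*managers):
    """Enter all given context managers and yield them as a tuple."""
    with ExitStack() as stack:
        yield tuple(stack.enter_context(m) for m in managers)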
class DetectionDataWorker(ConsumerProducerMixin):
    """
    RabbitMQ Consumer/Producer for the detection Service.
    """

    def __init__(
        self, connection: Connection, shared_memory_manager_dict: Dict
    ) -> NoReturn:
        self.connection = connection
        self.shared_memory_manager_dict = shared_memory_manager_dict
        self.rtrmanager = None

        # wait for other needed data workers to start
        wait_data_worker_dependencies(DATA_WORKER_DEPENDENCIES)

        # EXCHANGES
        self.update_exchange = create_exchange("bgp-update", connection, declare=True)
        self.hijack_exchange = create_exchange(
            "hijack-update", connection, declare=True
        )
        self.hijack_hashing = create_exchange(
            "hijack-hashing", connection, "x-consistent-hash", declare=True
        )
        self.handled_exchange = create_exchange(
            "handled-update", connection, declare=True
        )
        self.hijack_notification_exchange = create_exchange(
            "hijack-notification", connection, declare=True
        )
        self.command_exchange = create_exchange("command", connection, declare=True)

        # QUEUES
        self.update_queue = create_queue(
            SERVICE_NAME,
            exchange=self.update_exchange,
            routing_key="stored-update-with-prefix-node",
            priority=1,
        )
        self.hijack_ongoing_queue = create_queue(
            SERVICE_NAME,
            exchange=self.hijack_exchange,
            routing_key="ongoing-with-prefix-node",
            priority=1,
        )
        self.stop_queue = create_queue(
            "{}-{}".format(SERVICE_NAME, uuid()),
            exchange=self.command_exchange,
            routing_key="stop-{}".format(SERVICE_NAME),
            priority=1,
        )

        setattr(self, "publish_hijack_fun", self.publish_hijack_result_production)
        if TEST_ENV == "true":
            setattr(self, "publish_hijack_fun", self.publish_hijack_result_test)

        self.redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
        ping_redis(self.redis)

        if RPKI_VALIDATOR_ENABLED == "true":
            from rtrlib import RTRManager

            while True:
                try:
                    self.rtrmanager = RTRManager(
                        RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                    )
                    self.rtrmanager.start()
                    log.info(
                        "Connected to RPKI VALIDATOR '{}:{}'".format(
                            RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                        )
                    )
                    break
                except Exception:
                    log.info(
                        "Could not connect to RPKI VALIDATOR '{}:{}'".format(
                            RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT
                        )
                    )
                    log.info("Retrying RTR connection in 30 seconds...")
                    time.sleep(30)

        log.info("data worker initiated")

    def get_consumers(
        self, Consumer: Consumer, channel: Connection
    ) -> List[Consumer]:
        return [
            Consumer(
                queues=[self.update_queue],
                on_message=self.handle_bgp_update,
                prefetch_count=100,
                accept=["ujson"],
            ),
            Consumer(
                queues=[self.hijack_ongoing_queue],
                on_message=self.handle_ongoing_hijacks,
                prefetch_count=10,
                accept=["ujson"],
            ),
            Consumer(
                queues=[self.stop_queue],
                on_message=self.stop_consumer_loop,
                prefetch_count=100,
                accept=["ujson"],
            ),
        ]

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        self.producer.publish(
            "",
            exchange=self.hijack_exchange,
            routing_key="ongoing-request",
            priority=1,
            serializer="ujson",
        )

    def handle_ongoing_hijacks(self, message: Dict) -> NoReturn:
        """
        Handles ongoing hijacks from the database.
        """
        log.debug("{} ongoing hijack events".format(len(message.payload)))
        message.ack()
        for update in message.payload:
            self.handle_bgp_update(update)

    def handle_bgp_update(self, message: Dict) -> NoReturn:
        """
        Callback function that runs the main logic of
        detecting hijacks for every bgp update.
        """
        if isinstance(message, dict):
            monitor_event = message
        else:
            message.ack()
            monitor_event = message.payload
            monitor_event["path"] = monitor_event["as_path"]
            monitor_event["timestamp"] = datetime(
                *map(int, re.findall(r"\d+", monitor_event["timestamp"]))
            ).timestamp()

        raw = monitor_event.copy()

        # mark the initial redis hijack key since it may change upon
        # outdated checks
        if "hij_key" in monitor_event:
            monitor_event["initial_redis_hijack_key"] = redis_key(
                monitor_event["prefix"],
                monitor_event["hijack_as"],
                monitor_event["hij_type"],
            )

        is_hijack = False

        if monitor_event["type"] == "A":
            # save the original path as-is to preserve patterns (if needed)
            monitor_event["orig_path"] = monitor_event["path"][::]
            monitor_event["path"] = clean_as_path(monitor_event["path"])

            if "prefix_node" in monitor_event:
                prefix_node = monitor_event["prefix_node"]
                monitor_event["matched_prefix"] = prefix_node["prefix"]

                final_hij_dimensions = [
                    "-",
                    "-",
                    "-",
                    "-",
                ]  # prefix, path, dplane, policy
                for prefix_node_conf in prefix_node["data"]["confs"]:
                    try:
                        path_hijacker = -1
                        pol_hijacker = -1
                        hij_dimensions = [
                            "-",
                            "-",
                            "-",
                            "-",
                        ]  # prefix, path, dplane, policy
                        hij_dimension_index = 0
                        for func_dim in self.__hijack_dimension_checker_gen():
                            if hij_dimension_index == 0:
                                # prefix dimension
                                for func_pref in func_dim():
                                    hij_dimensions[hij_dimension_index] = func_pref(
                                        monitor_event, prefix_node, prefix_node_conf
                                    )
                                    if hij_dimensions[hij_dimension_index] != "-":
                                        break
                            elif hij_dimension_index == 1:
                                # path type dimension
                                for func_path in func_dim(
                                    len(monitor_event["path"])
                                ):
                                    (
                                        path_hijacker,
                                        hij_dimensions[hij_dimension_index],
                                    ) = func_path(
                                        monitor_event, prefix_node, prefix_node_conf
                                    )
                                    if hij_dimensions[hij_dimension_index] != "-":
                                        break
                            elif hij_dimension_index == 2:
                                # data plane dimension
                                for func_dplane in func_dim():
                                    hij_dimensions[
                                        hij_dimension_index
                                    ] = func_dplane(
                                        monitor_event, prefix_node, prefix_node_conf
                                    )
                                    if hij_dimensions[hij_dimension_index] != "-":
                                        break
                            elif hij_dimension_index == 3:
                                # policy dimension
                                for func_pol in func_dim(
                                    len(monitor_event["path"])
                                ):
                                    (
                                        pol_hijacker,
                                        hij_dimensions[hij_dimension_index],
                                    ) = func_pol(
                                        monitor_event, prefix_node, prefix_node_conf
                                    )
                                    if hij_dimensions[hij_dimension_index] != "-":
                                        break
                            hij_dimension_index += 1
                        # check if dimension combination in hijack combinations
                        # for this rule, but do not commit hijack yet
                        # (record the last possible hijack issue)
                        if hij_dimensions in HIJACK_DIM_COMBINATIONS:
                            final_hij_dimensions = hij_dimensions[::]
                            is_hijack = True
                            # show pol hijacker only if the path hijacker
                            # is uncertain
                            hijacker = path_hijacker
                            if path_hijacker == -1 and pol_hijacker != -1:
                                hijacker = pol_hijacker
                        # benign rule matching beats hijack detection
                        else:
                            is_hijack = False
                            break
                    except Exception:
                        log.exception("exception")

                if is_hijack:
                    try:
                        hij_dimensions = final_hij_dimensions
                        self.commit_hijack(monitor_event, hijacker, hij_dimensions)
                    except Exception:
                        log.exception("exception")
            else:
                if "hij_key" not in monitor_event:
                    log.error(
                        "unconfigured BGP update received '{}'".format(monitor_event)
                    )
                else:
                    is_hijack = False

            outdated_hijack = None
            if not is_hijack and "hij_key" in monitor_event:
                try:
                    # outdated hijack, benign from now on
                    redis_hijack_key = redis_key(
                        monitor_event["prefix"],
                        monitor_event["hijack_as"],
                        monitor_event["hij_type"],
                    )
                    outdated_hijack = self.redis.get(redis_hijack_key)
                    purge_redis_eph_pers_keys(
                        self.redis, redis_hijack_key, monitor_event["hij_key"]
                    )
                    # mark in DB only if it is the first time this hijack
                    # was purged (pre-existent in redis)
                    if outdated_hijack:
                        self.mark_outdated(
                            monitor_event["hij_key"], redis_hijack_key
                        )
                except Exception:
                    log.exception("exception")
            elif (
                is_hijack
                and "hij_key" in monitor_event
                and monitor_event["initial_redis_hijack_key"]
                != monitor_event["final_redis_hijack_key"]
            ):
                try:
                    outdated_hijack = self.redis.get(
                        monitor_event["initial_redis_hijack_key"]
                    )
                    # outdated hijack, but still a hijack; need key change
                    purge_redis_eph_pers_keys(
                        self.redis,
                        monitor_event["initial_redis_hijack_key"],
                        monitor_event["hij_key"],
                    )
                    # mark in DB only if it is the first time this hijack
                    # was purged (pre-existent in redis)
                    if outdated_hijack:
                        self.mark_outdated(
                            monitor_event["hij_key"],
                            monitor_event["initial_redis_hijack_key"],
                        )
                except Exception:
                    log.exception("exception")
            elif not is_hijack:
                self.gen_implicit_withdrawal(monitor_event)

            self.mark_handled(raw)

            if outdated_hijack:
                try:
                    outdated_hijack = classic_json.loads(
                        outdated_hijack.decode("utf-8")
                    )
                    outdated_hijack["end_tag"] = "outdated"
                    self.producer.publish(
                        outdated_hijack,
                        exchange=self.hijack_notification_exchange,
                        routing_key="mail-log",
                        retry=False,
                        priority=1,
                        serializer="ujson",
                    )
                    self.producer.publish(
                        outdated_hijack,
                        exchange=self.hijack_notification_exchange,
                        routing_key="hij-log",
                        retry=False,
                        priority=1,
                        serializer="ujson",
                    )
                except Exception:
                    log.exception("exception")

        elif monitor_event["type"] == "W":
            self.producer.publish(
                {
                    "prefix": monitor_event["prefix"],
                    "peer_asn": monitor_event["peer_asn"],
                    "timestamp": monitor_event["timestamp"],
                    "key": monitor_event["key"],
                },
                exchange=self.update_exchange,
                routing_key="withdraw",
                priority=0,
                serializer="ujson",
            )

    def __hijack_dimension_checker_gen(self) -> Callable:
        """
        Generator that returns hijack dimension checking functions.
        """
        yield self.__hijack_prefix_checker_gen
        yield self.__hijack_path_checker_gen
        yield self.__hijack_dplane_checker_gen
        yield self.__hijack_pol_checker_gen

    def __hijack_prefix_checker_gen(self) -> Callable:
        """
        Generator that returns prefix dimension detection functions.
        """
        yield self.detect_prefix_squatting_hijack
        yield self.detect_prefix_subprefix_hijack

    def __hijack_path_checker_gen(self, path_len: int) -> Callable:
        """
        Generator that returns path dimension detection functions.
        """
        if path_len > 0:
            yield self.detect_path_type_0_hijack
        if path_len > 1:
            yield self.detect_path_type_1_hijack
            yield self.detect_path_type_P_hijack
        if path_len > 2:
            yield self.detect_path_type_N_hijack
        yield self.detect_path_type_U_hijack

    def __hijack_dplane_checker_gen(self) -> Callable:
        """
        Generator that returns data plane dimension detection functions.
        """
        yield self.detect_dplane_blackholing_hijack
        yield self.detect_dplane_imposture_hijack
        yield self.detect_dplane_mitm_hijack

    def __hijack_pol_checker_gen(self, path_len: int) -> Callable:
        """
        Generator that returns policy dimension detection functions.
        """
        if path_len > 3:
            yield self.detect_pol_leak_hijack
        yield self.detect_pol_other_hijack

    @exception_handler(log)
    def detect_prefix_squatting_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> str:
        """
        Squatting hijack detection.
        """
        # check if there are origin_asns defined (even wildcards)
        if prefix_node_conf["origin_asns"]:
            return "-"
        return "Q"

    @exception_handler(log)
    def detect_prefix_subprefix_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> str:
        """
        Subprefix or exact prefix hijack detection.
        """
        mon_prefix = ipaddress.ip_network(monitor_event["prefix"])
        if (
            ipaddress.ip_network(prefix_node["prefix"]).prefixlen
            < mon_prefix.prefixlen
        ):
            return "S"
        return "E"

    @exception_handler(log)
    def detect_path_type_0_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> Tuple[int, str]:
        """
        Origin hijack detection.
        """
        origin_asn = monitor_event["path"][-1]
        if (
            origin_asn in prefix_node_conf["origin_asns"]
            or prefix_node_conf["origin_asns"] == [-1]
        ):
            return -1, "-"
        return origin_asn, "0"

    @exception_handler(log)
    def detect_path_type_1_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> Tuple[int, str]:
        """
        Type-1 hijack detection.
        """
        origin_asn = monitor_event["path"][-1]
        first_neighbor_asn = monitor_event["path"][-2]
        # [] or [-1] neighbors means "allow everything"
        if (
            origin_asn in prefix_node_conf["origin_asns"]
            or prefix_node_conf["origin_asns"] == [-1]
        ) and (
            (not prefix_node_conf["neighbors"])
            or prefix_node_conf["neighbors"] == [-1]
            or first_neighbor_asn in prefix_node_conf["neighbors"]
        ):
            return -1, "-"
        return first_neighbor_asn, "1"

    @exception_handler(log)
    def detect_path_type_P_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> Tuple[int, str]:
        """
        Type-P hijack detection. In case there is a type-P hijack
        (i.e., no pattern matches an incoming BGP update), it returns
        a tuple with the potential hijacker AS plus the hijack type (P).
        The potential hijacker is the first AS that differs in the most
        specific (best matching) pattern, starting from the origin AS.
        """
        if "orig_path" not in monitor_event:
            return -1, "-"
        orig_path = monitor_event["orig_path"]
        pattern_matched = False
        pattern_hijacker = -1
        best_match_length = 0
        if len(prefix_node_conf["prepend_seq"]) > 0:
            for conf_seq in prefix_node_conf["prepend_seq"]:
                if len(orig_path) >= len(conf_seq) + 1:
                    # isolate the monitor event pattern that should be matched
                    # to the configured pattern (excluding the origin, which is
                    # the very first hop of the incoming AS-path)
                    monitor_event_seq = orig_path[
                        len(orig_path) - len(conf_seq) - 1 : -1
                    ]
                    if monitor_event_seq == conf_seq:
                        # patterns match, break (no hijack of type P)
                        pattern_matched = True
                        break
                    else:
                        # calculate the actual differences in the current
                        # pattern; this creates a list of elements with
                        # values 0 on matched elements and !0 otherwise
                        pattern_diffs = [
                            observed_as - conf_as
                            for observed_as, conf_as in zip(
                                monitor_event_seq, conf_seq
                            )
                        ]
                        this_best_match_length = 0
                        # after reversing the pattern difference sequence
                        # (i.e., starting with the origin), find the greatest
                        # length of consecutive 0s (non-differences/matches)
                        for diff in pattern_diffs[::-1]:
                            if diff == 0:
                                # match found, continue increasing the
                                # best match length
                                this_best_match_length += 1
                            else:
                                # first difference, break here and
                                # register the length
                                break
                        # update the best matching length over all
                        # patterns found till now
                        best_match_length = max(
                            best_match_length, this_best_match_length
                        )
        # no hijack (either pattern matching achieved or no configured
        # pattern provided)
        if len(prefix_node_conf["prepend_seq"]) == 0 or pattern_matched:
            return -1, "-"
        # the hijacker is the first AS that breaks the most specific
        # (best matching) pattern
        pattern_hijacker = orig_path[len(orig_path) - best_match_length - 2]
        return pattern_hijacker, "P"

    @exception_handler(log)
    def detect_path_type_N_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> Tuple[int, str]:
        # Placeholder for type-N detection (not supported)
        return -1, "-"

    @exception_handler(log)
    def detect_path_type_U_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> Tuple[int, str]:
        # Placeholder for type-U detection (not supported)
        return -1, "-"

    @exception_handler(log)
    def detect_dplane_blackholing_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> str:
        # Placeholder for blackholing detection (not supported)
        return "-"

    @exception_handler(log)
    def detect_dplane_imposture_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> str:
        # Placeholder for imposture detection (not supported)
        return "-"

    @exception_handler(log)
    def detect_dplane_mitm_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> str:
        # Placeholder for mitm detection (not supported)
        return "-"

    @exception_handler(log)
    def detect_pol_leak_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> Tuple[int, str]:
        """
        Route leak hijack detection.
        """
        if "no-export" in prefix_node_conf["policies"]:
            return monitor_event["path"][-2], "L"
        return -1, "-"

    @exception_handler(log)
    def detect_pol_other_hijack(
        self,
        monitor_event: Dict,
        prefix_node: Dict,
        prefix_node_conf: Dict,
        *args,
        **kwargs
    ) -> Tuple[int, str]:
        # Placeholder for policy violation detection (not supported)
        return -1, "-"

    def commit_hijack(
        self, monitor_event: Dict, hijacker: int, hij_dimensions: List[str]
    ) -> NoReturn:
        """
        Commits a new hijack or updates an existing one in the database.
        Uses the redis server to store ongoing hijack information
        so as not to stress the DB.
        """
        hij_type = "|".join(hij_dimensions)
        redis_hijack_key = redis_key(monitor_event["prefix"], hijacker, hij_type)

        if "hij_key" in monitor_event:
            monitor_event["final_redis_hijack_key"] = redis_hijack_key

        hijack_value = {
            "prefix": monitor_event["prefix"],
            "hijack_as": hijacker,
            "type": hij_type,
            "time_started": monitor_event["timestamp"],
            "time_last": monitor_event["timestamp"],
            "peers_seen": {monitor_event["peer_asn"]},
            "monitor_keys": {monitor_event["key"]},
            "configured_prefix": monitor_event["matched_prefix"],
            "timestamp_of_config": monitor_event["prefix_node"]["timestamp"],
            "end_tag": None,
            "outdated_parent": None,
            "rpki_status": "NA",
        }

        if (
            RPKI_VALIDATOR_ENABLED == "true"
            and self.rtrmanager
            and monitor_event["path"]
        ):
            try:
                asn = monitor_event["path"][-1]
                if "/" in monitor_event["prefix"]:
                    network, netmask = monitor_event["prefix"].split("/")
                # /32 or /128
                else:
                    ip_version = get_ip_version(monitor_event["prefix"])
                    network = monitor_event["prefix"]
                    netmask = 32
                    if ip_version == "v6":
                        netmask = 128
                redis_rpki_asn_prefix_key = "rpki_as{}_p{}".format(
                    asn, monitor_event["prefix"]
                )
                redis_rpki_status = self.redis.get(redis_rpki_asn_prefix_key)
                if not redis_rpki_status:
                    rpki_status = get_rpki_val_result(
                        self.rtrmanager, asn, network, int(netmask)
                    )
                else:
                    rpki_status = redis_rpki_status.decode("utf-8")
                hijack_value["rpki_status"] = rpki_status
                # the default refresh interval for the RPKI RTR manager
                # is 3600 seconds
                self.redis.set(redis_rpki_asn_prefix_key, rpki_status, ex=3600)
            except Exception:
                log.exception("exception")

        if (
            "hij_key" in monitor_event
            and monitor_event["initial_redis_hijack_key"]
            != monitor_event["final_redis_hijack_key"]
        ):
            hijack_value["outdated_parent"] = monitor_event["hij_key"]

        # identify the number of infected ases
        hijack_value["asns_inf"] = set()
        if hij_dimensions[1] in {"0", "1"}:
            hijack_value["asns_inf"] = set(
                monitor_event["path"][: -(int(hij_dimensions[1]) + 1)]
            )
        elif hij_dimensions[3] == "L":
            hijack_value["asns_inf"] = set(monitor_event["path"][:-2])
        # assume the worst-case scenario of a type-2 hijack
        elif len(monitor_event["path"]) > 2:
            hijack_value["asns_inf"] = set(monitor_event["path"][:-3])

        # make the following operation atomic using blpop (blocking)
        # first, make sure that the semaphore is initialized
        if self.redis.getset("{}token_active".format(redis_hijack_key), 1) != b"1":
            redis_pipeline = self.redis.pipeline()
            redis_pipeline.lpush("{}token".format(redis_hijack_key), "token")
            # lock, by extracting the token (other processes that access
            # it at the same time will be blocked)
            # attention: it is important that this command is batched in the
            # pipeline since the db may async delete the token
            redis_pipeline.blpop("{}token".format(redis_hijack_key))
            redis_pipeline.execute()
        else:
            # lock, by extracting the token (other processes that access it
            # at the same time will be blocked)
            token = self.redis.blpop("{}token".format(redis_hijack_key), timeout=60)
            # if timeout after 60 seconds, return without hijack alert
            # since this means that sth has been purged in the meanwhile
            # (e.g., due to an outdated hijack in another instance; a detector
            # cannot be stuck for a whole minute on a single hijack BGP update)
            if not token:
                log.info(
                    "Monitor event {} encountered redis token timeout and will "
                    "be cleared as benign for hijack {}".format(
                        str(monitor_event), redis_hijack_key
                    )
                )
                return

        # proceed now that we have clearance
        redis_pipeline = self.redis.pipeline()
        try:
            result = self.redis.get(redis_hijack_key)
            if result:
                result = classic_json.loads(result.decode("utf-8"))
                result["time_started"] = min(
                    result["time_started"], hijack_value["time_started"]
                )
                result["time_last"] = max(
                    result["time_last"], hijack_value["time_last"]
                )
                result["peers_seen"] = set(result["peers_seen"])
                result["peers_seen"].update(hijack_value["peers_seen"])
                result["asns_inf"] = set(result["asns_inf"])
                result["asns_inf"].update(hijack_value["asns_inf"])
                # no update since db already knows!
                result["monitor_keys"] = hijack_value["monitor_keys"]
                self.comm_annotate_hijack(monitor_event, result)
                result["outdated_parent"] = hijack_value["outdated_parent"]
                result["bgpupdate_keys"] = set(result["bgpupdate_keys"])
                result["bgpupdate_keys"].add(monitor_event["key"])
                result["rpki_status"] = hijack_value["rpki_status"]
            else:
                hijack_value["time_detected"] = time.time()
                hijack_value["key"] = get_hash(
                    [
                        monitor_event["prefix"],
                        hijacker,
                        hij_type,
                        "{0:.6f}".format(hijack_value["time_detected"]),
                    ]
                )
                hijack_value["bgpupdate_keys"] = {monitor_event["key"]}
                redis_pipeline.sadd("persistent-keys", hijack_value["key"])
                result = hijack_value
                self.comm_annotate_hijack(monitor_event, result)
                self.producer.publish(
                    result,
                    exchange=self.hijack_notification_exchange,
                    routing_key="mail-log",
                    retry=False,
                    priority=1,
                    serializer="ujson",
                )
            redis_pipeline.set(redis_hijack_key, json.dumps(result))

            # store the origin, neighbor combination for this hijack BGP update
            origin = None
            neighbor = None
            if monitor_event["path"]:
                origin = monitor_event["path"][-1]
            if len(monitor_event["path"]) > 1:
                neighbor = monitor_event["path"][-2]
            redis_pipeline.sadd(
                "hij_orig_neighb_{}".format(redis_hijack_key),
                "{}_{}".format(origin, neighbor),
            )

            # store the prefix and peer ASN for this hijack BGP update
            redis_pipeline.sadd(
                "prefix_{}_peer_{}_hijacks".format(
                    monitor_event["prefix"], monitor_event["peer_asn"]
                ),
                redis_hijack_key,
            )
            redis_pipeline.sadd(
                "hijack_{}_prefixes_peers".format(redis_hijack_key),
                "{}_{}".format(monitor_event["prefix"], monitor_event["peer_asn"]),
            )
        except Exception:
            log.exception("exception")
        finally:
            # execute whatever has been accumulated in redis till now
            redis_pipeline.execute()

            # publish hijack
            self.publish_hijack_fun(result, redis_hijack_key)

            self.producer.publish(
                result,
                exchange=self.hijack_notification_exchange,
                routing_key="hij-log",
                retry=False,
                priority=1,
                serializer="ujson",
            )

            # unlock, by pushing back the token (at most one other process
            # waiting will be unlocked)
            redis_pipeline = self.redis.pipeline()
            redis_pipeline.set("{}token_active".format(redis_hijack_key), 1)
            redis_pipeline.lpush("{}token".format(redis_hijack_key), "token")
            redis_pipeline.execute()

    def mark_handled(self, monitor_event: Dict) -> NoReturn:
        """
        Marks a bgp update as handled on the database.
        """
        # log.debug('{}'.format(monitor_event['key']))
        self.producer.publish(
            monitor_event["key"],
            exchange=self.handled_exchange,
            routing_key="update",
            priority=1,
            serializer="ujson",
        )

    def mark_outdated(self, hij_key: str, redis_hij_key: str) -> NoReturn:
        """
        Marks a hijack as outdated on the database.
        """
        # log.debug('{}'.format(hij_key))
        msg = {
            "persistent_hijack_key": hij_key,
            "redis_hijack_key": redis_hij_key,
        }
        self.producer.publish(
            msg,
            exchange=self.hijack_exchange,
            routing_key="outdate",
            priority=1,
            serializer="ujson",
        )

    def publish_hijack_result_production(self, result, redis_hijack_key):
        self.producer.publish(
            result,
            exchange=self.hijack_hashing,
            routing_key=redis_hijack_key,
            priority=0,
            serializer="ujson",
        )

    def publish_hijack_result_test(self, result, redis_hijack_key):
        self.producer.publish(
            result,
            exchange=self.hijack_exchange,
            routing_key="update",
            priority=0,
            serializer="ujson",
        )
        self.producer.publish(
            result,
            exchange=self.hijack_hashing,
            routing_key=redis_hijack_key,
            priority=0,
            serializer="ujson",
        )

    def gen_implicit_withdrawal(self, monitor_event: Dict) -> NoReturn:
        """
        Checks if a benign BGP update should trigger an implicit withdrawal.
        """
        # log.debug('{}'.format(monitor_event['key']))
        prefix = monitor_event["prefix"]
        super_prefix = ipaddress.ip_network(prefix).supernet()
        peer_asn = monitor_event["peer_asn"]
        # if the update's prefix matched exactly or is directly more
        # specific than an originally hijacked prefix
        if self.redis.exists(
            "prefix_{}_peer_{}_hijacks".format(prefix, peer_asn)
        ) or self.redis.exists(
            "prefix_{}_peer_{}_hijacks".format(super_prefix, peer_asn)
        ):
            # generate implicit withdrawal
            withdraw_msg = {
                "service": "implicit-withdrawal",
                "type": "W",
                "prefix": prefix,
                "path": [],
                "orig_path": {"triggering_bgp_update": monitor_event},
                "communities": [],
                "timestamp": monitor_event["timestamp"] + 1,
                "peer_asn": peer_asn,
            }
            if not self.redis.exists(
                "prefix_{}_peer_{}_hijacks".format(prefix, peer_asn)
            ) and self.redis.exists(
                "prefix_{}_peer_{}_hijacks".format(super_prefix, peer_asn)
            ):
                withdraw_msg["prefix"] = str(super_prefix)
            key_generator(withdraw_msg)
            self.producer.publish(
                withdraw_msg,
                exchange=self.update_exchange,
                routing_key="update",
                serializer="ujson",
            )

    def comm_annotate_hijack(self, monitor_event: Dict, hijack: Dict) -> NoReturn:
        """
        Annotates a hijack based on community checks
        (modifies the "community_annotation" field in-place).
        """
        try:
            if hijack.get("community_annotation", "NA") in [None, "", "NA"]:
                hijack["community_annotation"] = "NA"
            bgp_update_communities = set()
            for comm_as_value in monitor_event["communities"]:
                community = "{}:{}".format(comm_as_value[0], comm_as_value[1])
                bgp_update_communities.add(community)

            if "prefix_node" in monitor_event:
                prefix_node = monitor_event["prefix_node"]
                for item in prefix_node["data"]["confs"]:
                    annotations = []
                    for annotation_element in item.get("community_annotations", []):
                        for annotation in annotation_element:
                            annotations.append(annotation)
                    for annotation_element in item.get("community_annotations", []):
                        for annotation in annotation_element:
                            for community_rule in annotation_element[annotation]:
                                in_communities = set(community_rule.get("in", []))
                                out_communities = set(community_rule.get("out", []))
                                if (
                                    in_communities <= bgp_update_communities
                                    and out_communities.isdisjoint(
                                        bgp_update_communities
                                    )
                                ):
                                    if hijack["community_annotation"] == "NA":
                                        hijack["community_annotation"] = annotation
                                    elif annotations.index(
                                        annotation
                                    ) < annotations.index(
                                        hijack["community_annotation"]
                                    ):
                                        hijack["community_annotation"] = annotation
            else:
                log.error(
                    "unconfigured BGP update received '{}'".format(monitor_event)
                )
        except Exception:
            log.exception("exception")

    def stop_consumer_loop(self, message: Dict) -> NoReturn:
        """
        Callback function that stops the current consumer loop.
        """
        message.ack()
        self.should_stop = True
class BGPTest(object):
    def __init__(self, route_collector="rrc00",
                 rpki_validator="rpki-validator.realmv6.org:8282"):
        self.rc = route_collector
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        # self._start_rtr_manager()
        self.stream = BGPStream()
        self.rec = BGPRecord()

    def _start_rtr_manager(self):
        self.mgr.start()
        # Block until the RTR manager has received a full ROA snapshot.
        while not self.mgr.is_synced():
            sleep(0.2)

    def start_stream(self, start_time=None, end_time=0, route_collector=""):
        """
        Starts the BGPStream for the given route collector, limited to RIB
        records within the [start, end] interval (end=0 means open-ended).
        """
        if route_collector == "":
            route_collector = self.rc
        self.stream.add_filter('collector', route_collector)
        self.stream.add_filter('record-type', 'ribs')
        if (start_time is None) or not isinstance(start_time, datetime):
            start_time = datetime.utcnow()
        if isinstance(end_time, datetime):
            end = int(end_time.strftime("%s"))
        else:
            end = 0
        start = int(start_time.strftime("%s"))
        print(start)
        self.stream.add_interval_filter(start, end)
        # print('Start stream with', start_time, end_time)
        self.stream.start()

    def get_records(self):
        while self.stream.get_next_record(self.rec):
            # Only process records that BGPStream marks as valid.
            if self.rec.status != "valid":
                continue
            elem = self.rec.get_next_elem()
            while elem:
                # Print record and elem information.
                print(self.rec.project, self.rec.collector, self.rec.type,
                      self.rec.time, self.rec.status)
                print(elem.type, elem.peer_address, elem.peer_asn,
                      elem.fields)
                # prefix = elem.fields["prefix"].split('/')
                # result = mgr.validate(int(elem.fields["as-path"].split(" ")[-1]), prefix[0], prefix[1])
                elem = self.rec.get_next_elem()
        print("done.")
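
# Hypothetical driver for BGPTest (assumed entry point, not shown in the
# original): stream RIB records from rrc00 starting now and print them.
if __name__ == "__main__":
    tester = BGPTest(route_collector="rrc00")
    tester.start_stream()  # defaults: start at "now", open-ended interval
    tester.get_records()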
class BGPCounter(object):
    """Counts BGP record element types seen on a BGPStream."""

    def __init__(self, filters={'collector': ['rrc00']},
                 rpki_validator="rpki-validator.realmv6.org:8282",
                 db="metasnap.db"):
        self.stream = BGPStream()
        self.filters = filters
        self.route_table = dict()
        self.i = 0
        for filter_type, filter_array in filters.items():
            for filter_value in filter_array:
                self.stream.add_filter(filter_type, filter_value)
        for collector in filters['collector']:
            self.route_table[collector] = defaultdict(dict)
        # self.db = DBConnector(db, read_only=False)
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        self.mgr.start()
        self.counter = Counter()
        start_timestamp = self.get_push_timestamp(datetime.now(timezone.utc))
        # self.start_collecting(start_timestamp, int(datetime.now(timezone.utc).strftime("%s")))
        self.start_collecting(start_timestamp, start_timestamp)

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def get_push_timestamp(self, start_time):
        # RIB pushes happen every 8 hours (00:00, 08:00, 16:00 UTC);
        # align to the most recent slot.
        start_time = start_time.replace(hour=(start_time.hour // 8) * 8,
                                        minute=0, second=0, microsecond=0)
        return int(start_time.timestamp())

    def start_collecting(self, start_timestamp, end_timestamp=0):
        self.stream.add_interval_filter(start_timestamp, end_timestamp)
        print("Start BGPStream:", start_timestamp, end_timestamp)
        self.stream.start()
        rec = BGPRecord()
        act_dump = "unknown"
        while self.stream.get_next_record(rec):
            self.i += 1
            if self.i % 10000 == 0:
                print(self.i)
            if rec.status == "valid":
                if act_dump != rec.dump_position:
                    act_dump = rec.dump_position
                    print('Dump Position:', rec.dump_position)
                elem = rec.get_next_elem()
                while elem:
                    # Wrap in a list: Counter.update() on a bare string
                    # would count its characters instead of the type.
                    self.counter.update([elem.type])
                    elem = rec.get_next_elem()
        print(self.counter)
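
# To make the slot alignment in get_push_timestamp concrete, the same
# arithmetic on a fixed instant (pure illustration, no new assumptions):
from datetime import datetime, timezone

t = datetime(2018, 5, 24, 13, 37, tzinfo=timezone.utc)
aligned = t.replace(hour=(t.hour // 8) * 8, minute=0, second=0,
                    microsecond=0)
print(aligned.isoformat())       # 2018-05-24T08:00:00+00:00
print(int(aligned.timestamp()))  # 1527148800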
from rtrlib import RTRManager, PfxvState


def callback(pfx_record, data):
    print(pfx_record)


mgr = RTRManager('rpki-validator.realmv6.org', 8282)
mgr.start()
# Validate a single origin/prefix pair, then dump every IPv4 ROA record.
result = mgr.validate(55803, '223.25.52.0', 23)
mgr.for_each_ipv4_record(callback, None)
mgr.stop()
print('\n--', result)
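
# Hedged sketch: branching on the validation result's state instead of
# printing the raw result. The PfxvState member names are assumed to
# mirror rtrlib's C enum (VALID / NOT_FOUND / INVALID).
if result.state == PfxvState.VALID:
    print('a ROA covers this announcement')
elif result.state == PfxvState.INVALID:
    print('announcement conflicts with a ROA')
else:  # PfxvState.NOT_FOUND
    print('no covering ROA for this prefix')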
class BGPLocalAggregator(object):
    """Aggregates BGP data from a local BGPStream dump file."""

    def __init__(self, filters={'collector': ['rrc00']},
                 rpki_validator="rpki-validator.realmv6.org:8282",
                 db="metasnap.db"):
        self.stream = BGPStream(
            '/Users/mx/Projects/Uni/bgp-group/bgp_dump.txt')
        self.filters = filters
        self.route_table = dict()
        self.i = 0
        self.metadata_vp = dict()
        self.metadata_rc = dict()
        self.peers = Counter()
        self.prefix4 = Counter()
        self.prefix6 = Counter()
        start_timestamp = get_push_timestamp(datetime.now(timezone.utc))
        # for filter_type, filter_array in filters.items():
        #     for filter_value in filter_array:
        #         self.stream.add_filter(filter_type, filter_value)
        for collector in filters['collector']:
            self.route_table[collector] = defaultdict(dict)
            self.metadata_vp[collector] = defaultdict(list)
            self.metadata_rc[collector] = defaultdict(int)
            self.peers[collector] = defaultdict(int)
            self.prefix4[collector] = defaultdict(int)
            self.prefix6[collector] = defaultdict(int)
        # self.db = DBConnector(db, read_only=False)
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        self.mgr.start()
        self.start_collecting(start_timestamp, start_timestamp)

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def start_collecting(self, start_timestamp, end_timestamp=0):
        # self.stream.add_interval_filter(start_timestamp, end_timestamp)
        print("Start BGPStream:", start_timestamp, end_timestamp)
        self.stream.start()
        rec = self.stream.get_next_record()
        while rec:
            if rec.status == "valid":
                # self.i += 1
                # if self.i % 1000000 == 0:
                #     print(self.i // 1000000, end=' ')
                elem = rec.get_next_elem()
                while elem:
                    if elem.type not in ('R', 'A', 'W'):
                        # Peer-state and other element types carry no prefix.
                        elem = rec.get_next_elem()
                        continue
                    origin_asn = ""
                    if elem.type in ('R', 'A'):
                        origin_asn = elem.fields['as-path'].split(' ')[-1]
                        try:
                            origin_asn = int(origin_asn)
                        except ValueError:
                            elem = rec.get_next_elem()
                            continue
                    prefix = elem.fields['prefix']
                    ip, mask_len = split_prefix(prefix)
                    # Check if v4 or v6
                    is_v4 = check_ipv4(ip)
                    old_elem = self.route_table[rec.collector][(
                        elem.peer_asn, elem.peer_address)].get(prefix)
                    if elem.type in ('R', 'A'):
                        # Only announcements carry an origin ASN to validate.
                        validated = self.mgr.validate(origin_asn, ip, mask_len)
                        self.route_table[rec.collector][(
                            elem.peer_asn, elem.peer_address)][prefix] = Route(
                                origin_asn, rec.collector, prefix, is_v4,
                                validated.state.value)
                        if old_elem:
                            if old_elem.type != validated.state.value:
                                # The validation enum maps 0: valid,
                                # 1: unknown, 2: invalid; the metadata list
                                # keeps those counters at offsets 3..5,
                                # hence the '3 + state' indexing.
                                self.metadata_vp[rec.collector][elem.peer_asn][
                                    3 + old_elem.type] -= 1
                                self.metadata_vp[rec.collector][elem.peer_asn][
                                    3 + validated.state.value] += 1
                        else:
                            if not self.metadata_vp[rec.collector].get(
                                    elem.peer_asn):
                                # Initialize the metadata entry if it does
                                # not exist yet.
                                self.metadata_vp[rec.collector][elem.peer_asn] = \
                                    [elem.peer_asn, rec.collector, rec.time,
                                     0, 0, 0]
                            # Update the vantage-point metadata the same way
                            # as above.
                            self.metadata_vp[rec.collector][elem.peer_asn][
                                3 + validated.state.value] += 1
                            self.metadata_vp[rec.collector][
                                elem.peer_asn][2] = rec.time
                            self.peers[rec.collector][elem.peer_asn] += 1
                            if is_v4:
                                self.prefix4[rec.collector][prefix] += 1
                            else:
                                self.prefix6[rec.collector][prefix] += 1
                    elif elem.type == 'W':
                        if old_elem:
                            # Reduce the per-prefix IPv4/IPv6 counters.
                            if is_v4:
                                self.prefix4[rec.collector][prefix] -= 1
                                if self.prefix4[rec.collector][prefix] == 0:
                                    del self.prefix4[rec.collector][prefix]
                            else:
                                self.prefix6[rec.collector][prefix] -= 1
                                if self.prefix6[rec.collector][prefix] == 0:
                                    del self.prefix6[rec.collector][prefix]
                            # Reduce the number of prefixes seen from this peer.
                            self.peers[rec.collector][elem.peer_asn] -= 1
                            if self.peers[rec.collector][elem.peer_asn] == 0:
                                del self.peers[rec.collector][elem.peer_asn]
                            # Update the metadata valid/unknown/invalid count.
                            self.metadata_vp[rec.collector][elem.peer_asn][
                                3 + old_elem.type] -= 1
                            # Update the metadata timestamp.
                            self.metadata_vp[rec.collector][
                                elem.peer_asn][2] = rec.time
                            # Remove the entry from the route_table.
                            self.route_table[rec.collector][(
                                elem.peer_asn, elem.peer_address)].pop(
                                    prefix, None)
                        else:
                            # TODO: log withdrawals for prefixes never seen.
                            pass
                    elem = rec.get_next_elem()
            rec = self.stream.get_next_record()
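
# The aggregator above relies on a Route record whose definition is not
# shown. One plausible shape, inferred from the call sites (field names
# are assumptions): the fifth constructor argument, validated.state.value,
# is later read back as '.type', so that field doubles as the stored RPKI
# state (0 valid, 1 unknown, 2 invalid).
from collections import namedtuple

Route = namedtuple('Route', ['origin_asn', 'collector', 'prefix',
                             'is_v4', 'type'])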
class BGPDataAggregator(object):
    """Aggregates live BGP data and pushes per-VP/per-RC metadata to the DB."""

    def __init__(
            self,
            filters={"collector": ["rrc00"]},
            rpki_validator="rpki-validator.realmv6.org:8282",
            settings_file="../settings.json",
    ):
        self.stream = BGPStream()
        self.filters = filters
        self.route_table = dict()
        self.i = 0
        self.metadata_vp = dict()
        self.metadata_rc = dict()
        self.peers = Counter()
        self.prefix4 = Counter()
        self.prefix6 = Counter()
        start_timestamp = get_push_timestamp(datetime.now(timezone.utc))
        for filter_type, filter_array in filters.items():
            for filter_value in filter_array:
                self.stream.add_filter(filter_type, filter_value)
        for collector in filters["collector"]:
            self.route_table[collector] = defaultdict(dict)
            self.metadata_vp[collector] = defaultdict(list)
            self.metadata_rc[collector] = RouteCollectorMeta(None, 0, 0, 0, 0)
            self.peers[collector] = defaultdict(int)
            self.prefix4[collector] = defaultdict(int)
            self.prefix6[collector] = defaultdict(int)
        settings = get_settings(settings_file)
        settings["db"]["password"] = os.environ["PGPASS"]
        self.db = DBConnector(settings["db"])
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        self.mgr.start()
        self.start_collecting(start_timestamp)

    def __del__(self):
        if self.mgr.is_synced():
            self.mgr.stop()

    def push_data(self, timestamp):
        print("UPDATE:", timestamp)
        self.db.update_vp_meta(self.metadata_vp)
        for rc in self.metadata_rc.keys():
            self.metadata_rc[rc] = RouteCollectorMeta(
                rc,
                timestamp,
                len(self.peers[rc].keys()),
                len(self.prefix4[rc]),
                len(self.prefix6[rc]),
            )
        self.db.update_rc_meta(self.metadata_rc)

    def start_collecting(self, start_timestamp, end_timestamp=0):
        self.stream.add_interval_filter(start_timestamp, end_timestamp)
        print("Start BGPStream:", start_timestamp, end_timestamp)
        next_timestamp = init_next_timestamp(start_timestamp, 5)
        print("Next Push to DB at:", next_timestamp)
        self.stream.start()
        rec = BGPRecord()
        while self.stream.get_next_record(rec):
            if rec.status == "valid":
                if rec.time >= next_timestamp:
                    self.push_data(next_timestamp)
                    next_timestamp += 300
                elem = rec.get_next_elem()
                while elem:
                    if elem.type not in ("R", "A", "W"):
                        # Peer-state and other element types carry no prefix.
                        elem = rec.get_next_elem()
                        continue
                    origin_asn = ""
                    if elem.type in ("R", "A"):
                        origin_asn = elem.fields["as-path"].split(" ")[-1]
                        try:
                            origin_asn = int(origin_asn)
                        except ValueError:
                            elem = rec.get_next_elem()
                            continue
                    prefix = elem.fields["prefix"]
                    ip, mask_len = split_prefix(prefix)
                    # Check if v4 or v6
                    is_v4 = check_ipv4(ip)
                    old_elem = self.route_table[rec.collector][(
                        elem.peer_asn, elem.peer_address)].get(prefix)
                    if elem.type in ("R", "A"):
                        # Only announcements carry an origin ASN to validate.
                        validated = self.mgr.validate(origin_asn, ip, mask_len)
                        self.route_table[rec.collector][(
                            elem.peer_asn, elem.peer_address)][prefix] = Route(
                                origin_asn,
                                rec.collector,
                                prefix,
                                is_v4,
                                validated.state.value,
                            )
                        if old_elem:
                            if old_elem.type != validated.state.value:
                                # The validation enum maps 0: valid,
                                # 1: unknown, 2: invalid; this class keeps
                                # those counters at offsets 4..6 of the
                                # metadata list, hence '4 + state'.
                                self.metadata_vp[rec.collector][(
                                    elem.peer_asn,
                                    elem.peer_address)][4 + old_elem.type] -= 1
                                self.metadata_vp[rec.collector][(
                                    elem.peer_asn, elem.peer_address
                                )][4 + validated.state.value] += 1
                        else:
                            if not self.metadata_vp[rec.collector].get(
                                    (elem.peer_asn, elem.peer_address)):
                                # Initialize the metadata entry if it does
                                # not exist yet.
                                self.metadata_vp[rec.collector][(
                                    elem.peer_asn, elem.peer_address)] = [
                                        elem.peer_asn,
                                        elem.peer_address,
                                        rec.collector,
                                        next_timestamp,
                                        0,
                                        0,
                                        0,
                                    ]
                            # Update the vantage-point metadata the same way
                            # as above.
                            self.metadata_vp[rec.collector][(
                                elem.peer_asn, elem.peer_address
                            )][4 + validated.state.value] += 1
                            self.metadata_vp[rec.collector][(
                                elem.peer_asn,
                                elem.peer_address)][3] = next_timestamp
                            self.peers[rec.collector][elem.peer_asn] += 1
                            if is_v4:
                                self.prefix4[rec.collector][prefix] += 1
                            else:
                                self.prefix6[rec.collector][prefix] += 1
                    elif elem.type == "W":
                        if old_elem:
                            # Reduce the per-prefix IPv4/IPv6 counters.
                            if is_v4:
                                self.prefix4[rec.collector][prefix] -= 1
                                if self.prefix4[rec.collector][prefix] == 0:
                                    del self.prefix4[rec.collector][prefix]
                            else:
                                self.prefix6[rec.collector][prefix] -= 1
                                if self.prefix6[rec.collector][prefix] == 0:
                                    del self.prefix6[rec.collector][prefix]
                            # Reduce the number of prefixes seen from this peer.
                            self.peers[rec.collector][elem.peer_asn] -= 1
                            if self.peers[rec.collector][elem.peer_asn] == 0:
                                del self.peers[rec.collector][elem.peer_asn]
                            # Update the metadata valid/unknown/invalid count.
                            self.metadata_vp[rec.collector][(
                                elem.peer_asn,
                                elem.peer_address)][4 + old_elem.type] -= 1
                            # Update the metadata timestamp.
                            self.metadata_vp[rec.collector][(
                                elem.peer_asn,
                                elem.peer_address)][3] = next_timestamp
                            # Remove the entry from the route_table.
                            self.route_table[rec.collector][(
                                elem.peer_asn, elem.peer_address)].pop(
                                    prefix, None)
                        else:
                            # TODO: log withdrawals for prefixes never seen.
                            pass
                    elem = rec.get_next_elem()
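
# split_prefix, check_ipv4 and init_next_timestamp come from a helper
# module that is not shown here; the following are hedged sketches that
# merely match how the aggregators above call them.
import ipaddress


def split_prefix(prefix):
    # "10.0.0.0/8" -> ("10.0.0.0", 8)
    ip, mask_len = prefix.split('/')
    return ip, int(mask_len)


def check_ipv4(ip):
    # True for IPv4 addresses, False for IPv6.
    return ipaddress.ip_address(ip).version == 4


def init_next_timestamp(start_timestamp, minutes):
    # First push boundary strictly after start_timestamp on a fixed grid
    # (the aggregator then advances it by 300 seconds per push).
    interval = minutes * 60
    return start_timestamp - (start_timestamp % interval) + interval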
def __init__(self, connection: Connection) -> NoReturn: self.connection = connection self.timestamp = -1 self.rules = None self.prefix_tree = None self.mon_num = 1 setattr(self, "publish_hijack_fun", self.publish_hijack_result_production) if TEST_ENV == "true": setattr(self, "publish_hijack_fun", self.publish_hijack_result_test) self.redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT) ping_redis(self.redis) self.rtrmanager = None if RPKI_VALIDATOR_ENABLED == "true": while True: try: self.rtrmanager = RTRManager( RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT ) self.rtrmanager.start() log.info( "Connected to RPKI VALIDATOR '{}:{}'".format( RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT ) ) break except Exception: log.info( "Could not connect to RPKI VALIDATOR '{}:{}'".format( RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT ) ) log.info("Retrying RTR connection in 5 seconds...") time.sleep(5) # EXCHANGES self.update_exchange = Exchange( "bgp-update", channel=connection, type="direct", durable=False, delivery_mode=1, ) self.update_exchange.declare() self.hijack_exchange = Exchange( "hijack-update", channel=connection, type="direct", durable=False, delivery_mode=1, ) self.hijack_exchange.declare() self.hijack_hashing = Exchange( "hijack-hashing", channel=connection, type="x-consistent-hash", durable=False, delivery_mode=1, ) self.hijack_hashing.declare() self.handled_exchange = Exchange( "handled-update", channel=connection, type="direct", durable=False, delivery_mode=1, ) self.config_exchange = Exchange( "config", channel=connection, type="direct", durable=False, delivery_mode=1, ) self.pg_amq_bridge = Exchange( "amq.direct", type="direct", durable=True, delivery_mode=1 ) # QUEUES self.update_queue = Queue( "detection-update-update", exchange=self.pg_amq_bridge, routing_key="update-insert", durable=False, auto_delete=True, max_priority=1, consumer_arguments={"x-priority": 1}, ) self.hijack_ongoing_queue = Queue( "detection-hijack-ongoing", exchange=self.hijack_exchange, routing_key="ongoing", durable=False, auto_delete=True, max_priority=1, consumer_arguments={"x-priority": 1}, ) self.config_queue = Queue( "detection-config-notify-{}".format(uuid()), exchange=self.config_exchange, routing_key="notify", durable=False, auto_delete=True, max_priority=3, consumer_arguments={"x-priority": 3}, ) self.config_request_rpc() log.info("started")
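
# The x-consistent-hash exchange above only pays off once each detection
# worker binds its own queue to it. A hedged sketch of such a binding
# (queue name and weight are illustrative, not from the source): with
# RabbitMQ's consistent-hash plugin, the binding's routing key is a
# weight, not a match pattern, so "1" gives every worker an equal share
# of hijack keys.
from uuid import uuid4
from kombu import Exchange, Queue

hijack_hashing = Exchange("hijack-hashing", type="x-consistent-hash",
                          durable=False, delivery_mode=1)

hashed_queue = Queue(
    "detection-hashed-{}".format(uuid4()),
    exchange=hijack_hashing,
    routing_key="1",  # binding weight under the consistent-hash plugin
    durable=False,
    auto_delete=True,
    max_priority=1,
    consumer_arguments={"x-priority": 1},
)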
def test(self): """ Loads a test file that includes crafted bgp updates as input and expected messages as output. """ RABBITMQ_USER = os.getenv("RABBITMQ_USER", "guest") RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "guest") RABBITMQ_HOST = os.getenv("RABBITMQ_HOST", "rabbitmq") RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672) RABBITMQ_URI = "amqp://{}:{}@{}:{}//".format(RABBITMQ_USER, RABBITMQ_PASS, RABBITMQ_HOST, RABBITMQ_PORT) RPKI_VALIDATOR_HOST = os.getenv("RPKI_VALIDATOR_HOST", "routinator") RPKI_VALIDATOR_PORT = os.getenv("RPKI_VALIDATOR_PORT", 3323) # check RPKI RTR manager connectivity while True: try: rtrmanager = RTRManager(RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT) rtrmanager.start() print("Connected to RPKI VALIDATOR '{}:{}'".format( RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT)) rtrmanager.stop() break except Exception: print("Could not connect to RPKI VALIDATOR '{}:{}'".format( RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT)) print("Retrying in 30 seconds...") time.sleep(30) # exchanges self.update_exchange = Exchange("bgp-update", type="direct", durable=False, delivery_mode=1) self.hijack_exchange = Exchange("hijack-update", type="direct", durable=False, delivery_mode=1) self.pg_amq_bridge = Exchange("amq.direct", type="direct", durable=True, delivery_mode=1) # queues self.update_queue = Queue( "detection-testing", exchange=self.pg_amq_bridge, routing_key="update-update", durable=False, auto_delete=True, max_priority=1, consumer_arguments={"x-priority": 1}, ) self.hijack_queue = Queue( "hijack-testing", exchange=self.hijack_exchange, routing_key="update", durable=False, auto_delete=True, max_priority=1, consumer_arguments={"x-priority": 1}, ) self.hijack_db_queue = Queue( "hijack-db-testing", exchange=self.pg_amq_bridge, routing_key="hijack-update", durable=False, auto_delete=True, max_priority=1, consumer_arguments={"x-priority": 1}, ) with Connection(RABBITMQ_URI) as connection: print("Waiting for pg_amq exchange..") Tester.waitExchange(self.pg_amq_bridge, connection.default_channel) print("Waiting for hijack exchange..") Tester.waitExchange(self.hijack_exchange, connection.default_channel) print("Waiting for update exchange..") Tester.waitExchange(self.update_exchange, connection.default_channel) # query database for the states of the processes db_con = self.getDbConnection() db_cur = db_con.cursor() query = "SELECT name FROM process_states WHERE running=True" running_modules = set() # wait until all 5 modules are running while len(running_modules) < 5: db_cur.execute(query) entries = db_cur.fetchall() for entry in entries: running_modules.add(entry[0]) db_con.commit() print("Running modules: {}".format(running_modules)) print("{}/5 modules are running.".format(len(running_modules))) time.sleep(1) Tester.config_request_rpc(connection) time.sleep(10) for testfile in os.listdir("testfiles/"): self.clear() self.curr_test = testfile self.messages = {} # load test with open("testfiles/{}".format(testfile), "r") as f: self.messages = json.load(f) send_len = len(self.messages) with nested( connection.Consumer( self.hijack_queue, callbacks=[self.validate_message], accept=["ujson"], ), connection.Consumer( self.update_queue, callbacks=[self.validate_message], accept=["ujson", "txtjson"], ), connection.Consumer( self.hijack_db_queue, callbacks=[self.validate_message], accept=["ujson", "txtjson"], ), ): send_cnt = 0 # send and validate all messages in the messages.json file while send_cnt < send_len: self.curr_idx = send_cnt self.send_next_message(connection) send_cnt += 1 # sleep until we 
receive all expected messages while self.curr_idx != send_cnt: time.sleep(0.1) try: connection.drain_events(timeout=10) except socket.timeout: # avoid infinite loop by timeout assert False, "Consumer timeout" connection.close() time.sleep(5) self.supervisor.supervisor.stopAllProcesses() self.waitProcess("listener", 0) # 0 STOPPED self.waitProcess("clock", 0) # 0 STOPPED self.waitProcess("detection", 0) # 0 STOPPED self.waitProcess("configuration", 0) # 0 STOPPED self.waitProcess("database", 0) # 0 STOPPED self.waitProcess("observer", 0) # 0 STOPPED
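
# Tester.waitExchange is referenced above but not shown. One plausible
# implementation, consistent with its call sites (a hedged sketch, not
# the confirmed original): a passive declare only checks for existence
# and raises while the exchange has not been created yet.
import time


def wait_exchange(exchange, channel):
    """Poll a passive declare until the exchange's owner has created it."""
    while True:
        try:
            exchange.declare(passive=True, channel=channel)
            break
        except Exception:
            time.sleep(1)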
class BGPStats(object):
    """Streams BGP records and prints RPKI-valid announcements."""

    def __init__(self, route_collector="rrc00",
                 rpki_validator="rpki-validator.realmv6.org:8282"):
        self.rc = route_collector
        rpki = rpki_validator.split(":")
        self.mgr = RTRManager(rpki[0], rpki[1])
        # self._start_rtr_manager()
        self.stream = BGPStream()
        self.rec = BGPRecord()

    def _start_rtr_manager(self):
        self.mgr.start()

    def start_stream(self, start_time=None, end_time=0, route_collector=""):
        """
        Starts the BGPStream for the given route collector over a fixed
        test interval.
        """
        if route_collector == "":
            route_collector = self.rc
        self.stream.add_filter('collector', route_collector)
        # self.stream.add_filter('record-type', 'ribs')
        if (start_time is None) or not isinstance(start_time, datetime):
            start_time = datetime.utcnow()
        if isinstance(end_time, datetime):
            end = int(end_time.strftime("%s"))
        else:
            end = 0
        # Hardcoded test window: 2018-05-24 12:00 UTC plus roughly 5.5 hours
        # (overrides the start_time/end_time arguments for now).
        self.stream.add_interval_filter(1527163200, 1527183200)
        print('Start stream with', start_time, end_time)
        self.stream.start()

    def get_records(self):
        print("get_records:")
        while self.stream.get_next_record(self.rec):
            # Only process records that BGPStream marks as valid.
            if self.rec.status == "valid":
                # print('--', self.rec.project, self.rec.collector,
                #       self.rec.type, self.rec.time, self.rec.status, '--')
                elem = self.rec.get_next_elem()
                while elem:
                    if elem.type == 'R' or elem.type == 'A':
                        asn = elem.fields['as-path'].split(' ')[-1]
                        try:
                            asn = int(asn)
                        except ValueError:
                            elem = self.rec.get_next_elem()
                            continue
                        prefix = ipaddress.ip_interface(elem.fields['prefix'])
                        ip = prefix.ip.compressed
                        mask_len = int(prefix.with_prefixlen.split('/')[1])
                        validated = self.mgr.validate(asn, ip, mask_len)
                        if validated.is_valid:
                            print(validated)
                    elif elem.type == 'W':
                        pass
                        # print("Withdrawal:", elem.peer_address,
                        #       elem.peer_asn, elem.fields)
                    elif elem.type == 'S':
                        print("Peerstate:", elem.peer_address,
                              elem.peer_asn, elem.fields)
                    else:
                        print("Unknown:", elem.peer_address,
                              elem.peer_asn, elem.fields)
                    elem = self.rec.get_next_elem()
            # print('\n')
        print("done.")
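
# Hypothetical driver for BGPStats (assumed entry point, not shown in the
# original): start the RTR manager, open the stream over the hardcoded
# test window, and print all RPKI-valid announcements.
if __name__ == "__main__":
    stats = BGPStats(route_collector="rrc00")
    stats._start_rtr_manager()  # kept commented out in __init__ above
    stats.start_stream()
    stats.get_records()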