Example #1
 def handle_mitigation_request(self, message):
     message.ack()
     hijack_event = message.payload
     ip_version = get_ip_version(hijack_event["prefix"])
     if hijack_event["prefix"] in self.prefix_tree[ip_version]:
         prefix_node = self.prefix_tree[ip_version][
             hijack_event["prefix"]]
         mitigation_action = prefix_node["data"]["mitigation"][0]
         if mitigation_action == "manual":
             log.info("starting manual mitigation of hijack {}".format(
                 hijack_event))
         else:
             log.info(
                 "starting custom mitigation of hijack {} using '{}' script"
                 .format(hijack_event, mitigation_action))
             hijack_event_str = json.dumps(hijack_event)
             subprocess.Popen(
                 [mitigation_action, "-i", hijack_event_str],
                 shell=False,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE,
             )
         # notify the rest of the system that mitigation has started
         mit_started = {"key": hijack_event["key"], "time": time.time()}
         self.producer.publish(
             mit_started,
             exchange=self.mitigation_exchange,
             routing_key="mit-start",
             priority=2,
             serializer="ujson",
         )
     else:
         log.warning("no rule for hijack {}".format(hijack_event))
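Note: the handler above treats any non-"manual" mitigation action as the path of an executable that receives the hijack event as JSON via "-i". A minimal sketch of such a custom mitigation script follows; only the "-i <json>" calling convention is taken from the handler, while the script's name and its action are hypothetical illustrations:

#!/usr/bin/env python3
# hypothetical custom mitigation hook; only the "-i <json>" contract
# is taken from the handler above
import argparse
import json


def main():
    parser = argparse.ArgumentParser(description="example mitigation hook")
    parser.add_argument("-i", dest="info_hijack", required=True,
                        help="hijack event serialized as a JSON string")
    args = parser.parse_args()
    hijack_event = json.loads(args.info_hijack)
    # a real script would e.g. trigger prefix deaggregation here
    print("would mitigate hijack {} on prefix {}".format(
        hijack_event.get("key"), hijack_event.get("prefix")))


if __name__ == "__main__":
    main()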
Example #2
        def init_mitigation(self):
            log.info("Initiating mitigation...")

            log.info("Starting building mitigation prefix tree...")
            self.prefix_tree = {
                "v4": pytricia.PyTricia(32),
                "v6": pytricia.PyTricia(128),
            }
            raw_prefix_count = 0
            for rule in self.rules:
                try:
                    for prefix in rule["prefixes"]:
                        for translated_prefix in translate_rfc2622(prefix):
                            ip_version = get_ip_version(translated_prefix)
                            node = {
                                "prefix": translated_prefix,
                                "data": {
                                    "mitigation": rule["mitigation"]
                                },
                            }
                            self.prefix_tree[ip_version].insert(
                                translated_prefix, node)
                            raw_prefix_count += 1
                except Exception:
                    log.exception("Exception")
            log.info(
                "{} prefixes integrated in mitigation prefix tree in total".
                format(raw_prefix_count))
            log.info("Finished building mitigation prefix tree.")

            log.info("Mitigation initiated, configured and running.")
Example #3
 def build_prefix_tree(self):
     log.info("Starting building autoignore prefix tree...")
     self.prefix_tree = {
         "v4": pytricia.PyTricia(32),
         "v6": pytricia.PyTricia(128),
     }
     raw_prefix_count = 0
     for key in self.autoignore_rules:
         try:
             rule = self.autoignore_rules[key]
             for prefix in rule["prefixes"]:
                 for translated_prefix in translate_rfc2622(prefix):
                     ip_version = get_ip_version(translated_prefix)
                     if self.prefix_tree[ip_version].has_key(translated_prefix):
                         node = self.prefix_tree[ip_version][translated_prefix]
                     else:
                         node = {"prefix": translated_prefix, "rule_key": key}
                         self.prefix_tree[ip_version].insert(
                             translated_prefix, node
                         )
                     raw_prefix_count += 1
         except Exception:
             log.exception("Exception")
     log.info(
         "{} prefixes integrated in autoignore prefix tree in total".format(
             raw_prefix_count
         )
     )
     log.info("Finished building autoignore prefix tree.")
Example #4
        def start_monitors(self):
            log.info("Initiating monitor...")

            for proc_id in self.process_ids:
                try:
                    proc_id[1].terminate()
                except ProcessLookupError:
                    log.exception("process terminate")
            self.process_ids.clear()
            self.prefixes.clear()

            log.info("Starting building monitor prefix tree...")
            self.prefix_tree = {
                "v4": pytricia.PyTricia(32),
                "v6": pytricia.PyTricia(128),
            }
            raw_prefix_count = 0
            for rule in self.rules:
                try:
                    for prefix in rule["prefixes"]:
                        for translated_prefix in translate_rfc2622(prefix):
                            ip_version = get_ip_version(translated_prefix)
                            self.prefix_tree[ip_version].insert(translated_prefix, "")
                            raw_prefix_count += 1
                except Exception:
                    log.exception("Exception")
            log.info(
                "{} prefixes integrated in monitor prefix tree in total".format(
                    raw_prefix_count
                )
            )
            log.info("Finished building monitor prefix tree.")

            # only keep super prefixes for monitors
            log.info("Calculating monitored prefixes for monitor to supervise...")
            for ip_version in self.prefix_tree:
                for prefix in self.prefix_tree[ip_version]:
                    worst_prefix = search_worst_prefix(
                        prefix, self.prefix_tree[ip_version]
                    )
                    if worst_prefix:
                        self.prefixes.add(worst_prefix)
            dump_json(list(self.prefixes), self.prefix_file)
            log.info("Calculated monitored prefixes for monitor to supervise.")

            log.info("Initiating configured monitoring instances....")
            self.init_ris_instance()
            self.init_exabgp_instance()
            self.init_bgpstreamhist_instance()
            self.init_bgpstreamlive_instance()
            self.init_bgpstreamkafka_instance()
            log.info("All configured monitoring instances initiated.")

            log.info("Monitor initiated, configured and running.")
Example #5
 def find_prefix_node(self, prefix):
     ip_version = get_ip_version(prefix)
     prefix_node = None
     shared_memory_locks["prefix_tree"].acquire()
     if ip_version == "v4":
         size = 32
     else:
         size = 128
     # if the recalculation flag is set, the tree changed due to re-configuration,
     # so re-parse the shared dict form back into a pytricia tree
     if self.shared_memory_manager_dict["prefix_tree_recalculate"]:
         self.prefix_tree[ip_version] = dict_to_pytricia(
             self.shared_memory_manager_dict["prefix_tree"][ip_version], size
         )
         log.info("{} pytricia tree re-parsed from configuration".format(ip_version))
         self.shared_memory_manager_dict["prefix_tree_recalculate"] = False
     if prefix in self.prefix_tree[ip_version]:
         prefix_node = self.prefix_tree[ip_version][prefix]
     shared_memory_locks["prefix_tree"].release()
     return prefix_node
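Note: dict_to_pytricia and its inverse pytricia_to_dict exist because pytricia trees are not picklable and therefore cannot be stored in the shared-memory manager dict directly (see the pytricia issue linked in Example #7). A minimal sketch of what the pair plausibly does:

import pytricia


def pytricia_to_dict_sketch(pyt_tree):
    # flatten the tree into a plain, picklable dict keyed by prefix
    return {prefix: pyt_tree[prefix] for prefix in pyt_tree}


def dict_to_pytricia_sketch(dict_tree, size):
    # rebuild the tree from its dict form (size: 32 for v4, 128 for v6)
    pyt_tree = pytricia.PyTricia(size)
    for prefix, value in dict_tree.items():
        pyt_tree.insert(prefix, value)
    return pyt_tree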
Example #6
    def run(self):
        # update redis
        ping_redis(redis)
        redis.set("bgpstreamkafka_seen_bgp_update",
                  "1",
                  ex=MON_TIMEOUT_LAST_BGP_UPDATE)

        # create a new bgpstream instance
        stream = _pybgpstream.BGPStream()

        # set kafka data interface
        stream.set_data_interface("kafka")

        # set host connection details
        stream.set_data_interface_option("kafka", "brokers",
                                         "{}:{}".format(self.host, self.port))

        # set topic
        stream.set_data_interface_option("kafka", "topic", self.topic)

        # filter prefixes
        for prefix in self.prefixes:
            stream.add_filter("prefix", prefix)

        # build monitored prefix tree
        prefix_tree = {
            "v4": pytricia.PyTricia(32),
            "v6": pytricia.PyTricia(128)
        }
        for prefix in self.prefixes:
            ip_version = get_ip_version(prefix)
            prefix_tree[ip_version].insert(prefix, "")

        # filter record type
        stream.add_filter("record-type", "updates")

        # filter based on timing (if end=0 --> live mode)
        # Bypass for https://github.com/FORTH-ICS-INSPIRE/artemis/issues/411#issuecomment-661325802
        start_time = int(time.time()) - START_TIME_OFFSET
        if "BGPSTREAM_TIMESTAMP_BYPASS" in os.environ:
            log.warning(
                "Using BGPSTREAM_TIMESTAMP_BYPASS, meaning BMP timestamps are thrown away from BGPStream"
            )
            start_time = 0
        stream.add_interval_filter(start_time, 0)

        # set live mode
        stream.set_live_mode()

        # start the stream
        stream.start()

        # start producing
        validator = MformatValidator()
        with Producer(self.connection) as producer:
            while True:
                if not self.shared_memory_manager_dict[
                        "data_worker_should_run"]:
                    break

                # get next record
                try:
                    rec = stream.get_next_record()
                except BaseException:
                    continue

                if (rec.status != "valid") or (rec.type != "update"):
                    continue

                # get next element
                try:
                    elem = rec.get_next_elem()
                except BaseException:
                    continue

                while elem:
                    if not self.shared_memory_manager_dict[
                            "data_worker_should_run"]:
                        break

                    if elem.type in {"A", "W"}:
                        redis.set(
                            "bgpstreamkafka_seen_bgp_update",
                            "1",
                            ex=MON_TIMEOUT_LAST_BGP_UPDATE,
                        )
                        this_prefix = str(elem.fields["prefix"])
                        service = "bgpstreamkafka|{}".format(str(
                            rec.collector))
                        type_ = elem.type
                        if type_ == "A":
                            as_path = elem.fields["as-path"].split(" ")
                            communities = [{
                                "asn": int(comm.split(":")[0]),
                                "value": int(comm.split(":")[1]),
                            } for comm in elem.fields["communities"]]
                        else:
                            as_path = []
                            communities = []
                        timestamp = float(rec.time)
                        if timestamp == 0:
                            timestamp = time.time()
                            log.debug("fixed timestamp: {}".format(timestamp))
                        peer_asn = elem.peer_asn

                        ip_version = get_ip_version(this_prefix)
                        if this_prefix in prefix_tree[ip_version]:
                            msg = {
                                "type": type_,
                                "timestamp": timestamp,
                                "path": as_path,
                                "service": service,
                                "communities": communities,
                                "prefix": this_prefix,
                                "peer_asn": peer_asn,
                            }
                            try:
                                if validator.validate(msg):
                                    msgs = normalize_msg_path(msg)
                                    for msg in msgs:
                                        key_generator(msg)
                                        log.debug(msg)
                                        producer.publish(
                                            msg,
                                            exchange=self.update_exchange,
                                            routing_key="update",
                                            serializer="ujson",
                                        )
                                else:
                                    log.debug(
                                        "Invalid format message: {}".format(
                                            msg))
                            except BaseException:
                                log.exception(
                                    "Error when normalizing BGP message: {}".
                                    format(msg))
                    try:
                        elem = rec.get_next_elem()
                    except BaseException:
                        continue
Example #7
def configure_prefixtree(msg, shared_memory_manager_dict):
    config = msg
    try:
        # check newer config
        config_timestamp = shared_memory_manager_dict["config_timestamp"]
        if config["timestamp"] > config_timestamp:
            shared_memory_locks["service_reconfiguring"].acquire()
            shared_memory_manager_dict["service_reconfiguring"] = True
            shared_memory_locks["service_reconfiguring"].release()

            # calculate prefix tree
            prefix_tree = {
                "v4": pytricia.PyTricia(32),
                "v6": pytricia.PyTricia(128)
            }
            rules = config.get("rules", [])
            for rule in rules:
                rule_translated_origin_asn_set = set()
                for asn in rule["origin_asns"]:
                    this_translated_asn_list = flatten(
                        translate_asn_range(asn))
                    rule_translated_origin_asn_set.update(
                        set(this_translated_asn_list))
                rule["origin_asns"] = list(rule_translated_origin_asn_set)
                rule_translated_neighbor_set = set()
                for asn in rule["neighbors"]:
                    this_translated_asn_list = flatten(
                        translate_asn_range(asn))
                    rule_translated_neighbor_set.update(
                        set(this_translated_asn_list))
                rule["neighbors"] = list(rule_translated_neighbor_set)

                conf_obj = {
                    "origin_asns": rule["origin_asns"],
                    "neighbors": rule["neighbors"],
                    "prepend_seq": rule.get("prepend_seq", []),
                    "policies": list(set(rule.get("policies", []))),
                    "community_annotations": rule.get("community_annotations",
                                                      []),
                    "mitigation": rule.get("mitigation", "manual"),
                }
                for prefix in rule["prefixes"]:
                    for translated_prefix in translate_rfc2622(prefix):
                        ip_version = get_ip_version(translated_prefix)
                        if prefix_tree[ip_version].has_key(translated_prefix):
                            node = prefix_tree[ip_version][translated_prefix]
                        else:
                            node = {
                                "prefix": translated_prefix,
                                "data": {
                                    "confs": []
                                },
                                "timestamp": config["timestamp"],
                            }
                            prefix_tree[ip_version].insert(
                                translated_prefix, node)
                        node["data"]["confs"].append(conf_obj)

            # calculate the monitored and configured prefixes
            configured_prefix_count = 0
            monitored_prefixes = set()
            for ip_version in prefix_tree:
                for prefix in prefix_tree[ip_version]:
                    configured_prefix_count += 1
                    monitored_prefix = search_worst_prefix(
                        prefix, prefix_tree[ip_version])
                    if monitored_prefix:
                        monitored_prefixes.add(monitored_prefix)

            # extract autoignore rules
            autoignore_rules = config.get("autoignore", {})

            # calculate autoignore prefix tree
            autoignore_prefix_tree = {
                "v4": pytricia.PyTricia(32),
                "v6": pytricia.PyTricia(128),
            }

            for key in autoignore_rules:
                rule = autoignore_rules[key]
                for prefix in rule["prefixes"]:
                    for translated_prefix in translate_rfc2622(prefix):
                        ip_version = get_ip_version(translated_prefix)
                        if not autoignore_prefix_tree[ip_version].has_key(
                                translated_prefix):
                            node = {
                                "prefix": translated_prefix,
                                "rule_key": key
                            }
                            autoignore_prefix_tree[ip_version].insert(
                                translated_prefix, node)

            # note that the object should be picklable (e.g., dict instead of pytricia tree,
            # see also: https://github.com/jsommers/pytricia/issues/20)
            shared_memory_locks["prefix_tree"].acquire()
            dict_prefix_tree = {
                "v4": pytricia_to_dict(prefix_tree["v4"]),
                "v6": pytricia_to_dict(prefix_tree["v6"]),
            }
            shared_memory_manager_dict["prefix_tree"] = dict_prefix_tree
            shared_memory_manager_dict["prefix_tree_recalculate"] = True
            shared_memory_locks["prefix_tree"].release()

            shared_memory_locks["monitored_prefixes"].acquire()
            shared_memory_manager_dict["monitored_prefixes"] = list(
                monitored_prefixes)
            shared_memory_locks["monitored_prefixes"].release()

            shared_memory_locks["configured_prefix_count"].acquire()
            shared_memory_manager_dict[
                "configured_prefix_count"] = configured_prefix_count
            shared_memory_locks["configured_prefix_count"].release()

            # note that the object should be picklable (e.g., dict instead of pytricia tree,
            # see also: https://github.com/jsommers/pytricia/issues/20)
            dict_autoignore_prefix_tree = {
                "v4": pytricia_to_dict(autoignore_prefix_tree["v4"]),
                "v6": pytricia_to_dict(autoignore_prefix_tree["v6"]),
            }
            shared_memory_locks["autoignore"].acquire()
            shared_memory_manager_dict["autoignore_rules"] = autoignore_rules
            shared_memory_manager_dict[
                "autoignore_prefix_tree"] = dict_autoignore_prefix_tree
            shared_memory_manager_dict["autoignore_recalculate"] = True
            shared_memory_locks["autoignore"].release()

            shared_memory_locks["config_timestamp"].acquire()
            shared_memory_manager_dict["config_timestamp"] = config[
                "timestamp"]
            shared_memory_locks["config_timestamp"].release()

        shared_memory_locks["service_reconfiguring"].acquire()
        shared_memory_manager_dict["service_reconfiguring"] = False
        shared_memory_locks["service_reconfiguring"].release()
        return {"success": True, "message": "configured"}
    except Exception:
        log.exception("exception")
        shared_memory_locks["service_reconfiguring"].acquire()
        shared_memory_manager_dict["service_reconfiguring"] = False
        shared_memory_locks["service_reconfiguring"].release()
        return {
            "success": False,
            "message": "error during service configuration"
        }
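Note: translate_asn_range (wrapped in flatten above) is an assumed helper that expands ASN range expressions from the rule file into concrete ASNs; the flatten() wrapper suggests ranges expand to lists while single ASNs may pass through as scalars. An illustrative sketch, not the actual implementation:

def translate_asn_range_sketch(asn):
    # expand "64496-64499" into [64496, 64497, 64498, 64499];
    # a plain int or numeric string passes through as a single-element list
    if isinstance(asn, str) and "-" in asn:
        low, high = (int(x) for x in asn.split("-"))
        return list(range(low, high + 1))
    return [int(asn)]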
Example #8
    def run(self):
        # update redis
        ping_redis(redis)
        redis.set("bgpstreamlive_seen_bgp_update",
                  "1",
                  ex=MON_TIMEOUT_LAST_BGP_UPDATE)

        # create a new bgpstream instance
        stream = _pybgpstream.BGPStream()

        # consider collectors from given projects
        for project in self.monitor_projects:
            # ignore deprecated projects
            if project in DEPRECATED_PROJECTS:
                continue
            # add the classic project sources
            stream.add_filter("project", project)
            # plus their real-time counterparts
            if project in LIVE_PROJECT_MAPPINGS:
                stream.add_filter("project", LIVE_PROJECT_MAPPINGS[project])

        # filter prefixes
        for prefix in self.prefixes:
            stream.add_filter("prefix", prefix)

        # build monitored prefix tree
        prefix_tree = {
            "v4": pytricia.PyTricia(32),
            "v6": pytricia.PyTricia(128)
        }
        for prefix in self.prefixes:
            ip_version = get_ip_version(prefix)
            prefix_tree[ip_version].insert(prefix, "")

        # filter record type
        stream.add_filter("record-type", "updates")

        # filter based on timing (if end=0 --> live mode)
        stream.add_interval_filter(int(time.time()) - START_TIME_OFFSET, 0)

        # set live mode
        stream.set_live_mode()

        # start the stream
        stream.start()

        # start producing
        validator = MformatValidator()
        with Producer(self.connection) as producer:
            while True:
                if not self.shared_memory_manager_dict[
                        "data_worker_should_run"]:
                    break

                # get next record
                try:
                    rec = stream.get_next_record()
                except BaseException:
                    continue

                if (rec.status != "valid") or (rec.type != "update"):
                    continue

                # get next element
                try:
                    elem = rec.get_next_elem()
                except BaseException:
                    continue

                while elem:
                    if not self.shared_memory_manager_dict[
                            "data_worker_should_run"]:
                        break

                    if elem.type in {"A", "W"}:
                        redis.set(
                            "bgpstreamlive_seen_bgp_update",
                            "1",
                            ex=MON_TIMEOUT_LAST_BGP_UPDATE,
                        )
                        this_prefix = str(elem.fields["prefix"])
                        service = "bgpstreamlive|{}|{}".format(
                            str(rec.project), str(rec.collector))
                        type_ = elem.type
                        if type_ == "A":
                            as_path = elem.fields["as-path"].split(" ")
                            communities = [{
                                "asn": int(comm.split(":")[0]),
                                "value": int(comm.split(":")[1]),
                            } for comm in elem.fields["communities"]]
                        else:
                            as_path = []
                            communities = []
                        timestamp = float(rec.time)
                        peer_asn = elem.peer_asn

                        ip_version = get_ip_version(this_prefix)
                        if this_prefix in prefix_tree[ip_version]:
                            msg = {
                                "type": type_,
                                "timestamp": timestamp,
                                "path": as_path,
                                "service": service,
                                "communities": communities,
                                "prefix": this_prefix,
                                "peer_asn": peer_asn,
                            }
                            try:
                                if validator.validate(msg):
                                    msgs = normalize_msg_path(msg)
                                    for msg in msgs:
                                        key_generator(msg)
                                        log.debug(msg)
                                        producer.publish(
                                            msg,
                                            exchange=self.update_exchange,
                                            routing_key="update",
                                            serializer="ujson",
                                        )
                                else:
                                    log.warning(
                                        "Invalid format message: {}".format(
                                            msg))
                            except BaseException:
                                log.exception(
                                    "Error when normalizing BGP message: {}".
                                    format(msg))
                    try:
                        elem = rec.get_next_elem()
                    except BaseException:
                        continue
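Note: MformatValidator is used by all producers above as a gate before normalization and publishing; its implementation is not shown in these snippets. A hedged stand-in that checks the fields these producers actually populate could look like:

class MformatValidatorSketch:
    # assumption: a message is valid when it carries all the fields the
    # producers above populate, with the expected types
    required = {
        "type": str,
        "timestamp": float,
        "path": list,
        "service": str,
        "communities": list,
        "prefix": str,
        "peer_asn": int,
    }

    def validate(self, msg):
        return all(
            key in msg and isinstance(msg[key], expected)
            for key, expected in self.required.items()
        )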
Example #9
    def commit_hijack(self, monitor_event: Dict, hijacker: int,
                      hij_dimensions: List[str]) -> None:
        """
        Commit a new hijack or update an existing one in the database.
        Ongoing hijack state is kept in redis so as not to stress the DB.
        """
        hij_type = "|".join(hij_dimensions)
        redis_hijack_key = redis_key(monitor_event["prefix"], hijacker,
                                     hij_type)

        if "hij_key" in monitor_event:
            monitor_event["final_redis_hijack_key"] = redis_hijack_key

        hijack_value = {
            "prefix": monitor_event["prefix"],
            "hijack_as": hijacker,
            "type": hij_type,
            "time_started": monitor_event["timestamp"],
            "time_last": monitor_event["timestamp"],
            "peers_seen": {monitor_event["peer_asn"]},
            "monitor_keys": {monitor_event["key"]},
            "configured_prefix": monitor_event["matched_prefix"],
            "timestamp_of_config": monitor_event["prefix_node"]["timestamp"],
            "end_tag": None,
            "outdated_parent": None,
            "rpki_status": "NA",
        }

        if (RPKI_VALIDATOR_ENABLED == "true" and self.rtrmanager
                and monitor_event["path"]):
            try:
                asn = monitor_event["path"][-1]
                if "/" in monitor_event["prefix"]:
                    network, netmask = monitor_event["prefix"].split("/")
                # /32 or /128
                else:
                    ip_version = get_ip_version(monitor_event["prefix"])
                    network = monitor_event["prefix"]
                    netmask = 32
                    if ip_version == "v6":
                        netmask = 128
                redis_rpki_asn_prefix_key = "rpki_as{}_p{}".format(
                    asn, monitor_event["prefix"])
                redis_rpki_status = self.redis.get(redis_rpki_asn_prefix_key)
                if not redis_rpki_status:
                    rpki_status = get_rpki_val_result(self.rtrmanager, asn,
                                                      network, int(netmask))
                else:
                    rpki_status = redis_rpki_status.decode("utf-8")
                hijack_value["rpki_status"] = rpki_status
                # the default refresh interval for the RPKI RTR manager is 3600 seconds
                self.redis.set(redis_rpki_asn_prefix_key, rpki_status, ex=3600)

            except Exception:
                log.exception("exception")

        if ("hij_key" in monitor_event
                and monitor_event["initial_redis_hijack_key"] !=
                monitor_event["final_redis_hijack_key"]):
            hijack_value["outdated_parent"] = monitor_event["hij_key"]

        # identify the set of infected ASes
        hijack_value["asns_inf"] = set()
        if hij_dimensions[1] in {"0", "1"}:
            hijack_value["asns_inf"] = set(
                monitor_event["path"][:-(int(hij_dimensions[1]) + 1)])
        elif hij_dimensions[3] == "L":
            hijack_value["asns_inf"] = set(monitor_event["path"][:-2])
        # assume the worst-case scenario of a type-2 hijack
        elif len(monitor_event["path"]) > 2:
            hijack_value["asns_inf"] = set(monitor_event["path"][:-3])

        # make the following operation atomic using blpop (blocking)
        # first, make sure that the semaphore is initialized
        if self.redis.getset("{}token_active".format(redis_hijack_key),
                             1) != b"1":
            redis_pipeline = self.redis.pipeline()
            redis_pipeline.lpush("{}token".format(redis_hijack_key), "token")
            # lock, by extracting the token (other processes that access
            # it at the same time will be blocked)
            # attention: it is important that this command is batched in the
            # pipeline since the db may async delete
            # the token
            redis_pipeline.blpop("{}token".format(redis_hijack_key))
            redis_pipeline.execute()
        else:
            # lock, by extracting the token (other processes that access it
            # at the same time will be blocked)
            token = self.redis.blpop("{}token".format(redis_hijack_key),
                                     timeout=60)
            # if we time out after 60 seconds, return without a hijack alert,
            # since this means something was purged in the meantime (e.g., an outdated hijack
            # in another instance; a detector cannot be stuck for a whole minute on a single hijack BGP update)
            if not token:
                log.info(
                    "Monitor event {} encountered redis token timeout and will be cleared as benign for hijack {}"
                    .format(str(monitor_event), redis_hijack_key))
                return

        # proceed now that we have clearance
        redis_pipeline = self.redis.pipeline()
        try:
            result = self.redis.get(redis_hijack_key)
            if result:
                result = classic_json.loads(result.decode("utf-8"))
                result["time_started"] = min(result["time_started"],
                                             hijack_value["time_started"])
                result["time_last"] = max(result["time_last"],
                                          hijack_value["time_last"])
                result["peers_seen"] = set(result["peers_seen"])
                result["peers_seen"].update(hijack_value["peers_seen"])

                result["asns_inf"] = set(result["asns_inf"])
                result["asns_inf"].update(hijack_value["asns_inf"])

                # no update since db already knows!
                result["monitor_keys"] = hijack_value["monitor_keys"]
                self.comm_annotate_hijack(monitor_event, result)
                result["outdated_parent"] = hijack_value["outdated_parent"]

                result["bgpupdate_keys"] = set(result["bgpupdate_keys"])
                result["bgpupdate_keys"].add(monitor_event["key"])

                result["rpki_status"] = hijack_value["rpki_status"]
            else:
                hijack_value["time_detected"] = time.time()
                hijack_value["key"] = get_hash([
                    monitor_event["prefix"],
                    hijacker,
                    hij_type,
                    "{0:.6f}".format(hijack_value["time_detected"]),
                ])
                hijack_value["bgpupdate_keys"] = {monitor_event["key"]}
                redis_pipeline.sadd("persistent-keys", hijack_value["key"])
                result = hijack_value
                self.comm_annotate_hijack(monitor_event, result)
                self.producer.publish(
                    result,
                    exchange=self.hijack_notification_exchange,
                    routing_key="mail-log",
                    retry=False,
                    priority=1,
                    serializer="ujson",
                )
            redis_pipeline.set(redis_hijack_key, json.dumps(result))

            # store the origin, neighbor combination for this hijack BGP update
            origin = None
            neighbor = None
            if monitor_event["path"]:
                origin = monitor_event["path"][-1]
            if len(monitor_event["path"]) > 1:
                neighbor = monitor_event["path"][-2]
            redis_pipeline.sadd(
                "hij_orig_neighb_{}".format(redis_hijack_key),
                "{}_{}".format(origin, neighbor),
            )

            # store the prefix and peer ASN for this hijack BGP update
            redis_pipeline.sadd(
                "prefix_{}_peer_{}_hijacks".format(monitor_event["prefix"],
                                                   monitor_event["peer_asn"]),
                redis_hijack_key,
            )
            redis_pipeline.sadd(
                "hijack_{}_prefixes_peers".format(redis_hijack_key),
                "{}_{}".format(monitor_event["prefix"],
                               monitor_event["peer_asn"]),
            )
        except Exception:
            log.exception("exception")
        finally:
            # execute whatever has been accumulated in redis till now
            redis_pipeline.execute()

            # publish hijack
            self.publish_hijack_fun(result, redis_hijack_key)

            self.producer.publish(
                result,
                exchange=self.hijack_notification_exchange,
                routing_key="hij-log",
                retry=False,
                priority=1,
                serializer="ujson",
            )

            # unlock, by pushing back the token (at most one other process
            # waiting will be unlocked)
            redis_pipeline = self.redis.pipeline()
            redis_pipeline.set("{}token_active".format(redis_hijack_key), 1)
            redis_pipeline.lpush("{}token".format(redis_hijack_key), "token")
            redis_pipeline.execute()
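Note: get_hash only needs to produce a deterministic digest of the hijack-identifying fields, so that the same (prefix, hijacker, type, detection time) always maps to the same key. A minimal stand-in; the real helper may serialize and hash differently:

import hashlib
import json


def get_hash_sketch(obj):
    # deterministic short digest over the identifying fields
    return hashlib.shake_128(json.dumps(obj).encode("utf-8")).hexdigest(16)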
Example #10
def normalize_ripe_ris(msg, prefix_tree):
    msgs = []
    if isinstance(msg, dict):
        msg["key"] = None  # initial placeholder before passing the validator
        if "community" in msg:
            msg["communities"] = [{
                "asn": comm[0],
                "value": comm[1]
            } for comm in msg["community"]]
            del msg["community"]
        if "host" in msg:
            msg["service"] = "ripe-ris|" + msg["host"]
            del msg["host"]
        if "peer_asn" in msg:
            msg["peer_asn"] = int(msg["peer_asn"])
        if "path" not in msg:
            msg["path"] = []
        if "timestamp" in msg:
            msg["timestamp"] = float(msg["timestamp"])
        if "type" in msg:
            del msg["type"]
        if "raw" in msg:
            del msg["raw"]
        if "origin" in msg:
            del msg["origin"]
        if "id" in msg:
            del msg["id"]
        if "announcements" in msg and "withdrawals" in msg:
            # need 2 separate messages
            # one for announcements
            msg_ann = deepcopy(msg)
            msg_ann["type"] = update_to_type["announcements"]
            prefixes = []
            for element in msg_ann["announcements"]:
                if "prefixes" in element:
                    prefixes.extend(element["prefixes"])
            for prefix in prefixes:
                ip_version = get_ip_version(prefix)
                try:
                    if prefix in prefix_tree[ip_version]:
                        new_msg = deepcopy(msg_ann)
                        new_msg["prefix"] = prefix
                        del new_msg["announcements"]
                        del new_msg["withdrawals"]
                        msgs.append(new_msg)
                except Exception:
                    log.exception("exception")
            # one for withdrawals
            msg_wit = deepcopy(msg)
            msg_wit["type"] = update_to_type["withdrawals"]
            msg_wit["path"] = []
            msg_wit["communities"] = []
            prefixes = msg_wit["withdrawals"]
            for prefix in prefixes:
                ip_version = get_ip_version(prefix)
                try:
                    if prefix in prefix_tree[ip_version]:
                        new_msg = deepcopy(msg_wit)
                        new_msg["prefix"] = prefix
                        del new_msg["announcements"]
                        del new_msg["withdrawals"]
                        msgs.append(new_msg)
                except Exception:
                    log.exception("exception")
        else:
            for update_type in update_types:
                if update_type in msg:
                    msg["type"] = update_to_type[update_type]
                    prefixes = []
                    for element in msg[update_type]:
                        if update_type == "announcements":
                            if "prefixes" in element:
                                prefixes.extend(element["prefixes"])
                        elif update_type == "withdrawals":
                            prefixes.append(element)
                    for prefix in prefixes:
                        ip_version = get_ip_version(prefix)
                        try:
                            if prefix in prefix_tree[ip_version]:
                                new_msg = deepcopy(msg)
                                new_msg["prefix"] = prefix
                                del new_msg[update_type]
                                msgs.append(new_msg)
                        except Exception:
                            log.exception("exception")
    return msgs
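Note: update_types and update_to_type are assumed module-level constants here; given that the other monitors tag announcements as "A" and withdrawals as "W", they plausibly look like:

update_types = ["announcements", "withdrawals"]
update_to_type = {"announcements": "A", "withdrawals": "W"}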
Example #11
def parse_ripe_ris(connection, prefixes_file, hosts):
    exchange = Exchange("bgp-update",
                        channel=connection,
                        type="direct",
                        durable=False)
    exchange.declare()

    prefixes = load_json(prefixes_file)
    assert prefixes is not None
    prefix_tree = {"v4": pytricia.PyTricia(32), "v6": pytricia.PyTricia(128)}
    for prefix in prefixes:
        ip_version = get_ip_version(prefix)
        prefix_tree[ip_version].insert(prefix, "")

    ris_suffix = os.getenv("RIS_ID", "my_as")

    validator = mformat_validator()
    with Producer(connection) as producer:
        while True:
            try:
                events = requests.get(
                    "https://ris-live.ripe.net/v1/stream/?format=json&client=artemis-{}"
                    .format(ris_suffix),
                    stream=True,
                    timeout=10,
                )
                # http://docs.python-requests.org/en/latest/user/advanced/#streaming-requests
                iterator = events.iter_lines()
                next(iterator)
                for data in iterator:
                    try:
                        parsed = json.loads(data)
                        msg = parsed["data"]
                        if "type" in parsed and parsed["type"] == "ris_error":
                            log.error(msg)
                        # also check if ris host is in the configuration
                        elif ("type" in msg and msg["type"] == "UPDATE"
                              and (not hosts or msg["host"] in hosts)):
                            norm_ris_msgs = normalize_ripe_ris(
                                msg, prefix_tree)
                            for norm_ris_msg in norm_ris_msgs:
                                redis.set(
                                    "ris_seen_bgp_update",
                                    "1",
                                    ex=int(
                                        os.getenv(
                                            "MON_TIMEOUT_LAST_BGP_UPDATE",
                                            DEFAULT_MON_TIMEOUT_LAST_BGP_UPDATE,
                                        )),
                                )
                                try:
                                    if validator.validate(norm_ris_msg):
                                        norm_path_msgs = normalize_msg_path(
                                            norm_ris_msg)
                                        for norm_path_msg in norm_path_msgs:
                                            key_generator(norm_path_msg)
                                            log.debug(norm_path_msg)
                                            producer.publish(
                                                norm_path_msg,
                                                exchange=exchange,
                                                routing_key="update",
                                                serializer="ujson",
                                            )
                                    else:
                                        log.warning(
                                            "Invalid format message: {}".
                                            format(msg))
                                except BaseException:
                                    log.exception(
                                        "Error when normalizing BGP message: {}"
                                        .format(norm_ris_msg))
                    except Exception:
                        log.exception("exception message {}".format(data))
                log.warning(
                    "Iterator ran out of data; the connection will be retried")
            except Exception:
                log.info(
                    "RIPE RIS Server closed connection. Restarting socket in 60seconds.."
                )
                time.sleep(60)
Example #12
    def run(self):
        # update redis
        ping_redis(redis)
        redis.set("ris_seen_bgp_update", "1", ex=MON_TIMEOUT_LAST_BGP_UPDATE)

        # build monitored prefix tree
        prefix_tree = {
            "v4": pytricia.PyTricia(32),
            "v6": pytricia.PyTricia(128)
        }
        for prefix in self.prefixes:
            ip_version = get_ip_version(prefix)
            prefix_tree[ip_version].insert(prefix, "")

        # set RIS suffix on connection
        ris_suffix = RIS_ID

        # main loop to process BGP updates
        validator = MformatValidator()
        with Producer(self.connection) as producer:
            while True:
                if not self.shared_memory_manager_dict[
                        "data_worker_should_run"]:
                    break
                try:
                    events = requests.get(
                        "https://ris-live.ripe.net/v1/stream/?format=json&client=artemis-{}"
                        .format(ris_suffix),
                        stream=True,
                        timeout=10,
                    )
                    # http://docs.python-requests.org/en/latest/user/advanced/#streaming-requests
                    iterator = events.iter_lines()
                    next(iterator)
                    for data in iterator:
                        if not self.shared_memory_manager_dict[
                                "data_worker_should_run"]:
                            break
                        try:
                            parsed = json.loads(data)
                            msg = parsed["data"]
                            if "type" in parsed and parsed[
                                    "type"] == "ris_error":
                                log.error(msg)
                            # also check if ris host is in the configuration
                            elif (
                                    "type" in msg and msg["type"] == "UPDATE"
                                    and
                                (not self.hosts or msg["host"] in self.hosts)):
                                norm_ris_msgs = self.normalize_ripe_ris(
                                    msg, prefix_tree)
                                for norm_ris_msg in norm_ris_msgs:
                                    redis.set(
                                        "ris_seen_bgp_update",
                                        "1",
                                        ex=MON_TIMEOUT_LAST_BGP_UPDATE,
                                    )
                                    try:
                                        if validator.validate(norm_ris_msg):
                                            norm_path_msgs = normalize_msg_path(
                                                norm_ris_msg)
                                            for norm_path_msg in norm_path_msgs:
                                                key_generator(norm_path_msg)
                                                log.debug(norm_path_msg)
                                                producer.publish(
                                                    norm_path_msg,
                                                    exchange=self.update_exchange,
                                                    routing_key="update",
                                                    serializer="ujson",
                                                )
                                        else:
                                            log.warning(
                                                "Invalid format message: {}".
                                                format(msg))
                                    except BaseException:
                                        log.exception("exception")
                                        log.error(
                                            "Error when normalizing BGP message: {}"
                                            .format(norm_ris_msg))
                        except Exception:
                            log.exception("exception")
                            log.error("exception message {}".format(data))
                    log.warning(
                        "Iterator ran out of data; the connection will be retried"
                    )
                except Exception:
                    log.exception("exception")
                    log.info(
                        "RIPE RIS Server closed connection. Restarting socket in 10 seconds.."
                    )
                    time.sleep(10)
Example #13
    def run(self):
        # build monitored prefix tree
        prefix_tree = {"v4": pytricia.PyTricia(32), "v6": pytricia.PyTricia(128)}
        for prefix in self.prefixes:
            ip_version = get_ip_version(prefix)
            prefix_tree[ip_version].insert(prefix, "")

        # start producing
        validator = MformatValidator()
        with Producer(self.connection) as producer:
            for csv_file in glob.glob("{}/*.csv".format(self.input_dir)):
                if not self.shared_memory_manager_dict["data_worker_should_run"]:
                    break

                try:
                    with open(csv_file, "r") as f:
                        csv_reader = csv.reader(f, delimiter="|")
                        for row in csv_reader:
                            if not self.shared_memory_manager_dict[
                                "data_worker_should_run"
                            ]:
                                break

                            try:
                                if len(row) != 9:
                                    continue
                                if row[0].startswith("#"):
                                    continue
                                # example row (9 "|"-separated fields):
                                # 139.91.0.0/16|8522|1403|1403 6461 2603 21320 5408 8522|routeviews|route-views2|A|"[{""asn"":1403,""value"":6461}]"|1517446677
                                this_prefix = row[0]
                                if row[6] == "A":
                                    as_path = row[3].split(" ")
                                    communities = json.loads(row[7])
                                else:
                                    as_path = []
                                    communities = []
                                service = "historical|{}|{}".format(row[4], row[5])
                                type_ = row[6]
                                timestamp = float(row[8])
                                peer_asn = int(row[2])
                                ip_version = get_ip_version(this_prefix)
                                if this_prefix in prefix_tree[ip_version]:
                                    msg = {
                                        "type": type_,
                                        "timestamp": timestamp,
                                        "path": as_path,
                                        "service": service,
                                        "communities": communities,
                                        "prefix": this_prefix,
                                        "peer_asn": peer_asn,
                                    }
                                    try:
                                        if validator.validate(msg):
                                            msgs = normalize_msg_path(msg)
                                            for msg in msgs:
                                                key_generator(msg)
                                                log.debug(msg)
                                                producer.publish(
                                                    msg,
                                                    exchange=self.update_exchange,
                                                    routing_key="update",
                                                    serializer="ujson",
                                                )
                                                time.sleep(0.01)
                                        else:
                                            log.warning(
                                                "Invalid format message: {}".format(msg)
                                            )
                                    except BaseException:
                                        log.exception(
                                            "Error when normalizing BGP message: {}".format(
                                                msg
                                            )
                                        )
                            except Exception:
                                log.exception("row")
                except Exception:
                    log.exception("exception")

        # run until instructed to stop
        while True:
            if not self.shared_memory_manager_dict["data_worker_should_run"]:
                break
            time.sleep(1)
Example #14
            def handle_exabgp_msg(bgp_message):
                redis.set("exabgp_seen_bgp_update",
                          "1",
                          ex=MON_TIMEOUT_LAST_BGP_UPDATE)
                msg = {
                    "type": bgp_message["type"],
                    "communities": bgp_message.get("communities", []),
                    "timestamp": float(bgp_message["timestamp"]),
                    "path": bgp_message.get("path", []),
                    "service": "exabgp|{}".format(host),
                    "prefix": bgp_message["prefix"],
                    "peer_asn": int(bgp_message["peer_asn"]),
                }

                this_prefix = msg["prefix"]
                ip_version = get_ip_version(this_prefix)
                if this_prefix in prefix_tree[ip_version]:
                    try:
                        if validator.validate(msg):
                            msgs = normalize_msg_path(msg)
                            for msg in msgs:
                                key_generator(msg)
                                log.debug(msg)
                                if autoconf:
                                    try:
                                        if learn_neighbors:
                                            msg["learn_neighbors"] = True
                                        shared_memory_locks[
                                            "autoconf_updates"].acquire()
                                        autoconf_updates = self.shared_memory_manager_dict[
                                            "autoconf_updates"]
                                        autoconf_updates[msg["key"]] = msg
                                        self.shared_memory_manager_dict[
                                            "autoconf_updates"] = autoconf_updates
                                        # mark the autoconf BGP updates for configuration
                                        # processing in redis
                                        redis_pipeline = redis.pipeline()
                                        redis_pipeline.sadd(
                                            "autoconf-update-keys-to-process",
                                            msg["key"],
                                        )
                                        redis_pipeline.execute()
                                    except Exception:
                                        log.exception("exception")
                                    finally:
                                        shared_memory_locks[
                                            "autoconf_updates"].release()
                                else:
                                    with Producer(self.connection) as producer:
                                        producer.publish(
                                            msg,
                                            exchange=self.update_exchange,
                                            routing_key="update",
                                            serializer="ujson",
                                        )
                        else:
                            log.warning(
                                "Invalid format message: {}".format(msg))
                    except BaseException:
                        log.exception(
                            "Error when normalizing BGP message: {}".format(
                                msg))
Example #15
    def run_host_sio_process(self, host):
        def exit_gracefully(signum, frame):
            if sio is not None:
                sio.disconnect()
                log.info("'{}' sio disconnected".format(host))
            log.info("'{}' client exited".format(host))
            shared_memory_locks["data_worker"].acquire()
            self.shared_memory_manager_dict["data_worker_should_run"] = False
            shared_memory_locks["data_worker"].release()

        # register signal handler
        signal.signal(signal.SIGTERM, exit_gracefully)
        signal.signal(signal.SIGINT, exit_gracefully)

        try:
            # set autoconf booleans
            autoconf = False
            learn_neighbors = False
            if "autoconf" in self.hosts[host]:
                autoconf = True
                if "learn_neighbors" in self.hosts[host]:
                    learn_neighbors = True

            # build monitored prefix tree
            if autoconf:
                prefixes = ["0.0.0.0/0", "::/0"]
            else:
                prefixes = self.prefixes
            prefix_tree = {
                "v4": pytricia.PyTricia(32),
                "v6": pytricia.PyTricia(128)
            }
            for prefix in prefixes:
                ip_version = get_ip_version(prefix)
                prefix_tree[ip_version].insert(prefix, "")

            # set up message validator
            validator = MformatValidator()

            def handle_exabgp_msg(bgp_message):
                redis.set("exabgp_seen_bgp_update",
                          "1",
                          ex=MON_TIMEOUT_LAST_BGP_UPDATE)
                msg = {
                    "type": bgp_message["type"],
                    "communities": bgp_message.get("communities", []),
                    "timestamp": float(bgp_message["timestamp"]),
                    "path": bgp_message.get("path", []),
                    "service": "exabgp|{}".format(host),
                    "prefix": bgp_message["prefix"],
                    "peer_asn": int(bgp_message["peer_asn"]),
                }

                this_prefix = msg["prefix"]
                ip_version = get_ip_version(this_prefix)
                if this_prefix in prefix_tree[ip_version]:
                    try:
                        if validator.validate(msg):
                            msgs = normalize_msg_path(msg)
                            for msg in msgs:
                                key_generator(msg)
                                log.debug(msg)
                                if autoconf:
                                    try:
                                        if learn_neighbors:
                                            msg["learn_neighbors"] = True
                                        shared_memory_locks[
                                            "autoconf_updates"].acquire()
                                        autoconf_updates = self.shared_memory_manager_dict[
                                            "autoconf_updates"]
                                        autoconf_updates[msg["key"]] = msg
                                        self.shared_memory_manager_dict[
                                            "autoconf_updates"] = autoconf_updates
                                        # mark the autoconf BGP updates for configuration
                                        # processing in redis
                                        redis_pipeline = redis.pipeline()
                                        redis_pipeline.sadd(
                                            "autoconf-update-keys-to-process",
                                            msg["key"],
                                        )
                                        redis_pipeline.execute()
                                    except Exception:
                                        log.exception("exception")
                                    finally:
                                        shared_memory_locks[
                                            "autoconf_updates"].release()
                                else:
                                    with Producer(self.connection) as producer:
                                        producer.publish(
                                            msg,
                                            exchange=self.update_exchange,
                                            routing_key="update",
                                            serializer="ujson",
                                        )
                        else:
                            log.warning(
                                "Invalid format message: {}".format(msg))
                    except BaseException:
                        log.exception(
                            "Error when normalizing BGP message: {}".format(
                                msg))

            # set up socket-io client
            sio = SocketIO("http://" + host, namespace=BaseNamespace)
            log.info("'{}' client ready to receive sio messages".format(host))
            sio.on("exa_message", handle_exabgp_msg)
            sio.emit("exa_subscribe", {"prefixes": prefixes})
            if autoconf:
                route_refresh_command_v4 = "announce route-refresh ipv4 unicast"
                sio.emit("route_command",
                         {"command": route_refresh_command_v4})
                route_refresh_command_v6 = "announce route-refresh ipv6 unicast"
                sio.emit("route_command",
                         {"command": route_refresh_command_v6})
            sio.wait()
        except Exception:
            log.exception("exception")
Example #16
 def find_best_prefix_node(self, prefix):
     ip_version = get_ip_version(prefix)
     if prefix in self.prefix_tree[ip_version]:
         return self.prefix_tree[ip_version][prefix]
     return None