Example #1
    def __init__(self, connection):
        self.module_name = "monitor"
        self.connection = connection
        self.timestamp = -1
        self.prefix_tree = None
        self.process_ids = []
        self.rules = None
        self.prefixes = set()
        self.prefix_file = "/root/monitor_prefixes.json"
        self.monitors = None
        self.flag = True
        self.redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
        ping_redis(self.redis)
        self.correlation_id = None

        # EXCHANGES
        self.config_exchange = create_exchange("config", connection)

        # QUEUES
        self.config_queue = create_queue(
            self.module_name,
            exchange=self.config_exchange,
            routing_key="notify",
            priority=2,
        )

        self.config_request_rpc()

        # setup Redis monitor listeners
        self.setup_redis_mon_listeners()

        log.info("started")
Example #2
    def run(self):
        # update redis
        ping_redis(redis)
        redis.set("exabgp_seen_bgp_update",
                  "1",
                  ex=MON_TIMEOUT_LAST_BGP_UPDATE)

        autoconf_running = self.shared_memory_manager_dict["autoconf_running"]
        if not autoconf_running:
            log.info("setting up autoconf updater process...")
            with Connection(RABBITMQ_URI) as connection:
                self.autoconf_updater = AutoconfUpdater(
                    connection, self.shared_memory_manager_dict)
                shared_memory_locks["autoconf_updates"].acquire()
                self.shared_memory_manager_dict["autoconf_running"] = True
                shared_memory_locks["autoconf_updates"].release()
                mp.Process(target=self.autoconf_updater.run).start()
            log.info("autoignore checker set up")

        # start host processes
        host_processes = []
        for host in self.hosts:
            host_process = mp.Process(target=self.run_host_sio_process,
                                      args=(host, ))
            host_processes.append(host_process)
            host_process.start()

        while True:
            if not self.shared_memory_manager_dict["data_worker_should_run"]:
                for host_process in host_processes:
                    host_process.terminate()
                break
            time.sleep(1)
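
The shared_memory_manager_dict and shared_memory_locks used above are ordinary multiprocessing primitives shared across worker processes. An illustrative wiring, assuming a Manager-backed proxy dict (the exact initialization is an assumption):

import multiprocessing as mp

# proxy dict visible to all spawned worker processes (assumed setup)
manager = mp.Manager()
shared_memory_manager_dict = manager.dict()
shared_memory_manager_dict["data_worker_should_run"] = True
shared_memory_manager_dict["autoconf_running"] = False

# named locks guarding writes to individual keys of the shared dict
shared_memory_locks = {"autoconf_updates": mp.Lock()}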
Example #3
    def run(self):
        # update redis
        ping_redis(redis)
        redis.set("bgpstreamkafka_seen_bgp_update",
                  "1",
                  ex=MON_TIMEOUT_LAST_BGP_UPDATE)

        # create a new bgpstream instance
        stream = _pybgpstream.BGPStream()

        # set kafka data interface
        stream.set_data_interface("kafka")

        # set host connection details
        stream.set_data_interface_option("kafka", "brokers",
                                         "{}:{}".format(self.host, self.port))

        # set topic
        stream.set_data_interface_option("kafka", "topic", self.topic)

        # filter prefixes
        for prefix in self.prefixes:
            stream.add_filter("prefix", prefix)

        # build monitored prefix tree
        prefix_tree = {
            "v4": pytricia.PyTricia(32),
            "v6": pytricia.PyTricia(128)
        }
        for prefix in self.prefixes:
            ip_version = get_ip_version(prefix)
            prefix_tree[ip_version].insert(prefix, "")

        # filter record type
        stream.add_filter("record-type", "updates")

        # filter based on timing (if end=0 --> live mode)
        # Bypass for https://github.com/FORTH-ICS-INSPIRE/artemis/issues/411#issuecomment-661325802
        start_time = int(time.time()) - START_TIME_OFFSET
        if "BGPSTREAM_TIMESTAMP_BYPASS" in os.environ:
            log.warning(
                "Using BGPSTREAM_TIMESTAMP_BYPASS, meaning BMP timestamps are thrown away from BGPStream"
            )
            start_time = 0
        stream.add_interval_filter(start_time, 0)

        # set live mode
        stream.set_live_mode()

        # start the stream
        stream.start()

        # start producing
        validator = MformatValidator()
        with Producer(self.connection) as producer:
            while True:
                if not self.shared_memory_manager_dict[
                        "data_worker_should_run"]:
                    break

                # get next record
                try:
                    rec = stream.get_next_record()
                except BaseException:
                    continue

                if (rec.status != "valid") or (rec.type != "update"):
                    continue

                # get next element
                try:
                    elem = rec.get_next_elem()
                except BaseException:
                    continue

                while elem:
                    if not self.shared_memory_manager_dict[
                            "data_worker_should_run"]:
                        break

                    if elem.type in {"A", "W"}:
                        redis.set(
                            "bgpstreamkafka_seen_bgp_update",
                            "1",
                            ex=MON_TIMEOUT_LAST_BGP_UPDATE,
                        )
                        this_prefix = str(elem.fields["prefix"])
                        service = "bgpstreamkafka|{}".format(str(
                            rec.collector))
                        type_ = elem.type
                        if type_ == "A":
                            as_path = elem.fields["as-path"].split(" ")
                            communities = [{
                                "asn": int(comm.split(":")[0]),
                                "value": int(comm.split(":")[1]),
                            } for comm in elem.fields["communities"]]
                        else:
                            as_path = []
                            communities = []
                        timestamp = float(rec.time)
                        if timestamp == 0:
                            timestamp = time.time()
                            log.debug("fixed timestamp: {}".format(timestamp))
                        peer_asn = elem.peer_asn

                        ip_version = get_ip_version(this_prefix)
                        if this_prefix in prefix_tree[ip_version]:
                            msg = {
                                "type": type_,
                                "timestamp": timestamp,
                                "path": as_path,
                                "service": service,
                                "communities": communities,
                                "prefix": this_prefix,
                                "peer_asn": peer_asn,
                            }
                            try:
                                if validator.validate(msg):
                                    msgs = normalize_msg_path(msg)
                                    for msg in msgs:
                                        key_generator(msg)
                                        log.debug(msg)
                                        producer.publish(
                                            msg,
                                            exchange=self.update_exchange,
                                            routing_key="update",
                                            serializer="ujson",
                                        )
                                else:
                                    log.warning(
                                        "Invalid format message: {}".format(
                                            msg))
                            except BaseException:
                                log.exception(
                                    "Error when normalizing BGP message: {}".
                                    format(msg))
                    try:
                        elem = rec.get_next_elem()
                    except BaseException:
                        # move on to the next record; `continue` here would
                        # re-process the same element indefinitely
                        break
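
The msg dictionaries published above follow a fixed schema that MformatValidator checks before publishing. An illustrative announcement instance (all field values are placeholders):

example_msg = {
    "type": "A",                        # "A" = announcement, "W" = withdrawal
    "timestamp": 1620000000.0,          # epoch seconds, as a float
    "path": ["6447", "3356", "65001"],  # tokens of elem.fields["as-path"]
    "service": "bgpstreamkafka|rrc21",  # monitor name | collector
    "communities": [{"asn": 3356, "value": 2}],
    "prefix": "192.0.2.0/24",
    "peer_asn": 6447,
}
# for withdrawals ("W"), "path" and "communities" are empty lists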
Example #4
    def __init__(self, connection: Connection,
                 shared_memory_manager_dict: Dict) -> None:
        self.connection = connection
        self.redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
        ping_redis(self.redis)
        self.shared_memory_manager_dict = shared_memory_manager_dict

        self.prefix_tree = {
            "v4": pytricia.PyTricia(32),
            "v6": pytricia.PyTricia(128)
        }
        shared_memory_locks["prefix_tree"].acquire()
        if self.shared_memory_manager_dict["prefix_tree_recalculate"]:
            for ip_version in ["v4", "v6"]:
                if ip_version == "v4":
                    size = 32
                else:
                    size = 128
                self.prefix_tree[ip_version] = dict_to_pytricia(
                    self.shared_memory_manager_dict["prefix_tree"][ip_version],
                    size)
                log.info("{} pytricia tree parsed from configuration".format(
                    ip_version))
                self.shared_memory_manager_dict[
                    "prefix_tree_recalculate"] = False
        shared_memory_locks["prefix_tree"].release()

        self.autoignore_prefix_tree = {
            "v4": pytricia.PyTricia(32),
            "v6": pytricia.PyTricia(128),
        }
        shared_memory_locks["autoignore"].acquire()
        if self.shared_memory_manager_dict["autoignore_recalculate"]:
            for ip_version in ["v4", "v6"]:
                if ip_version == "v4":
                    size = 32
                else:
                    size = 128
                self.autoignore_prefix_tree[ip_version] = dict_to_pytricia(
                    self.shared_memory_manager_dict["autoignore_prefix_tree"]
                    [ip_version],
                    size,
                )
                log.info("{} pytricia tree parsed from configuration".format(
                    ip_version))
                self.shared_memory_manager_dict[
                    "autoignore_recalculate"] = False
        shared_memory_locks["autoignore"].release()

        # EXCHANGES
        self.update_exchange = create_exchange("bgp-update",
                                               connection,
                                               declare=True)
        self.hijack_exchange = create_exchange("hijack-update",
                                               connection,
                                               declare=True)
        self.autoconf_exchange = create_exchange("autoconf",
                                                 connection,
                                                 declare=True)
        self.pg_amq_bridge = create_exchange("amq.direct", connection)
        self.mitigation_exchange = create_exchange("mitigation",
                                                   connection,
                                                   declare=True)
        self.autoignore_exchange = create_exchange("autoignore",
                                                   connection,
                                                   declare=True)
        self.command_exchange = create_exchange("command",
                                                connection,
                                                declare=True)

        # QUEUES
        self.update_queue = create_queue(
            SERVICE_NAME,
            exchange=self.update_exchange,
            routing_key="update",
            priority=1,
        )
        self.hijack_ongoing_queue = create_queue(
            SERVICE_NAME,
            exchange=self.hijack_exchange,
            routing_key="ongoing",
            priority=1,
        )
        self.pg_amq_update_queue = create_queue(
            SERVICE_NAME,
            exchange=self.pg_amq_bridge,
            routing_key="update-insert",
            priority=1,
        )
        self.mitigation_request_queue = create_queue(
            SERVICE_NAME,
            exchange=self.mitigation_exchange,
            routing_key="mitigate",
            priority=2,
        )
        self.unmitigation_request_queue = create_queue(
            SERVICE_NAME,
            exchange=self.mitigation_exchange,
            routing_key="unmitigate",
            priority=2,
        )
        self.stop_queue = create_queue(
            "{}-{}".format(SERVICE_NAME, uuid()),
            exchange=self.command_exchange,
            routing_key="stop-{}".format(SERVICE_NAME),
            priority=1,
        )
        self.autoconf_update_queue = create_queue(
            SERVICE_NAME,
            exchange=self.autoconf_exchange,
            routing_key="update",
            priority=4,
            random=True,
        )
        self.ongoing_hijack_prefixes_queue = create_queue(
            SERVICE_NAME,
            exchange=self.autoignore_exchange,
            routing_key="ongoing-hijack-prefixes",
            priority=1,
            random=True,
        )

        log.info("data worker initiated")
Example #5
    def run(self):
        # update redis
        ping_redis(redis)
        redis.set("bgpstreamlive_seen_bgp_update",
                  "1",
                  ex=MON_TIMEOUT_LAST_BGP_UPDATE)

        # create a new bgpstream instance
        stream = _pybgpstream.BGPStream()

        # consider collectors from given projects
        for project in self.monitor_projects:
            # ignore deprecated projects
            if project in DEPRECATED_PROJECTS:
                continue
            # add the classic project sources
            stream.add_filter("project", project)
            # plus their real-time counterparts
            if project in LIVE_PROJECT_MAPPINGS:
                stream.add_filter("project", LIVE_PROJECT_MAPPINGS[project])

        # filter prefixes
        for prefix in self.prefixes:
            stream.add_filter("prefix", prefix)

        # build monitored prefix tree
        prefix_tree = {
            "v4": pytricia.PyTricia(32),
            "v6": pytricia.PyTricia(128)
        }
        for prefix in self.prefixes:
            ip_version = get_ip_version(prefix)
            prefix_tree[ip_version].insert(prefix, "")

        # filter record type
        stream.add_filter("record-type", "updates")

        # filter based on timing (if end=0 --> live mode)
        stream.add_interval_filter(int(time.time()) - START_TIME_OFFSET, 0)

        # set live mode
        stream.set_live_mode()

        # start the stream
        stream.start()

        # start producing
        validator = MformatValidator()
        with Producer(self.connection) as producer:
            while True:
                if not self.shared_memory_manager_dict[
                        "data_worker_should_run"]:
                    break

                # get next record
                try:
                    rec = stream.get_next_record()
                except BaseException:
                    continue

                if (rec.status != "valid") or (rec.type != "update"):
                    continue

                # get next element
                try:
                    elem = rec.get_next_elem()
                except BaseException:
                    continue

                while elem:
                    if not self.shared_memory_manager_dict[
                            "data_worker_should_run"]:
                        break

                    if elem.type in {"A", "W"}:
                        redis.set(
                            "bgpstreamlive_seen_bgp_update",
                            "1",
                            ex=MON_TIMEOUT_LAST_BGP_UPDATE,
                        )
                        this_prefix = str(elem.fields["prefix"])
                        service = "bgpstreamlive|{}|{}".format(
                            str(rec.project), str(rec.collector))
                        type_ = elem.type
                        if type_ == "A":
                            as_path = elem.fields["as-path"].split(" ")
                            communities = [{
                                "asn": int(comm.split(":")[0]),
                                "value": int(comm.split(":")[1]),
                            } for comm in elem.fields["communities"]]
                        else:
                            as_path = []
                            communities = []
                        timestamp = float(rec.time)
                        peer_asn = elem.peer_asn

                        ip_version = get_ip_version(this_prefix)
                        if this_prefix in prefix_tree[ip_version]:
                            msg = {
                                "type": type_,
                                "timestamp": timestamp,
                                "path": as_path,
                                "service": service,
                                "communities": communities,
                                "prefix": this_prefix,
                                "peer_asn": peer_asn,
                            }
                            try:
                                if validator.validate(msg):
                                    msgs = normalize_msg_path(msg)
                                    for msg in msgs:
                                        key_generator(msg)
                                        log.debug(msg)
                                        producer.publish(
                                            msg,
                                            exchange=self.update_exchange,
                                            routing_key="update",
                                            serializer="ujson",
                                        )
                                else:
                                    log.warning(
                                        "Invalid format message: {}".format(
                                            msg))
                            except BaseException:
                                log.exception(
                                    "Error when normalizing BGP message: {}".
                                    format(msg))
                    try:
                        elem = rec.get_next_elem()
                    except BaseException:
                        # move on to the next record; `continue` here would
                        # re-process the same element indefinitely
                        break
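
One way the get_ip_version helper used to index the prefix trees could be implemented (an assumption, not necessarily the project's code):

import ipaddress

def get_ip_version(prefix):
    # maps a prefix string to the "v4"/"v6" keys of prefix_tree
    if ipaddress.ip_network(prefix, strict=False).version == 4:
        return "v4"
    return "v6"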
Example #6
    def __init__(self, connection: Connection,
                 shared_memory_manager_dict: Dict) -> None:
        self.connection = connection
        self.shared_memory_manager_dict = shared_memory_manager_dict
        self.rtrmanager = None

        # wait for other needed data workers to start
        wait_data_worker_dependencies(DATA_WORKER_DEPENDENCIES)

        # EXCHANGES
        self.update_exchange = create_exchange("bgp-update",
                                               connection,
                                               declare=True)
        self.hijack_exchange = create_exchange("hijack-update",
                                               connection,
                                               declare=True)
        self.hijack_hashing = create_exchange("hijack-hashing",
                                              connection,
                                              "x-consistent-hash",
                                              declare=True)
        self.handled_exchange = create_exchange("handled-update",
                                                connection,
                                                declare=True)
        self.hijack_notification_exchange = create_exchange(
            "hijack-notification", connection, declare=True)
        self.command_exchange = create_exchange("command",
                                                connection,
                                                declare=True)

        # QUEUES
        self.update_queue = create_queue(
            SERVICE_NAME,
            exchange=self.update_exchange,
            routing_key="stored-update-with-prefix-node",
            priority=1,
        )
        self.hijack_ongoing_queue = create_queue(
            SERVICE_NAME,
            exchange=self.hijack_exchange,
            routing_key="ongoing-with-prefix-node",
            priority=1,
        )
        self.stop_queue = create_queue(
            "{}-{}".format(SERVICE_NAME, uuid()),
            exchange=self.command_exchange,
            routing_key="stop-{}".format(SERVICE_NAME),
            priority=1,
        )

        setattr(self, "publish_hijack_fun",
                self.publish_hijack_result_production)
        if TEST_ENV == "true":
            setattr(self, "publish_hijack_fun",
                    self.publish_hijack_result_test)

        self.redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
        ping_redis(self.redis)

        if RPKI_VALIDATOR_ENABLED == "true":
            from rtrlib import RTRManager

            while True:
                try:
                    self.rtrmanager = RTRManager(RPKI_VALIDATOR_HOST,
                                                 RPKI_VALIDATOR_PORT)
                    self.rtrmanager.start()
                    log.info("Connected to RPKI VALIDATOR '{}:{}'".format(
                        RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT))
                    break
                except Exception:
                    log.info(
                        "Could not connect to RPKI VALIDATOR '{}:{}'".format(
                            RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT))
                    log.info("Retrying RTR connection in 30 seconds...")
                    time.sleep(30)

        log.info("data worker initiated")
Example #7
    def run(self):
        # update redis
        ping_redis(redis)
        redis.set("ris_seen_bgp_update", "1", ex=MON_TIMEOUT_LAST_BGP_UPDATE)

        # build monitored prefix tree
        prefix_tree = {
            "v4": pytricia.PyTricia(32),
            "v6": pytricia.PyTricia(128)
        }
        for prefix in self.prefixes:
            ip_version = get_ip_version(prefix)
            prefix_tree[ip_version].insert(prefix, "")

        # set RIS suffix on connection
        ris_suffix = RIS_ID

        # main loop to process BGP updates
        validator = MformatValidator()
        with Producer(self.connection) as producer:
            while True:
                if not self.shared_memory_manager_dict[
                        "data_worker_should_run"]:
                    break
                try:
                    events = requests.get(
                        "https://ris-live.ripe.net/v1/stream/?format=json&client=artemis-{}"
                        .format(ris_suffix),
                        stream=True,
                        timeout=10,
                    )
                    # http://docs.python-requests.org/en/latest/user/advanced/#streaming-requests
                    iterator = events.iter_lines()
                    next(iterator)
                    for data in iterator:
                        if not self.shared_memory_manager_dict[
                                "data_worker_should_run"]:
                            break
                        try:
                            parsed = json.loads(data)
                            msg = parsed["data"]
                            if "type" in parsed and parsed[
                                    "type"] == "ris_error":
                                log.error(msg)
                            # also check if ris host is in the configuration
                            elif (
                                    "type" in msg and msg["type"] == "UPDATE"
                                    and
                                (not self.hosts or msg["host"] in self.hosts)):
                                norm_ris_msgs = self.normalize_ripe_ris(
                                    msg, prefix_tree)
                                for norm_ris_msg in norm_ris_msgs:
                                    redis.set(
                                        "ris_seen_bgp_update",
                                        "1",
                                        ex=MON_TIMEOUT_LAST_BGP_UPDATE,
                                    )
                                    try:
                                        if validator.validate(norm_ris_msg):
                                            norm_path_msgs = normalize_msg_path(
                                                norm_ris_msg)
                                            for norm_path_msg in norm_path_msgs:
                                                key_generator(norm_path_msg)
                                                log.debug(norm_path_msg)
                                                producer.publish(
                                                    norm_path_msg,
                                                    exchange=self.update_exchange,
                                                    routing_key="update",
                                                    serializer="ujson",
                                                )
                                        else:
                                            log.warning(
                                                "Invalid format message: {}".
                                                format(msg))
                                    except BaseException:
                                        log.exception("exception")
                                        log.error(
                                            "Error when normalizing BGP message: {}"
                                            .format(norm_ris_msg))
                        except Exception:
                            log.exception("exception")
                            log.error("exception message {}".format(data))
                    log.warning(
                        "Iterator ran out of data; the connection will be retried"
                    )
                except Exception:
                    log.exception("exception")
                    log.info(
                        "RIPE RIS Server closed connection. Restarting socket in 10 seconds.."
                    )
                    time.sleep(10)
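
Stripped of the publishing pipeline, the RIS Live consumption pattern above reduces to a small generator. A self-contained sketch against the same public endpoint (the client suffix is a placeholder):

import json
import requests

def stream_ris_updates(client="artemis-example"):
    url = ("https://ris-live.ripe.net/v1/stream/"
           "?format=json&client={}".format(client))
    with requests.get(url, stream=True, timeout=10) as resp:
        for line in resp.iter_lines():
            if not line:
                continue
            parsed = json.loads(line)
            if parsed.get("type") == "ris_message":
                yield parsed["data"]  # holds "type": "UPDATE", "host", etc.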