def run(self):
    # update redis
    ping_redis(redis)
    redis.set(
        "bgpstreamkafka_seen_bgp_update", "1", ex=MON_TIMEOUT_LAST_BGP_UPDATE
    )

    # create a new bgpstream instance and a reusable bgprecord instance
    stream = _pybgpstream.BGPStream()

    # set kafka data interface
    stream.set_data_interface("kafka")

    # set host connection details
    stream.set_data_interface_option(
        "kafka", "brokers", "{}:{}".format(self.host, self.port)
    )

    # set topic
    stream.set_data_interface_option("kafka", "topic", self.topic)

    # filter prefixes
    for prefix in self.prefixes:
        stream.add_filter("prefix", prefix)

    # build monitored prefix tree
    prefix_tree = {"v4": pytricia.PyTricia(32), "v6": pytricia.PyTricia(128)}
    for prefix in self.prefixes:
        ip_version = get_ip_version(prefix)
        prefix_tree[ip_version].insert(prefix, "")

    # filter record type
    stream.add_filter("record-type", "updates")

    # filter based on timing (if end=0 --> live mode)
    # Bypass for https://github.com/FORTH-ICS-INSPIRE/artemis/issues/411#issuecomment-661325802
    start_time = int(time.time()) - START_TIME_OFFSET
    if "BGPSTREAM_TIMESTAMP_BYPASS" in os.environ:
        log.warning(
            "Using BGPSTREAM_TIMESTAMP_BYPASS, meaning BMP timestamps are thrown away from BGPStream"
        )
        start_time = 0
    stream.add_interval_filter(start_time, 0)

    # set live mode
    stream.set_live_mode()

    # start the stream
    stream.start()

    # start producing
    validator = MformatValidator()
    with Producer(self.connection) as producer:
        while True:
            if not self.shared_memory_manager_dict["data_worker_should_run"]:
                break
            # get next record
            try:
                rec = stream.get_next_record()
            except BaseException:
                continue
            if (rec.status != "valid") or (rec.type != "update"):
                continue
            # get next element
            try:
                elem = rec.get_next_elem()
            except BaseException:
                continue
            while elem:
                if not self.shared_memory_manager_dict["data_worker_should_run"]:
                    break
                if elem.type in {"A", "W"}:
                    redis.set(
                        "bgpstreamkafka_seen_bgp_update",
                        "1",
                        ex=MON_TIMEOUT_LAST_BGP_UPDATE,
                    )
                    this_prefix = str(elem.fields["prefix"])
                    service = "bgpstreamkafka|{}".format(str(rec.collector))
                    type_ = elem.type
                    if type_ == "A":
                        as_path = elem.fields["as-path"].split(" ")
                        communities = [
                            {
                                "asn": int(comm.split(":")[0]),
                                "value": int(comm.split(":")[1]),
                            }
                            for comm in elem.fields["communities"]
                        ]
                    else:
                        as_path = []
                        communities = []
                    timestamp = float(rec.time)
                    if timestamp == 0:
                        timestamp = time.time()
                        log.debug("fixed timestamp: {}".format(timestamp))
                    peer_asn = elem.peer_asn
                    ip_version = get_ip_version(this_prefix)
                    if this_prefix in prefix_tree[ip_version]:
                        msg = {
                            "type": type_,
                            "timestamp": timestamp,
                            "path": as_path,
                            "service": service,
                            "communities": communities,
                            "prefix": this_prefix,
                            "peer_asn": peer_asn,
                        }
                        try:
                            if validator.validate(msg):
                                msgs = normalize_msg_path(msg)
                                for msg in msgs:
                                    key_generator(msg)
                                    log.debug(msg)
                                    producer.publish(
                                        msg,
                                        exchange=self.update_exchange,
                                        routing_key="update",
                                        serializer="ujson",
                                    )
                            else:
                                log.debug("Invalid format message: {}".format(msg))
                        except BaseException:
                            log.exception(
                                "Error when normalizing BGP message: {}".format(msg)
                            )
                try:
                    elem = rec.get_next_elem()
                except BaseException:
                    continue
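
# Illustrative sketch (not part of the tap above): the shape of one normalized
# update message as built and published in run(); every field value below is a
# hypothetical example, not taken from real BGP data.
EXAMPLE_KAFKA_UPDATE = {
    "type": "A",  # "A" for announcements, "W" for withdrawals
    "timestamp": 1517446677.0,  # rec.time, patched to time.time() if zero
    "path": ["1403", "6461", "2603"],  # AS path; empty list for withdrawals
    "service": "bgpstreamkafka|rrc00",  # tap name + collector (hypothetical)
    "communities": [{"asn": 1403, "value": 6461}],
    "prefix": "139.91.0.0/16",
    "peer_asn": 8522,
}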
def run(self):
    # update redis
    ping_redis(redis)
    redis.set(
        "bgpstreamlive_seen_bgp_update", "1", ex=MON_TIMEOUT_LAST_BGP_UPDATE
    )

    # create a new bgpstream instance and a reusable bgprecord instance
    stream = _pybgpstream.BGPStream()

    # consider collectors from given projects
    for project in self.monitor_projects:
        # ignore deprecated projects
        if project in DEPRECATED_PROJECTS:
            continue
        # add the classic project sources
        stream.add_filter("project", project)
        # plus their real-time counterparts
        if project in LIVE_PROJECT_MAPPINGS:
            stream.add_filter("project", LIVE_PROJECT_MAPPINGS[project])

    # filter prefixes
    for prefix in self.prefixes:
        stream.add_filter("prefix", prefix)

    # build monitored prefix tree
    prefix_tree = {"v4": pytricia.PyTricia(32), "v6": pytricia.PyTricia(128)}
    for prefix in self.prefixes:
        ip_version = get_ip_version(prefix)
        prefix_tree[ip_version].insert(prefix, "")

    # filter record type
    stream.add_filter("record-type", "updates")

    # filter based on timing (if end=0 --> live mode)
    stream.add_interval_filter(int(time.time()) - START_TIME_OFFSET, 0)

    # set live mode
    stream.set_live_mode()

    # start the stream
    stream.start()

    # start producing
    validator = MformatValidator()
    with Producer(self.connection) as producer:
        while True:
            if not self.shared_memory_manager_dict["data_worker_should_run"]:
                break
            # get next record
            try:
                rec = stream.get_next_record()
            except BaseException:
                continue
            if (rec.status != "valid") or (rec.type != "update"):
                continue
            # get next element
            try:
                elem = rec.get_next_elem()
            except BaseException:
                continue
            while elem:
                if not self.shared_memory_manager_dict["data_worker_should_run"]:
                    break
                if elem.type in {"A", "W"}:
                    redis.set(
                        "bgpstreamlive_seen_bgp_update",
                        "1",
                        ex=MON_TIMEOUT_LAST_BGP_UPDATE,
                    )
                    this_prefix = str(elem.fields["prefix"])
                    service = "bgpstreamlive|{}|{}".format(
                        str(rec.project), str(rec.collector)
                    )
                    type_ = elem.type
                    if type_ == "A":
                        as_path = elem.fields["as-path"].split(" ")
                        communities = [
                            {
                                "asn": int(comm.split(":")[0]),
                                "value": int(comm.split(":")[1]),
                            }
                            for comm in elem.fields["communities"]
                        ]
                    else:
                        as_path = []
                        communities = []
                    timestamp = float(rec.time)
                    peer_asn = elem.peer_asn
                    ip_version = get_ip_version(this_prefix)
                    if this_prefix in prefix_tree[ip_version]:
                        msg = {
                            "type": type_,
                            "timestamp": timestamp,
                            "path": as_path,
                            "service": service,
                            "communities": communities,
                            "prefix": this_prefix,
                            "peer_asn": peer_asn,
                        }
                        try:
                            if validator.validate(msg):
                                msgs = normalize_msg_path(msg)
                                for msg in msgs:
                                    key_generator(msg)
                                    log.debug(msg)
                                    producer.publish(
                                        msg,
                                        exchange=self.update_exchange,
                                        routing_key="update",
                                        serializer="ujson",
                                    )
                            else:
                                log.warning(
                                    "Invalid format message: {}".format(msg)
                                )
                        except BaseException:
                            log.exception(
                                "Error when normalizing BGP message: {}".format(msg)
                            )
                try:
                    elem = rec.get_next_elem()
                except BaseException:
                    continue
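
# Illustrative sketch (assumption, not the tap's actual definitions) of the
# project constants referenced in run() above: DEPRECATED_PROJECTS lists
# retired BGPStream data sources to skip, and LIVE_PROJECT_MAPPINGS maps a
# classic project to its real-time counterpart so both get added as filters.
# The entries below are hypothetical placeholders.
DEPRECATED_PROJECTS = ["caida"]  # hypothetical entry
LIVE_PROJECT_MAPPINGS = {
    "ris": "ris-live",  # hypothetical mapping
    "routeviews": "routeviews-stream",  # hypothetical mapping
}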
def run(self):
    # build monitored prefix tree
    prefix_tree = {"v4": pytricia.PyTricia(32), "v6": pytricia.PyTricia(128)}
    for prefix in self.prefixes:
        ip_version = get_ip_version(prefix)
        prefix_tree[ip_version].insert(prefix, "")

    # start producing
    validator = MformatValidator()
    with Producer(self.connection) as producer:
        for csv_file in glob.glob("{}/*.csv".format(self.input_dir)):
            if not self.shared_memory_manager_dict["data_worker_should_run"]:
                break
            try:
                with open(csv_file, "r") as f:
                    csv_reader = csv.reader(f, delimiter="|")
                    for row in csv_reader:
                        if not self.shared_memory_manager_dict[
                            "data_worker_should_run"
                        ]:
                            break
                        try:
                            if len(row) != 9:
                                continue
                            if row[0].startswith("#"):
                                continue
                            # example row: 139.91.0.0/16|8522|1403|1403 6461 2603 21320
                            # 5408 8522|routeviews|route-views2|A|"[{""asn"":1403,""value"":6461}]"|1517446677
                            this_prefix = row[0]
                            if row[6] == "A":
                                as_path = row[3].split(" ")
                                communities = json.loads(row[7])
                            else:
                                as_path = []
                                communities = []
                            service = "historical|{}|{}".format(row[4], row[5])
                            type_ = row[6]
                            timestamp = float(row[8])
                            peer_asn = int(row[2])
                            ip_version = get_ip_version(this_prefix)
                            if this_prefix in prefix_tree[ip_version]:
                                msg = {
                                    "type": type_,
                                    "timestamp": timestamp,
                                    "path": as_path,
                                    "service": service,
                                    "communities": communities,
                                    "prefix": this_prefix,
                                    "peer_asn": peer_asn,
                                }
                                try:
                                    if validator.validate(msg):
                                        msgs = normalize_msg_path(msg)
                                        for msg in msgs:
                                            key_generator(msg)
                                            log.debug(msg)
                                            producer.publish(
                                                msg,
                                                exchange=self.update_exchange,
                                                routing_key="update",
                                                serializer="ujson",
                                            )
                                            time.sleep(0.01)
                                    else:
                                        log.warning(
                                            "Invalid format message: {}".format(msg)
                                        )
                                except BaseException:
                                    log.exception(
                                        "Error when normalizing BGP message: {}".format(
                                            msg
                                        )
                                    )
                        except Exception:
                            log.exception("row")
            except Exception:
                log.exception("exception")

    # run until instructed to stop
    while True:
        if not self.shared_memory_manager_dict["data_worker_should_run"]:
            break
        time.sleep(1)
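
# Minimal parsing sketch (not part of the tap above) showing how one historical
# CSV row maps to the normalized message fields built in run(); the sample row
# is the one documented in the comment inside run(). SAMPLE_ROW and example_msg
# are names introduced here for illustration only.
import csv
import io
import json

SAMPLE_ROW = (
    '139.91.0.0/16|8522|1403|1403 6461 2603 21320 5408 8522|'
    'routeviews|route-views2|A|"[{""asn"":1403,""value"":6461}]"|1517446677'
)
row = next(csv.reader(io.StringIO(SAMPLE_ROW), delimiter="|"))
example_msg = {
    "type": row[6],  # "A" (announcement) or "W" (withdrawal)
    "timestamp": float(row[8]),
    "path": row[3].split(" ") if row[6] == "A" else [],
    "service": "historical|{}|{}".format(row[4], row[5]),
    "communities": json.loads(row[7]) if row[6] == "A" else [],
    "prefix": row[0],
    "peer_asn": int(row[2]),
}
# example_msg["service"] == "historical|routeviews|route-views2"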
def run(self):
    # update redis
    ping_redis(redis)
    redis.set("ris_seen_bgp_update", "1", ex=MON_TIMEOUT_LAST_BGP_UPDATE)

    # build monitored prefix tree
    prefix_tree = {"v4": pytricia.PyTricia(32), "v6": pytricia.PyTricia(128)}
    for prefix in self.prefixes:
        ip_version = get_ip_version(prefix)
        prefix_tree[ip_version].insert(prefix, "")

    # set RIS suffix on connection
    ris_suffix = RIS_ID

    # main loop to process BGP updates
    validator = MformatValidator()
    with Producer(self.connection) as producer:
        while True:
            if not self.shared_memory_manager_dict["data_worker_should_run"]:
                break
            try:
                events = requests.get(
                    "https://ris-live.ripe.net/v1/stream/?format=json&client=artemis-{}".format(
                        ris_suffix
                    ),
                    stream=True,
                    timeout=10,
                )
                # http://docs.python-requests.org/en/latest/user/advanced/#streaming-requests
                iterator = events.iter_lines()
                next(iterator)
                for data in iterator:
                    if not self.shared_memory_manager_dict[
                        "data_worker_should_run"
                    ]:
                        break
                    try:
                        parsed = json.loads(data)
                        msg = parsed["data"]
                        if "type" in parsed and parsed["type"] == "ris_error":
                            log.error(msg)
                        # also check if ris host is in the configuration
                        elif (
                            "type" in msg
                            and msg["type"] == "UPDATE"
                            and (not self.hosts or msg["host"] in self.hosts)
                        ):
                            norm_ris_msgs = self.normalize_ripe_ris(
                                msg, prefix_tree
                            )
                            for norm_ris_msg in norm_ris_msgs:
                                redis.set(
                                    "ris_seen_bgp_update",
                                    "1",
                                    ex=MON_TIMEOUT_LAST_BGP_UPDATE,
                                )
                                try:
                                    if validator.validate(norm_ris_msg):
                                        norm_path_msgs = normalize_msg_path(
                                            norm_ris_msg
                                        )
                                        for norm_path_msg in norm_path_msgs:
                                            key_generator(norm_path_msg)
                                            log.debug(norm_path_msg)
                                            producer.publish(
                                                norm_path_msg,
                                                exchange=self.update_exchange,
                                                routing_key="update",
                                                serializer="ujson",
                                            )
                                    else:
                                        log.warning(
                                            "Invalid format message: {}".format(msg)
                                        )
                                except BaseException:
                                    log.exception("exception")
                                    log.error(
                                        "Error when normalizing BGP message: {}".format(
                                            norm_ris_msg
                                        )
                                    )
                    except Exception:
                        log.exception("exception")
                        log.error("exception message {}".format(data))
                log.warning(
                    "Iterator ran out of data; the connection will be retried"
                )
            except Exception:
                log.exception("exception")
                log.info(
                    "RIPE RIS Server closed connection. Restarting socket in 10 seconds.."
                )
                time.sleep(10)
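
# Hypothetical example (assumption) of one JSON line from the RIS Live stream
# as consumed above: run() reads parsed["data"], drops "ris_error" messages,
# and forwards "UPDATE" messages whose "host" is in self.hosts. The exact field
# set is defined by the RIS Live API; the values here are placeholders.
EXAMPLE_RIS_LINE = {
    "type": "ris_message",
    "data": {
        "type": "UPDATE",
        "timestamp": 1517446677.0,
        "peer_asn": "8522",
        "host": "rrc21",  # hypothetical collector host
        "path": [1403, 6461, 2603],
        "announcements": [{"prefixes": ["139.91.0.0/16"]}],
    },
}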
def handle_exabgp_msg(bgp_message):
    redis.set("exabgp_seen_bgp_update", "1", ex=MON_TIMEOUT_LAST_BGP_UPDATE)
    msg = {
        "type": bgp_message["type"],
        "communities": bgp_message.get("communities", []),
        "timestamp": float(bgp_message["timestamp"]),
        "path": bgp_message.get("path", []),
        "service": "exabgp|{}".format(host),
        "prefix": bgp_message["prefix"],
        "peer_asn": int(bgp_message["peer_asn"]),
    }
    this_prefix = msg["prefix"]
    ip_version = get_ip_version(this_prefix)
    if this_prefix in prefix_tree[ip_version]:
        try:
            if validator.validate(msg):
                msgs = normalize_msg_path(msg)
                for msg in msgs:
                    key_generator(msg)
                    log.debug(msg)
                    if autoconf:
                        try:
                            if learn_neighbors:
                                msg["learn_neighbors"] = True
                            shared_memory_locks["autoconf_updates"].acquire()
                            autoconf_updates = self.shared_memory_manager_dict[
                                "autoconf_updates"
                            ]
                            autoconf_updates[msg["key"]] = msg
                            self.shared_memory_manager_dict[
                                "autoconf_updates"
                            ] = autoconf_updates
                            # mark the autoconf BGP updates for configuration
                            # processing in redis
                            redis_pipeline = redis.pipeline()
                            redis_pipeline.sadd(
                                "autoconf-update-keys-to-process", msg["key"]
                            )
                            redis_pipeline.execute()
                        except Exception:
                            log.exception("exception")
                        finally:
                            shared_memory_locks["autoconf_updates"].release()
                    else:
                        with Producer(self.connection) as producer:
                            producer.publish(
                                msg,
                                exchange=self.update_exchange,
                                routing_key="update",
                                serializer="ujson",
                            )
            else:
                log.warning("Invalid format message: {}".format(msg))
        except BaseException:
            log.exception("Error when normalizing BGP message: {}".format(msg))
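
# Illustrative sketch (assumption): the exabgp-derived dict this handler
# expects as input; only the fields read in handle_exabgp_msg() are shown,
# with hypothetical values.
EXAMPLE_EXABGP_MSG = {
    "type": "A",
    "communities": [{"asn": 1403, "value": 6461}],
    "timestamp": 1517446677.0,
    "path": [1403, 6461, 2603],
    "prefix": "139.91.0.0/16",
    "peer_asn": 8522,
}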