class Streaming(Process):
    """Wraps AtlasStream in a process.

    Given a measurement filter and a duration, streams results and puts
    them onto queues. Setting a start time in the past replays historical
    results.
    Filter settings: https://atlas.ripe.net/docs/result-streaming/

    Attributes:
        duration (int): how long the streaming lasts; if None, it runs forever.
        param (dict): the stream filters; must contain 'msm' and 'prb'.
        id (tuple): the (msm, prb) pair identifying this stream.
        vis (multiprocessing.Queue): queue receiving results for visualisation.
        analyze (multiprocessing.Queue): queue receiving results for analysis.
        stream (ripe.atlas.cousteau.AtlasStream): the object that does the actual streaming.
    """

    def __init__(self, vis_q, analyze_q, second=None, param=None):
        """
        Args:
            vis_q (multiprocessing.Queue): queue receiving results for visualisation.
            analyze_q (multiprocessing.Queue): queue receiving results for analysis.
            second (int): how long the streaming lasts; if None, it runs forever.
            param (dict): the stream filters; must contain 'msm' and 'prb'.
        """
        param = param or {}  # avoid a shared mutable default argument
        self.duration = second
        self.param = param
        self.id = (param['msm'], param['prb'])
        self.vis = vis_q
        self.analyze = analyze_q
        self.stream = AtlasStream()
        super(Streaming, self).__init__()

    def run(self):
        """Stream results until the timeout expires, then signal the consumer."""
        self.stream.connect()
        self.stream.bind_channel('result', self.on_result_received)
        self.stream.start_stream(stream_type="result", **self.param)
        self.stream.timeout(seconds=self.duration)
        self.stream.disconnect()
        self.analyze.put('STOP')
        return

    def on_result_received(self, *args):
        """Put the received streaming result onto the queues.

        Args:
            *args: args[0] contains the result JSON object, as per the RIPE documentation.
        """
        print('{0} received streaming data.'.format(self.name))
        self.vis.put(dict(id=self.id, type='mes', rec=args[0]))
        self.analyze.put(dict(id=self.id, rec=args[0]))
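
A minimal driver sketch for the class above (the measurement and probe IDs are placeholders), showing the two queues and the 'STOP' sentinel emitted by run():

from multiprocessing import Queue

vis_q, analyze_q = Queue(), Queue()
s = Streaming(vis_q, analyze_q, second=60,
              param={"msm": 5001, "prb": 6001})  # placeholder filter IDs
s.start()

# Drain the analysis queue until run() signals completion with 'STOP'.
for item in iter(analyze_q.get, 'STOP'):
    print(item["id"], item["rec"])
s.join()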
Example #4
def stream_results(v4_nets, v6_nets, seconds=None, filters={}):
    """Set up the atlas stream for all traceroute results"""
    atlas_stream = AtlasStream()
    atlas_stream.connect()
    atlas_stream.bind_channel('result', on_result_received)
    prefixes = []
    with open(v4_nets) as f:
        prefixes.extend([net.strip() for net in f])
    with open(v6_nets) as f:
        prefixes.extend([net.strip() for net in f])
    #     for prefix in prefixes:
    #         stream_parameters = {"type": "traceroute", "passThroughPrefix": prefix}
    #         stream_parameters.update(filters)
    #         atlas_stream.start_stream(stream_type="result", **stream_parameters)
    stream_parameters = {"type": "traceroute"}
    stream_parameters.update(filters)
    atlas_stream.start_stream(stream_type="result", **stream_parameters)
    print("Before streaming")
    atlas_stream.timeout(seconds=seconds)
    atlas_stream.disconnect()
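
A hypothetical invocation (file names and duration are placeholders); note that `prefixes` is only consumed by the commented-out per-prefix subscription above:

stream_results("v4-nets.txt", "v6-nets.txt", seconds=60)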
Example #6
    def test_stream_probe(self):
        """Unittest for Atlas probe connections request."""
        if self.server == "":
            raise SkipTest

        results = []

        def on_result_response(*args):
            """
            Function that will be called every time we receive a new event.
            Args is a tuple, so you should use args[0] to access the real message.
            """
            results.append(args[0])

        atlas_stream = AtlasStream()
        atlas_stream.connect()
        channel = "probe"
        atlas_stream.bind_channel(channel, on_result_response)
        stream_parameters = {"enrichProbes": True}
        atlas_stream.start_stream(stream_type="probestatus", **stream_parameters)
        atlas_stream.timeout(seconds=30)
        atlas_stream.disconnect()
        self.assertNotEqual(results, [])
Example #7
    def run(self):
        """
        Function which adds a new stream.
        """
        atlas_stream = AtlasStream()
        atlas_stream.connect()

        atlas_stream.bind_channel(
            self.channel,
            self.on_result_response,
        )
        atlas_stream.start_stream(
            stream_type=self.stream_type,
            **self.parameters
        )

        atlas_stream.timeout()
        atlas_stream.disconnect()
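
This run() assumes an enclosing object that provides channel, stream_type, parameters, and on_result_response. A minimal hypothetical host class, for context only (names beyond those four attributes are assumptions):

import threading

from ripe.atlas.cousteau import AtlasStream


class StreamWorker(threading.Thread):
    # Hypothetical wrapper; the attribute names are taken from run() above.
    def __init__(self, channel="result", stream_type="result", parameters=None):
        super(StreamWorker, self).__init__()
        self.channel = channel
        self.stream_type = stream_type
        self.parameters = parameters or {"msm": 1001}  # placeholder filter

    def on_result_response(self, *args):
        # args[0] is the decoded result message.
        print(args[0])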
    def test_stream_results(self):
        """Unittest for Atlas results request."""
        if self.server == "":
            pytest.skip("No ATLAS_SERVER defined")

        results = []

        def on_result_response(*args):
            """
            Function that will be called every time we receive a new result.
            Args is a tuple, so you should use args[0] to access the real message.
            """
            results.append(args[0])

        atlas_stream = AtlasStream()
        atlas_stream.connect()
        channel = "result"
        atlas_stream.bind_channel(channel, on_result_response)
        stream_parameters = {"msm": 1001}
        atlas_stream.start_stream(stream_type="result", **stream_parameters)
        atlas_stream.timeout(seconds=5)
        atlas_stream.disconnect()
        self.assertNotEqual(results, [])
Example #10
class ProbesDiscoSensor(Sensor):
    def __init__(self, sensor_service, config):
        super(ProbesDiscoSensor, self).__init__(sensor_service=sensor_service,
                                                config=config)
        self._logger = self.sensor_service.get_logger(
            name=self.__class__.__name__)
        self._probes_state = {}
        self._ases_v4_state = {}
        self._ases_v6_state = {}
        self.atlas_stream = AtlasStream()

    def setup(self):
        r = requests.get(ALL_PROBES_STATE_URL)
        if r.status_code == 200:
            self._create_state_dicts(r.json()["probes"])
            # self._logger.info(self._create_probes_dict(r.json()["probes"]))
            self._logger.info(self._ases_v4_state[3333])
            self._logger.info(self._ases_v6_state[3333])

    def _create_state_dicts(self, probes):
        """
        #         [
        #   *   0           probe.pk,
        #   *   1           probe.asn_v4 if probe.asn_v4 else 0,
        #   *   2           probe.asn_v6 if probe.asn_v6 else 0,
        #   *   3           probe.prb_country_code.code,
        #   *   4           1 if probe.is_anchor else 0,
        #   *   5           1 if probe.prb_public else 0,
        #   *   6          lat,
        #   *   7          lng,
        #   *   8           probe.prefix_v4 if probe.prefix_v4 else 0,
        #   *   9           probe.prefix_v6 if probe.prefix_v6 else 0,
        #   *   10          probe.address_v4 if probe.prb_public and probe.address_v4 else 0,
        #   *   11           probe.address_v6 if probe.prb_public and probe.address_v6 else 0,
        #   *   12          probe.status,
        #   *   13         int(probe.status_since.strftime("%s")) if probe.status_since is not None else None
        #   *  ]
        """
        for p in probes:
            status = STATUS_MAP[p[12]]
            self._probes_state[p[0]] = {
                "prb_id": p[0],
                "asn_v4": p[1],
                "asn_v6": p[2],
                "status": status
            }

            if p[1] and self._ases_v4_state.get(p[1]):
                self._ases_v4_state[p[1]].setdefault(status, set()).add(p[0])
            elif p[1] and p[1] != 0:
                self._ases_v4_state[p[1]] = {status: set([p[0]])}

            if p[2] and self._ases_v6_state.get(p[2]):
                self._ases_v6_state[p[2]].setdefault(status, set()).add(p[0])
            elif p[2] and p[2] != 0:
                self._ases_v6_state[p[2]] = {status: set([p[0]])}

        # return {p[0]: {"prb_id": p[0], "asn_v4": p[1], "asn_v6": p[2], "status": STATUS_MAP[p[12]]} for p in probes}

    def _update_probe_status(self, probe):
        """
        {u'probe': {u'prefix_v4': u'41.74.136.0/21',
        u'first_connected': 1428072503,
        u'is_anchor': False,
        u'status_name': u'Connected',
        u'prefix_v6': None,
        u'status_id': 1,
        u'address_v6': None,
        u'long': -23.6185,
        u'address_v4': None,
        u'country_code': u'CV',
        u'is_public': False,
        u'lat': 15.1075,
        u'asn_v4': 37517,
        u'asn_v6': None,
        u'status_since': 1528289054,
        u'id': 20981,
        u'tags': [u'system-v3', u'system-resolves-a-correctly', u'system-resolves-aaaa-correctly', u'system-ipv4-works', u'system-auto-geoip-country', u'system-ipv4-capable', u'system-ipv4-rfc1918', u'system-ipv4-stable-1d'], u'total_uptime': 80039845},
        u'controller': u'ctr-ams01',
        u'asn': 37517,
        u'prefix': u'165.90.96.0/20',
        u'prb_id': 20981,
        u'type': u'connection',
        u'event': u'disconnect'}
        """
        p_state = self._probes_state.get(probe["prb_id"])
        event = probe["event"]
        prb_id = probe["prb_id"]
        probe_p = probe["probe"]
        asn_v4 = probe["probe"]["asn_v4"]
        asn_v6 = probe["probe"]["asn_v6"]
        if event == "disconnect" and p_state["status"] == "Connected":
            p_state["status"] = "Disconnected"
            if asn_v4:
                self._ases_v4_state[asn_v4].setdefault("Connected",
                                                       set()).remove(prb_id)
                self._ases_v4_state[asn_v4].setdefault("Disconnected",
                                                       set()).add(prb_id)
            if asn_v6:
                self._ases_v6_state["asn_v6"].setdefault("Connected",
                                                         set()).remove(prb_id)
                self._ases_v6_state["asn_v6"].setdefault(
                    "Disconnected", set()).add(prb_id)
        elif event == "connect" and p_state["status"] == "Disconnected":
            p_state["status"] = "Connected"
            if asn_v4:
                self._ases_v4_state["asn_v4"].setdefault(
                    "Disconnected", set()).remove(prb_id)
                self._ases_v4_state["asn_v4"].setdefault("Connected",
                                                         set()).add(prb_id)
            if asn_v6:
                self._ases_v6_state["asn_v6"].setdefault(
                    "Disconnected", set()).remove(prb_id)
                self._ases_v6_state["asn_v6"].setdefault("Connected",
                                                         set()).add(prb_id)

        self._logger.info(p_state)
        self._logger.info(probe["probe"])
        self._logger.info(self._ases_v4_state.get(probe_p.get("asn_v4")))
        self._logger.info(self._ases_v6_state.get(probe_p.get("asn_v6")))
        self._probes_state[probe["prb_id"]] = p_state

    def on_result_response(self, *args):
        """
        Process a probe connect/disconnect event from the Atlas stream.
        """
        probe_update = args[0]
        prb_id = probe_update['prb_id']
        event = probe_update['event']
        self._logger.info("Received a probe response")
        self._logger.info(probe_update)
        self._update_probe_status(probe_update)

        if event == "connect" or event == 'disconnect':
            trigger = 'atlas.probes_disco'
            payload = self._probes_state[prb_id]
            trace_tag = "{prb_id}-{event}-{timestamp}".format(
                prb_id=prb_id,
                event=event,
                timestamp=probe_update['timestamp'])

        self.sensor_service.dispatch(trigger=trigger,
                                     payload=payload,
                                     trace_tag=trace_tag)

    def run(self):
        stream_parameters = {"enrichProbes": True}
        self.atlas_stream.connect()
        channel = "atlas_probestatus"
        # Bind function we want to run with every result message received
        self.atlas_stream.bind_channel(channel, self.on_result_response)
        self.atlas_stream.start_stream(stream_type="probestatus",
                                       **stream_parameters)
        self.atlas_stream.timeout()

    def cleanup(self):
        self.atlas_stream.disconnect()

    def add_trigger(self, trigger):
        # This method is called when trigger is created
        pass

    def update_trigger(self, trigger):
        # This method is called when trigger is updated
        pass

    def remove_trigger(self, trigger):
        # This method is called when trigger is deleted
        pass
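
ProbesDiscoSensor references the module-level names ALL_PROBES_STATE_URL and STATUS_MAP, which this snippet omits. A plausible sketch, assuming the compact probes-state endpoint and the usual Atlas status codes (both are assumptions, not taken from this page):

import requests
from ripe.atlas.cousteau import AtlasStream
from st2reactor.sensor.base import Sensor

# Assumed endpoint serving the compact per-probe arrays described in
# _create_state_dicts(); the real module may define this differently.
ALL_PROBES_STATE_URL = "https://atlas.ripe.net/api/v2/probes/all"

# Assumed mapping from the numeric probe.status field to a label.
STATUS_MAP = {
    0: "Never Connected",
    1: "Connected",
    2: "Disconnected",
    3: "Abandoned",
}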
Example #11
    def setup(self):
        self.atlas_stream = AtlasStream()
Example #12
class PingStreamingSensor(Sensor):
    def __init__(self, sensor_service, config):
        super(PingStreamingSensor,
              self).__init__(sensor_service=sensor_service, config=config)
        self._logger = self.sensor_service.get_logger(
            name=self.__class__.__name__)

    def setup(self):
        self.atlas_stream = AtlasStream()

    def on_result_response(self, *args):
        """
        Process the result from the Atlas Ping Probe
        """
        self._logger.info(
            "Received a probe response for measurement {}".format(
                PING_PROBE_ID))

        round_trip_times = self._get_round_trip_times(args)
        if len(round_trip_times) < MIN_SAMPLE_COUNT:
            self._logger.info(
                "Not enough samples in this result, sample count = {}".format(
                    len(round_trip_times)))
            return

        percentile = self._rtt_percentile(round_trip_times)
        if percentile > TARGET_RTT_PERCENTILE:
            self._dispatch_exceed_rtt_trigger(percentile)

    def _get_round_trip_times(self, args):
        round_trip_times = []
        for result in args[0]['result']:
            if 'result' in result.keys():
                for probe_result in result['result']:
                    try:
                        round_trip_times.append(probe_result['rtt'])
                    except KeyError:
                        self._logger.info("No rtt data in this result")
        return round_trip_times

    def _rtt_percentile(self, round_trip_times):
        self._logger.info("RTT samples: {}".format(round_trip_times))
        rtt_array = np.array(round_trip_times)
        percentile = np.percentile(rtt_array, PERCENTILE)
        self._logger.info("RTT p{}: {}".format(PERCENTILE, percentile))

        return percentile

    def _dispatch_exceed_rtt_trigger(self, percentile):
        self._logger.info(
            "Target rtt p{} of {}s exceeded, rtt p{} = {}s".format(
                PERCENTILE, TARGET_RTT_PERCENTILE, PERCENTILE, percentile))
        trigger = "atlas.rtt_p{}_exceeded".format(PERCENTILE)
        payload = {
            'percentile': PERCENTILE,
            'rtt': percentile,
        }
        self._sensor_service.dispatch(trigger=trigger, payload=payload)

    def run(self):
        stream_parameters = {"msm": 5001}
        self.atlas_stream.connect()
        channel = "atlas_result"
        # Bind function we want to run with every result message received
        self.atlas_stream.bind_channel(channel, self.on_result_response)
        self.atlas_stream.start_stream(stream_type="result",
                                       **stream_parameters)
        self.atlas_stream.timeout()

    def cleanup(self):
        self._logger.info("Good bye cruel world...")
        self.atlas_stream.disconnect()

    def add_trigger(self, trigger):
        # This method is called when trigger is created
        pass

    def update_trigger(self, trigger):
        # This method is called when trigger is updated
        pass

    def remove_trigger(self, trigger):
        # This method is called when trigger is deleted
        pass
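
PingStreamingSensor likewise depends on module-level constants and numpy; a hedged sketch with placeholder thresholds (only PING_PROBE_ID's value can be inferred from run() above):

import numpy as np
from ripe.atlas.cousteau import AtlasStream
from st2reactor.sensor.base import Sensor

PING_PROBE_ID = 5001           # measurement id, matching run() above
MIN_SAMPLE_COUNT = 3           # assumed minimum RTT samples per result
PERCENTILE = 90                # assumed percentile to compute
TARGET_RTT_PERCENTILE = 50.0   # assumed alert threshold (ms)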
Example #13
    def test_stream_request(self):
        """Unittest for Atlas results request"""
        if self.server == "":
            raise SkipTest

        results = []

        def on_result_response(*args):
            """
            Function that will be called every time we receive a new result.
            Args is a tuple, so you should use args[0] to access the real message.
            """
            results.append(args[0])

        atlas_stream = AtlasStream()
        atlas_stream.connect()
        stream_type = "result"
        atlas_stream.bind_stream(stream_type, on_result_response)
        stream_parameters = {"msm": 1001}
        atlas_stream.start_stream(stream_type=stream_type, **stream_parameters)
        atlas_stream.timeout(seconds=5)
        atlas_stream.disconnect()
        self.assertNotEqual(results, [])
from __future__ import absolute_import
from ripe.atlas.cousteau import AtlasStream


def on_result_response(*args):
    print(args[0])


atlas_stream = AtlasStream()
atlas_stream.connect()
# Measurement results
channel = "result"
# Bind function we want to run with every result message received
atlas_stream.bind_channel(channel, on_result_response)
# Subscribe to new stream for 1001 measurement results
stream_parameters = {"msm": 1001}
atlas_stream.start_stream(stream_type="result", **stream_parameters)

# timeout() must come after the stream is started; it blocks and
# processes events for the given number of seconds.
atlas_stream.timeout(seconds=5)
atlas_stream.disconnect()
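
The same pattern can replay a historical window by adding startTime/stopTime to the filters (timestamp values borrowed from the commented-out parameters in Example #16 below); a hedged variant of the subscription lines:

stream_parameters = {"msm": 1001,
                     "startTime": 1489568000,  # replay window start (UNIX time)
                     "stopTime": 1489569100}
atlas_stream.start_stream(stream_type="result", **stream_parameters)
atlas_stream.timeout(seconds=5)
atlas_stream.disconnect()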
Example #16
    "--only",
    action='append',
    default=None,
    help="set a filter to allow check a specifity key and value in DNS. IN,A")
args = parser.parse_args()

if args.debug:
    defaultloglevel = logging.DEBUG

logging.basicConfig(level=defaultloglevel)

if args.only is not None:
    filters = []
    for v in args.only:
        filters.append(v.split(","))
    logging.info("Filter applied: {}".format(filters))

atlas_stream = AtlasStream()
atlas_stream.connect()

channel = "atlas_result"
#channel = "atlas_subscribe"
atlas_stream.bind_channel(channel, on_result_response)

stream_parameters = {"type": "dns"}
#stream_parameters = {"startTime": 1489568000, "stopTime": 1489569100, "msm": 30001}
atlas_stream.start_stream(stream_type="result", **stream_parameters)

atlas_stream.timeout(seconds=args.timeout)
atlas_stream.disconnect()
    def stream(self):
        def on_result_response(result, *args):
            logging.warning("on_result_response fired")
            self.responses.append(result)
            self.probes_received += 1
            if self.probes_received >= self.probes_limit:
                print "Raise ProbesLimitExceeded()"
                raise ProbesLimitExceeded()

        stream = AtlasStream()
        stream.connect()
        stream.bind_channel(self.channel, on_result_response)

        try:
            stream.start_stream(stream_type=self.type, msm=self.msm)
            stream.timeout(self.timeout)
        except (KeyboardInterrupt, ProbesLimitExceeded) as e:
            stream.disconnect()

        return self.responses
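
The stream is aborted by raising a custom exception from inside the callback; its definition is trivial (a sketch matching the name used above):

class ProbesLimitExceeded(Exception):
    # Raised by the result callback once probes_limit results have arrived.
    pass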
    def run_stream(self, probes_filter):
        logger.info(" - using real-time results streaming")

        self.ensure_streaming_enabled(MeasurementProcessingError)

        try:
            atlas_stream = AtlasStream()
            atlas_stream.connect()
            atlas_stream.bind_channel("result", self.on_result_response)
            stream_params = {"msm": self.msm_id}
        except Exception as e:
            raise MeasurementProcessingError(
                "Error while creating the stream: {}".format(str(e))
            )

        self.results_queue = Queue()
        thread = MonitorResultsThread(self, probes_filter)

        try:
            thread.start()

            atlas_stream.start_stream(stream_type="result",
                                      **stream_params)
            atlas_stream.timeout(seconds=self.stream_timeout)
        except Exception:
            # Streaming errors are non-fatal here; teardown happens below.
            pass
        finally:
            # Always tear the stream down, then stop the monitor thread.
            try:
                atlas_stream.disconnect()
            except Exception:
                pass
            self.exit_thread = True
            thread.join(timeout=10)
class ProbesDiscoSensor(Sensor):
    def __init__(self, sensor_service, config):
        super(ProbesDiscoSensor, self).__init__(sensor_service=sensor_service,
                                                config=config)
        self._logger = self.sensor_service.get_logger(
            name=self.__class__.__name__)
        self._probes_state = {}
        self._ases_v4_state = {}
        self._ases_v6_state = {}
        self.atlas_stream = AtlasStream()

    def setup(self):
        r = requests.get(ALL_PROBES_STATE_URL)
        if r.status_code == 200:
            self._create_state_dicts(r.json()["probes"])
            # self._logger.info(self._create_probes_dict(r.json()["probes"]))

    def _create_state_dicts(self, probes):
        """
        #         [
        #   *   0           probe.pk,
        #   *   1           probe.asn_v4 if probe.asn_v4 else 0,
        #   *   2           probe.asn_v6 if probe.asn_v6 else 0,
        #   *   3           probe.prb_country_code.code,
        #   *   4           1 if probe.is_anchor else 0,
        #   *   5           1 if probe.prb_public else 0,
        #   *   6          lat,
        #   *   7          lng,
        #   *   8           probe.prefix_v4 if probe.prefix_v4 else 0,
        #   *   9           probe.prefix_v6 if probe.prefix_v6 else 0,
        #   *   10          probe.address_v4 if probe.prb_public and probe.address_v4 else 0,
        #   *   11           probe.address_v6 if probe.prb_public and probe.address_v6 else 0,
        #   *   12          probe.status,
        #   *   13         int(probe.status_since.strftime("%s")) if probe.status_since is not None else None
        #   *  ]
        """
        for p in probes:
            status = STATUS_MAP[p[12]]
            self._probes_state[p[0]] = {
                "prb_id": p[0],
                "asn_v4": p[1],
                "asn_v6": p[2],
                "status": status
            }

            if p[1] and self._ases_v4_state.get(p[1]):
                self._ases_v4_state[p[1]].setdefault(status, set()).add(p[0])
            elif p[1] and p[1] != 0:
                self._ases_v4_state[p[1]] = {status: set([p[0]])}

            if p[2] and self._ases_v6_state.get(p[2]):
                self._ases_v6_state[p[2]].setdefault(status, set()).add(p[0])
            elif p[2] and p[2] != 0:
                self._ases_v6_state[p[2]] = {status: set([p[0]])}

        for asn_v4 in self._ases_v4_state:
            # self._logger.info(asn_v4)
            # self._logger.info(self._ases_v4_state)
            self._update_vstate(self._ases_v4_state[asn_v4])

        for asn_v6 in self._ases_v6_state:
            self._update_vstate(self._ases_v6_state[asn_v6])

        # return {p[0]: {"prb_id": p[0], "asn_v4": p[1], "asn_v6": p[2], "status": STATUS_MAP[p[12]]} for p in probes}

    def _update_vstate(self, vstate, prb_id=None, event=None):
        def _update_disco_list(prb_id, event):
            if event == "connect":
                vstate.setdefault("Disconnected", set()).discard(prb_id)
                vstate.setdefault("Connected", set()).add(prb_id)
                vstate.get("LastDisconnected", {}).pop(prb_id, None)
                self._logger.info(vstate.get("LastDisconnected"))

                # Expire entries older than 30 minutes; iterate over a copy
                # so we can delete from the dict while looping.
                cutoff = datetime.now() - timedelta(minutes=30)
                last_disco = vstate.get("LastDisconnected", {})
                for p in list(last_disco):
                    if last_disco[p] < cutoff:
                        del last_disco[p]

            if event == "disconnect":
                vstate.setdefault("Connected", set()).discard(prb_id)
                vstate.setdefault("Disconnected", set()).add(prb_id)

                # add the prb_id to the disconnected sliding window
                vstate.setdefault("LastDisconnected",
                                  {}).update({prb_id: datetime.now()})
                self._logger.info(vstate["LastDisconnected"])

                # Check for stale disconnects and drop them from the sliding
                # window; iterate over a copy so deletion is safe.
                cutoff = datetime.now() - timedelta(minutes=30)
                last_disco = vstate["LastDisconnected"]
                for p in list(last_disco):
                    if last_disco[p] < cutoff:
                        del last_disco[p]

        if prb_id:
            _update_disco_list(prb_id, event)

        connected = len(vstate.get("Connected", []))
        disconnected = len(vstate.get("Disconnected", []))
        total = connected + disconnected
        # Guard against an AS with no probes in either bucket.
        perc = 100.0 * connected / total if total else 0.0

        vstate["connection_percentage"] = perc
        # self._logger.info('new perc: {}'.format(perc))

        return perc

    def _update_probe_status(self, probe):
        """
        {u'probe': {u'prefix_v4': u'41.74.136.0/21',
        u'first_connected': 1428072503,
        u'is_anchor': False,
        u'status_name': u'Connected',
        u'prefix_v6': None,
        u'status_id': 1,
        u'address_v6': None,
        u'long': -23.6185,
        u'address_v4': None,
        u'country_code': u'CV',
        u'is_public': False,
        u'lat': 15.1075,
        u'asn_v4': 37517,
        u'asn_v6': None,
        u'status_since': 1528289054,
        u'id': 20981,
        u'tags': [u'system-v3', u'system-resolves-a-correctly', u'system-resolves-aaaa-correctly', u'system-ipv4-works', u'system-auto-geoip-country', u'system-ipv4-capable', u'system-ipv4-rfc1918', u'system-ipv4-stable-1d'], u'total_uptime': 80039845},
        u'controller': u'ctr-ams01',
        u'asn': 37517,
        u'prefix': u'165.90.96.0/20',
        u'prb_id': 20981,
        u'type': u'connection',
        u'event': u'disconnect'}
        """
        p_state = self._probes_state.get(probe["prb_id"])
        event = probe["event"]
        prb_id = probe["prb_id"]
        asn_v4 = probe["probe"]["asn_v4"]
        asn_v6 = probe["probe"]["asn_v6"]
        if event == "disconnect":  # and p_state["status"] == "Connected":
            p_state["status"] = "Disconnected"
            if asn_v4:
                self._update_vstate(self._ases_v4_state[asn_v4], prb_id, event)
            if asn_v6:
                self._update_vstate(self._ases_v6_state[asn_v6], prb_id, event)
        elif event == "connect":  # and p_state["status"] == "Disconnected":
            p_state["status"] = "Connected"
            if asn_v4:
                self._update_vstate(self._ases_v4_state[asn_v4], prb_id, event)
            if asn_v6:
                self._update_vstate(self._ases_v6_state[asn_v6], prb_id, event)
        else:
            self._logger.info("probe {p} in wrong state".format(p=prb_id))

        if (event == "disconnect" and p_state["status"] == "Disconnected") or (
                event == "connect" and p_state["status"] == "Connected"):
            self._logger.info("NO STATE CHANGE for probe {}".format(prb_id))
        self._logger.info(p_state)
        self._logger.info(probe["probe"])

        self._probes_state[probe["prb_id"]] = p_state

    def on_result_response(self, *args):
        """
        Process the result from the Atlas Ping Probe
        """
        probe_update = args[0]
        prb_id = probe_update['prb_id']
        event = probe_update['event']
        asn_v4 = probe_update['probe']["asn_v4"]
        asn_v6 = probe_update['probe']["asn_v6"]
        curr_asn_v4_conn = self._ases_v4_state.get(
            asn_v4, {}).get('connection_percentage')
        curr_asn_v6_conn = self._ases_v6_state.get(
            asn_v6, {}).get('connection_percentage')
        self._logger.info(
            "Received a probe update for probe {prb_id} (asn v4:{asn_v4}, v6: {asn_v6}), event: \"{event}\""
            .format(prb_id=prb_id, event=event, asn_v4=asn_v4, asn_v6=asn_v6))
        # self._logger.info(probe_update)
        self._update_probe_status(probe_update)
        new_asn_v4_conn = self._ases_v4_state.get(
            asn_v4, {}).get('connection_percentage')
        new_asn_v6_conn = self._ases_v6_state.get(
            asn_v6, {}).get('connection_percentage')

        # Evaluate single probe disco
        if event == "connect" or event == 'disconnect':
            trigger = 'atlas.probes_disco'
            payload = {
                event:
                "probe with id {prb_id} {event}ed.".format(prb_id=prb_id,
                                                           event=event)
            }
            trace_tag = "{prb_id}-{event}-{timestamp}".format(
                prb_id=prb_id,
                event=event,
                timestamp=probe_update['timestamp'])

            self.sensor_service.dispatch(trigger=trigger,
                                         payload=payload,
                                         trace_tag=trace_tag)

        # Evaluate ASes disco
        if event == "disconnect":
            trigger = 'atlas.probes_disco'

            # AS presence goes below 50 percent
            if curr_asn_v4_conn is not None and curr_asn_v4_conn >= 50.0:
                if new_asn_v4_conn < 50.0:
                    self._logger.info(
                        'AWAS! asn_v4 {asn_v4} less than 50 percent connected!'
                        .format(asn_v4=asn_v4))
                    self.sensor_service.dispatch(
                        trigger=trigger,
                        payload={
                            "event":
                            'AWAS! asn_v4 {asn_v4} less than 50 percent connected!'
                            .format(asn_v4=asn_v4)
                        },
                        trace_tag="{asn_v4}-lessthan-{timestamp}".format(
                            asn_v4=asn_v4,
                            timestamp=probe_update['timestamp']))
                elif curr_asn_v4_conn - new_asn_v4_conn > 19.0:
                    self._logger.info(
                        'NO! asn_v4 {asn_v4} going down significantly'.format(
                            asn_v4=asn_v4))
                    self.sensor_service.dispatch(
                        trigger=trigger,
                        payload={
                            "event":
                            'NO! asn_v4 {asn_v4} going down significantly'.
                            format(asn_v4=asn_v4)
                        },
                        trace_tag="{asn_v4}-down-{timestamp}".format(
                            asn_v4=asn_v4,
                            timestamp=probe_update['timestamp']))

            # The v6 checks mirror the v4 ones; they must not be nested
            # under the v4 condition.
            if curr_asn_v6_conn is not None and curr_asn_v6_conn >= 50.0:
                if new_asn_v6_conn < 50.0:
                    self._logger.info(
                        'AWAS! asn_v6 {asn_v6} less than 50 percent connected!'
                        .format(asn_v6=asn_v6))
                    self.sensor_service.dispatch(
                        trigger=trigger,
                        payload={
                            "event":
                            'AWAS! asn_v6 {asn_v6} less than 50 percent connected!'
                            .format(asn_v6=asn_v6)
                        },
                        trace_tag="{asn_v6}-lessthan-{timestamp}".format(
                            asn_v6=asn_v6,
                            timestamp=probe_update['timestamp']))
                elif new_asn_v6_conn and curr_asn_v6_conn - new_asn_v6_conn > 19.0:
                    self._logger.info(
                        'NO! asn_v6 {asn_v6} going down significantly'.format(
                            asn_v6=asn_v6))
                    self.sensor_service.dispatch(
                        trigger=trigger,
                        payload={
                            "event":
                            'NO! asn_v6 {asn_v6} going down significantly'.
                            format(asn_v6=asn_v6)
                        },
                        trace_tag="{asn_v6}-down-{timestamp}".format(
                            asn_v6=asn_v6,
                            timestamp=probe_update['timestamp']))

            # no AS presence
            if curr_asn_v4_conn is not None and curr_asn_v4_conn > 1.0 and new_asn_v4_conn < 1.0:
                self._logger.info(
                    'as{asn_v4} completely offline'.format(asn_v4=asn_v4))
                self.sensor_service.dispatch(
                    trigger=trigger,
                    payload={
                        "event":
                        'as{asn_v4} went completely offline'.format(
                            asn_v4=asn_v4)
                    },
                    trace_tag="{asn_v4}-offline-{timestamp}".format(
                        asn_v4=asn_v4, timestamp=probe_update['timestamp']))
            if curr_asn_v6_conn is not None and curr_asn_v6_conn > 1.0 and new_asn_v6_conn < 1.0:
                self._logger.info(
                    '{asn_v6} went completely offline'.format(asn_v6=asn_v6))
                self.sensor_service.dispatch(
                    trigger=trigger,
                    payload={
                        "event":
                        'as{asn_v6} went completely offline'.format(
                            asn_v6=asn_v6)
                    },
                    trace_tag="{asn_v6}-offline-{timestamp}".format(
                        asn_v6=asn_v6, timestamp=probe_update['timestamp']))

            # AS picks up
            if new_asn_v4_conn and curr_asn_v4_conn and new_asn_v4_conn - curr_asn_v4_conn > 19.0:
                self._logger.info(
                    'YEAH! as{asn_v4} seeing significant uptake in online probes'
                    .format(asn_v4=asn_v4))
                self.sensor_service.dispatch(
                    trigger=trigger,
                    payload={
                        "event":
                        'YEAH! as{asn_v4} seeing significant uptake in online probes'
                        .format(asn_v4=asn_v4)
                    },
                    trace_tag="{asn_v4}-uptake-{timestamp}".format(
                        asn_v4=asn_v4, timestamp=probe_update['timestamp']))
            if new_asn_v6_conn and curr_asn_v6_conn and new_asn_v6_conn - curr_asn_v6_conn > 19.0:
                self._logger.info(
                    'YEAH! as{asn_v6} seeing significant uptake in online probes'
                    .format(asn_v6=asn_v6))
                self.sensor_service.dispatch(
                    trigger=trigger,
                    payload={
                        "event":
                        'YEAH! as{asn_v6} seeing significant uptake in online probes'
                        .format(asn_v6=asn_v6)
                    },
                    trace_tag="{asn_v6}-uptake-{timestamp}".format(
                        asn_v6=asn_v6, timestamp=probe_update['timestamp']))

            # Probes going down fast
            if len(
                    self._ases_v4_state.get(asn_v4,
                                            {}).get('LastDisconnected',
                                                    {})) >= 3:
                self.sensor_service.dispatch(
                    trigger=trigger,
                    payload={
                        "event":
                        'OMG! as{asn_v4} going down fast now'.format(
                            asn_v4=asn_v4)
                    },
                    trace_tag="{asn_v4}-downfast-{timestamp}".format(
                        asn_v4=asn_v4, timestamp=probe_update['timestamp']))
            if len(
                    self._ases_v6_state.get(asn_v6,
                                            {}).get('LastDisconnected',
                                                    {})) >= 3:
                self.sensor_service.dispatch(
                    trigger=trigger,
                    payload={
                        "event":
                        'OMG! as{asn_v6} going down fast now'.format(
                            asn_v6=asn_v6)
                    },
                    trace_tag="{asn_v6}-downfast-{timestamp}".format(
                        asn_v6=asn_v6, timestamp=probe_update['timestamp']))

        self._logger.info(
            'connection percentage for asn_v4 {asn_v4} {old} -> {new}'.format(
                asn_v4=asn_v4, old=curr_asn_v4_conn, new=new_asn_v4_conn))
        self._logger.info(
            'connection percentage for asn_v6 {asn_v6} {old} -> {new}'.format(
                asn_v6=asn_v6, old=curr_asn_v6_conn, new=new_asn_v6_conn))
        self._logger.info('disconnect bin asn v4: {}'.format(
            self._ases_v4_state.get(asn_v4, {}).get('LastDisconnected')))
        self._logger.info('disconnect bin asn v6: {}'.format(
            self._ases_v6_state.get(asn_v6, {}).get('LastDisconnected')))

    def run(self):
        stream_parameters = {"enrichProbes": True}
        self.atlas_stream.connect()
        channel = "atlas_probestatus"
        # Bind function we want to run with every result message received
        self.atlas_stream.bind_channel(channel, self.on_result_response)
        self.atlas_stream.start_stream(stream_type="probestatus",
                                       **stream_parameters)
        self.atlas_stream.timeout()

    def cleanup(self):
        self.atlas_stream.disconnect()

    def add_trigger(self, trigger):
        # This method is called when trigger is created
        pass

    def update_trigger(self, trigger):
        # This method is called when trigger is updated
        pass

    def remove_trigger(self, trigger):
        # This method is called when trigger is deleted
        pass
Example #22
def on_result_response(*args):
    # Callback bound below; counts and buffers each incoming result.
    global nrecs, lastrec, lastsec

    a = args[0]
    t = int(time.time())

    if t != lastsec:
        be_verbose(2, "%d results ..." % (nrecs - lastrec))
        lastrec = nrecs
        lastsec = t

    a['pdb_received'] = t
    a['pdb_source'] = 1
    be_verbose(3, json.dumps(a))
    buf.append(a)
    nrecs += 1


atlas_stream = AtlasStream()

if cmdl.server:
    atlas_stream.iosocket_server = cmdl.server  # override stream server address

atlas_stream.connect()

channel = "result"
atlas_stream.bind_channel(channel, on_result_response)  # establish callback

stream_parameters = {"type": "traceroute"}
atlas_stream.start_stream(stream_type="result",
                          **stream_parameters)  # start streaming

be_verbose(1, "stream starting ...")
atlas_stream.timeout(seconds=cmdl.secs)  # this really starts it ....
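
Example #22 relies on a cmdl argparse namespace, a be_verbose helper, and a few counters defined elsewhere in the original script; a plausible sketch (flag names inferred from usage, everything else assumed):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--server", help="override the stream server address")
parser.add_argument("--secs", type=int, default=60, help="streaming duration")
parser.add_argument("--verbose", type=int, default=1)
cmdl = parser.parse_args()

buf, nrecs, lastrec, lastsec = [], 0, 0, 0  # result buffer and counters

def be_verbose(level, msg):
    # Print msg only when the requested verbosity is at least `level`.
    if cmdl.verbose >= level:
        print(msg)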
Example #23
def getLive(allmsm=[7000]):

    #Start time of this script, we'll try to get it working for 1 hour
    starttime = datetime.datetime.now()

    lastTimestamp = 0
    currCollection = None
    lastDownload = None
    lastConnection = None

    while (datetime.datetime.now() - starttime).seconds < 3600:
        try:
            lastConnection = datetime.datetime.now()
            atlas_stream = AtlasStream()
            atlas_stream.connect()
            # Measurement results
            channel = "atlas_result"
            # Bind function we want to run with every result message received
            atlas_stream.socketIO.on("connect", on_connect)
            atlas_stream.socketIO.on("disconnect", on_disconnect)
            atlas_stream.socketIO.on("reconnect", on_reconnect)
            atlas_stream.socketIO.on("error", on_error)
            atlas_stream.socketIO.on("close", on_close)
            atlas_stream.socketIO.on("connect_error", on_connect_error)
            atlas_stream.socketIO.on("atlas_error", on_atlas_error)
            atlas_stream.socketIO.on("atlas_unsubscribed",
                                     on_atlas_unsubscribe)
            # Subscribe to new stream
            atlas_stream.bind_channel(channel, on_result_response)

            for msm in allmsm:
                # stream_parameters = {"type": "traceroute", "buffering":True, "equalsTo":{"af": 4},   "msm": msm}
                stream_parameters = {
                    "buffering": True,
                    "equalsTo": {
                        "af": 4
                    },
                    "msm": msm
                }
                atlas_stream.start_stream(stream_type="result",
                                          **stream_parameters)

            # Run for 1 hour
            #print "start stream for msm ids: %s" % allmsm
            atlas_stream.timeout(seconds=3600 -
                                 (datetime.datetime.now() - starttime).seconds)
            # Shut down everything
            atlas_stream.disconnect()

        except ConnectionError as e:
            now = datetime.datetime.utcnow()
            #print "%s: %s" % (now, e)
            #print "last download: %s" % lastDownload
            #print "last connection: %s" % lastConnection
            atlas_stream.disconnect()

            # Wait a bit if the connection was made less than a minute ago
            if lastConnection + datetime.timedelta(seconds=60) > now:
                time.sleep(60)
            #print "Go back to the loop and reconnect"

        except Exception as e:
            save_note = "Exception dump: %s : %s.\nCommand: %s" % (
                type(e).__name__, e, sys.argv)
            exception_fp = open("dump_%s.err" % datetime.datetime.now(), "w")
            exception_fp.write(save_note)
            sys.exit()
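
getLive() binds several socketIO lifecycle handlers that are defined elsewhere in the original script; minimal hypothetical stand-ins so the snippet runs:

def _make_handler(label):
    # Hypothetical no-op handler factory for the socketIO events bound above.
    def handler(*args):
        print("socketIO event:", label, args)
    return handler

on_connect = _make_handler("connect")
on_disconnect = _make_handler("disconnect")
on_reconnect = _make_handler("reconnect")
on_error = _make_handler("error")
on_close = _make_handler("close")
on_connect_error = _make_handler("connect_error")
on_atlas_error = _make_handler("atlas_error")
on_atlas_unsubscribe = _make_handler("atlas_unsubscribed")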
    def run(self):
        atlas_stream = AtlasStream()
        atlas_stream.connect()
        # Measurement results
        channel = "result"
        # Bind function we want to run with every result message received
        atlas_stream.bind_channel(channel, self.on_result_response)
        # Subscribe to new stream for 1001 measurement results
        #stream_parameters = {"msm": ID_list,"startTime": ts}
        #ID_list.append(1001)
        #print self.id

        #ID_list = map(int, self.id)
        #ID_list=(1001)

        #ID_list.append('1001')

        stream_parameters = {"msm":self.id} #,"startTime": self.ts
        atlas_stream.start_stream(stream_type="result",**stream_parameters)

        # Probe's connection status results
        channel = "probe"
        atlas_stream.bind_channel(channel, self.on_result_response)
        #stream_parameters = {"prb": (12605,13663,850),"startTime": ts,"enrichProbes": True}
        #,"start_time":1456948500
        stream_parameters = {} #,"startTime": self.ts
        atlas_stream.start_stream(stream_type="probestatus", **stream_parameters)

        # Timeout all subscriptions after 5 secs. Leave seconds empty for no timeout.
        # Make sure you have this line after you start *all* your streams
        atlas_stream.timeout(seconds=1200)
        # Shut down everything
        atlas_stream.disconnect()
Example #26
    def stream(self, renderer_name, kind, pk):

        renderer = Renderer.get_renderer(name=renderer_name, kind=kind)()

        def on_result_response(result, *args):
            sys.stdout.write(renderer.on_result(Result.get(
                result,
                on_error=Result.ACTION_IGNORE,
                on_malformation=Result.ACTION_IGNORE
            )))
            self.captured += 1
            if self.capture_limit and self.captured >= self.capture_limit:
                raise CaptureLimitExceeded()

        stream = AtlasStream()
        stream.connect()

        stream.bind_channel("result", on_result_response)
        try:
            stream.start_stream(stream_type="result", msm=pk)
            stream.timeout(self.timeout)
        except (KeyboardInterrupt, CaptureLimitExceeded) as e:
            stream.disconnect()
            raise e
def stream():
    stream = AtlasStream()
    stream.connect()

    try:
        stream.bind_channel("result", got_result)
        for meas in MeasurementType:
            #print(meas)
            #print(wanted_measurements)
            if not do_all and meas not in wanted_measurements:
                print("skipping")
                continue
            stream_parameters = {
                "msm": meas,
                "type": "dns",
                "enrichProbes": True,
                "sendBacklog": True,
            }
            stream.start_stream(stream_type="result", **stream_parameters)
            stream.timeout(5)

        # Keep the process alive while the stream delivers results.
        import time
        while True:
            time.sleep(0.1)

    except Exception as ex:
        _LOGGER.warning("Got ex: %s" % ex)
        raise Exception() from ex
    finally:
        stream.disconnect()
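
stream() iterates over a MeasurementType enum and a wanted_measurements set that the snippet does not show; a hypothetical sketch of those pieces (the measurement IDs are placeholders, not real Atlas IDs):

from enum import IntEnum

class MeasurementType(IntEnum):
    # Placeholder DNS measurement IDs; the real enum lives elsewhere.
    ROOT_SERVER_A = 10309
    ROOT_SERVER_B = 10310

wanted_measurements = {MeasurementType.ROOT_SERVER_A}
do_all = False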
    def stream(self, renderer_name, kind, pk):

        renderer = Renderer.get_renderer(name=renderer_name, kind=kind)()

        def on_result_response(result, *args):
            sys.stdout.write(
                renderer.on_result(
                    Result.get(result,
                               on_error=Result.ACTION_IGNORE,
                               on_malformation=Result.ACTION_IGNORE)))
            self.captured += 1
            if self.capture_limit and self.captured >= self.capture_limit:
                raise CaptureLimitExceeded()

        stream = AtlasStream()
        stream.connect()

        stream.bind_stream("result", on_result_response)
        try:
            stream.start_stream(stream_type="result", msm=pk)
            stream.timeout(self.timeout)
        except (KeyboardInterrupt, CaptureLimitExceeded) as e:
            stream.disconnect()
            raise e
    def stream(self, renderer_name, kind, pk):

        renderer = Renderer.get_renderer(name=renderer_name, kind=kind)()
        sys.stdout.write(renderer.on_start())

        def on_result_response(result, *args):
            sys.stdout.write(renderer.on_result(Result.get(result)))
            self.captured += 1
            if self.capture_limit and self.captured >= self.capture_limit:
                raise CaptureLimitExceeded()

        stream = AtlasStream()
        stream.connect()

        stream.bind_stream("result", on_result_response)
        try:
            stream.start_stream(stream_type="result", msm=pk)
            stream.timeout()
        except (KeyboardInterrupt, CaptureLimitExceeded) as e:
            stream.disconnect()
            sys.stdout.write(renderer.on_finish())
            raise e