def stream(self, renderer_name, arguments, kind, pk):
    """Stream live results for measurement ``pk`` and render each to stdout.

    Collection stops when the stream times out or when the configured
    capture limit is reached (``CaptureLimitExceeded`` raised from the
    result callback).

    :param renderer_name: name of the renderer to look up.
    :param arguments: arguments forwarded to the renderer instance.
    :param kind: renderer kind used for the lookup.
    :param pk: measurement id to subscribe to.
    """
    renderer = Renderer.get_renderer(name=renderer_name, kind=kind)(
        arguments=arguments
    )

    def on_result_response(result, *args):
        # Render each sanitised result and count it toward the capture limit.
        sys.stdout.write(renderer.on_result(Result.get(
            result,
            on_error=Result.ACTION_IGNORE,
            on_malformation=Result.ACTION_IGNORE
        )))
        self.captured += 1
        if self.capture_limit and self.captured >= self.capture_limit:
            raise CaptureLimitExceeded()

    stream = AtlasStream()
    stream.connect()
    stream.bind_channel("atlas_result", on_result_response)
    try:
        stream.start_stream(stream_type="result", msm=pk)
        stream.timeout(self.timeout)
    finally:
        # Always release the socket: the original only disconnected on
        # KeyboardInterrupt/CaptureLimitExceeded and leaked the connection
        # on a normal timeout and on any other exception. Exceptions still
        # propagate to the caller exactly as before.
        stream.disconnect()
def stream():
    """Open an Atlas result stream for the wanted DNS measurements and
    block while results arrive on the ``got_result`` callback.

    :raises Exception: re-raises any error hit while streaming, after
        logging it; the stream is always disconnected on the way out.
    """
    # Hoisted out of the busy-wait loop; re-importing per iteration was waste.
    import time

    stream = AtlasStream()
    stream.connect()
    try:
        stream.bind_channel("result", got_result)
        for meas in MeasurementType:
            if not do_all and meas not in wanted_measurements:
                print("skipping")
                continue
            stream_parameters = {
                "msm": meas,
                "type": "dns",
                "enrichProbes": True,
                "sendBacklog": True,
            }
            stream.start_stream(stream_type="result", **stream_parameters)
            stream.timeout(5)
            # NOTE(review): this blocks forever after the FIRST subscribed
            # measurement, so later measurements are never started — confirm
            # whether that is intentional.
            while True:
                time.sleep(0.1)
    except Exception as ex:
        # Lazy %-args: formatting happens only if the record is emitted.
        _LOGGER.warning("Got ex: %s", ex)
        # Re-raise the original exception instead of a bare Exception() so
        # callers keep the real type and full traceback.
        raise
    finally:
        stream.disconnect()
def __init__(self, sensor_service, config):
    """Initialise the sensor: parent wiring, a class-named logger, empty
    per-probe / per-AS state maps, and an Atlas stream handle."""
    super(ProbesDiscoSensor, self).__init__(
        sensor_service=sensor_service, config=config)
    # One state mapping per concern: probes, IPv4 ASes, IPv6 ASes.
    self._probes_state = dict()
    self._ases_v4_state = dict()
    self._ases_v6_state = dict()
    self._logger = self.sensor_service.get_logger(
        name=self.__class__.__name__)
    self.atlas_stream = AtlasStream()
def stream_results(v4_nets, v6_nets, seconds=None, filters=None):
    """Set up the atlas stream for all traceroute results.

    :param v4_nets: path to a file with one IPv4 prefix per line.
    :param v6_nets: path to a file with one IPv6 prefix per line.
    :param seconds: how long to keep the stream open (None = no limit).
    :param filters: extra stream parameters merged into the subscription.
    """
    # Avoid the mutable-default-argument pitfall: a shared ``{}`` default
    # would leak state between calls.
    filters = {} if filters is None else filters

    atlas_stream = AtlasStream()
    atlas_stream.connect()
    atlas_stream.bind_channel('result', on_result_recieved)

    # Read both prefix lists, closing the files deterministically (the
    # original left the file objects open).
    prefixes = []
    for nets_path in (v4_nets, v6_nets):
        with open(nets_path) as nets_file:
            prefixes.extend(net.strip() for net in nets_file)

    stream_parameters = {"type": "traceroute"}
    stream_parameters.update(filters)
    atlas_stream.start_stream(stream_type="result", **stream_parameters)
    print("Before streaming")
    atlas_stream.timeout(seconds=seconds)
    atlas_stream.disconnect()
def run(self):
    """Add a new stream: connect, bind the configured channel to the
    result callback, start streaming, and block until timeout before
    disconnecting."""
    connection = AtlasStream()
    connection.connect()
    connection.bind_channel(self.channel, self.on_result_response)
    connection.start_stream(stream_type=self.stream_type, **self.parameters)
    connection.timeout()
    connection.disconnect()
def stream(self):
    """Collect streamed results for measurement ``self.msm``.

    Appends each incoming result to ``self.responses`` until the stream
    times out or ``self.probes_limit`` results have been received
    (signalled by ``ProbesLimitExceeded`` from the callback).

    :return: the list of collected result messages.
    """
    def on_result_response(result, *args):
        logging.warning("on_result_response fired")
        self.responses.append(result)
        self.probes_received += 1
        if self.probes_received >= self.probes_limit:
            # Parenthesised call form is valid on both Python 2 and 3
            # (the original used the Python-2-only print statement).
            print("Raise ProbesLimitExceeded()")
            raise ProbesLimitExceeded()

    stream = AtlasStream()
    stream.connect()
    stream.bind_channel(self.channel, on_result_response)
    try:
        stream.start_stream(stream_type=self.type, msm=self.msm)
        stream.timeout(self.timeout)
    except (KeyboardInterrupt, ProbesLimitExceeded):
        # Hitting the limit (or a manual interrupt) ends collection
        # gracefully; the partial result set is still returned below.
        pass
    finally:
        # The original disconnected only on the two caught exceptions and
        # leaked the connection when the timeout completed normally.
        stream.disconnect()
    return self.responses
def test_stream_probe(self):
    """Unittest for Atlas probe connections request."""
    if self.server == "":
        raise SkipTest

    results = []

    def on_result_response(*args):
        """Collect every incoming event; args[0] is the real message."""
        results.append(args[0])

    probe_stream = AtlasStream()
    probe_stream.connect()
    probe_stream.bind_channel("probe", on_result_response)
    probe_stream.start_stream(stream_type="probestatus", enrichProbes=True)
    probe_stream.timeout(seconds=30)
    probe_stream.disconnect()
    self.assertNotEqual(results, [])
def test_stream_results(self):
    """Unittest for Atlas results request."""
    if self.server == "":
        pytest.skip("No ATLAS_SERVER defined")

    received = []

    def on_result_response(*args):
        """Store each new result; args[0] holds the actual message."""
        received.append(args[0])

    result_stream = AtlasStream()
    result_stream.connect()
    result_stream.bind_channel("result", on_result_response)
    result_stream.start_stream(stream_type="result", msm=1001)
    result_stream.timeout(seconds=5)
    result_stream.disconnect()
    self.assertNotEqual(received, [])
def run_stream(self, probes_filter):
    """Process measurement results in real time via Atlas streaming.

    Starts the monitor thread that consumes ``self.results_queue`` and
    subscribes to the result channel for ``self.msm_id``; the stream is
    always torn down and the thread joined on the way out.

    :param probes_filter: filter forwarded to the monitor thread.
    :raises MeasurementProcessingError: if the stream cannot be created.
    """
    logger.info(" - using real-time results streaming")
    self.ensure_streaming_enabled(MeasurementProcessingError)

    try:
        atlas_stream = AtlasStream()
        atlas_stream.connect()
        atlas_stream.bind_channel("result", self.on_result_response)
        stream_params = {"msm": self.msm_id}
    except Exception as e:
        raise MeasurementProcessingError(
            "Error while creating the stream: {}".format(str(e))
        )

    def _safe_disconnect():
        # Best-effort teardown: the socket may already be closed.
        try:
            atlas_stream.disconnect()
        except Exception:
            pass

    self.results_queue = Queue()
    thread = MonitorResultsThread(self, probes_filter)
    try:
        thread.start()
        atlas_stream.start_stream(stream_type="result", **stream_params)
        atlas_stream.timeout(seconds=self.stream_timeout)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer silently swallowed, and the failure is now logged
        # instead of vanishing without a trace.
        logger.exception("Error while processing the result stream")
    finally:
        # Exactly one disconnect attempt (the original could call
        # disconnect() up to three times), then stop and join the thread.
        _safe_disconnect()
        self.exit_thread = True
        thread.join(timeout=10)
def getLive(allmsm=None):
    """Stream Atlas results for the given measurement ids for up to one
    hour, reconnecting whenever the connection drops.

    :param allmsm: list of measurement ids to subscribe to
        (defaults to ``[7000]``).
    """
    # None-default instead of a mutable list default shared across calls.
    if allmsm is None:
        allmsm = [7000]

    # Start time of this script; we try to keep it working for 1 hour.
    starttime = datetime.datetime.now()
    lastConnection = None

    while (datetime.datetime.now() - starttime).seconds < 3600:
        try:
            lastConnection = datetime.datetime.now()
            atlas_stream = AtlasStream()
            atlas_stream.connect()

            # Measurement results channel.
            channel = "atlas_result"
            # Low-level socket.io lifecycle hooks.
            atlas_stream.socketIO.on("connect", on_connect)
            atlas_stream.socketIO.on("disconnect", on_disconnect)
            atlas_stream.socketIO.on("reconnect", on_reconnect)
            atlas_stream.socketIO.on("error", on_error)
            atlas_stream.socketIO.on("close", on_close)
            atlas_stream.socketIO.on("connect_error", on_connect_error)
            atlas_stream.socketIO.on("atlas_error", on_atlas_error)
            atlas_stream.socketIO.on("atlas_unsubscribed", on_atlas_unsubscribe)

            # Subscribe to the IPv4 result stream of every requested
            # measurement.
            atlas_stream.bind_channel(channel, on_result_response)
            for msm in allmsm:
                stream_parameters = {
                    "buffering": True,
                    "equalsTo": {"af": 4},
                    "msm": msm,
                }
                atlas_stream.start_stream(
                    stream_type="result", **stream_parameters)

            # Run for whatever remains of the hour, then shut down.
            atlas_stream.timeout(
                seconds=3600 - (datetime.datetime.now() - starttime).seconds)
            atlas_stream.disconnect()
        except ConnectionError:
            # Same (naive local) clock as lastConnection; the original
            # compared utcnow() against now(), which skews by the UTC offset.
            now = datetime.datetime.now()
            atlas_stream.disconnect()
            # Wait a bit if the connection was made less than a minute ago.
            # The original used timedelta(60), which is 60 DAYS, so the
            # back-off effectively always triggered.
            if lastConnection + datetime.timedelta(seconds=60) > now:
                time.sleep(60)
            # Fall through: go back to the loop and reconnect.
        except Exception as e:
            save_note = "Exception dump: %s : %s.\nCommand: %s" % (
                type(e).__name__, e, sys.argv)
            # Close the dump file deterministically (the original leaked
            # the file handle before exiting).
            with open("dump_%s.err" % datetime.datetime.now(),
                      "w") as exception_fp:
                exception_fp.write(save_note)
            sys.exit()
a = args[0] t = int(time.time()) if t != lastsec: be_verbose(2, "%d results ..." % (nrecs - lastrec)) lastrec = nrecs lastsec = t a['pdb_received'] = t a['pdb_source'] = 1 be_verbose(3, json.dumps(a)) buf.append(a) nrecs += 1 atlas_stream = AtlasStream() if cmdl.server: atlas_stream.iosocket_server = cmdl.server # override stream server address atlas_stream.connect() channel = "result" atlas_stream.bind_channel(channel, on_result_response) # establish callback stream_parameters = {"type": "traceroute"} atlas_stream.start_stream(stream_type="result", **stream_parameters) # start streaming be_verbose(1, "stream starting ...") atlas_stream.timeout(seconds=cmdl.secs) # this really starts it ....
def setup(self):
    """Prepare the sensor by instantiating its Atlas stream client."""
    stream_client = AtlasStream()
    self.atlas_stream = stream_client