def _expect_from_root(self, p_root):
    """
    Start an event subscriber to the given root platform. To be called
    before any action that triggers publications from the root platform.
    It sets self._wait_root_event to a function to be called to wait for
    the event.
    """
    # Holder the subscriber callback fills in when the event arrives.
    received = AsyncResult()

    event_type = "DeviceAggregateStatusEvent"

    def consume_event(evt, *args, **kwargs):
        received.set(evt)

    sub = EventSubscriber(event_type=event_type,
                          origin=p_root.platform_device_id,
                          callback=consume_event)
    sub.start()
    self._data_subscribers.append(sub)
    sub._ready_event.wait(timeout=CFG.endpoint.receive.timeout)
    log.debug("registered for DeviceAggregateStatusEvent")

    # Replace the wait hook: blocks until the event arrives (or times out).
    def wait():
        return received.get(timeout=CFG.endpoint.receive.timeout)

    self._wait_root_event = wait
def test_create_forever_interval_timer(self):
    """
    Create an interval timer that runs forever (end_time=-1), let a few
    intervals elapse, cancel it, and verify the received event count and
    that the canceled id is rejected afterwards.
    """
    self.interval_timer_count = 0
    self.interval_timer_sent_time = 0
    self.interval_timer_received_time = 0
    self.interval_timer_interval = 3

    event_origin = "Interval Timer Forever"
    sub = EventSubscriber(event_type="ResourceEvent",
                          callback=self.interval_timer_callback,
                          origin=event_origin)
    sub.start()
    # FIX: the subscriber was never stopped (leaked past the test);
    # register a cleanup, consistent with the sibling timer tests.
    self.addCleanup(sub.stop)

    id = self.ssclient.create_interval_timer(start_time=self.now_utc(),
                                             interval=self.interval_timer_interval,
                                             end_time=-1,
                                             event_origin=event_origin,
                                             event_subtype=event_origin)
    self.interval_timer_sent_time = datetime.datetime.utcnow()
    self.assertEqual(type(id), str)

    # Wait for about 4 events to be published
    gevent.sleep((self.interval_timer_interval * 4) + 1)
    self.ssclient.cancel_timer(id)

    # FIX: derive the expected number of events from the actual elapsed
    # time instead of hard-coding 4 — the old assertion was racy whenever
    # the sleep overran under load (same approach as the sibling test).
    time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
    timer_counts = time_diff // self.interval_timer_interval

    # Validate the timer id is invalid once it has been canceled
    with self.assertRaises(BadRequest):
        self.ssclient.cancel_timer(id)

    # Validate events are not generated after canceling the timer
    self.assertEqual(self.interval_timer_count, timer_counts,
                     "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s "
                     % (self.interval_timer_count, timer_counts, id))
class NotificationSubscription(object):
    """Ties a notification's info to its event subscriber."""

    def __init__(self, notification_request=None, callback=None):
        # The NotificationRequest resource object. Effectively required:
        # its fields are read immediately below despite the None default.
        self._res_obj = notification_request
        self.subscriber = EventSubscriber(
            origin=notification_request.origin,
            origin_type=notification_request.origin_type,
            event_type=notification_request.event_type,
            sub_type=notification_request.event_subtype,
            callback=callback)
        self.notification_subscription_id = None

    def set_notification_id(self, id_=None):
        """
        Record the id under which this notification was registered.
        @param id_  the notification id
        """
        self.notification_subscription_id = id_

    def activate(self):
        """Start subscribing."""
        self.subscriber.start()

    def deactivate(self):
        """Stop subscribing."""
        self.subscriber.stop()
def start_subscribers():
    """
    Create and start the global platform and result event subscribers,
    and open the global log file used to record activity.
    """
    global platform_sub
    global result_sub
    global go_time
    global logfile

    # Log file name carries the local hour/minute/second of the start time.
    go_time = time.time()
    loc_time = time.localtime(go_time)
    fname = '2caa_log_%d_%d_%d.txt' % (loc_time[3], loc_time[4], loc_time[5])
    fname = tcaa_args['logfile_dir'] + fname

    # Kept open globally; written to by the event consumers.
    logfile = open(fname, 'w')
    header = (0.0, 'Start', status, queue_size,
              len(requests_sent), len(results_recv), len(results_pending),
              len(results_confirmed), len(results_error))
    logfile.write('%15.6f %40s %6d %6d %6d %6d %6d %6d %6d\n' % header)

    platform_sub = EventSubscriber(event_type='PlatformEvent',
                                   callback=consume_event,
                                   origin=tcaa_args['xs_name'])
    platform_sub.start()

    result_sub = EventSubscriber(event_type='RemoteCommandResult',
                                 callback=consume_event,
                                 origin='fake_id')
    result_sub.start()
def _start_platform(self):
    """
    Starts the given platform waiting for it to transition to the
    UNINITIALIZED state (note that the agent starts in the LAUNCHING state).

    More in concrete the sequence of steps here are:
    - prepares subscriber to receive the UNINITIALIZED state transition
    - launches the platform process
    - waits for the start of the process
    - waits for the transition to the UNINITIALIZED state
    """
    ##############################################################
    # prepare to receive the UNINITIALIZED state transition:
    async_res = AsyncResult()

    def consume_event(evt, *args, **kwargs):
        # Only the UNINITIALIZED transition completes the wait below.
        log.debug("Got ResourceAgentStateEvent %s from origin %r",
                  evt.state, evt.origin)
        if evt.state == PlatformAgentState.UNINITIALIZED:
            async_res.set(evt)

    # start subscriber:
    sub = EventSubscriber(event_type="ResourceAgentStateEvent",
                          origin=self.platform_device_id,
                          callback=consume_event)
    sub.start()
    log.info("registered event subscriber to wait for state=%r from origin %r",
             PlatformAgentState.UNINITIALIZED, self.platform_device_id)
    #self._event_subscribers.append(sub)
    sub._ready_event.wait(timeout=EVENT_TIMEOUT)

    ##############################################################
    # now start the platform:
    agent_instance_id = self.platform_agent_instance_id
    log.debug("about to call start_platform_agent_instance with id=%s",
              agent_instance_id)
    pid = self.imsclient.start_platform_agent_instance(
        platform_agent_instance_id=agent_instance_id)
    log.debug("start_platform_agent_instance returned pid=%s", pid)

    # wait for start: block until the spawned agent process reaches RUNNING.
    agent_instance_obj = self.imsclient.read_platform_agent_instance(
        agent_instance_id)
    gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                 self.platform_device_id,
                                 ProcessStateEnum.RUNNING)
    # NOTE: `await` here is an attribute name (Python 2 code), not the keyword.
    self.assertTrue(gate. await (90),
                    "The platform agent instance did not spawn in 90 seconds")

    # Start a resource agent client to talk with the agent.
    self._pa_client = ResourceAgentClient(self.platform_device_id,
                                          name=gate.process_id,
                                          process=FakeProcess())
    log.debug("got platform agent client %s", str(self._pa_client))

    ##############################################################
    # wait for the UNINITIALIZED event:
    async_res.get(timeout=self._receive_timeout)
def test_cancel_single_timer(self):
    """
    Create a one-time-only time-of-day timer, cancel it before it fires,
    and verify that no timer event is ever received.
    """
    self.single_timer_count = 0
    event_origin = "Time_of_Day"

    sub = EventSubscriber(event_type="TimerEvent",
                          callback=self.single_timer_callback,
                          origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    # Schedule the trigger time three seconds from now.
    now = datetime.datetime.utcnow() + timedelta(seconds=3)
    times_of_day = [{'hour': str(now.hour),
                     'minute': str(now.minute),
                     'second': str(now.second)}]
    id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day,
                                                expires=self.now_utc() + 3,
                                                event_origin=event_origin,
                                                event_subtype="test")
    self.assertEqual(type(id), str)

    # Cancel immediately, then wait past the scheduled time.
    self.ssclient.cancel_timer(id)
    gevent.sleep(3)

    # Validate the event is not generated
    self.assertEqual(self.single_timer_count, 0,
                     "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: 0 Timer id: %s "
                     % (self.single_timer_count, id))
def test_create_forever_interval_timer(self):
    """
    Create an interval timer that runs forever (end_time="-1"), let a few
    intervals elapse, cancel it, and verify both the received event count
    and that the canceled id is rejected afterwards.
    """
    self.interval_timer_count = 0
    self.interval_timer_sent_time = 0
    self.interval_timer_received_time = 0
    self.interval_timer_interval = 3

    event_origin = "Interval Timer Forever"
    sub = EventSubscriber(event_type="TimerEvent",
                          callback=self.interval_timer_callback,
                          origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    id = self.ssclient.create_interval_timer(start_time=str(self.now_utc()),
                                             interval=self.interval_timer_interval,
                                             end_time="-1",
                                             event_origin=event_origin,
                                             event_subtype=event_origin)
    self.interval_timer_sent_time = datetime.datetime.utcnow()
    self.assertEqual(type(id), str)

    # Wait for 4 events to be published
    gevent.sleep((self.interval_timer_interval * 4) + 1)
    self.ssclient.cancel_timer(id)

    # Expected event count derived from the actual elapsed time.
    time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
    timer_counts = math.floor(time_diff / self.interval_timer_interval)

    # Validate the timer id is invalid once it has been canceled
    with self.assertRaises(BadRequest):
        self.ssclient.cancel_timer(id)

    # Validate events are not generated after canceling the timer
    self.assertEqual(self.interval_timer_count, timer_counts,
                     "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s "
                     % (self.interval_timer_count, timer_counts, id))
def test_create_interval_timer_with_end_time(self):
    """
    Create an interval timer with an end_time, receive a few intervals,
    and validate that no more events are published and the timer is
    canceled once end_time expires.
    """
    self.interval_timer_count_2 = 0
    self.interval_timer_sent_time_2 = 0
    self.interval_timer_received_time_2 = 0
    self.interval_timer_interval_2 = 3

    event_origin = "Interval_Timer_2"
    sub = EventSubscriber(event_type="TimerEvent",
                          callback=self.interval_timer_callback_with_end_time,
                          origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    start_time = self.now_utc()
    self.interval_timer_end_time_2 = start_time + 7
    id = self.ssclient.create_interval_timer(start_time="now",
                                             interval=self.interval_timer_interval_2,
                                             end_time=self.interval_timer_end_time_2,
                                             event_origin=event_origin,
                                             event_subtype="")
    self.interval_timer_sent_time_2 = datetime.datetime.utcnow()
    self.assertEqual(type(id), str)

    # Wait until all events are published
    gevent.sleep((self.interval_timer_end_time_2 - start_time) +
                 self.interval_timer_interval_2 + 1)

    # A 7-second window with a 3-second interval fits exactly two events.
    self.assertEqual(self.interval_timer_count_2, 2,
                     "Invalid number of timeouts generated. Number of event: %d Expected: 2 Timer id: %s "
                     % (self.interval_timer_count_2, id))

    # Validate the timer was canceled after the end_time is expired
    with self.assertRaises(BadRequest):
        self.ssclient.cancel_timer(id)
def test_pub_on_different_subtypes(self):
    """
    Publish events with a mix of event types and sub_types and verify that
    a subscriber filtered on (ResourceModifiedEvent, st1) receives only
    the matching ones, in publication order.
    """
    ar = event.AsyncResult()
    gq = queue.Queue()
    self.count = 0

    def cb(event, *args, **kwargs):
        # Record every delivered event; the "end" marker releases the wait.
        self.count += 1
        gq.put(event)
        if event.description == "end":
            ar.set()

    sub = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1", callback=cb)
    sub.start()

    pub1 = EventPublisher(event_type="ResourceModifiedEvent")
    pub2 = EventPublisher(event_type="ContainerLifecycleEvent")

    # Only "1" and "end" match both the event type and sub_type filters.
    pub1.publish_event(origin="two", sub_type="st2", description="2")
    pub2.publish_event(origin="three", sub_type="st1", description="3")
    pub1.publish_event(origin="one", sub_type="st1", description="1")
    pub1.publish_event(origin="four", sub_type="st1", description="end")

    ar.get(timeout=5)
    sub.stop()

    res = [gq.get(timeout=5) for _ in xrange(self.count)]
    self.assertEquals(len(res), 2)
    self.assertEquals(res[0].description, "1")
class InstrumentAgentEventSubscribers(object):
    """
    Create subscribers for agent and driver events.
    """
    # NOTE(review): this log call executes once at class-definition
    # (import) time, not per instance — confirm that is intended.
    log.info("Start event subscribers")

    def __init__(self, instrument_agent_resource_id = None):
        # Start event subscribers, add stop to cleanup.
        self.no_events = None           # target event count; None = no target
        self.events_received = []       # every event delivered so far
        self.async_event_result = AsyncResult()
        self.event_subscribers = []

        def consume_event(*args, **kwargs):
            # args[0] is the delivered ION event object.
            log.debug('#**#**# Event subscriber (consume_event) recieved ION event: args=%s, kwargs=%s, event=%s.',
                      str(args), str(kwargs), str(args[0]))
            log.debug("self.no_events = " + str(self.no_events))
            log.debug("self.event_received = " + str(self.events_received))

            self.events_received.append(args[0])
            # Signal the waiter once the expected number of events arrived.
            if self.no_events and self.no_events == len(self.events_received):
                log.debug("CALLING self.async_event_result.set()")
                self.async_event_result.set()

        # NOTE(review): the empty list assigned above is immediately replaced
        # by a single EventSubscriber object here, so the attribute is not a
        # list — confirm no caller iterates over it.
        self.event_subscribers = EventSubscriber(
            event_type='ResourceAgentEvent', callback=consume_event,
            origin=instrument_agent_resource_id)
        self.event_subscribers.start()
        self.event_subscribers._ready_event.wait(timeout=5)
def start_event_listener(self):
    """
    Subscribe to DataProcessStatusEvent, routing each received event to
    self.validate_event; the subscriber is stopped at test cleanup.
    """
    es = EventSubscriber(callback=self.validate_event,
                         event_type=OT.DataProcessStatusEvent)
    es.start()
    self.addCleanup(es.stop)
def start_DeviceStatusAlertEvent_subscriber(value_id, sub_type):
    """
    Start a subscriber for DeviceStatusAlertEvent events matching the
    given value_id and sub_type.

    NOTE(review): this function references `self`, so it appears to be a
    closure defined inside a test method — the enclosing instance supplies
    self.p_root, self._event_subscribers and self._receive_timeout; confirm.

    @return async_event_result Use it to wait for the expected event
    """
    event_type = "DeviceStatusAlertEvent"
    async_event_result = AsyncResult()

    def consume_event(evt, *args, **kwargs):
        log.info('DeviceStatusAlertEvent_subscriber received evt: %s', str(evt))
        # Ignore any event that does not match all three filters.
        if evt.type_ != event_type or \
           evt.value_id != value_id or \
           evt.sub_type != sub_type:
            return

        async_event_result.set(evt)

    kwargs = dict(event_type=event_type,
                  callback=consume_event,
                  origin=self.p_root.platform_device_id,
                  sub_type=sub_type)

    sub = EventSubscriber(**kwargs)
    sub.start()
    log.info("registered DeviceStatusAlertEvent subscriber: %s", kwargs)

    self._event_subscribers.append(sub)
    sub._ready_event.wait(timeout=self._receive_timeout)

    return async_event_result
def _start_event_subscriber(self, event_type="DeviceEvent", sub_type=None, count=0):
    """
    Start an event subscriber for events of the given event_type
    ("DeviceEvent" by default) and sub_type (None, i.e. any, by default).

    @param event_type  event type to subscribe to
    @param sub_type    sub_type filter, or None for any
    @param count       number of events to accumulate before signaling
                       self._async_event_result; 0 signals on every event
    """
    def consume_event(evt, *args, **kwargs):
        # Accumulate, then signal according to the requested count.
        log.info('Event subscriber received evt: %s.', str(evt))
        self._events_received.append(evt)
        if count == 0:
            self._async_event_result.set(evt)
        elif count == len(self._events_received):
            self._async_event_result.set()

    sub = EventSubscriber(event_type=event_type,
                          sub_type=sub_type,
                          callback=consume_event)
    sub.start()
    log.info("registered event subscriber for event_type=%r, sub_type=%r, count=%d",
             event_type, sub_type, count)

    self._event_subscribers.append(sub)
    sub._ready_event.wait(timeout=EVENT_TIMEOUT)
def start_listener(self, dataset_id=''):
    """
    Listen on the given dataset: retrieve data on every DatasetModified
    event, and stop the agent once ingestion completes (or after 120
    seconds, whichever comes first).
    """
    dataset_modified = Event()

    # callback to use retrieve to get data from the coverage
    def on_modified(*args, **kwargs):
        self.get_retrieve_client(dataset_id=dataset_id)

    # callback to keep execution going once dataset has been fully ingested
    def on_ingestion_complete(*args, **kwargs):
        dataset_modified.set()

    es = EventSubscriber(event_type=OT.DatasetModified,
                         callback=on_modified,
                         origin=dataset_id)
    es.start()

    es2 = EventSubscriber(event_type=OT.DeviceCommonLifecycleEvent,
                          callback=on_ingestion_complete,
                          origin='BaseDataHandler._acquire_sample')
    es2.start()

    self.addCleanup(es.stop)
    self.addCleanup(es2.stop)

    # let it go for up to 120 seconds, then stop the agent and reset it
    dataset_modified.wait(120)
    self.stop_agent()
def do_listen_for_incoming(self):
    """
    Activate a subscription on the data product's stream, capturing the
    first few granules, and subscribe to DatasetModified events so the
    test can wait for ingestion to complete.
    """
    subscription_id = self.pubsub.create_subscription(
        'validator', data_product_ids=[self.data_product._id])
    self.addCleanup(self.pubsub.delete_subscription, subscription_id)

    self.granule_capture = []
    self.granule_count = 0

    def on_granule(msg, route, stream_id):
        # Keep only the first few granules to bound memory use.
        self.granule_count += 1
        if self.granule_count < 5:
            self.granule_capture.append(msg)

    validator = StandaloneStreamSubscriber('validator', callback=on_granule)
    validator.start()
    self.addCleanup(validator.stop)

    self.pubsub.activate_subscription(subscription_id)
    self.addCleanup(self.pubsub.deactivate_subscription, subscription_id)

    self.dataset_modified = Event()

    def on_dataset_modified(*args, **kwargs):
        self.dataset_modified.set()

    # TODO: event isn't using the ExternalDataset, but a different ID for a Dataset
    es = EventSubscriber(event_type=OT.DatasetModified,
                         callback=on_dataset_modified,
                         origin=self.dataset_id)
    es.start()
    self.addCleanup(es.stop)
def start_subscribers():
    """
    Start the global platform-event and remote-command-result subscribers
    and open the activity log file.
    """
    global platform_sub
    global result_sub
    global go_time
    global logfile

    go_time = time.time()
    loc_time = time.localtime(go_time)

    # Log file name carries the local hour/minute/second of the start time.
    fname = '2caa_log_%d_%d_%d.txt' % (loc_time[3], loc_time[4], loc_time[5])
    fname = tcaa_args['logfile_dir'] + fname
    logfile = open(fname, 'w')  # kept open globally

    initial_row = (0.0, 'Start', status, queue_size,
                   len(requests_sent), len(results_recv),
                   len(results_pending), len(results_confirmed),
                   len(results_error))
    logfile.write('%15.6f %40s %6d %6d %6d %6d %6d %6d %6d\n' % initial_row)

    platform_sub = EventSubscriber(event_type='PlatformEvent',
                                   callback=consume_event,
                                   origin=tcaa_args['xs_name'])
    platform_sub.start()

    result_sub = EventSubscriber(event_type='RemoteCommandResult',
                                 callback=consume_event,
                                 origin='fake_id')
    result_sub.start()
def test_create_interval_timer_with_end_time(self):
    """
    Create an interval timer with an end_time, receive a few intervals,
    then validate that no more events are published and the timer is
    canceled once the end_time expires.
    """
    self.interval_timer_count = 0
    self.interval_timer_sent_time = 0
    self.interval_timer_received_time = 0
    self.interval_timer_interval = 2

    event_origin = "Interval Timer"
    sub = EventSubscriber(event_type="ResourceEvent",
                          callback=self.interval_timer_callback,
                          origin=event_origin)
    sub.start()
    # FIX: the subscriber was never stopped (leaked past the test);
    # register a cleanup, consistent with the sibling interval-timer tests.
    self.addCleanup(sub.stop)

    start_time = self.now_utc()
    self.interval_timer_end_time = start_time + 5
    id = self.ssclient.create_interval_timer(start_time="now",
                                             interval=self.interval_timer_interval,
                                             end_time=self.interval_timer_end_time,
                                             event_origin=event_origin,
                                             event_subtype="")
    self.interval_timer_sent_time = datetime.datetime.utcnow()
    self.assertEqual(type(id), str)

    # Wait until all events are published
    gevent.sleep((self.interval_timer_end_time - start_time) +
                 self.interval_timer_interval + 1)

    # Validate only 2 events are published (5s window / 2s interval)
    self.assertEqual(self.interval_timer_count, 2)

    # Validate the timer was canceled after the end_time is expired
    with self.assertRaises(BadRequest):
        self.ssclient.cancel_timer(id)
def test_instrument_device_metadata_notification_l4_ci_sa_rq_145_323(self):
    """
    Instrument management shall update physical resource metadata when
    change occurs

    For example, when there is a change of state.

    note from maurice 2012-05-18: consider this to mean a change of
    stored RR data
    """
    inst_obj = any_old(RT.InstrumentDevice)
    instrument_device_id, _ = self.RR.create(inst_obj)

    self.received_event = AsyncResult()

    # Create subscribers for agent and driver events.
    def consume_event(*args, **kwargs):
        self.received_event.set(True)
        log.info("L4-CI-SA-RQ-323")
        log.info("L4-CI-SA-RQ-145")

    event_sub = EventSubscriber(event_type="ResourceModifiedEvent",
                                callback=consume_event)
    event_sub.start()

    # Updating the device description should trigger the event.
    inst_obj = self.RR.read(instrument_device_id)
    inst_obj.description = "brand new description"
    self.RR.update(inst_obj)

    # wait for event
    result = self.received_event.get(timeout=10)
    event_sub.stop()

    self.assertTrue(result)
def test_create_single_timer(self):
    """
    Create a one-time-only time-of-day timer, cancel it before it fires,
    and verify that no event is sent.
    """
    self.single_timer_count = 0
    event_origin = "Time of Day"

    sub = EventSubscriber(event_type="ResourceEvent",
                          callback=self.single_timer_call_back,
                          origin=event_origin)
    sub.start()

    # Time out in 3 seconds
    now = datetime.datetime.utcnow() + timedelta(seconds=3)
    times_of_day = [{'hour': str(now.hour),
                     'minute': str(now.minute),
                     'second': str(now.second)}]

    ss = SchedulerService()
    id = ss.create_time_of_day_timer(times_of_day=times_of_day,
                                     expires=time.time() + 25200 + 60,
                                     event_origin=event_origin,
                                     event_subtype="")
    self.assertEqual(type(id), str)

    # Cancel immediately, then wait past the scheduled trigger time.
    ss.cancel_timer(id)
    gevent.sleep(5)

    # Validate the event is not sent
    self.assertEqual(self.single_timer_count, 0)
def test_ingestion_failover(self):
    """
    Corrupt a dataset's master HDF5 file mid-ingestion and verify that a
    stream-exception event is published.
    """
    stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset()
    self.start_ingestion(stream_id, dataset_id)

    event = Event()

    def cb(*args, **kwargs):
        event.set()

    sub = EventSubscriber(event_type="ExceptionEvent", callback=cb,
                          origin="stream_exception")
    sub.start()
    # FIX: stop the subscriber via cleanup so it is released even when an
    # assertion below fails (the old trailing sub.stop() was skipped then).
    self.addCleanup(sub.stop)

    self.publish_fake_data(stream_id, route)
    self.wait_until_we_have_enough_granules(dataset_id, 40)

    # Corrupt the coverage's master file so further ingestion fails.
    file_path = DatasetManagementService._get_coverage_path(dataset_id)
    master_file = os.path.join(file_path, '%s_master.hdf5' % dataset_id)

    with open(master_file, 'w') as f:
        f.write('this will crash HDF')

    self.publish_hifi(stream_id, route, 5)

    self.assertTrue(event.wait(10))
def test_quit_stops_timers(self):
    """
    Verify that terminating the scheduler process clears all of its
    schedule entries.
    """
    ar = AsyncResult()
    # FIX: initialize the counter before the callback runs; previously the
    # callback did `self.interval_timer_count += 1` on an attribute that is
    # not set anywhere in this test, raising AttributeError inside the
    # subscriber callback after the first event.
    self.interval_timer_count = 0

    def cb(*args, **kwargs):
        ar.set(args)
        self.interval_timer_count += 1

    event_origin = "test_quitter"
    sub = EventSubscriber(event_type="TimerEvent", callback=cb, origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    tid = self.ssclient.create_interval_timer(start_time="now",
                                              interval=1,
                                              event_origin=event_origin)

    # wait until at least one scheduled message
    ar.get(timeout=5)

    # shut it down!
    p = self.container.proc_manager.procs_by_name['scheduler']
    self.container.terminate_process(p.id)

    # assert empty
    self.assertEquals(p.schedule_entries, {})
def test_quit_stops_timers(self):
    """
    Verify that terminating the scheduler process clears all of its
    schedule entries.
    """
    ar = AsyncResult()
    # FIX: initialize the counter before the callback runs; previously the
    # callback did `self.interval_timer_count += 1` on an attribute that is
    # not set anywhere in this test, raising AttributeError inside the
    # subscriber callback after the first event.
    self.interval_timer_count = 0

    def cb(*args, **kwargs):
        ar.set(args)
        self.interval_timer_count += 1

    event_origin = "test_quitter"
    sub = EventSubscriber(event_type="TimerEvent", callback=cb, origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    tid = self.ssclient.create_interval_timer(start_time="now",
                                              end_time="-1",
                                              interval=1,
                                              event_origin=event_origin)

    # wait until at least one scheduled message
    ar.get(timeout=5)

    # shut it down!
    p = self.container.proc_manager.procs_by_name['scheduler']
    self.container.terminate_process(p.id)

    # assert empty
    self.assertEquals(p.schedule_entries, {})
def test_qc_events(self):
    """
    Publish a granule through a QC-enabled stream and verify that a
    ParameterQCEvent with the expected QC parameter and temporal value is
    raised on the dataset origin.
    """
    ph = ParameterHelper(self.dataset_management, self.addCleanup)
    pdict_id = ph.create_qc_pdict()

    # Stream definition and stream wired to the QC parameter dictionary.
    stream_def_id = self.pubsub_management.create_stream_definition(
        'qc stream def', parameter_dictionary_id=pdict_id)
    self.addCleanup(self.pubsub_management.delete_stream_definition, stream_def_id)

    stream_id, route = self.pubsub_management.create_stream(
        'qc stream',
        exchange_point=self.exchange_point_name,
        stream_definition_id=stream_def_id)
    self.addCleanup(self.pubsub_management.delete_stream, stream_id)

    # Persist the stream into a fresh dataset.
    ingestion_config_id = self.get_ingestion_config()
    dataset_id = self.create_dataset(pdict_id)
    config = DotDict()
    self.ingestion_management.persist_data_stream(
        stream_id=stream_id,
        ingestion_configuration_id=ingestion_config_id,
        dataset_id=dataset_id,
        config=config)
    self.addCleanup(self.ingestion_management.unpersist_data_stream,
                    stream_id, ingestion_config_id)

    publisher = StandaloneStreamPublisher(stream_id, route)

    rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
    rdt['time'] = np.arange(10)
    rdt['temp'] = np.arange(10) * 3

    verified = Event()

    def verification(event, *args, **kwargs):
        # Assertions run inside the subscriber callback; verified is only
        # set if they pass.
        self.assertEquals(event.qc_parameter, 'temp_qc')
        self.assertEquals(event.temporal_value, 7)
        verified.set()

    es = EventSubscriber(event_type=OT.ParameterQCEvent,
                         origin=dataset_id,
                         callback=verification,
                         auto_delete=True)
    es.start()
    self.addCleanup(es.stop)

    publisher.publish(rdt.to_granule())
    self.assertTrue(verified.wait(10))
def on_start(self):
    """
    Process start hook: create service clients, read the configured output
    streams, and optionally schedule a recurring timer event that drives
    graph generation.
    """
    #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

    # Service clients, all bound to this process.
    self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
    self.ssclient = SchedulerServiceProcessClient(process=self)
    self.rrclient = ResourceRegistryServiceProcessClient(process=self)
    self.data_retriever_client = DataRetrieverServiceProcessClient(process=self)
    self.dsm_client = DatasetManagementServiceProcessClient(process=self)
    self.pubsub_client = PubsubManagementServiceProcessClient(process = self)

    # Configured output streams (name -> stream id).
    self.stream_info = self.CFG.get_safe('process.publish_streams',{})
    self.stream_names = self.stream_info.keys()
    self.stream_ids = self.stream_info.values()

    if not self.stream_names:
        raise BadRequest('MPL Transform has no output streams.')

    graph_time_periods= self.CFG.get_safe('graph_time_periods')

    # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
    self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
    if self.event_timer_interval:
        event_origin = "Interval_Timer_Matplotlib"
        # NOTE(review): `sub` is neither stored on self nor stopped here —
        # presumably it is meant to live for the life of the process; confirm.
        sub = EventSubscriber(event_type="ResourceEvent",
                              callback=self.interval_timer_callback,
                              origin=event_origin)
        sub.start()

        # Recurring timer whose events drive interval_timer_callback.
        self.interval_timer_id = self.ssclient.create_interval_timer(
            start_time="now" ,
            interval=self._str_to_secs(self.event_timer_interval),
            event_origin=event_origin,
            event_subtype="")

    super(VizTransformMatplotlibGraphs,self).on_start()
def test_ingestion_failover(self):
    """
    Corrupt a dataset's master HDF5 file mid-ingestion and verify that a
    stream-exception event is published.
    """
    stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset()
    self.start_ingestion(stream_id, dataset_id)

    event = Event()

    def cb(*args, **kwargs):
        event.set()

    sub = EventSubscriber(event_type="ExceptionEvent",
                          callback=cb,
                          origin="stream_exception")
    sub.start()
    # FIX: stop the subscriber via cleanup so it is released even when an
    # assertion below fails (the old trailing sub.stop() was skipped then).
    self.addCleanup(sub.stop)

    self.publish_fake_data(stream_id, route)
    self.wait_until_we_have_enough_granules(dataset_id, 40)

    # Corrupt the coverage's master file so further ingestion fails.
    file_path = DatasetManagementService._get_coverage_path(dataset_id)
    master_file = os.path.join(file_path, '%s_master.hdf5' % dataset_id)

    with open(master_file, 'w') as f:
        f.write('this will crash HDF')

    self.publish_hifi(stream_id, route, 5)

    self.assertTrue(event.wait(10))
def test_timeoffday_timer(self):
    """
    Create a time-of-day timer with two trigger times a few seconds in the
    future and verify exactly two events arrive before the timer expires.
    """
    import calendar  # local import: needed for the UTC-correct expiry below

    ss = SchedulerService()
    event_origin = "Time Of Day2"
    self.expire_sec_1 = 4
    self.expire_sec_2 = 4
    self.tod_count = 0
    expire1 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_1)
    expire2 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_2)

    # Create two timers
    times_of_day = [{'hour': str(expire1.hour), 'minute': str(expire1.minute), 'second': str(expire1.second)},
                    {'hour': str(expire2.hour), 'minute': str(expire2.minute), 'second': str(expire2.second)}]

    sub = EventSubscriber(event_type="ResourceEvent",
                          callback=self.tod_callback,
                          origin=event_origin)
    sub.start()

    # Expires in one day.
    # FIX: use calendar.timegm instead of time.mktime — the timetuple comes
    # from utcnow(), and mktime interprets it in the *local* timezone,
    # yielding a wrong epoch value on any non-UTC host (the sibling
    # time-of-day test already uses calendar.timegm for this).
    e = calendar.timegm((datetime.datetime.utcnow() + timedelta(days=1)).timetuple())
    self.tod_sent_time = datetime.datetime.utcnow()
    id = ss.create_time_of_day_timer(times_of_day=times_of_day,
                                     expires=e,
                                     event_origin=event_origin,
                                     event_subtype="")
    self.assertEqual(type(id), str)

    gevent.sleep(15)

    # After waiting for 15 seconds, validate only 2 events are generated.
    self.assertTrue(self.tod_count == 2)
def test_create_forever_interval_timer(self):
    """
    Create an interval timer that runs forever (end_time=-1), let a few
    intervals elapse, cancel it, and verify both the received event count
    and that the canceled id is rejected afterwards.
    """
    self.interval_timer_count = 0
    self.interval_timer_sent_time = 0
    self.interval_timer_received_time = 0
    self.interval_timer_interval = 3

    event_origin = "Interval Timer Forever"
    sub = EventSubscriber(event_type="TimerEvent",
                          callback=self.interval_timer_callback,
                          origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    id = self.ssclient.create_interval_timer(start_time=self.now_utc(),
                                             interval=self.interval_timer_interval,
                                             end_time=-1,
                                             event_origin=event_origin,
                                             event_subtype=event_origin)
    self.interval_timer_sent_time = datetime.datetime.utcnow()
    self.assertEqual(type(id), str)

    # Wait for 4 events to be published
    gevent.sleep((self.interval_timer_interval * 4) + 1)
    self.ssclient.cancel_timer(id)

    # Expected count is derived from the actual elapsed time so that a
    # slow run does not produce a spurious failure.
    time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
    timer_counts = math.floor(time_diff / self.interval_timer_interval)

    # Validate the timer id is invalid once it has been canceled
    with self.assertRaises(BadRequest):
        self.ssclient.cancel_timer(id)

    # Validate events are not generated after canceling the timer
    self.assertEqual(self.interval_timer_count, timer_counts,
                     "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s "
                     % (self.interval_timer_count, timer_counts, id))
def test_system_restart(self):
    """
    Create an interval timer with an end_time, restart the scheduler
    service mid-run, and verify the timer is restored from the resource
    registry, keeps firing, and can be canceled afterwards.
    """
    self.interval_timer_count = 0
    self.interval_timer_sent_time = 0
    self.interval_timer_received_time = 0
    self.interval_timer_interval = 3

    event_origin = "Interval_Timer_4444"
    sub = EventSubscriber(event_type="ResourceEvent",
                          callback=self.on_restart_callback,
                          origin=event_origin)
    sub.start()

    start_time = self.now_utc()
    self.interval_timer_end_time = start_time + 20
    id = self.ssclient.create_interval_timer(start_time="now",
                                             interval=self.interval_timer_interval,
                                             end_time=self.interval_timer_end_time,
                                             event_origin=event_origin,
                                             event_subtype="")
    self.interval_timer_sent_time = datetime.datetime.utcnow()
    self.assertEqual(type(id), str)

    # Validate the timer is stored in RR
    ss = self.rrclient.read(id)
    self.assertEqual(ss.entry.event_origin, event_origin)

    # Wait until 1 event is published, then validate it.
    gevent.sleep((self.interval_timer_interval) + 1)
    self.assertEqual(self.interval_timer_count, 1)

    self.ssclient.on_system_restart()

    # after system restart, validate the timer is restored
    ss = self.rrclient.read(id)
    self.assertEqual(ss.entry.event_origin, event_origin)

    # Wait until another event is published and validate it arrived.
    gevent.sleep((self.interval_timer_interval * 2) + 1)
    self.assertGreater(self.interval_timer_count, 2)

    # Cancel the timer
    ss = self.ssclient.cancel_timer(id)

    # wait until after next interval to verify that timer was correctly cancelled
    gevent.sleep(self.interval_timer_interval)
    with self.assertRaises(BadRequest):
        self.ssclient.cancel_timer(id)

    # Validate the timer is removed from the resource registry
    with self.assertRaises(NotFound):
        self.rrclient.read(id)
def start_listener(self, dataset_id):
    """
    Subscribe to DatasetModified events for the given dataset, setting
    self.data_modified when one arrives; the subscriber is stopped at
    cleanup.
    """
    def on_modified(*args, **kwargs):
        self.data_modified.set()

    es = EventSubscriber(event_type=OT.DatasetModified,
                         callback=on_modified,
                         origin=dataset_id)
    es.start()
    self.addCleanup(es.stop)
class EventPersister(StandaloneProcess):
    """
    Standalone process that subscribes to all events and persists them to
    the event repository in batches on a fixed interval.
    """

    def on_init(self):
        # Time in between event persists
        self.persist_interval = 1.0

        # Holds received events FIFO
        self.event_queue = Queue()

        # Temporarily holds list of events to persist while datastore operation not yet completed
        self.events_to_persist = None

        # bookkeeping for timeout greenlet
        self._persist_greenlet = None
        self._terminate_persist = Event() # when set, exits the timeout greenlet

        # The event subscriber
        self.event_sub = None

    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('Publisher Greenlet started in "%s"' % self.__class__.__name__)

        # Event subscription: ALL events, funneled into the queue by _on_event.
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                         callback=self._on_event)
        self.event_sub.start()

    def on_quit(self):
        # Stop event subscriber
        self.event_sub.stop()

        # tell the trigger greenlet we're done
        self._terminate_persist.set()

        # wait on the greenlet to finish cleanly
        self._persist_greenlet.join(timeout=10)

    def _on_event(self, event, *args, **kwargs):
        # Subscriber callback: only enqueue; persistence happens in the
        # persister greenlet so the subscriber stays fast.
        self.event_queue.put(event)

    def _trigger_func(self, persist_interval):
        log.debug('Starting event persister thread with persist_interval=%s',
                  persist_interval)

        # Event.wait returns False on timeout (and True when set in on_quit),
        # so we use this to both exit cleanly and do our timeout in a loop
        while not self._terminate_persist.wait(timeout=persist_interval):
            try:
                # Drain everything currently queued and persist as one batch.
                self.events_to_persist = [self.event_queue.get() for x in xrange(self.event_queue.qsize())]
                self._persist_events(self.events_to_persist)
                self.events_to_persist = None
            except Exception as ex:
                # NOTE(review): returning here terminates the persister
                # greenlet after the first failure, so no further events are
                # persisted until the process restarts — confirm intended.
                log.exception("Failed to persist received events")
                return False

    def _persist_events(self, event_list):
        # Batch write to the event repository; empty batches are skipped.
        if event_list:
            bootstrap.container_instance.event_repository.put_events(event_list)
def test_timeoffday_timer_in_past_seconds(self):
    """Time-of-day timers whose time already passed today: the first scenario
    expects no event to fire before cancellation; the second expects a
    BadRequest because the timer expires before its first scheduled event."""
    # test creating a new timer that is one-time-only
    # create the timer resource
    # get the current time, set the timer to several seconds from current time
    # create the event listener
    # call scheduler to set the timer
    # verify that event arrival is within one/two seconds of current time

    event_origin = "Time_Of_Day3"
    expire_sec = -4   # negative offset: the time-of-day is a few seconds in the past
    self.tod_count2 = 0
    now = datetime.datetime.utcnow()
    expire1 = now + timedelta(seconds=expire_sec)
    # Single time-of-day entry (already in the past for today)
    times_of_day = [{'hour': str(expire1.hour), 'minute': str(expire1.minute), 'second': str(expire1.second)}]
    sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback2, origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    # Expires in 3 days
    expires = calendar.timegm((datetime.datetime.utcnow() + timedelta(days=3)).timetuple())
    self.tod_sent_time = datetime.datetime.utcnow()
    id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
    self.interval_timer_sent_time = datetime.datetime.utcnow()
    self.assertEqual(type(id), str)

    # Wait and see if any events are generated
    gevent.sleep(5)

    # After waiting, validate no event is generated
    self.assertEqual(self.tod_count2, 0, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: 0 Timer id: %s " %(self.tod_count2, id))

    # Cancel the timer
    self.ssclient.cancel_timer(id)

    # This is example for the following case:
    # Example current time is 8:00AM. User sets up a timer for 6:00AM. Since it is 8am, it tries to
    # set up a timer for tomorrow 6am but the expire time is set at 5AM tomorrow.
    event_origin = "Time_Of_Day4"
    expire_sec = -4
    self.tod_count2 = 0
    now = datetime.datetime.utcnow()
    expire1 = now + timedelta(seconds=expire_sec)
    times_of_day = [{'hour': str(expire1.hour), 'minute': str(expire1.minute), 'second': str(expire1.second)}]
    sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback2, origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    # Expires before the first event
    time_delta = timedelta(days=1) + timedelta(seconds=-(abs(expire_sec*2)))   # Notice the minus sign. It expires before the first event
    expires = calendar.timegm((now + time_delta).timetuple())
    self.tod_sent_time = datetime.datetime.utcnow()
    # Scheduler must reject a timer that would expire before it ever fires.
    with self.assertRaises(BadRequest):
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
def test_replay_with_parameters(self):
    """Persist fake data into a dataset, then retrieve a time-bounded,
    strided subset of selected parameters and verify the returned granule."""
    #--------------------------------------------------------------------------------
    # Create the configurations and the dataset
    #--------------------------------------------------------------------------------
    # Get a precompiled parameter dictionary with basic ctd fields
    pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict',id_only=True)
    context_ids = self.dataset_management.read_parameter_contexts(pdict_id, id_only=True)

    # Add a field that supports binary data input.
    bin_context = ParameterContext('binary', param_type=ArrayType())
    context_ids.append(self.dataset_management.create_parameter_context('binary', bin_context.dump()))
    # Add another field that supports dictionary elements.
    rec_context = ParameterContext('records', param_type=RecordType())
    context_ids.append(self.dataset_management.create_parameter_context('records', rec_context.dump()))

    pdict_id = self.dataset_management.create_parameter_dictionary('replay_pdict', parameter_context_ids=context_ids, temporal_context='time')

    stream_def_id = self.pubsub_management.create_stream_definition('replay_stream', parameter_dictionary_id=pdict_id)

    stream_id, route = self.pubsub_management.create_stream('replay_with_params', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
    config_id = self.get_ingestion_config()
    dataset_id = self.create_dataset(pdict_id)
    self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id)

    # Block until ingestion signals the dataset was actually modified.
    dataset_modified = Event()
    def cb(*args, **kwargs):
        dataset_modified.set()
    es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id)
    es.start()

    self.addCleanup(es.stop)

    self.publish_fake_data(stream_id, route)

    self.assertTrue(dataset_modified.wait(30))

    # Times are NTP-based: 2208988800 s is the 1900->1970 epoch offset.
    query = {
        'start_time': 0 - 2208988800,
        'end_time': 20 - 2208988800,
        'stride_time' : 2,
        'parameters': ['time','temp']
    }
    retrieved_data = self.data_retriever.retrieve(dataset_id=dataset_id,query=query)

    rdt = RecordDictionaryTool.load_from_granule(retrieved_data)
    # Stride of 2 over [0, 20) should yield exactly the even timestamps.
    comp = np.arange(0,20,2) == rdt['time']
    self.assertTrue(comp.all(),'%s' % rdt.pretty_print())
    # Only the requested parameters should come back.
    self.assertEquals(set(rdt.iterkeys()), set(['time','temp']))

    extents = self.dataset_management.dataset_extents(dataset_id=dataset_id, parameters=['time','temp'])
    self.assertTrue(extents['time']>=20)
    self.assertTrue(extents['temp']>=20)

    self.streams.append(stream_id)
    self.stop_ingestion(stream_id)
def _start_platform(self):
    """
    Starts the given platform waiting for it to transition to the
    UNINITIALIZED state (note that the agent starts in the LAUNCHING state).

    More in concrete the sequence of steps here are:
    - prepares subscriber to receive the UNINITIALIZED state transition
    - launches the platform process
    - waits for the start of the process
    - waits for the transition to the UNINITIALIZED state
    """
    ##############################################################
    # prepare to receive the UNINITIALIZED state transition:
    async_res = AsyncResult()

    def consume_event(evt, *args, **kwargs):
        log.debug("Got ResourceAgentStateEvent %s from origin %r", evt.state, evt.origin)
        if evt.state == PlatformAgentState.UNINITIALIZED:
            async_res.set(evt)

    # start subscriber (before launching, so the transition cannot be missed):
    sub = EventSubscriber(event_type="ResourceAgentStateEvent",
                          origin=self.platform_device_id,
                          callback=consume_event)
    sub.start()
    log.info("registered event subscriber to wait for state=%r from origin %r",
             PlatformAgentState.UNINITIALIZED, self.platform_device_id)
    #self._event_subscribers.append(sub)
    sub._ready_event.wait(timeout=EVENT_TIMEOUT)

    ##############################################################
    # now start the platform:
    agent_instance_id = self.platform_agent_instance_id
    log.debug("about to call start_platform_agent_instance with id=%s", agent_instance_id)
    pid = self.imsclient.start_platform_agent_instance(platform_agent_instance_id=agent_instance_id)
    log.debug("start_platform_agent_instance returned pid=%s", pid)

    #wait for start
    agent_instance_obj = self.imsclient.read_platform_agent_instance(agent_instance_id)
    gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                 self.platform_device_id,
                                 ProcessStateEnum.RUNNING)
    self.assertTrue(gate.await(90), "The platform agent instance did not spawn in 90 seconds")

    # Start a resource agent client to talk with the agent.
    self._pa_client = ResourceAgentClient(self.platform_device_id,
                                          name=gate.process_id,
                                          process=FakeProcess())
    log.debug("got platform agent client %s", str(self._pa_client))

    ##############################################################
    # wait for the UNINITIALIZED event:
    async_res.get(timeout=self._receive_timeout)
def test_create_interval_timer(self):
    """Create an interval timer with an end time, count the TimerEvents it
    publishes, cancel it, and verify it is fully removed afterwards."""
    # create the interval timer resource
    # create the event listener
    # call scheduler to set the timer
    # receive a few intervals, validate that arrival time is as expected
    # cancel the timer
    # wait until after next interval to verify that timer was correctly cancelled
    self.interval_timer_count = 0
    self.interval_timer_sent_time = 0
    self.interval_timer_received_time = 0
    self.interval_timer_interval = 3

    event_origin = "Interval_Timer_233"
    sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    start_time = self.now_utc()
    self.interval_timer_end_time = start_time + 10
    id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
                                             end_time=self.interval_timer_end_time,
                                             event_origin=event_origin, event_subtype="")
    self.interval_timer_sent_time = datetime.datetime.utcnow()
    self.assertEqual(type(id), str)

    # Validate the timer is stored in RR
    ss = self.rrclient.read(id)
    self.assertEqual(ss.entry.event_origin, event_origin)

    # Wait until two events are published
    gevent.sleep((self.interval_timer_interval * 2) + 1)

    # Derive how many events should have fired from elapsed wall-clock time.
    time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
    timer_counts = math.floor(time_diff/self.interval_timer_interval)

    # Cancel the timer
    ss = self.ssclient.cancel_timer(id)

    # wait until after next interval to verify that timer was correctly cancelled
    gevent.sleep(self.interval_timer_interval)

    # Validate the timer correctly cancelled
    with self.assertRaises(BadRequest):
        self.ssclient.cancel_timer(id)

    # Validate the timer is removed from resource registry
    with self.assertRaises(NotFound):
        self.rrclient.read(id)

    # Validate the number of timer counts
    self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))
class TransformEventListener(TransformEventProcess):
    """Transform process that subscribes to a configured event type and
    dispatches every received event to process_event()."""

    def on_start(self):
        # The event type to subscribe to comes from process configuration.
        subscribed_type = self.CFG.get_safe('process.event_type', '')
        self.listener = EventSubscriber(event_type=subscribed_type,
                                        callback=self.process_event)
        self.listener.start()

    def process_event(self, msg, headers):
        # Subclasses must override this to consume events.
        raise NotImplementedError('Method process_event not implemented')

    def on_quit(self):
        # Tear down the subscription when the process stops.
        self.listener.stop()
class DatasetMonitor(object):
    """Monitors a dataset: self.event is set when a DatasetModified event is
    published for the given dataset id."""

    def __init__(self, dataset_id):
        self.dataset_id = dataset_id
        self.event = Event()
        # BUGFIX: event type was misspelled 'DatasetModiied', so this
        # subscriber never matched any published event (siblings in this
        # file correctly use OT.DatasetModified).
        self.es = EventSubscriber(event_type=OT.DatasetModified,
                                  callback=self.cb,
                                  origin=self.dataset_id,
                                  auto_delete=True)
        self.es.start()

    def cb(self, *args, **kwargs):
        # Any matching publication flags the dataset as modified.
        self.event.set()

    def stop(self):
        self.es.stop()
def _start_event_subscribers(self):
    """
    Create subscribers for agent and driver events.
    Received events accumulate in self._events_received; when the expected
    count (self._no_events) is reached, self._async_event_result is set.
    """
    def consume_event(*args, **kwargs):
        log.info('Test recieved ION event: args=%s, kwargs=%s, event=%s.',
                 str(args), str(kwargs), str(args[0]))
        self._events_received.append(args[0])
        # BUGFIX: was len(self._event_received) (missing 's'), which raised
        # AttributeError as soon as an expected count was configured.
        if self._no_events and self._no_events == len(self._events_received):
            self._async_event_result.set()

    event_sub = EventSubscriber(event_type="DeviceEvent", callback=consume_event)
    event_sub.start()
    self._event_subscribers.append(event_sub)
class InstrumentAgentEventSubscribers(object):
    """
    Create subscribers for agent and driver events.
    """
    # NOTE(review): runs once at class-definition time, not per instance —
    # confirm this import-time log line is intended.
    log.info("Start event subscribers")

    def __init__(self, instrument_agent_resource_id=None):
        # Start event subscribers, add stop to cleanup.
        self.no_events = None              # expected count; async result fires when reached
        self.events_received = []          # events accumulated by the callback
        self.async_event_result = AsyncResult()
        self.event_subscribers = []

        def consume_event(*args, **kwargs):
            log.debug(
                "#**#**# Event subscriber (consume_event) recieved ION event: args=%s, kwargs=%s, event=%s.",
                str(args),
                str(kwargs),
                str(args[0]),
            )
            log.debug("self.no_events = " + str(self.no_events))
            log.debug("self.event_received = " + str(self.events_received))

            self.events_received.append(args[0])
            if self.no_events and self.no_events == len(self.events_received):
                log.debug("CALLING self.async_event_result.set()")
                self.async_event_result.set()

        self.event_subscribers = EventSubscriber(
            event_type="ResourceAgentEvent", callback=consume_event,
            origin=instrument_agent_resource_id
        )
        self.event_subscribers.start()
        self.event_subscribers._ready_event.wait(timeout=5)

    def clear_events(self):
        """
        Reset event counter
        """
        # BUGFIX: previously assigned to self._events_received — a brand-new,
        # unused attribute — so the real event list was never cleared. The
        # sibling implementation of this class clears self.events_received.
        self.events_received = []

    def stop(self):
        try:
            self.event_subscribers.stop()
        except Exception as ex:
            log.warn("Failed to stop event subscriber gracefully (%s)" % ex)
        self.event_subscribers = []
class DatasetMonitor(object):
    """Watches one dataset and raises self.event when it is modified."""

    def __init__(self, dataset_id):
        self.dataset_id = dataset_id
        self.event = Event()
        # Auto-deleting subscription scoped to this dataset's modifications.
        self.es = EventSubscriber(event_type=OT.DatasetModified,
                                  callback=self.cb,
                                  origin=self.dataset_id,
                                  auto_delete=True)
        self.es.start()

    def cb(self, *args, **kwargs):
        # Payload is irrelevant — any publication means "modified".
        self.event.set()

    def stop(self):
        self.es.stop()
def _start_event_subscribers(self):
    """
    Create subscribers for agent and driver events.
    Events accumulate in self._events_received; self._async_event_result is
    set once self._no_events of them have arrived.
    """
    def consume_event(*args, **kwargs):
        log.info('Test recieved ION event: args=%s, kwargs=%s, event=%s.',
                 str(args), str(kwargs), str(args[0]))
        self._events_received.append(args[0])
        # BUGFIX: was len(self._event_received) (missing 's') — an attribute
        # that does not exist, so the length check raised AttributeError.
        if self._no_events and self._no_events == len(self._events_received):
            self._async_event_result.set()

    event_sub = EventSubscriber(event_type="DeviceEvent", callback=consume_event)
    event_sub.start()
    self._event_subscribers.append(event_sub)
class InstrumentAgentEventSubscribers(object):
    """
    Create subscribers for agent and driver events.
    """
    log.info("Start event subscribers")

    def __init__(self, instrument_agent_resource_id=None):
        """Start a ResourceAgentEvent subscriber; call stop() at cleanup."""
        self.no_events = None
        self.events_received = []
        self.async_event_result = AsyncResult()
        self.event_subscribers = []

        def _on_agent_event(*args, **kwargs):
            log.debug(
                '#**#**# Event subscriber (consume_event) recieved ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))
            log.debug("self.no_events = " + str(self.no_events))
            log.debug("self.event_received = " + str(self.events_received))

            self.events_received.append(args[0])
            # Fire the async result once the expected number of events arrived.
            if self.no_events and self.no_events == len(self.events_received):
                log.debug("CALLING self.async_event_result.set()")
                self.async_event_result.set()

        self.event_subscribers = EventSubscriber(
            event_type='ResourceAgentEvent', callback=_on_agent_event,
            origin=instrument_agent_resource_id)
        self.event_subscribers.start()
        # Block briefly until the subscription is actually live.
        self.event_subscribers._ready_event.wait(timeout=5)

    def clear_events(self):
        """
        Reset event counter
        """
        self.events_received = []

    def stop(self):
        try:
            self.event_subscribers.stop()
        except Exception as ex:
            log.warn("Failed to stop event subscriber gracefully (%s)" % ex)
        self.event_subscribers = []
class TransformEventListener(TransformEventProcess):
    """Transform process subscribing to a configured event type (optionally on
    a named queue); each received event is handed to process_event()."""

    def __init__(self):
        super(TransformEventListener, self).__init__()

    def on_start(self):
        super(TransformEventListener, self).on_start()
        # Subscription parameters come from the process configuration.
        configured_type = self.CFG.get_safe('process.event_type', '')
        configured_queue = self.CFG.get_safe('process.queue_name', None)
        self.listener = EventSubscriber(event_type=configured_type,
                                        queue_name=configured_queue,
                                        callback=self.process_event)
        self.listener.start()

    def process_event(self, msg, headers):
        # Subclasses override this to handle events.
        raise NotImplementedError('Method process_event not implemented')

    def on_quit(self):
        self.listener.stop()
        super(TransformEventListener, self).on_quit()
def test_timeoffday_timer(self):
    """Create a time-of-day timer with two near-future times, wait, and
    verify exactly two TimerEvents are delivered before cancelling."""
    # test creating a new timer that is one-time-only
    # create the timer resource
    # get the current time, set the timer to several seconds from current time
    # create the event listener
    # call scheduler to set the timer
    # verify that event arrival is within one/two seconds of current time
    event_origin = "Time Of Day2"
    self.expire_sec_1 = 4
    self.expire_sec_2 = 5
    self.tod_count = 0
    expire1 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_1)
    expire2 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_2)
    # Create two timers
    times_of_day =[{'hour': str(expire1.hour),'minute' : str(expire1.minute), 'second':str(expire1.second) },
                   {'hour': str(expire2.hour),'minute' : str(expire2.minute), 'second':str(expire2.second)}]

    sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback, origin=event_origin)
    sub.start()
    self.addCleanup(sub.stop)

    # Expires in two days
    expires = calendar.timegm((datetime.datetime.utcnow() + timedelta(days=2)).timetuple())
    self.tod_sent_time = datetime.datetime.utcnow()
    id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
    self.interval_timer_sent_time = datetime.datetime.utcnow()
    self.assertEqual(type(id), str)

    # Wait until all events are generated
    gevent.sleep(9)
    time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
    timer_counts = math.floor(time_diff/self.expire_sec_1) + math.floor(time_diff/self.expire_sec_2)

    # After waiting, validate only 2 events are generated.
    self.assertEqual(self.tod_count, 2, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.tod_count, timer_counts, id))

    # Cancel the timer
    self.ssclient.cancel_timer(id)
class DatasetMonitor(object):
    """Blocks on and signals modifications of a dataset, resolved either from
    a dataset id directly or from a data product id."""

    def __init__(self, dataset_id=None, data_product_id=None):
        # Resolve the dataset via the resource registry when only the data
        # product is supplied.
        if data_product_id and not dataset_id:
            dataset_id = Container.instance.resource_registry.find_objects(
                data_product_id, PRED.hasDataset, id_only=True)[0][0]
        self.dataset_id = dataset_id
        self.event = Event()
        self.es = EventSubscriber(event_type=OT.DatasetModified,
                                  callback=self.cb,
                                  origin=self.dataset_id,
                                  auto_delete=True)
        self.es.start()

    def cb(self, *args, **kwargs):
        self.event.set()

    def stop(self):
        self.es.stop()

    def wait(self, timeout=None):
        """Block until a modification event arrives; default timeout comes
        from the container's messaging configuration."""
        effective = CFG.get_safe('endpoint.receive.timeout', 10) if timeout is None else timeout
        return self.event.wait(effective)

    def reset(self):
        # Re-arm the monitor for the next modification.
        self.event.clear()
def _start_event_subscriber(self, event_type="DeviceEvent", sub_type="platform_event"):
    """
    Start an event subscriber for the given event_type ("DeviceEvent" by
    default) and sub_type ("platform_event" by default), recording it in
    self._event_subscribers for later cleanup.
    """
    def _accept_event(evt, *args, **kwargs):
        # Record the event and release any waiter blocked on the async result.
        log.info('Event subscriber received evt: %s.', str(evt))
        self._events_received.append(evt)
        self._async_event_result.set(evt)

    subscriber = EventSubscriber(event_type=event_type,
                                 sub_type=sub_type,
                                 callback=_accept_event)
    subscriber.start()
    log.info("registered event subscriber for event_type=%r, sub_type=%r",
             event_type, sub_type)

    self._event_subscribers.append(subscriber)
    # Wait until the subscription is actually established.
    subscriber._ready_event.wait(timeout=EVENT_TIMEOUT)
def on_start(self):
    """Set up service clients and stream bookkeeping and, when configured as
    an event-driven process, schedule a recurring interval timer whose events
    trigger graph generation."""
    #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

    self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
    self.ssclient = SchedulerServiceProcessClient(process=self)
    self.rrclient = ResourceRegistryServiceProcessClient(process=self)
    self.data_retriever_client = DataRetrieverServiceProcessClient(process=self)
    self.dsm_client = DatasetManagementServiceProcessClient(process=self)
    self.pubsub_client = PubsubManagementServiceProcessClient(process=self)

    self.stream_info = self.CFG.get_safe('process.publish_streams', {})
    self.stream_names = self.stream_info.keys()
    self.stream_ids = self.stream_info.values()

    if not self.stream_names:
        raise BadRequest('MPL Transform has no output streams.')

    graph_time_periods = self.CFG.get_safe('graph_time_periods')

    # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
    self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
    if self.event_timer_interval:
        event_origin = "Interval_Timer_Matplotlib"
        sub = EventSubscriber(event_type="ResourceEvent",
                              callback=self.interval_timer_callback,
                              origin=event_origin)
        sub.start()
        # NOTE(review): 'sub' is never stored on self, so this subscriber can
        # never be stopped later — confirm whether it should be retained.

        self.interval_timer_id = self.ssclient.create_interval_timer(
            start_time="now",
            interval=self._str_to_secs(self.event_timer_interval),
            event_origin=event_origin,
            event_subtype="")

    super(VizTransformMatplotlibGraphs, self).on_start()
def test_cei_launch_mode(self):
    """Schedule an instrument agent through the process dispatcher with
    bootmode 'restart' and watch its process lifecycle events."""
    pdc = ProcessDispatcherServiceClient(node=self.container.node)
    p_def = ProcessDefinition(name='Agent007')
    p_def.executable = {
        'module' : 'ion.agents.instrument.instrument_agent',
        'class' : 'InstrumentAgent'
    }
    p_def_id = pdc.create_process_definition(p_def)

    pid = pdc.create_process(p_def_id)

    def event_callback(event, *args, **kwargs):
        # Trace each state transition of the dispatched process.
        print '######### proc %s in state %s' % (event.origin, ProcessStateEnum._str_map[event.state])

    sub = EventSubscriber(event_type='ProcessLifecycleEvent',
                          callback=event_callback,
                          origin=pid,
                          origin_type='DispatchedProcess')
    sub.start()

    agent_config = deepcopy(self._agent_config)
    agent_config['bootmode'] = 'restart'
    pdc.schedule_process(p_def_id, process_id=pid, configuration=agent_config)

    # Give the process time to start, then cancel and let it wind down.
    gevent.sleep(5)

    pdc.cancel_process(pid)

    gevent.sleep(15)

    sub.stop()
def start_event_transform_listener(self):
    """Subscribe to DeviceStatusAlertEvent and validate each event received."""
    subscriber = EventSubscriber(event_type=OT.DeviceStatusAlertEvent,
                                 callback=self.validate_transform_event)
    subscriber.start()
    # Stop the subscriber automatically at test teardown.
    self.addCleanup(subscriber.stop)
def forward(self, *args, **kwargs):
    """
    Forward a service method to the terrestrial endpoint
    through the service interface.

    Special kwargs consumed here (removed before forwarding):
      func_name      -- name of the remote service method (required)
      link           -- passed to enqueue_command (default True)
      remote_timeout -- seconds to block for the remote result; absent,
                        non-int, or <= 0 means fire-and-forget
    """
    func_name = kwargs.pop('func_name')
    try:
        link = kwargs.pop('link')
    except KeyError:
        link = True

    cid = ''
    try:
        remote_timeout = kwargs.pop('remote_timeout')
        if not isinstance(remote_timeout, int):
            remote_timeout = 0
        elif remote_timeout < 0:
            remote_timeout = 0
        elif remote_timeout == 0:
            pass
        else:
            # Positive timeout: tag the command with a unique id so the
            # matching RemoteCommandResult event can be identified.
            cid = str(uuid.uuid4())
    except KeyError:
        remote_timeout = 0

    cmd = IonObject('RemoteCommand',
                    resource_id=self._resource_id,
                    svc_name=self._svc_name,
                    command=func_name,
                    command_id=cid,
                    args=args,
                    kwargs=kwargs)

    if remote_timeout == 0:
        # Fire-and-forget: enqueue and return immediately.
        return self._te_client.enqueue_command(cmd, link)

    else:
        # Result events are published against the resource id when present,
        # otherwise against the service name + exchange-space name.
        if self._resource_id:
            origin = self._resource_id
        elif self._svc_name:
            origin = self._svc_name + self._xs_name

        pending_cmd = cmd
        async_result_evt = AsyncResult()

        def result_callback(evt, *args, **kwargs):
            """
            Callback for subscriber retrive blocking results.
            """
            #global async_result_evt
            if evt.type_ == 'RemoteCommandResult':
                cmd = evt.command
                # Only accept the result that matches our command id.
                if cmd.command_id == pending_cmd.command_id:
                    async_result_evt.set(cmd)

        # Subscribe before enqueuing so the result cannot be missed.
        sub = EventSubscriber(
            event_type='RemoteCommandResult',
            origin=origin,
            callback=result_callback)
        sub.start()
        #self._pending_cmd = cmd
        cmd = self._te_client.enqueue_command(cmd, link)
        try:
            result = async_result_evt.get(timeout=remote_timeout)
            #self._pending_cmd = None
            sub.stop()
        except gevent.Timeout:
            #self._pending_cmd = None
            sub.stop()
            raise Timeout('Timed out waiting for remote result.')

        return result
class TestPrest(IonIntegrationTestCase):
    """
    Test cases for instrument agent class. Functions in this class provide
    instrument agent integration tests and provide a tutorial on use of
    the agent setup and interface.
    """

    def setUp(self):
        """
        Set up driver integration support.
        Start port agent, add port agent cleanup.
        Start container.
        Start deploy services.
        Define agent config, start agent.
        Start agent client.
        """
        print '#####################'
        print 'IN SETUP'

        self._ia_client = None

        # Start container.
        log.info('Staring capability container.')
        self._start_container()

        # Bring up services in a deploy file (no need to message)
        log.info('Staring deploy services.')
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        log.info('building stream configuration')
        # Setup stream config.
        self._build_stream_config()

        #log.info('driver uri: %s', DRV_URI)
        #log.info('device address: %s', DEV_ADDR)
        #log.info('device port: %s', DEV_PORT)
        #log.info('work dir: %s', WORK_DIR)

        # Create agent config.
        agent_config = {
            'driver_config': DVR_CONFIG,
            'stream_config': self._stream_config,
            'agent': {'resource_id': IA_RESOURCE_ID},
            'test_mode': True,
            'forget_past': True,
            'enable_persistence': False
        }

        #if org_governance_name is not None:
        #    agent_config['org_governance_name'] = org_governance_name

        # Start instrument agent.
        log.info("TestInstrumentAgent.setup(): starting IA.")
        container_client = ContainerAgentClient(node=self.container.node,
                                                name=self.container.name)
        log.info("Agent setup")
        ia_pid = container_client.spawn_process(name=IA_NAME,
                                                module=IA_MOD,
                                                cls=IA_CLS,
                                                config=agent_config)
        log.info('Agent pid=%s.', str(ia_pid))
        # Ensure the agent is reset even when a test fails mid-way.
        self.addCleanup(self._verify_agent_reset)

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
        log.info('Got ia client %s.', str(self._ia_client))
        log.info('test setup complete')

    ###############################################################################
    # Port agent helpers.
    ###############################################################################

    def _verify_agent_reset(self):
        """
        Check agent state and reset if necessary.
        This called if a test fails and reset hasn't occurred.
        """
        if self._ia_client is None:
            return

        state = self._ia_client.get_agent_state()
        if state != ResourceAgentState.UNINITIALIZED:
            cmd = AgentCommand(command=ResourceAgentEvent.RESET)
            retval = self._ia_client.execute_agent(cmd)

    ###############################################################################
    # Event helpers.
    ###############################################################################

    def _start_event_subscriber(self, type='ResourceAgentEvent', count=0):
        """
        Start a subscriber to the instrument agent events.
        @param type The type of event to catch.
        @count Trigger the async event result when events received reaches this.
        """
        # NOTE: parameter 'type' shadows the builtin; kept for API compatibility.
        def consume_event(*args, **kwargs):
            log.info('Test recieved ION event: args=%s, kwargs=%s, event=%s.',
                     str(args), str(kwargs), str(args[0]))
            self._events_received.append(args[0])
            if self._event_count > 0 and \
                self._event_count == len(self._events_received):
                self._async_event_result.set()

        # Event array and async event result.
        self._event_count = count
        self._events_received = []
        self._async_event_result = AsyncResult()

        self._event_subscriber = EventSubscriber(event_type=type,
                                                 callback=consume_event,
                                                 origin=IA_RESOURCE_ID)
        self._event_subscriber.start()
        self._event_subscriber._ready_event.wait(timeout=5)

    def _stop_event_subscriber(self):
        """
        Stop event subscribers on cleanup.
        """
        self._event_subscriber.stop()
        self._event_subscriber = None

    ###############################################################################
    # Data stream helpers.
    ###############################################################################

    def _build_stream_config(self):
        """
        Build self._stream_config with a 'parsed' and a 'raw' stream, each
        carrying its route, id, parameter dictionary and serialized stream def.
        """
        # Create a pubsub client to create streams.
        pubsub_client = PubsubManagementServiceClient(node=self.container.node)
        dataset_management = DatasetManagementServiceClient()
        encoder = IonObjectSerializer()

        # Create streams and subscriptions for each stream named in driver.
        self._stream_config = {}

        stream_name = 'parsed'
        param_dict_name = 'ctd_parsed_param_dict'
        pd_id = dataset_management.read_parameter_dictionary_by_name(
            param_dict_name, id_only=True)
        stream_def_id = pubsub_client.create_stream_definition(
            name=stream_name, parameter_dictionary_id=pd_id)
        stream_def = pubsub_client.read_stream_definition(stream_def_id)
        stream_def_dict = encoder.serialize(stream_def)
        pd = stream_def.parameter_dictionary
        stream_id, stream_route = pubsub_client.create_stream(
            name=stream_name,
            exchange_point='science_data',
            stream_definition_id=stream_def_id)
        stream_config = dict(routing_key=stream_route.routing_key,
                             exchange_point=stream_route.exchange_point,
                             stream_id=stream_id,
                             parameter_dictionary=pd,
                             stream_def_dict=stream_def_dict)
        self._stream_config[stream_name] = stream_config

        stream_name = 'raw'
        param_dict_name = 'ctd_raw_param_dict'
        pd_id = dataset_management.read_parameter_dictionary_by_name(
            param_dict_name, id_only=True)
        stream_def_id = pubsub_client.create_stream_definition(
            name=stream_name, parameter_dictionary_id=pd_id)
        stream_def = pubsub_client.read_stream_definition(stream_def_id)
        stream_def_dict = encoder.serialize(stream_def)
        pd = stream_def.parameter_dictionary
        stream_id, stream_route = pubsub_client.create_stream(
            name=stream_name,
            exchange_point='science_data',
            stream_definition_id=stream_def_id)
        stream_config = dict(routing_key=stream_route.routing_key,
                             exchange_point=stream_route.exchange_point,
                             stream_id=stream_id,
                             parameter_dictionary=pd,
                             stream_def_dict=stream_def_dict)
        self._stream_config[stream_name] = stream_config

    def _start_data_subscribers(self, count, raw_count):
        """
        Subscribe to the 'parsed' and 'raw' streams; async results fire when
        `count` parsed / `raw_count` raw samples have been received.
        """
        # Create a pubsub client to create streams.
        pubsub_client = PubsubManagementServiceClient(node=self.container.node)

        # Create streams and subscriptions for each stream named in driver.
        self._data_subscribers = []
        self._samples_received = []
        self._raw_samples_received = []
        self._async_sample_result = AsyncResult()
        self._async_raw_sample_result = AsyncResult()

        # A callback for processing subscribed-to data.
        def recv_data(message, stream_route, stream_id):
            log.info('Received parsed data on %s (%s,%s)', stream_id,
                     stream_route.exchange_point, stream_route.routing_key)
            self._samples_received.append(message)
            if len(self._samples_received) == count:
                self._async_sample_result.set()

        def recv_raw_data(message, stream_route, stream_id):
            log.info('Received raw data on %s (%s,%s)', stream_id,
                     stream_route.exchange_point, stream_route.routing_key)
            self._raw_samples_received.append(message)
            if len(self._raw_samples_received) == raw_count:
                self._async_raw_sample_result.set()

        from pyon.util.containers import create_unique_identifier

        stream_name = 'parsed'
        parsed_config = self._stream_config[stream_name]
        stream_id = parsed_config['stream_id']
        exchange_name = create_unique_identifier("%s_queue" % stream_name)
        self._purge_queue(exchange_name)
        sub = StandaloneStreamSubscriber(exchange_name, recv_data)
        sub.start()
        self._data_subscribers.append(sub)
        sub_id = pubsub_client.create_subscription(name=exchange_name, stream_ids=[stream_id])
        pubsub_client.activate_subscription(sub_id)
        sub.subscription_id = sub_id  # Bind the subscription to the standalone subscriber (easier cleanup, not good in real practice)

        stream_name = 'raw'
        parsed_config = self._stream_config[stream_name]
        stream_id = parsed_config['stream_id']
        exchange_name = create_unique_identifier("%s_queue" % stream_name)
        self._purge_queue(exchange_name)
        sub = StandaloneStreamSubscriber(exchange_name, recv_raw_data)
        sub.start()
        self._data_subscribers.append(sub)
        sub_id = pubsub_client.create_subscription(name=exchange_name, stream_ids=[stream_id])
        pubsub_client.activate_subscription(sub_id)
        sub.subscription_id = sub_id  # Bind the subscription to the standalone subscriber (easier cleanup, not good in real practice)

    def _purge_queue(self, queue):
        # Drop any stale messages left on the queue from earlier runs.
        xn = self.container.ex_manager.create_xn_queue(queue)
        xn.purge()

    def _stop_data_subscribers(self):
        for subscriber in self._data_subscribers:
            pubsub_client = PubsubManagementServiceClient()
            if hasattr(subscriber, 'subscription_id'):
                try:
                    # Best-effort deactivate; the subscription may already be gone.
                    pubsub_client.deactivate_subscription(subscriber.subscription_id)
                except:
                    pass
                pubsub_client.delete_subscription(subscriber.subscription_id)
            subscriber.stop()

    ###############################################################################
    # Tests.
    ###############################################################################

    @unittest.skip('Test should be run manually only.')
    def test_initialize(self):
        """
        test_initialize
        Test agent initialize command. This causes creation of driver process and transition to inactive.
        """
        print '#### in test'

        # We start in uninitialized state.
        # In this state there is no driver process.
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        # Ping the agent.
        retval = self._ia_client.ping_agent()
        log.info(retval)

        # Initialize the agent.
        # The agent is spawned with a driver config, but you can pass one in
        # optinally with the initialize command. This validates the driver
        # config, launches a driver process and connects to it via messaging.
        # If successful, we switch to the inactive state.
        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        # Ping the driver proc.
        retval = self._ia_client.ping_resource()
        log.info(retval)

        # Reset the agent. This causes the driver messaging to be stopped,
        # the driver process to end and switches us back to uninitialized.
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

    @unittest.skip('Test should be run manually only.')
    def test_xx(self):
        """
        Manual exploration test: activate the agent, drive it to COMMAND
        (stopping autosample if needed), idle for a while, then reset.
        """
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        with self.assertRaises(Conflict):
            res_state = self._ia_client.get_resource_state()

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        print '################################# sbe54 came up in state: ' + state

        # Depending on the instrument, GO_ACTIVE may land in IDLE or STREAMING;
        # either way, end up in COMMAND.
        if state == ResourceAgentState.IDLE:
            cmd = AgentCommand(command=ResourceAgentEvent.RUN)
            retval = self._ia_client.execute_agent(cmd)
            state = self._ia_client.get_agent_state()
            self.assertEqual(state, ResourceAgentState.COMMAND)

        elif state == ResourceAgentState.STREAMING:
            cmd = AgentCommand(command='DRIVER_EVENT_STOP_AUTOSAMPLE')
            retval = self._ia_client.execute_resource(cmd)
            state = self._ia_client.get_agent_state()
            self.assertEqual(state, ResourceAgentState.COMMAND)

        state = self._ia_client.get_agent_state()
        print '################################# sbe54 now in state: ' + state

        gevent.sleep(60 * 2.25)

        state = self._ia_client.get_agent_state()
        print '################################# sbe54 now in state: ' + state

        """
        'DRIVER_EVENT_START_AUTOSAMPLE'
        self.assertEqual(state, ResourceAgentState.IDLE)

        res_state = self._ia_client.get_resource_state()
        self.assertEqual(res_state, DriverProtocolState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        res_state = self._ia_client.get_resource_state()
        self.assertEqual(res_state, DriverProtocolState.COMMAND)

        cmd = AgentCommand(command=SBE37ProtocolEvent.STOP_AUTOSAMPLE)
        with self.assertRaises(Conflict):
            retval = self._ia_client.execute_resource(cmd)
        """

        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)