Ejemplo n.º 1
0
    def _start_platform(self):
        """
        Starts the platform agent and waits for it to transition to the
        UNINITIALIZED state (the agent starts in the LAUNCHING state).

        Sequence of steps:
        - prepares a subscriber to receive the UNINITIALIZED state transition
        - launches the platform agent process
        - waits for the process to spawn
        - waits for the transition to the UNINITIALIZED state
        """
        ##############################################################
        # prepare to receive the UNINITIALIZED state transition:
        async_res = AsyncResult()

        def consume_event(evt, *args, **kwargs):
            log.debug("Got ResourceAgentStateEvent %s from origin %r",
                      evt.state, evt.origin)
            if evt.state == PlatformAgentState.UNINITIALIZED:
                async_res.set(evt)

        # start subscriber:
        sub = EventSubscriber(event_type="ResourceAgentStateEvent",
                              origin=self.platform_device_id,
                              callback=consume_event)
        sub.start()
        log.info(
            "registered event subscriber to wait for state=%r from origin %r",
            PlatformAgentState.UNINITIALIZED, self.platform_device_id)
        sub._ready_event.wait(timeout=EVENT_TIMEOUT)

        ##############################################################
        # now start the platform:
        agent_instance_id = self.platform_agent_instance_id
        log.debug("about to call start_platform_agent_instance with id=%s",
                  agent_instance_id)
        pid = self.imsclient.start_platform_agent_instance(
            platform_agent_instance_id=agent_instance_id)
        log.debug("start_platform_agent_instance returned pid=%s", pid)

        # wait for start; the read also verifies the agent instance
        # resource exists (raises if not found)
        agent_instance_obj = self.imsclient.read_platform_agent_instance(
            agent_instance_id)
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     self.platform_device_id,
                                     ProcessStateEnum.RUNNING)
        # FIX: "await" is a reserved keyword in Python 3, so the gate's
        # await() method must be invoked via getattr (was: gate. await (90),
        # which is a syntax error on Python 3).
        self.assertTrue(
            getattr(gate, "await")(90),
            "The platform agent instance did not spawn in 90 seconds")

        # Start a resource agent client to talk with the agent.
        self._pa_client = ResourceAgentClient(self.platform_device_id,
                                              name=gate.process_id,
                                              process=FakeProcess())
        log.debug("got platform agent client %s", str(self._pa_client))

        ##############################################################
        # wait for the UNINITIALIZED event:
        async_res.get(timeout=self._receive_timeout)
Ejemplo n.º 2
0
    def start(self):
        """
        Starts the GovernanceController: reads the governance interceptor
        config, determines whether governance is enabled and, if so, wires
        up the policy event subscriber and the management service clients.
        """
        log.debug("GovernanceController starting ...")

        config = CFG.interceptor.interceptors.governance.config

        if config is None:
            # BUG FIX: the original did config['enabled'] = False on None,
            # which raises TypeError; substitute a disabled config dict.
            config = {'enabled': False}

        if "enabled" in config:
            self.enabled = config["enabled"]

        log.debug("GovernanceInterceptor enabled: %s" % str(self.enabled))

        self.resource_policy_event_subscriber = None

        if self.enabled:
            self.initialize_from_config(config)

            # listen for policy changes affecting resources
            self.resource_policy_event_subscriber = EventSubscriber(
                event_type="ResourcePolicyEvent",
                callback=self.policy_event_callback)
            self.resource_policy_event_subscriber.activate()

            self.rr_client = ResourceRegistryServiceProcessClient(
                node=self.container.node, process=self.container)
            self.policy_client = PolicyManagementServiceProcessClient(
                node=self.container.node, process=self.container)
            self.org_client = OrgManagementServiceProcessClient(
                node=self.container.node, process=self.container)
Ejemplo n.º 3
0
    def test_instrument_device_metadata_notification_l4_ci_sa_rq_145_323(self):
        """
        Instrument management shall update physical resource metadata when change occurs

        For example, when there is a change of state.

        note from maurice 2012-05-18: consider this to mean a change of stored RR data
        """
        inst_obj = any_old(RT.InstrumentDevice)
        instrument_device_id, _ = self.RR.create(inst_obj)
        self.received_event = AsyncResult()

        # Subscriber callback: flags that a ResourceModifiedEvent arrived.
        def consume_event(*args, **kwargs):
            self.received_event.set(True)
            log.info("L4-CI-SA-RQ-323")
            log.info("L4-CI-SA-RQ-145")

        event_sub = EventSubscriber(event_type="ResourceModifiedEvent", callback=consume_event)
        event_sub.start()

        try:
            # modify the stored resource; this should publish the event
            inst_obj = self.RR.read(instrument_device_id)
            inst_obj.description = "brand new description"
            self.RR.update(inst_obj)

            # wait for event; FIX: stop the subscriber even when the wait
            # times out (the original leaked it on failure)
            result = self.received_event.get(timeout=10)
        finally:
            event_sub.stop()

        self.assertTrue(result)
class NotificationSubscription(object):
    """
    Associates a notification request's info with its event subscriber.
    """

    def __init__(self, notification_request=None, callback=None):
        # The NotificationRequest resource object this subscription wraps.
        self._res_obj = notification_request
        self.subscriber = EventSubscriber(
            origin=notification_request.origin,
            origin_type=notification_request.origin_type,
            event_type=notification_request.event_type,
            sub_type=notification_request.event_subtype,
            callback=callback)
        self.notification_subscription_id = None

    def set_notification_id(self, id_=None):
        """
        Record the notification id for this subscription.
        @param id_  the notification id
        """
        self.notification_subscription_id = id_

    def activate(self):
        """Begin listening for events."""
        self.subscriber.start()

    def deactivate(self):
        """Stop listening for events."""
        self.subscriber.stop()
    def on_start(self):
        """
        Loads the notifications and registers subscribers that reload them
        whenever user info or subscriptions change.
        """
        super(NotificationWorker, self).on_start()

        self.notifications = load_notifications()

        def _load_notifications_callback(msg, headers):
            """ local callback method so this can be used as callback in EventSubscribers """
            # refresh from uns_utility_methods
            self.notifications = load_notifications()

        # subscriber for the ReloadUserInfoEvent (new subscriber,
        # subscription deleted, notifications changed, etc.)
        self.reload_user_info_subscriber = EventSubscriber(
            event_type=OT.ReloadUserInfoEvent,
            callback=_load_notifications_callback)
        self.add_endpoint(self.reload_user_info_subscriber)

        # subscriber for UserInfo resource update events
        self.userinfo_rsc_mod_subscriber = EventSubscriber(
            event_type=OT.ResourceModifiedEvent,
            sub_type="UPDATE",
            origin_type="UserInfo",
            callback=_load_notifications_callback)
        self.add_endpoint(self.userinfo_rsc_mod_subscriber)
Ejemplo n.º 6
0
    def start(self):
        """
        Starts the GovernanceController: reads the governance interceptor
        config and, when enabled, wires up the resource/service policy
        event subscribers and the management service clients.
        """
        log.debug("GovernanceController starting ...")

        config = CFG.interceptor.interceptors.governance.config

        if config is None:
            # BUG FIX: the original did config['enabled'] = False on None,
            # which raises TypeError; substitute a disabled config dict.
            config = {'enabled': False}

        if "enabled" in config:
            self.enabled = config["enabled"]

        log.debug("GovernanceInterceptor enabled: %s" % str(self.enabled))

        self.resource_policy_event_subscriber = None
        self.service_policy_event_subscriber = None

        if self.enabled:
            self.initialize_from_config(config)

            # listen for resource- and service-level policy changes
            self.resource_policy_event_subscriber = EventSubscriber(event_type="ResourcePolicyEvent", callback=self.resource_policy_event_callback)
            self.resource_policy_event_subscriber.start()

            self.service_policy_event_subscriber = EventSubscriber(event_type="ServicePolicyEvent", callback=self.service_policy_event_callback)
            self.service_policy_event_subscriber.start()

            self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=self.container)
            self.policy_client = PolicyManagementServiceProcessClient(node=self.container.node, process=self.container)
            self.org_client = OrgManagementServiceProcessClient(node=self.container.node, process=self.container)
Ejemplo n.º 7
0
    def __init__(self,
                 orgname=None,
                 datastore_manager=None,
                 events_enabled=False):
        """
        Directory-style store bound to an Org.

        @param orgname            Org this directory belongs to; defaults to
                                  the system root org (CFG.system.root_org)
        @param datastore_manager  provider of datastores; defaults to the
                                  container instance's datastore manager
        @param events_enabled     when True, publish directory change events
                                  and subscribe to config-change events
        """
        # Get an instance of datastore configured as directory.
        datastore_manager = datastore_manager or bootstrap.container_instance.datastore_manager
        self.dir_store = datastore_manager.get_datastore(
            DataStore.DS_DIRECTORY)

        self.orgname = orgname or CFG.system.root_org
        self.is_root = (self.orgname == CFG.system.root_org)

        self.events_enabled = events_enabled
        self.event_pub = None
        self.event_sub = None

        # Create directory root entry (for current org) if not existing
        if CFG.system.auto_bootstrap:
            root_de = self.register("/",
                                    "DIR",
                                    sys_name=bootstrap.get_sys_name())
            if root_de is None:
                # We created this directory just now
                # (deliberate no-op placeholder)
                pass

        if self.events_enabled:
            # init change event publisher
            self.event_pub = EventPublisher()

            # Register to receive directory changes
            self.event_sub = EventSubscriber(
                event_type="ContainerConfigModifiedEvent",
                origin="Directory",
                callback=self.receive_directory_change_event)
Ejemplo n.º 8
0
    def test_create_forever_interval_timer(self):
        """
        Test creating an interval timer that runs forever: subscribe to the
        timer's events, let four intervals fire, cancel the timer, and
        validate the event count and that the canceled id is invalid.
        """
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval Timer Forever"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        # FIX: stop the subscriber at teardown (the sibling timer tests in
        # this file already register this cleanup; this one leaked it).
        self.addCleanup(sub.stop)

        # 'timer_id' rather than 'id' to avoid shadowing the builtin
        timer_id = self.ssclient.create_interval_timer(start_time=self.now_utc(), interval=self.interval_timer_interval,
            end_time=-1,
            event_origin=event_origin, event_subtype=event_origin)
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(timer_id), str)

        # Wait for 4 events to be published
        gevent.sleep((self.interval_timer_interval * 4) + 1)
        self.ssclient.cancel_timer(timer_id)

        # Validate the timer id is invalid once it has been canceled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(timer_id)

        # Validate events are not generated after canceling the timer
        self.assertEqual(self.interval_timer_count, 4)
        def start_DeviceStatusAlertEvent_subscriber(value_id, sub_type):
            """
            Registers a subscriber for DeviceStatusAlertEvents from the root
            platform device, filtered by the given value_id and sub_type.
            @return async_event_result  Use it to wait for the expected event
            """
            event_type = "DeviceStatusAlertEvent"

            async_event_result = AsyncResult()

            def consume_event(evt, *args, **kwargs):
                log.info('DeviceStatusAlertEvent_subscriber received evt: %s', str(evt))
                # ignore events that don't match the expected type/value/sub_type
                if evt.type_ != event_type or \
                   evt.value_id != value_id or \
                   evt.sub_type != sub_type:
                    return

                async_event_result.set(evt)

            kwargs = dict(event_type=event_type,
                          callback=consume_event,
                          origin=self.p_root.platform_device_id,
                          sub_type=sub_type)

            sub = EventSubscriber(**kwargs)
            sub.start()
            log.info("registered DeviceStatusAlertEvent subscriber: %s", kwargs)

            # track for teardown, and wait until the subscriber is ready
            self._event_subscribers.append(sub)
            sub._ready_event.wait(timeout=self._receive_timeout)

            return async_event_result
    def setUp(self):
        """
        Starts the container, the process dispatcher client, and creates
        the process definition, schedule, process id and lifecycle-event
        subscriber used by the process-state-gate tests.
        """
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')

        self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        self.process_definition = IonObject(OT.ProcessDefinition,
                                            name='test_process')
        self.process_definition.executable = {
            'module': 'ion.services.cei.test.test_process_state_gate',
            'class': 'TestProcess'
        }
        self.process_definition_id = self.pd_cli.create_process_definition(
            self.process_definition)

        # FIX: the original created this queue twice; once suffices.
        self.event_queue = queue.Queue()

        self.process_schedule = IonObject(OT.ProcessSchedule)
        self.process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        self.pid = self.pd_cli.create_process(self.process_definition_id)

        self.event_sub = EventSubscriber(event_type="ProcessLifecycleEvent",
                                         callback=self._event_callback,
                                         origin=self.pid,
                                         origin_type="DispatchedProcess")
    def _start_event_subscriber(self, event_type="DeviceEvent",
                                sub_type=None,
                                count=0):
        """
        Registers and starts an event subscriber for events of the given
        event_type ("DeviceEvent" by default) and sub_type.
        """

        def consume_event(evt, *args, **kwargs):
            # Callback for consuming events.
            log.info('Event subscriber received evt: %s.', str(evt))
            self._events_received.append(evt)
            if count == 0:
                # no expected count: report each event as it arrives
                self._async_event_result.set(evt)
            elif count == len(self._events_received):
                # the expected number of events has been reached
                self._async_event_result.set()

        subscriber = EventSubscriber(event_type=event_type,
                                     sub_type=sub_type,
                                     callback=consume_event)
        subscriber.start()
        log.info("registered event subscriber for event_type=%r, sub_type=%r, count=%d",
                 event_type, sub_type, count)

        # track for teardown, and wait until the subscriber is ready
        self._event_subscribers.append(subscriber)
        subscriber._ready_event.wait(timeout=EVENT_TIMEOUT)
Ejemplo n.º 12
0
    def test_pub_on_different_subtypes(self):
        """
        Only events matching both the subscribed event_type and sub_type
        must be delivered, in publication order.
        """
        done = event.AsyncResult()
        received = queue.Queue()
        self.count = 0

        def on_event(event, *args, **kwargs):
            self.count += 1
            received.put(event)
            if event.description == "end":
                done.set()

        sub = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1", callback=on_event)
        sub.activate()

        pub1 = EventPublisher(event_type="ResourceModifiedEvent")
        pub2 = EventPublisher(event_type="ContainerLifecycleEvent")

        # only the "st1" ResourceModifiedEvents ("1" and "end") should match
        pub1.publish_event(origin="two", sub_type="st2", description="2")
        pub2.publish_event(origin="three", sub_type="st1", description="3")
        pub1.publish_event(origin="one", sub_type="st1", description="1")
        pub1.publish_event(origin="four", sub_type="st1", description="end")

        done.get(timeout=5)
        sub.deactivate()

        res = [received.get(timeout=5) for _ in xrange(self.count)]

        self.assertEquals(len(res), 2)
        self.assertEquals(res[0].description, "1")
Ejemplo n.º 13
0
    def test_create_forever_interval_timer(self):
        """
        Create an interval timer that runs forever, let several intervals
        elapse, cancel it, and validate the number of received events
        against the elapsed time.
        """
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval Timer Forever"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # 'timer_id' rather than 'id' to avoid shadowing the builtin
        timer_id = self.ssclient.create_interval_timer(start_time=self.now_utc(), interval=self.interval_timer_interval,
            end_time=-1,
            event_origin=event_origin, event_subtype=event_origin)
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(timer_id), str)

        # Wait for 4 events to be published
        gevent.sleep((self.interval_timer_interval * 4) + 1)
        self.ssclient.cancel_timer(timer_id)
        # number of whole intervals that fit in the elapsed time
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts = math.floor(time_diff/self.interval_timer_interval)

        # Validate the timer id is invalid once it has been canceled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(timer_id)

        # Validate events are not generated after canceling the timer
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, timer_id))
Ejemplo n.º 14
0
    def test_create_interval_timer_with_end_time(self):
        """
        Create an interval timer with an end_time:
        - receive a few intervals, validate that arrival time is as expected
        - validate no more events are published after end_time expires
        - validate the timer was canceled after the end_time expires
        """
        self.interval_timer_count_2 = 0
        self.interval_timer_sent_time_2 = 0
        self.interval_timer_received_time_2 = 0
        self.interval_timer_interval_2 = 3

        event_origin = "Interval_Timer_2"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback_with_end_time, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time_2 = start_time + 7
        # 'timer_id' rather than 'id' to avoid shadowing the builtin
        timer_id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval_2,
            end_time=self.interval_timer_end_time_2,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time_2 = datetime.datetime.utcnow()
        self.assertEqual(type(timer_id), str)

        # Wait until all events are published
        gevent.sleep((self.interval_timer_end_time_2 - start_time) + self.interval_timer_interval_2 + 1)

        # Validate the number of events generated (7s window / 3s interval -> 2)
        self.assertEqual(self.interval_timer_count_2, 2, "Invalid number of timeouts generated. Number of event: %d Expected: 2 Timer id: %s " %(self.interval_timer_count_2, timer_id))

        # Validate the timer was canceled after the end_time is expired
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(timer_id)
Ejemplo n.º 15
0
        def start_DeviceStatusAlertEvent_subscriber(value_id, sub_type):
            """
            Registers a subscriber for DeviceStatusAlertEvents from the root
            platform device, filtered by the given value_id and sub_type.
            @return async_event_result  Use it to wait for the expected event
            """
            event_type = "DeviceStatusAlertEvent"

            async_event_result = AsyncResult()

            def consume_event(evt, *args, **kwargs):
                log.info('DeviceStatusAlertEvent_subscriber received evt: %s',
                         str(evt))
                # ignore events that don't match the expected type/value/sub_type
                if evt.type_ != event_type or \
                   evt.value_id != value_id or \
                   evt.sub_type != sub_type:
                    return

                async_event_result.set(evt)

            kwargs = dict(event_type=event_type,
                          callback=consume_event,
                          origin=self.p_root.platform_device_id,
                          sub_type=sub_type)

            sub = EventSubscriber(**kwargs)
            sub.start()
            log.info("registered DeviceStatusAlertEvent subscriber: %s",
                     kwargs)

            # track for teardown, and wait until the subscriber is ready
            self._event_subscribers.append(sub)
            sub._ready_event.wait(timeout=self._receive_timeout)

            return async_event_result
class InstrumentAgentEventSubscribers(object):
    """
    Create subscribers for agent and driver events.
    """

    def __init__(self, instrument_agent_resource_id=None):
        """
        Starts a ResourceAgentEvent subscriber for the given instrument
        agent resource and records every event received.
        """
        # FIX: this log call used to sit directly in the class body and
        # therefore fired once at import time; it belongs here, when the
        # subscribers are actually started.
        log.info("Start event subscribers")

        self.no_events = None        # expected number of events, set by tests
        self.events_received = []    # every event seen so far
        self.async_event_result = AsyncResult()

        def consume_event(*args, **kwargs):
            log.debug('#**#**# Event subscriber (consume_event) recieved ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))

            log.debug("self.no_events = " + str(self.no_events))
            log.debug("self.event_received = " + str(self.events_received))

            self.events_received.append(args[0])
            # release any waiter once the expected count is reached
            if self.no_events and self.no_events == len(self.events_received):
                log.debug("CALLING self.async_event_result.set()")
                self.async_event_result.set()

        # NOTE: despite the plural name, this holds a single EventSubscriber;
        # the original initialized it to [] and immediately overwrote it,
        # so the dead list assignment was removed.
        self.event_subscribers = EventSubscriber(
            event_type='ResourceAgentEvent', callback=consume_event,
            origin=instrument_agent_resource_id)
        self.event_subscribers.start()
        self.event_subscribers._ready_event.wait(timeout=5)
    def set_process_batch_key(self, process_batch_key=''):
        """
        Allows an operator to set the process_batch_key, a string.
        Once set, the UNS listens for timer events published by the
        scheduler with origin = process_batch_key.

        @param process_batch_key str
        """

        def process(event_msg, headers):
            # advance the batch window to [start_time, now] and run it
            self.end_time = get_ion_ts()
            log.debug("process_batch being called with start_time = %s, end_time = %s", self.start_time, self.end_time)
            self.process_batch(start_time=self.start_time, end_time=self.end_time)
            # next window starts where this one ended
            self.start_time = self.end_time

        # To trigger the batch notification, have the scheduler create a
        # timer with event_origin = process_batch_key.
        self.batch_processing_subscriber = EventSubscriber(
            event_type="ResourceEvent",
            origin=process_batch_key,
            queue_name='user_notification',
            callback=process)
        self.batch_processing_subscriber.start()
        self._subscribers.append(self.batch_processing_subscriber)
Ejemplo n.º 18
0
 def _start_event_subscriber(self, type='StreamAlarmEvent', count=0):
     """
     Start a subscriber to the instrument agent events.
     @param type   The type of event to catch.
     @param count  Trigger the async event result when the number of events
                   received reaches this (0 means never auto-trigger).
     """
     def consume_event(*args, **kwargs):
         print '#################'
         log.info('Test recieved ION event: args=%s, kwargs=%s, event=%s.', 
                  str(args), str(kwargs), str(args[0]))
         self._events_received.append(args[0])
         # release the waiter once the expected number of events arrived
         if self._event_count > 0 and \
             self._event_count == len(self._events_received):
             self._async_event_result.set()
         
     # Event array and async event result.
     self._event_count = count
     self._events_received = []
     self._async_event_result = AsyncResult()
         
     # subscribe to events from the instrument agent resource
     self._event_subscriber = EventSubscriber(
         event_type=type, callback=consume_event,
         origin=IA_RESOURCE_ID)
     self._event_subscriber.start()
     self._event_subscriber._ready_event.wait(timeout=5)
    def start_event_listener(self):
        """Subscribe to DataProcessStatusEvents; stop the subscriber at cleanup."""
        subscriber = EventSubscriber(event_type=OT.DataProcessStatusEvent,
                                     callback=self.validate_event)
        subscriber.start()
        self.addCleanup(subscriber.stop)
    def __init__(self, instrument_agent_resource_id=None):
        """
        Start the ResourceAgentEvent subscriber for the given instrument
        agent resource, recording every event received.
        """
        self.no_events = None        # expected number of events, set by tests
        self.events_received = []    # every event seen so far
        self.async_event_result = AsyncResult()
        self.event_subscribers = []

        def consume_event(*args, **kwargs):
            log.debug(
                '#**#**# Event subscriber (consume_event) recieved ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))
            log.debug("self.no_events = " + str(self.no_events))
            log.debug("self.event_received = " + str(self.events_received))

            self.events_received.append(args[0])
            # release any waiter once the expected count is reached
            if self.no_events and self.no_events == len(self.events_received):
                log.debug("CALLING self.async_event_result.set()")
                self.async_event_result.set()

        # NOTE: despite the plural name, this ends up holding one subscriber
        self.event_subscribers = EventSubscriber(
            event_type='ResourceAgentEvent',
            callback=consume_event,
            origin=instrument_agent_resource_id)
        self.event_subscribers.start()
        self.event_subscribers._ready_event.wait(timeout=5)
Ejemplo n.º 21
0
    def on_start(self):
        """
        Sets up service clients and output stream info; if configured as an
        event-driven process (graph_gen_interval set), registers an interval
        timer whose events trigger graph generation.
        """
        self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process=self)

        self.stream_info = self.CFG.get_safe('process.publish_streams', {})
        self.stream_names = self.stream_info.keys()
        self.stream_ids = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        # NOTE: removed an unused read of 'graph_time_periods' from CFG.

        # If this is meant to be an event driven process, schedule an event
        # to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(start_time="now", interval=self._str_to_secs(self.event_timer_interval),
                event_origin=event_origin, event_subtype="")

        super(VizTransformMatplotlibGraphs, self).on_start()
Ejemplo n.º 22
0
    def test_cancel_single_timer(self):
        """
        Create a one-time-only (time-of-day) timer, cancel it immediately,
        then wait past its expiry and verify no timer event was received.
        """
        self.single_timer_count = 0
        event_origin = "Time_of_Day"

        sub = EventSubscriber(event_type="TimerEvent", callback=self.single_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # schedule the timer a few seconds in the future, then cancel it
        now = datetime.datetime.utcnow() + timedelta(seconds=3)
        times_of_day = [{'hour': str(now.hour), 'minute': str(now.minute), 'second': str(now.second)}]
        # 'timer_id' rather than 'id' to avoid shadowing the builtin
        timer_id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=self.now_utc() + 3, event_origin=event_origin, event_subtype="test")
        self.assertEqual(type(timer_id), str)
        self.ssclient.cancel_timer(timer_id)
        gevent.sleep(3)

        # Validate the event is not generated
        self.assertEqual(self.single_timer_count, 0, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: 0 Timer id: %s " %(self.single_timer_count, timer_id))
Ejemplo n.º 23
0
    def _expect_from_root(self, p_root):
        """
        Start an event subscriber to the given root platform.
        To be called before any action that triggers publications from the
        root platform. Sets self._wait_root_event to a function that waits
        for (and returns) the received event.
        """
        async_result = AsyncResult()

        def consume_event(evt, *args, **kwargs):
            async_result.set(evt)

        # subscribe to aggregate-status events from the root platform:
        sub = EventSubscriber(event_type="DeviceAggregateStatusEvent",
                              origin=p_root.platform_device_id,
                              callback=consume_event)
        sub.start()
        self._data_subscribers.append(sub)
        sub._ready_event.wait(timeout=CFG.endpoint.receive.timeout)

        log.debug("registered for DeviceAggregateStatusEvent")

        # set new wait function:
        def wait_for_root_event():
            return async_result.get(timeout=CFG.endpoint.receive.timeout)

        self._wait_root_event = wait_for_root_event
Ejemplo n.º 24
0
    def test_create_forever_interval_timer(self):
        """
        Create an interval timer that runs forever (string-typed start/end
        arguments), let several intervals elapse, cancel it, and validate
        the number of received events against the elapsed time.
        """
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval Timer Forever"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # 'timer_id' rather than 'id' to avoid shadowing the builtin
        timer_id = self.ssclient.create_interval_timer(start_time=str(self.now_utc()), interval=self.interval_timer_interval,
            end_time="-1",
            event_origin=event_origin, event_subtype=event_origin)
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(timer_id), str)

        # Wait for 4 events to be published
        gevent.sleep((self.interval_timer_interval * 4) + 1)
        self.ssclient.cancel_timer(timer_id)
        # number of whole intervals that fit in the elapsed time
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts = math.floor(time_diff/self.interval_timer_interval)

        # Validate the timer id is invalid once it has been canceled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(timer_id)

        # Validate events are not generated after canceling the timer
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, timer_id))
Ejemplo n.º 25
0
    def start(self):
        """
        Starts the GovernanceController: reads config flags, determines the
        container's Org boundary settings and, when enabled, wires up the
        policy event subscribers and the management service clients.
        """
        log.debug("GovernanceController starting ...")

        self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)

        log.info("GovernanceInterceptor enabled: %s" % str(self.enabled))

        self.resource_policy_event_subscriber = None
        self.service_policy_event_subscriber = None

        # containers default to not Org Boundary and ION Root Org
        self._is_container_org_boundary = CFG.get_safe('container.org_boundary', False)
        self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')
        self._container_org_name = CFG.get_safe('container.org_name', self._system_root_org_name)
        self._container_org_id = None

        self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

        # guard clause: nothing else to wire up when governance is disabled
        if not self.enabled:
            return

        config = CFG.get_safe('interceptor.interceptors.governance.config')
        self.initialize_from_config(config)

        # listen for resource- and service-level policy changes
        self.resource_policy_event_subscriber = EventSubscriber(
            event_type="ResourcePolicyEvent",
            callback=self.resource_policy_event_callback)
        self.resource_policy_event_subscriber.start()

        self.service_policy_event_subscriber = EventSubscriber(
            event_type="ServicePolicyEvent",
            callback=self.service_policy_event_callback)
        self.service_policy_event_subscriber.start()

        self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=self.container)
        self.policy_client = PolicyManagementServiceProcessClient(node=self.container.node, process=self.container)
Ejemplo n.º 26
0
    def test_quit_stops_timers(self):
        """Verify that terminating the scheduler process clears its schedule entries."""
        first_event = AsyncResult()

        def on_timer_event(*args, **kwargs):
            first_event.set(args)
            self.interval_timer_count += 1

        event_origin = "test_quitter"
        subscriber = EventSubscriber(event_type="TimerEvent", callback=on_timer_event, origin=event_origin)
        subscriber.start()
        self.addCleanup(subscriber.stop)

        tid = self.ssclient.create_interval_timer(start_time="now",
                                                  end_time="-1",
                                                  interval=1,
                                                  event_origin=event_origin)

        # Block until the scheduler has published at least one timer event.
        first_event.get(timeout=5)

        # Kill the scheduler process outright.
        scheduler_proc = self.container.proc_manager.procs_by_name['scheduler']
        self.container.terminate_process(scheduler_proc.id)

        # All schedule entries should be gone after termination.
        self.assertEquals(scheduler_proc.schedule_entries, {})
Ejemplo n.º 27
0
    def test_quit_stops_timers(self):
        """Verify that terminating the scheduler process clears its schedule entries."""
        first_event = AsyncResult()

        def on_timer_event(*args, **kwargs):
            first_event.set(args)
            self.interval_timer_count += 1

        event_origin = "test_quitter"
        subscriber = EventSubscriber(event_type="TimerEvent", callback=on_timer_event, origin=event_origin)
        subscriber.start()
        self.addCleanup(subscriber.stop)

        tid = self.ssclient.create_interval_timer(start_time="now",
                                                  interval=1,
                                                  event_origin=event_origin)

        # Block until the scheduler has published at least one timer event.
        first_event.get(timeout=5)

        # Kill the scheduler process outright.
        scheduler_proc = self.container.proc_manager.procs_by_name['scheduler']
        self.container.terminate_process(scheduler_proc.id)

        # All schedule entries should be gone after termination.
        self.assertEquals(scheduler_proc.schedule_entries, {})
Ejemplo n.º 28
0
    def do_listen_for_incoming(self):
        """Subscribe to the data product stream and to DatasetModified events,
        registering cleanups for everything started here."""
        subscription_id = self.pubsub.create_subscription(
            'validator', data_product_ids=[self.data_product._id])
        self.addCleanup(self.pubsub.delete_subscription, subscription_id)

        self.granule_capture = []
        self.granule_count = 0

        def on_granule(msg, route, stream_id):
            # Count every granule but keep only the first few to bound memory.
            self.granule_count += 1
            if self.granule_count < 5:
                self.granule_capture.append(msg)

        validator = StandaloneStreamSubscriber('validator', callback=on_granule)
        validator.start()
        self.addCleanup(validator.stop)

        self.pubsub.activate_subscription(subscription_id)
        self.addCleanup(self.pubsub.deactivate_subscription, subscription_id)

        self.dataset_modified = Event()

        def on_dataset_modified(*args, **kwargs):
            self.dataset_modified.set()
            # TODO: event isn't using the ExternalDataset, but a different ID for a Dataset

        dataset_event_sub = EventSubscriber(event_type=OT.DatasetModified,
                                            callback=on_dataset_modified,
                                            origin=self.dataset_id)
        dataset_event_sub.start()
        self.addCleanup(dataset_event_sub.stop)
Ejemplo n.º 29
0
    def setUp(self):
        """
        Set up subscribers for alarm events.

        Starts the capability container, then registers a StreamAlarmEvent
        subscriber whose callback signals an AsyncResult once the expected
        number of events (set by the individual test) has been received.
        """

        # Start container.
        # Fixed typo in log message: "Staring" -> "Starting".
        log.info('Starting capability container.')
        self._start_container()

        self._event_count = 0
        self._events_received = []
        self._async_event_result = AsyncResult()
        self._resource_id = 'abc123'

        def consume_event(*args, **kwargs):
            # Fixed typo in log message: "recieved" -> "received".
            log.info('Test received ION event: args=%s, kwargs=%s, event=%s.',
                     str(args), str(kwargs), str(args[0]))
            self._events_received.append(args[0])
            # Signal completion only when the test has declared how many
            # events it expects and that many have arrived.
            if self._event_count > 0 and \
                self._event_count == len(self._events_received):
                self._async_event_result.set()

        self._event_subscriber = EventSubscriber(event_type='StreamAlarmEvent',
                                                 callback=consume_event,
                                                 origin=self._resource_id)

        self._event_subscriber.start()
        self._event_subscriber._ready_event.wait(timeout=5)

        def stop_subscriber():
            self._event_subscriber.stop()
            self._event_subscriber = None

        self.addCleanup(stop_subscriber)
Ejemplo n.º 30
0
    def _expect_from_root(self, p_root):
        """
        Start an event subscriber to the given root platform.
        To be called before any action that triggers publications from the root
        platform. It sets self._wait_root_event to a function to be called to
        wait for the event.
        """
        async_result = AsyncResult()

        def consume_event(evt, *args, **kwargs):
            async_result.set(evt)

        # Subscribe before the triggering action so the event cannot be missed.
        sub = EventSubscriber(event_type="DeviceAggregateStatusEvent",
                              origin=p_root.platform_device_id,
                              callback=consume_event)
        sub.start()
        self._data_subscribers.append(sub)
        sub._ready_event.wait(timeout=CFG.endpoint.receive.timeout)

        log.debug("registered for DeviceAggregateStatusEvent")

        # The caller invokes this to block until the event has been published.
        def wait_for_root_event():
            return async_result.get(timeout=CFG.endpoint.receive.timeout)

        self._wait_root_event = wait_for_root_event
Ejemplo n.º 31
0
    def on_start(self):
        """Configure event origins, subscribe to instrument events, and create
        the publisher used for alert messages."""
        log.debug('EventAlertTransform.on_start()')
        super(EventAlertTransform, self).on_start()

        # Origins for the timer and the instrument, taken from process config.
        self.timer_origin = self.CFG.get_safe('process.timer_origin', 'Interval Timer')
        self.instrument_origin = self.CFG.get_safe('process.instrument_origin', '')

        self.event_times = []

        # Incoming instrument events are queued for later processing.
        self.instrument_event_queue = gevent.queue.Queue()

        def instrument_event_received(message, headers):
            log.debug("EventAlertTransform received an instrument event here::: %s" % message)
            self.instrument_event_queue.put(message)

        self.instrument_event_subscriber = EventSubscriber(
            origin=self.instrument_origin,
            callback=instrument_event_received)
        self.instrument_event_subscriber.start()

        # Publisher for the alert messages this transform emits.
        self.event_publisher = EventPublisher()
Ejemplo n.º 32
0
    def test_ingestion_failover(self):
        """Corrupt the coverage master file and verify that ingestion publishes
        an ExceptionEvent from the 'stream_exception' origin."""
        stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset()
        self.start_ingestion(stream_id, dataset_id)

        event = Event()

        def cb(*args, **kwargs):
            event.set()

        sub = EventSubscriber(event_type="ExceptionEvent", callback=cb, origin="stream_exception")
        sub.start()
        # addCleanup ensures the subscriber is stopped even when an assertion
        # fails (the original only called sub.stop() on the success path).
        self.addCleanup(sub.stop)

        self.publish_fake_data(stream_id, route)
        self.wait_until_we_have_enough_granules(dataset_id, 40)

        file_path = DatasetManagementService._get_coverage_path(dataset_id)
        master_file = os.path.join(file_path, '%s_master.hdf5' % dataset_id)

        # Overwrite the HDF5 master file with garbage so ingestion fails.
        with open(master_file, 'w') as f:
            f.write('this will crash HDF')

        self.publish_hifi(stream_id, route, 5)

        # The failure must surface as an ExceptionEvent.
        self.assertTrue(event.wait(10))
Ejemplo n.º 33
0
    def test_create_interval_timer_with_end_time(self):
        """Create an interval timer with an end time; verify the expected number
        of events fire and that the timer is auto-canceled after end_time."""

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 2

        event_origin = "Interval Timer"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        # Stop the subscriber at teardown (original never stopped it).
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 5
        # 'timer_id' avoids shadowing the builtin 'id'.
        timer_id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(timer_id), str)

        # Wait until all events are published
        gevent.sleep((self.interval_timer_end_time - start_time) + self.interval_timer_interval + 1)

        # Validate only 2 events are published
        self.assertEqual(self.interval_timer_count, 2)

        # Validate the timer was canceled after the end_time is expired
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(timer_id)
Ejemplo n.º 34
0
    def test_create_interval_timer_with_end_time(self):
        """Interval timer with an end time: validate the event count and that
        the timer is auto-canceled once end_time expires."""
        self.interval_timer_count_2 = 0
        self.interval_timer_sent_time_2 = 0
        self.interval_timer_received_time_2 = 0
        self.interval_timer_interval_2 = 3

        event_origin = "Interval_Timer_2"
        subscriber = EventSubscriber(event_type="TimerEvent",
                                     callback=self.interval_timer_callback_with_end_time,
                                     origin=event_origin)
        subscriber.start()
        self.addCleanup(subscriber.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time_2 = start_time + 7
        timer_id = self.ssclient.create_interval_timer(
            start_time="now", interval=self.interval_timer_interval_2,
            end_time=self.interval_timer_end_time_2,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time_2 = datetime.datetime.utcnow()
        self.assertEqual(type(timer_id), str)

        # Sleep long enough for every scheduled event to be published.
        gevent.sleep((self.interval_timer_end_time_2 - start_time) + self.interval_timer_interval_2 + 1)

        # Exactly two events should have been generated.
        self.assertEqual(
            self.interval_timer_count_2, 2,
            "Invalid number of timeouts generated. Number of event: %d Expected: 2 Timer id: %s " % (self.interval_timer_count_2, timer_id))

        # Once end_time has passed the timer must already be canceled.
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(timer_id)
Ejemplo n.º 35
0
    def test_qc_events(self):
        """Publish a granule through a QC-enabled stream and verify that a
        ParameterQCEvent is raised with the expected parameter and value."""
        ph = ParameterHelper(self.dataset_management, self.addCleanup)
        pdict_id = ph.create_qc_pdict()
        stream_def_id = self.pubsub_management.create_stream_definition('qc stream def', parameter_dictionary_id=pdict_id)
        self.addCleanup(self.pubsub_management.delete_stream_definition, stream_def_id)

        stream_id, route = self.pubsub_management.create_stream('qc stream', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
        self.addCleanup(self.pubsub_management.delete_stream, stream_id)

        ingestion_config_id = self.get_ingestion_config()
        dataset_id = self.create_dataset(pdict_id)
        config = DotDict()

        self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=ingestion_config_id, dataset_id=dataset_id, config=config)
        self.addCleanup(self.ingestion_management.unpersist_data_stream, stream_id, ingestion_config_id)

        publisher = StandaloneStreamPublisher(stream_id, route)
        rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        rdt['time'] = np.arange(10)
        rdt['temp'] = np.arange(10) * 3

        verified = Event()

        def verification(event, *args, **kwargs):
            # The QC event must identify the flagged parameter and time value.
            self.assertEquals(event.qc_parameter, 'temp_qc')
            self.assertEquals(event.temporal_value, 7)
            verified.set()

        qc_event_sub = EventSubscriber(event_type=OT.ParameterQCEvent, origin=dataset_id,
                                       callback=verification, auto_delete=True)
        qc_event_sub.start()
        self.addCleanup(qc_event_sub.stop)

        publisher.publish(rdt.to_granule())
        self.assertTrue(verified.wait(10))
Ejemplo n.º 36
0
    def test_create_single_timer(self):
        """Create then cancel a one-time (time-of-day) timer and verify that no
        event is received.

        Steps: create the timer resource, start the event listener, schedule
        the timer, cancel it, then wait past the scheduled time to confirm no
        event was published.
        """
        self.single_timer_count = 0
        event_origin = "Time of Day"

        sub = EventSubscriber(event_type="ResourceEvent", callback=self.single_timer_call_back, origin=event_origin)
        sub.start()
        # Stop the subscriber at teardown (original never stopped it).
        self.addCleanup(sub.stop)

        # Time out in 3 seconds
        now = datetime.datetime.utcnow() + timedelta(seconds=3)
        times_of_day = [{'hour': str(now.hour), 'minute': str(now.minute), 'second': str(now.second)}]
        ss = SchedulerService()
        # 'timer_id' avoids shadowing the builtin 'id'.
        timer_id = ss.create_time_of_day_timer(times_of_day=times_of_day, expires=time.time() + 25200 + 60, event_origin=event_origin, event_subtype="")

        self.assertEqual(type(timer_id), str)
        ss.cancel_timer(timer_id)
        gevent.sleep(5)
        # Validate the event is not sent
        self.assertEqual(self.single_timer_count, 0)
Ejemplo n.º 37
0
    def test_cancel_single_timer(self):
        """Cancel a one-time (time-of-day) timer before it fires and verify
        that no event is generated."""
        self.single_timer_count = 0
        event_origin = "Time_of_Day"

        subscriber = EventSubscriber(event_type="TimerEvent", callback=self.single_timer_callback, origin=event_origin)
        subscriber.start()
        self.addCleanup(subscriber.stop)

        # Schedule the timer a few seconds out, then cancel it immediately.
        fire_at = datetime.datetime.utcnow() + timedelta(seconds=3)
        times_of_day = [{'hour': str(fire_at.hour), 'minute': str(fire_at.minute), 'second': str(fire_at.second)}]
        timer_id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=self.now_utc() + 3, event_origin=event_origin, event_subtype="test")
        self.assertEqual(type(timer_id), str)
        self.ssclient.cancel_timer(timer_id)
        gevent.sleep(3)

        # No event may arrive after cancellation.
        self.assertEqual(self.single_timer_count, 0, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: 0 Timer id: %s " % (self.single_timer_count, timer_id))
Ejemplo n.º 38
0
    def test_timeoffday_timer(self):
        """Schedule two time-of-day timers a few seconds in the future and
        verify exactly two events arrive within the wait window."""

        ss = SchedulerService()
        event_origin = "Time Of Day2"
        self.expire_sec_1 = 4
        self.expire_sec_2 = 4
        self.tod_count = 0
        expire1 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_1)
        expire2 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_2)
        # Create two timers
        times_of_day = [{'hour': str(expire1.hour), 'minute': str(expire1.minute), 'second': str(expire1.second)},
                        {'hour': str(expire2.hour), 'minute': str(expire2.minute), 'second': str(expire2.second)}]

        sub = EventSubscriber(event_type="ResourceEvent", callback=self.tod_callback, origin=event_origin)
        sub.start()
        # Stop the subscriber at teardown (original never stopped it).
        self.addCleanup(sub.stop)

        # The timer resource itself expires in one day.
        e = time.mktime((datetime.datetime.utcnow() + timedelta(days=1)).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        timer_id = ss.create_time_of_day_timer(times_of_day=times_of_day, expires=e, event_origin=event_origin, event_subtype="")
        self.assertEqual(type(timer_id), str)
        gevent.sleep(15)
        # After waiting for 15 seconds, validate only 2 events are generated.
        self.assertTrue(self.tod_count == 2)
Ejemplo n.º 39
0
    def test_system_restart(self):
        """Verify an interval timer survives a scheduler restart, keeps firing,
        can then be canceled, and is finally removed from the resource registry."""

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_4444"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.on_restart_callback, origin=event_origin)
        sub.start()
        # Stop the subscriber at teardown (original never stopped it).
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 20
        # 'timer_id' avoids shadowing the builtin 'id'.
        timer_id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(timer_id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(timer_id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until 1 event is published
        gevent.sleep((self.interval_timer_interval) + 1)
        # Validate 1 event is published
        self.assertEqual(self.interval_timer_count, 1)

        self.ssclient.on_system_restart()

        # after system restart, validate the timer is restored
        ss = self.rrclient.read(timer_id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until another event is published
        gevent.sleep((self.interval_timer_interval * 2) + 1)

        # Validate more events were published after the restart
        self.assertGreater(self.interval_timer_count, 2)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(timer_id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(timer_id)

        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(timer_id)
Ejemplo n.º 40
0
    def __init__(self, dataset_id=None, data_product_id=None):
        """Subscribe to DatasetModified events for a dataset.

        If only data_product_id is given, the associated dataset id is looked
        up in the resource registry via the hasDataset association.
        """
        if data_product_id and not dataset_id:
            # Takes the first associated dataset; assumes the product has at
            # least one -- TODO confirm against callers.
            dataset_id = Container.instance.resource_registry.find_objects(data_product_id, PRED.hasDataset, id_only=True)[0][0]
        self.dataset_id = dataset_id
        self.event = Event()

        # self.cb is defined elsewhere on this class; presumably it signals
        # self.event when a DatasetModified event arrives -- verify.
        self.es = EventSubscriber(event_type=OT.DatasetModified, callback=self.cb, origin=self.dataset_id, auto_delete=True)
        self.es.start()
    def start_listener(self, dataset_id):
        """Listen for DatasetModified events on the given dataset and flag
        self.data_modified when one arrives."""
        def on_modified(*args, **kwargs):
            self.data_modified.set()

        listener = EventSubscriber(event_type=OT.DatasetModified,
                                   callback=on_modified,
                                   origin=dataset_id)
        listener.start()
        self.addCleanup(listener.stop)
    def on_init(self):
        """Create the event publisher and subscribe to Policy resource changes."""
        self.event_pub = EventPublisher()

        # React to create/update/delete of Policy resources.
        self.policy_event_subscriber = EventSubscriber(
            event_type="ResourceModifiedEvent",
            origin_type="Policy",
            callback=self.policy_event_callback)
        # NOTE(review): other subscribers in this file call start(); confirm
        # activate() is the intended/current API for this subscriber.
        self.policy_event_subscriber.activate()
    def test_oms_events_receive(self):
        """Register a CI listener URL with OMS, trigger test events, and verify
        that at least one OMSDeviceStatusEvent is received.

        NOTE: integration test -- talks to an external OMS endpoint at OMS_URI
        and to a hard-coded listener URL.
        """

        log.debug("oms_uri = %s", OMS_URI)
        self.oms = CIOMSClientFactory.create_instance(OMS_URI)

        #CI url
        #url = "http://128.54.26.120:5000/ion-service/oms_event"

        #buddha url
        url = "http://128.54.29.48:5000/ion-service/oms_event"

        log.info("test_oms_events_receive:setup http url %s", url)

        # Ask OMS to POST its events to our listener URL.
        result = self.oms.event.register_event_listener(url)
        log.info("test_oms_events_receive:setup register_event_listener result %s", result)

        #-------------------------------------------------------------------------------------
        # Set up the subscriber to catch the alert event
        #-------------------------------------------------------------------------------------

        def callback_for_alert(event, *args, **kwargs):
            # Queue each received event; the main loop below drains the queue.
            log.debug("caught an OMSDeviceStatusEvent: %s", event)
            self.catch_alert.put(event)

        self.event_subscriber = EventSubscriber(event_type='OMSDeviceStatusEvent',
            callback=callback_for_alert)

        self.event_subscriber.start()
        self.addCleanup(self.event_subscriber.stop)


        #test the listener
        # Fire three synthetic OMS events so something arrives even when the
        # real platform is quiet.
        result = self.oms.event.generate_test_event({'platform_id': 'fake_platform_id1', 'message': "fake event triggered from CI using OMS' generate_test_event", 'severity': '3', 'group ': 'power'})
        log.info("test_oms_events_receive:setup generate_test_event result %s", result)

        result = self.oms.event.generate_test_event({'platform_id': 'fake_platform_id2', 'message': "fake event triggered from CI using OMS' generate_test_event", 'severity': '3', 'group ': 'power'})
        log.info("test_oms_events_receive:setup generate_test_event result %s", result)

        result = self.oms.event.generate_test_event({'platform_id': 'fake_platform_id3', 'message': "fake event triggered from CI using OMS' generate_test_event", 'severity': '3', 'group ': 'power'})
        log.info("test_oms_events_receive:setup generate_test_event result %s", result)


        oms_event_counter = 0
        runtime = 0
        starttime = time.time()
        caught_events = []

        #catch several events to get some samples from OMS
        # Collect up to 10 events or stop after ~60 seconds, whichever first.
        while oms_event_counter < 10 and runtime < 60 :
            a = self.catch_alert.get(timeout=60)
            caught_events.append(a)
            oms_event_counter += 1
            runtime = time.time() - starttime

        # Always unregister so OMS stops posting to our URL.
        result = self.oms.event.unregister_event_listener(url)
        log.debug("unregister_event_listener result: %s", result)

        self.assertTrue(oms_event_counter > 0)
Ejemplo n.º 44
0
class EventPersister(StandaloneProcess):
    """Standalone process that batches received events and persists them to the
    event repository at a fixed interval via a gevent greenlet."""

    def on_init(self):
        # Time in between event persists
        self.persist_interval = 1.0

        # Holds received events FIFO
        self.event_queue = Queue()

        # Temporarily holds list of events to persist while datastore operation not yet completed
        self.events_to_persist = None

        # bookkeeping for timeout greenlet
        self._persist_greenlet = None
        self._terminate_persist = Event() # when set, exits the timeout greenlet

        # The event subscriber
        self.event_sub = None

    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('Publisher Greenlet started in "%s"' % self.__class__.__name__)

        # Event subscription
        # Subscribes to every event type in the system.
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS, callback=self._on_event)
        self.event_sub.start()

    def on_quit(self):
        # Stop event subscriber
        self.event_sub.stop()

        # tell the trigger greenlet we're done
        self._terminate_persist.set()

        # wait on the greenlet to finish cleanly
        self._persist_greenlet.join(timeout=10)

    def _on_event(self, event, *args, **kwargs):
        # Subscriber callback: enqueue for the persister greenlet to drain.
        self.event_queue.put(event)

    def _trigger_func(self, persist_interval):
        """Greenlet body: every persist_interval seconds, drain the queue and
        persist the batch, until _terminate_persist is set."""
        log.debug('Starting event persister thread with persist_interval=%s', persist_interval)

        # Event.wait returns False on timeout (and True when set in on_quit), so we use this to both exit cleanly and do our timeout in a loop
        while not self._terminate_persist.wait(timeout=persist_interval):
            try:
                # Drain exactly the number of events queued at this instant.
                self.events_to_persist = [self.event_queue.get() for x in xrange(self.event_queue.qsize())]

                self._persist_events(self.events_to_persist)
                self.events_to_persist = None
            except Exception as ex:
                # NOTE(review): returning here ends the persister loop
                # permanently after a single failure, and the drained batch in
                # self.events_to_persist is never retried -- confirm intended.
                log.exception("Failed to persist received events")
                return False

    def _persist_events(self, event_list):
        # Persist the batch; no-op for an empty list.
        if event_list:
            bootstrap.container_instance.event_repository.put_events(event_list)
Ejemplo n.º 45
0
    def on_start(self):
        """Start the notification worker: load user info, then subscribe to
        ReloadUserInfoEvent and to the general notification event queue."""
        super(NotificationWorker, self).on_start()

        self.smtp_client = setting_up_smtp_client()

        # ------------------------------------------------------------------------------------
        # Start by loading the user info and reverse user info dictionaries
        # ------------------------------------------------------------------------------------

        try:
            self.user_info = self.load_user_info()
            self.reverse_user_info = calculate_reverse_user_info(self.user_info)

            log.info("On start up, notification workers loaded the following user_info dictionary: %s" % self.user_info)
            log.info("The calculated reverse user info: %s" % self.reverse_user_info)

        except NotFound as exc:
            # The users index may not exist yet at container start; that is
            # tolerated -- any other NotFound is re-raised.
            if exc.message.find("users_index") > -1:
                log.warning("Notification workers found on start up that users_index have not been loaded yet.")
            else:
                raise NotFound(exc.message)

        # ------------------------------------------------------------------------------------
        # Create an event subscriber for Reload User Info events
        # ------------------------------------------------------------------------------------

        def reload_user_info(event_msg, headers):
            """
            Callback method for the subscriber to ReloadUserInfoEvent
            """

            notification_id = event_msg.notification_id
            log.info(
                "(Notification worker received a ReloadNotificationEvent. The relevant notification_id is %s"
                % notification_id
            )

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info = calculate_reverse_user_info(self.user_info)
            # test_hook appears to be a test-instrumentation seam -- confirm.
            self.test_hook(self.user_info, self.reverse_user_info)

            log.debug("After a reload, the user_info: %s" % self.user_info)
            log.debug("The recalculated reverse_user_info: %s" % self.reverse_user_info)

        # the subscriber for the ReloadUSerInfoEvent
        self.reload_user_info_subscriber = EventSubscriber(event_type="ReloadUserInfoEvent", callback=reload_user_info)
        self.reload_user_info_subscriber.start()

        # ------------------------------------------------------------------------------------
        # Create an event subscriber for all events that are of interest for notifications
        # ------------------------------------------------------------------------------------

        self.event_subscriber = EventSubscriber(queue_name="uns_queue", callback=self.process_event)
        self.event_subscriber.start()
Ejemplo n.º 46
0
    def on_start(self):
        """Subscribe to the configured event type, optionally on a named queue,
        dispatching to process_event."""
        super(TransformEventListener, self).on_start()

        configured_event_type = self.CFG.get_safe('process.event_type', '')
        configured_queue = self.CFG.get_safe('process.queue_name', None)

        self.listener = EventSubscriber(callback=self.process_event,
                                        event_type=configured_event_type,
                                        queue_name=configured_queue)
        self.listener.start()
Ejemplo n.º 47
0
    def __init__(self, dataset_id):
        """Track modifications to a single dataset via DatasetModified events."""
        self.dataset_id = dataset_id
        self.event = Event()

        # Subscribe with auto_delete so the queue is removed when we go away.
        self.es = EventSubscriber(origin=self.dataset_id,
                                  event_type=OT.DatasetModified,
                                  auto_delete=True,
                                  callback=self.cb)
        self.es.start()
Ejemplo n.º 48
0
    def test_replay_with_parameters(self):
        """Ingest fake CTD data, then retrieve a strided time window limited to
        the 'time' and 'temp' parameters and validate the result."""
        #--------------------------------------------------------------------------------
        # Create the configurations and the dataset
        #--------------------------------------------------------------------------------
        # Get a precompiled parameter dictionary with basic ctd fields
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict',id_only=True)
        context_ids = self.dataset_management.read_parameter_contexts(pdict_id, id_only=True)

        # Add a field that supports binary data input.
        bin_context = ParameterContext('binary',  param_type=ArrayType())
        context_ids.append(self.dataset_management.create_parameter_context('binary', bin_context.dump()))
        # Add another field that supports dictionary elements.
        rec_context = ParameterContext('records', param_type=RecordType())
        context_ids.append(self.dataset_management.create_parameter_context('records', rec_context.dump()))

        pdict_id = self.dataset_management.create_parameter_dictionary('replay_pdict', parameter_context_ids=context_ids, temporal_context='time')
        

        stream_def_id = self.pubsub_management.create_stream_definition('replay_stream', parameter_dictionary_id=pdict_id)
        
        stream_id, route  = self.pubsub_management.create_stream('replay_with_params', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
        config_id  = self.get_ingestion_config()
        dataset_id = self.create_dataset(pdict_id)
        self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id)

        # Wait for ingestion to write the published data before retrieving.
        dataset_modified = Event()
        def cb(*args, **kwargs):
            dataset_modified.set()
        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id)
        es.start()

        self.addCleanup(es.stop)

        self.publish_fake_data(stream_id, route)

        self.assertTrue(dataset_modified.wait(30))

        # Offsets convert NTP epoch (1900) to the 0-based test time values --
        # presumably; verify against publish_fake_data.
        query = {
            'start_time': 0 - 2208988800,
            'end_time':   20 - 2208988800,
            'stride_time' : 2,
            'parameters': ['time','temp']
        }
        retrieved_data = self.data_retriever.retrieve(dataset_id=dataset_id,query=query)

        # Strided retrieval must yield every other sample in [0, 20).
        rdt = RecordDictionaryTool.load_from_granule(retrieved_data)
        comp = np.arange(0,20,2) == rdt['time']
        self.assertTrue(comp.all(),'%s' % rdt.pretty_print())
        self.assertEquals(set(rdt.iterkeys()), set(['time','temp']))

        extents = self.dataset_management.dataset_extents(dataset_id=dataset_id, parameters=['time','temp'])
        self.assertTrue(extents['time']>=20)
        self.assertTrue(extents['temp']>=20)

        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)
Ejemplo n.º 49
0
    def test_event_subscriber_auto_delete(self):
        """Verify the auto_delete flag propagates from constructor to channel."""
        fake_node = Mock()
        subscriber = EventSubscriber(event_type="ProcessLifecycleEvent",
                                     callback=lambda *a, **kw: None,
                                     auto_delete=sentinel.auto_delete,
                                     node=fake_node)
        # The constructor must record the flag verbatim.
        self.assertEquals(subscriber._auto_delete, sentinel.auto_delete)

        # Rather than patching out the whole pipeline, call initialize
        # directly, which calls create_channel for us; only the listener
        # setup needs stubbing.
        subscriber._setup_listener = Mock()
        subscriber.initialize(sentinel.binding)

        # The channel created during initialize must carry the same flag.
        self.assertEquals(subscriber._chan.queue_auto_delete, sentinel.auto_delete)
    def start_listener(self, dataset_id):
        """Subscribe to DatasetModified events for *dataset_id*.

        The callback sets self.data_modified; the subscriber is stopped
        automatically at test cleanup.
        """
        def on_modified(*args, **kwargs):
            self.data_modified.set()

        subscriber = EventSubscriber(event_type=OT.DatasetModified,
                                     origin=dataset_id,
                                     callback=on_modified)
        subscriber.start()
        self.addCleanup(subscriber.stop)
Ejemplo n.º 51
0
    def on_start(self):
        """Start the periodic persister greenlet and subscribe to all events."""
        # Spawn the timer greenlet that triggers persistence every interval.
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('EventPersister timer greenlet started in "%s" (interval %s)',
                  self.__class__.__name__, self.persist_interval)

        # Listen for every event type on the named persister queue.
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                         queue_name="event_persister",
                                         callback=self._on_event)
        self.event_sub.start()
    def _start_platform(self):
        """
        Starts the given platform waiting for it to transition to the
        UNINITIALIZED state (note that the agent starts in the LAUNCHING state).

        More in concrete the sequence of steps here are:
        - prepares subscriber to receive the UNINITIALIZED state transition
        - launches the platform process
        - waits for the start of the process
        - waits for the transition to the UNINITIALIZED state
        """
        ##############################################################
        # prepare to receive the UNINITIALIZED state transition:
        # The AsyncResult is set by the subscriber callback once the expected
        # state event arrives; we block on it at the very end of this method.
        async_res = AsyncResult()

        def consume_event(evt, *args, **kwargs):
            log.debug("Got ResourceAgentStateEvent %s from origin %r", evt.state, evt.origin)
            # Only the UNINITIALIZED transition completes the wait; any other
            # state events are just logged and ignored.
            if evt.state == PlatformAgentState.UNINITIALIZED:
                async_res.set(evt)

        # start subscriber:
        sub = EventSubscriber(event_type="ResourceAgentStateEvent",
                              origin=self.platform_device_id,
                              callback=consume_event)
        sub.start()
        log.info("registered event subscriber to wait for state=%r from origin %r",
                 PlatformAgentState.UNINITIALIZED, self.platform_device_id)
        #self._event_subscribers.append(sub)
        # Wait until the subscriber is actually listening BEFORE launching the
        # platform, so the state transition event cannot be missed.
        sub._ready_event.wait(timeout=EVENT_TIMEOUT)

        ##############################################################
        # now start the platform:
        agent_instance_id = self.platform_agent_instance_id
        log.debug("about to call start_platform_agent_instance with id=%s", agent_instance_id)
        pid = self.imsclient.start_platform_agent_instance(platform_agent_instance_id=agent_instance_id)
        log.debug("start_platform_agent_instance returned pid=%s", pid)

        #wait for start
        agent_instance_obj = self.imsclient.read_platform_agent_instance(agent_instance_id)
        # Gate blocks until the agent process reaches RUNNING (or times out).
        # NOTE(review): 'await' is a reserved word in Python 3.7+; this call
        # is valid only under the Python 2 interpreter this code targets.
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     self.platform_device_id,
                                     ProcessStateEnum.RUNNING)
        self.assertTrue(gate.await(90), "The platform agent instance did not spawn in 90 seconds")

        # Start a resource agent client to talk with the agent.
        self._pa_client = ResourceAgentClient(self.platform_device_id,
                                              name=gate.process_id,
                                              process=FakeProcess())
        log.debug("got platform agent client %s", str(self._pa_client))

        ##############################################################
        # wait for the UNINITIALIZED event:
        async_res.get(timeout=self._receive_timeout)
Ejemplo n.º 53
0
    def test_create_interval_timer(self):
        """
        Exercise the full interval-timer lifecycle:
        - create the timer resource and an event listener
        - receive a few intervals and validate the received count
        - cancel the timer
        - wait past the next interval to verify cancellation took effect
        """
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_233"
        subscriber = EventSubscriber(event_type="TimerEvent",
                                     origin=event_origin,
                                     callback=self.interval_timer_callback)
        subscriber.start()
        self.addCleanup(subscriber.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 10
        timer_id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
                                                       end_time=self.interval_timer_end_time,
                                                       event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(timer_id), str)

        # The timer resource must be stored in the resource registry.
        timer_obj = self.rrclient.read(timer_id)
        self.assertEqual(timer_obj.entry.event_origin, event_origin)

        # Let two intervals elapse so at least two events get published.
        gevent.sleep((self.interval_timer_interval * 2) + 1)

        elapsed = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts = math.floor(elapsed / self.interval_timer_interval)

        # Cancel the timer.
        self.ssclient.cancel_timer(timer_id)

        # Wait past the next interval boundary to confirm cancellation.
        gevent.sleep(self.interval_timer_interval)

        # Cancelling again must fail: the timer no longer exists.
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(timer_id)

        # The resource must also be gone from the resource registry.
        with self.assertRaises(NotFound):
            self.rrclient.read(timer_id)

        # Finally, validate the number of timer events actually received.
        self.assertEqual(self.interval_timer_count, timer_counts,
                         "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " % (self.interval_timer_count, timer_counts, timer_id))
Ejemplo n.º 54
0
    def test_create_interval_timer(self):
        """Create an interval timer, verify events arrive at the expected rate,
        then cancel it and verify no further events and full resource cleanup."""
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_233"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 10
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until two events are published
        gevent.sleep((self.interval_timer_interval * 2) + 1)

        # Expected number of events = whole intervals elapsed since creation.
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled: a second cancel must fail
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

        # Validate the number of timer counts
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))
Ejemplo n.º 55
0
class DatasetMonitor(object):
    """Helper that watches a dataset for modification events.

    Sets ``self.event`` when the monitored dataset is modified; call
    ``stop()`` to tear down the underlying subscriber.
    """
    def __init__(self, dataset_id):
        self.dataset_id = dataset_id
        self.event = Event()

        # BUG FIX: the event type was misspelled 'DatasetModiied', so this
        # subscriber could never match any published events (the other
        # subscribers in this file use OT.DatasetModified).
        self.es = EventSubscriber(event_type=OT.DatasetModified, callback=self.cb,
                                  origin=self.dataset_id, auto_delete=True)
        self.es.start()

    def cb(self, *args, **kwargs):
        # Signal any waiter that the dataset changed.
        self.event.set()

    def stop(self):
        # Tear down the subscriber; its queue is auto-deleted.
        self.es.stop()
Ejemplo n.º 56
0
class TransformEventListener(TransformEventProcess):
    """Base process that subscribes to a configured event type.

    Subclasses must override process_event() to handle incoming events.
    """

    def on_start(self):
        # The event type to listen for comes from process configuration.
        configured_type = self.CFG.get_safe('process.event_type', '')
        self.listener = EventSubscriber(event_type=configured_type,
                                        callback=self.process_event)
        self.listener.start()

    def process_event(self, msg, headers):
        raise NotImplementedError('Method process_event not implemented')

    def on_quit(self):
        # Stop the subscriber started in on_start.
        self.listener.stop()
Ejemplo n.º 57
0
    def test_event_subscriber_auto_delete(self):
        """Verify the auto_delete flag is stored and applied to the channel."""
        mocknode = Mock()
        ev = EventSubscriber(event_type="ProcessLifecycleEvent",
                             callback=lambda *a, **kw: None,
                             auto_delete=sentinel.auto_delete,
                             node=mocknode)
        # The constructor must record the flag verbatim.
        self.assertEquals(ev._auto_delete, sentinel.auto_delete)

        # we don't want to have to patch out everything here, so call initialize directly, which calls create_channel for us
        ev._setup_listener = Mock()
        ev.initialize(sentinel.binding)

        # The channel created during initialize must carry the same flag.
        self.assertEquals(ev._chan.queue_auto_delete, sentinel.auto_delete)
Ejemplo n.º 58
0
    def _start_finished_event_subscriber(self):
        """Subscribe to DeviceEvents; once the expected number of
        'TestingFinished' events has arrived, resolve the async result."""

        def consume_event(*args, **kwargs):
            evt = args[0]
            log.debug('EventSubscriber event received: %s', str(evt))
            if evt.description == 'TestingFinished':
                log.debug('TestingFinished event received')
                self._finished_events_received.append(evt)
                received = len(self._finished_events_received)
                # Only resolve once the configured count has been reached.
                if self._finished_count and self._finished_count == received:
                    log.debug('Finishing test...')
                    self._async_finished_result.set(received)
                    log.debug('Called self._async_finished_result.set({0})'.format(received))

        self._finished_event_subscriber = EventSubscriber(event_type='DeviceEvent',
                                                          callback=consume_event)
        self._finished_event_subscriber.start()
Ejemplo n.º 59
0
def start_subscribers():
    """Open the run log file and start the platform/result event subscribers.

    Uses module-level globals so that the event callbacks can share state
    with the rest of the script.
    """
    global platform_sub
    global result_sub
    global go_time
    global logfile

    go_time = time.time()
    loc_time = time.localtime(go_time)
    # Log file named by the local hour/minute/second of the start time.
    fname = tcaa_args['logfile_dir'] + \
        '2caa_log_%d_%d_%d.txt' % (loc_time[3], loc_time[4], loc_time[5])
    logfile = open(fname, 'w')
    # Write an initial status line before any events arrive.
    logfile.write('%15.6f %40s %6d %6d %6d %6d %6d %6d %6d\n' %
                  (0.0, 'Start', status, queue_size, len(requests_sent),
                   len(results_recv), len(results_pending),
                   len(results_confirmed), len(results_error)))

    # Events published by the platform itself.
    platform_sub = EventSubscriber(event_type='PlatformEvent',
                                   origin=tcaa_args['xs_name'],
                                   callback=consume_event)
    platform_sub.start()

    # Results of remote commands.
    result_sub = EventSubscriber(event_type='RemoteCommandResult',
                                 origin='fake_id',
                                 callback=consume_event)
    result_sub.start()