Code Example #1
File: test_event.py Project: ooici-dm/pyon
    def test_pub_on_different_subtypes(self):
        ar = event.AsyncResult()
        gq = queue.Queue()
        self.count = 0

        def cb(event, *args, **kwargs):
            self.count += 1
            gq.put(event)
            if event.description == "end":
                ar.set()

        sub = EventSubscriber(event_type="ResourceModifiedEvent", sub_type="st1", callback=cb)
        sub.activate()

        pub1 = EventPublisher(event_type="ResourceModifiedEvent")
        pub2 = EventPublisher(event_type="ContainerLifecycleEvent")

        pub1.publish_event(origin="two", sub_type="st2", description="2")
        pub2.publish_event(origin="three", sub_type="st1", description="3")
        pub1.publish_event(origin="one", sub_type="st1", description="1")
        pub1.publish_event(origin="four", sub_type="st1", description="end")

        ar.get(timeout=5)
        sub.deactivate()

        res = []
        for x in xrange(self.count):
            res.append(gq.get(timeout=5))

        self.assertEquals(len(res), 2)
        self.assertEquals(res[0].description, "1")
Code Example #2
    def test_sub(self):

        #start interaction observer
        io = InteractionObserver()
        io.start()

        #publish an event
        ev_pub = EventPublisher(event_type="ResourceEvent")
        ev_pub.publish_event(origin="specific", description="event")


        # publish a message
        msg_pub = Publisher()
        msg_pub.publish(to_name='anyone', msg="msg")

        # give 2 seconds for the messages to arrive
        time.sleep(2)

        #verify that two messages (an event and a message) are seen
        self.assertEquals(len(io.msg_log), 2)

        #iterate through the messages observed
        for item in io.msg_log:
            # if event
            if item[2]:
                #verify that the origin is what we sent
                self.assertEquals(item[1]['origin'], 'specific')
        dump = io._get_data(io.msg_log,{})
        sump = dump
Code Example #3
    def _acquire_data(cls, config, unlock_new_data_callback):
        """
        Ensures required keys (such as stream_id) are available from config, configures the publisher and then calls:
             BaseDataHandler._new_data_constraints (only if config does not contain 'constraints')
             BaseDataHandler._publish_data passing BaseDataHandler._get_data as a parameter
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        @param unlock_new_data_callback BaseDataHandler callback function to allow conditional unlocking of the BaseDataHandler._semaphore
        """
        stream_id = get_safe(config, 'stream_id')
        if not stream_id:
            raise ConfigurationError('Configuration does not contain required \'stream_id\' key')
        #TODO: Configure the publisher
        publisher=None

        constraints = get_safe(config,'constraints')
        if not constraints:
            gevent.getcurrent().link(unlock_new_data_callback)
            constraints = cls._new_data_constraints(config)
            config['constraints']=constraints

        cls._publish_data(publisher, config, cls._get_data(config))

        # Publish a 'TestingFinished' event
        if get_safe(config,'TESTING'):
            log.debug('Publish TestingFinished event')
            pub = EventPublisher('DeviceCommonLifecycleEvent')
            pub.publish_event(origin='BaseDataHandler._acquire_data', description='TestingFinished')
Code Example #4
    def _acquire_data(cls, config, publisher, unlock_new_data_callback):
        """
        Ensures required keys (such as stream_id) are available from config, configures the publisher and then calls:
             BaseDataHandler._new_data_constraints (only if config does not contain 'constraints')
             BaseDataHandler._publish_data passing BaseDataHandler._get_data as a parameter
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        @param unlock_new_data_callback BaseDataHandler callback function to allow conditional unlocking of the BaseDataHandler._semaphore
        """
        log.debug('start _acquire_data: config={0}'.format(config))

        cls._init_acquisition_cycle(config)

        constraints = get_safe(config,'constraints')
        if not constraints:
            gevent.getcurrent().link(unlock_new_data_callback)
            constraints = cls._new_data_constraints(config)
            if constraints is None:
                raise InstrumentParameterException("Data constraints returned from _new_data_constraints cannot be None")
            config['constraints'] = constraints

        cls._publish_data(publisher, cls._get_data(config))

        # Publish a 'TestingFinished' event
        if get_safe(config,'TESTING'):
            log.debug('Publish TestingFinished event')
            pub = EventPublisher('DeviceCommonLifecycleEvent')
            pub.publish_event(origin='BaseDataHandler._acquire_data', description='TestingFinished')
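The TESTING branch above signals completion by publishing a DeviceCommonLifecycleEvent with description 'TestingFinished'. A minimal sketch of how a test might wait for that event is shown below, following the EventSubscriber/AsyncResult pattern used in the pyon test examples on this page; the wiring is an illustrative assumption, not part of BaseDataHandler.

# Sketch (assumption): wait for the 'TestingFinished' event that _acquire_data publishes
# when config['TESTING'] is set. Requires a running pyon container.
from gevent.event import AsyncResult
from pyon.event.event import EventSubscriber

finished = AsyncResult()

def on_lifecycle_event(event, headers):
    # _acquire_data publishes description='TestingFinished' on a DeviceCommonLifecycleEvent
    if event.description == 'TestingFinished':
        finished.set(event)

sub = EventSubscriber(event_type='DeviceCommonLifecycleEvent', callback=on_lifecycle_event)
sub.start()

# ... invoke BaseDataHandler._acquire_data(config, publisher, unlock_new_data_callback)
#     with config['TESTING'] = True ...

finished.get(timeout=10)   # raises a gevent Timeout if the event never arrives
sub.stop()                 # or deactivate(), depending on the pyon version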
Code Example #5
class SystemManagementService(BaseSystemManagementService):
    """ container management requests are handled by the event listener
        ion.processes.event.container_manager.ContainerManager
        which must be running on each container.
    """
    def on_start(self,*a,**b):
        super(SystemManagementService,self).on_start(*a,**b)
        self.sender = EventPublisher()

    def on_quit(self,*a,**b):
        self.sender.close()

    def perform_action(self, predicate, action):
        userid = None # get from context
        self.sender.publish_event(event_type=OT.ContainerManagementRequest, origin=userid, predicate=predicate, action=action)

    def set_log_level(self, logger='', level='', recursive=False):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.ChangeLogLevel, logger=logger, level=level, recursive=recursive))


    def reset_policy_cache(self, headers=None, timeout=None):
        """Clears and reloads the policy caches in all of the containers.

        @throws BadRequest    None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.ResetPolicyCache))

    def trigger_garbage_collection(self):
        """Triggers a garbage collection in all containers

        @throws BadRequest    None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.TriggerGarbageCollection))

    def trigger_container_snapshot(self, snapshot_id='', include_snapshots=None, exclude_snapshots=None,
                                   take_at_time='', clear_all=False, persist_snapshot=True, snapshot_kwargs=None):

        if not snapshot_id:
            snapshot_id = get_ion_ts()
        if not snapshot_kwargs:
            snapshot_kwargs = {}

        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.TriggerContainerSnapshot,
                                                               snapshot_id=snapshot_id,
                                                               include_snapshots=include_snapshots,
                                                               exclude_snapshots=exclude_snapshots,
                                                               take_at_time=take_at_time,
                                                               clear_all=clear_all,
                                                               persist_snapshot=persist_snapshot,
                                                               snapshot_kwargs=snapshot_kwargs))
        log.info("Event to trigger container snapshots sent. snapshot_id=%s" % snapshot_id)

    def start_gevent_block(self, alarm_mode=False):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.StartGeventBlock, alarm_mode=alarm_mode))

    def stop_gevent_block(self):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.StopGeventBlock))

    def prepare_system_shutdown(self, mode=''):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.PrepareSystemShutdown, mode=mode))
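Per the class docstring, the requests published here are consumed by ion.processes.event.container_manager.ContainerManager running in each container. That listener is not reproduced in this example; the fragment below is only a rough sketch of what the receiving side could look like, assuming it subscribes to ContainerManagementRequest events and checks the predicate before applying the action (the helper functions are hypothetical).

# Sketch (assumption): receiving side of the ContainerManagementRequest events published
# by perform_action() above; the real listener is ContainerManager.
from pyon.event.event import EventSubscriber
from pyon.public import OT

def on_management_request(event, headers):
    # event.predicate selects which containers should act (e.g. ALL_CONTAINERS_INSTANCE);
    # event.action is the IonObject describing what to do (ChangeLogLevel, ResetPolicyCache, ...).
    if predicate_matches_this_container(event.predicate):   # hypothetical helper
        apply_container_action(event.action)                 # hypothetical helper

request_listener = EventSubscriber(event_type=OT.ContainerManagementRequest,
                                   callback=on_management_request)
request_listener.start()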
Code Example #6
File: test_event.py Project: dstuebe/pyon
    def test_base_subscriber_as_catchall(self):
        ar = event.AsyncResult()
        gq = queue.Queue()
        self.count = 0

        def cb(*args, **kwargs):
            self.count += 1
            gq.put(args[0])
            if self.count == 2:
                ar.set()

        sub = EventSubscriber(node=self.container.node, callback=cb)
        pub1 = self.TestEventPublisher(node=self.container.node)
        pub2 = EventPublisher(node=self.container.node)

        self._listen(sub)

        pub1.create_and_publish_event(origin="some", description="1")
        pub2.create_and_publish_event(origin="other", description="2")

        ar.get(timeout=5)

        res = []
        for x in xrange(self.count):
            res.append(gq.get(timeout=5))

        self.assertEquals(len(res), 2)
        self.assertEquals(res[0].description, "1")
        self.assertEquals(res[1].description, "2")
Code Example #7
File: alerts.py Project: mbarry02/coi-services
    def publish_alert(self):
        """
        """
        event_data = self.make_event_data()
        print '########## publishing: ' + event_data['sub_type']
        pub = EventPublisher()
        pub.publish_event(**event_data)
Code Example #8
def process_oms_event():

    json_params = {}

    # oms direct request
    if request.data:
        json_params  = json_loads(str(request.data))
        log.debug('ServiceGatewayService:process_oms_event request.data:  %s', json_params)

    #validate payload
    if 'platform_id' not in json_params or 'message' not in json_params:
        log.warning('Invalid OMS event format. payload_data: %s', json_params)
        #return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

    #prepare the event information
    try:
        #create a publisher to relay OMS events into the system as DeviceEvents
        event_publisher = EventPublisher()

        event_publisher.publish_event(
            event_type='OMSDeviceStatusEvent',
            origin_type='OMS Platform',
            origin=json_params.get('platform_id', 'NOT PROVIDED'),
            sub_type='',
            description = json_params.get('message', ''),
            status_details = json_params)
    except Exception, e:
        log.error('Could not publish OMS event: %s. Event data: %s', e.message, json_params)
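Once relayed, the OMSDeviceStatusEvent can be consumed like any other device event inside the system. The following is a hypothetical consumer sketch, not part of the gateway service; the handler function is an assumed placeholder.

# Sketch (assumption): a consumer of the relayed OMS events; handle_oms_platform_event
# is a hypothetical placeholder, not part of the gateway code.
from pyon.event.event import EventSubscriber

def on_oms_event(event, headers):
    # origin carries the platform_id; status_details carries the raw OMS payload
    handle_oms_platform_event(event.origin, event.status_details)

oms_sub = EventSubscriber(event_type='OMSDeviceStatusEvent', callback=on_oms_event)
oms_sub.start()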
Code Example #9
class SystemManagementService(BaseSystemManagementService):
    """ container management requests are handled by the event listener
        ion.processes.event.container_manager.ContainerManager
        which must be running on each container.
    """
    def on_start(self,*a,**b):
        super(SystemManagementService,self).on_start(*a,**b)
        self.sender = EventPublisher()
    def on_quit(self,*a,**b):
        self.sender.close()
    def perform_action(self, predicate, action):
        userid = None # get from context
        self.sender.publish_event(event_type=OT.ContainerManagementRequest, origin=userid, predicate=predicate, action=action)
    def set_log_level(self, logger='', level='', recursive=False):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.ChangeLogLevel, logger=logger, level=level, recursive=recursive))


    def reset_policy_cache(self, headers=None, timeout=None):
        """Clears and reloads the policy caches in all of the containers.

        @throws BadRequest    None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.ResetPolicyCache))

    def trigger_garbage_collection(self):
        """Triggers a garbage collection in all containers

        @throws BadRequest    None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject(OT.TriggerGarbageCollection))
Code Example #10
File: test_event.py Project: ooici-dm/pyon
    def test_pub_on_different_origins(self):
        ar = event.AsyncResult()
        gq = queue.Queue()
        self.count = 0

        def cb(*args, **kwargs):
            self.count += 1
            gq.put(args[0])
            if self.count == 3:
                ar.set()

        sub = EventSubscriber(event_type="ResourceEvent", callback=cb)
        pub = EventPublisher(event_type="ResourceEvent")

        self._listen(sub)

        pub.publish_event(origin="one", description="1")
        pub.publish_event(origin="two", description="2")
        pub.publish_event(origin="three", description="3")

        ar.get(timeout=5)

        res = []
        for x in xrange(self.count):
            res.append(gq.get(timeout=5))

        self.assertEquals(len(res), 3)
        self.assertEquals(res[0].description, "1")
        self.assertEquals(res[1].description, "2")
        self.assertEquals(res[2].description, "3")
Code Example #11
File: status_manager.py Project: ednad/coi-services
def publish_event_for_diagnostics():  # pragma: no cover
    """
    Convenience method to publish the event that triggers diagnostic
    information about the statuses kept in each running platform agent.

    ><> from ion.agents.platform.status_manager import publish_event_for_diagnostics
    ><> publish_event_for_diagnostics()

and something like the following will be logged:

2013-05-17 17:25:16,076 INFO Dummy-247 ion.agents.platform.status_manager:760 'MJ01C': (99cb3e71302a4e5ca0c137292103e357) statuses:
                                           AGGREGATE_COMMS     AGGREGATE_DATA      AGGREGATE_LOCATION  AGGREGATE_POWER
        d231ccba8d674b4691b039ceecec8d95 : STATUS_UNKNOWN      STATUS_UNKNOWN      STATUS_UNKNOWN      STATUS_UNKNOWN
        40c787fc727a4734b219fde7c8df7543 : STATUS_UNKNOWN      STATUS_UNKNOWN      STATUS_UNKNOWN      STATUS_UNKNOWN
        55ee7225435444e3a862d7ceaa9d1875 : STATUS_OK           STATUS_OK           STATUS_OK           STATUS_OK
        1d27e0c2723149cc9692488dced7dd95 : STATUS_UNKNOWN      STATUS_UNKNOWN      STATUS_UNKNOWN      STATUS_UNKNOWN
                               aggstatus : STATUS_OK           STATUS_OK           STATUS_OK           STATUS_OK
                           rollup_status : STATUS_OK           STATUS_OK           STATUS_OK           STATUS_OK
    """

    from pyon.event.event import EventPublisher
    ep = EventPublisher()
    evt = dict(event_type='DeviceStatusEvent', sub_type='diagnoser', origin='command_line')
    print("publishing: %s" % str(evt))
    ep.publish_event(**evt)
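On the receiving end, the platform agent's status manager is assumed to subscribe to DeviceStatusEvent and, when sub_type is 'diagnoser', log its status tables (producing output like the block quoted in the docstring above). A rough sketch of such a subscriber, with a hypothetical helper, might be:

# Sketch (assumption): how a status manager might react to the diagnostic event above;
# log_status_tables is a hypothetical placeholder.
from pyon.event.event import EventSubscriber

def on_device_status_event(event, headers):
    if event.sub_type == 'diagnoser':
        log_status_tables()   # e.g. emit the aggstatus / rollup_status rows shown above

diag_sub = EventSubscriber(event_type='DeviceStatusEvent', callback=on_device_status_event)
diag_sub.start()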
Code Example #12
    def test_event_in_stream_out_transform(self):
        """
        Test the event-in/stream-out transform
        """

        stream_id, _ = self.pubsub.create_stream('test_stream', exchange_point='science_data')
        self.exchange_cleanup.append('science_data')

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='EventToStreamTransform',
            description='For testing an event-in/stream-out transform')
        process_definition.executable['module']= 'ion.processes.data.transforms.event_in_stream_out_transform'
        process_definition.executable['class'] = 'EventToStreamTransform'
        proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = 'test_queue'
        config.process.exchange_point = 'science_data'
        config.process.publish_streams.output = stream_id
        config.process.event_type = 'ExampleDetectableEvent'
        config.process.variables = ['voltage', 'temperature' ]

        # Schedule the process
        pid = self.process_dispatcher.schedule_process(process_definition_id=proc_def_id, configuration=config)
        self.addCleanup(self.process_dispatcher.cancel_process,pid)

        #---------------------------------------------------------------------------------------------
        # Create a subscriber for testing
        #---------------------------------------------------------------------------------------------

        ar_cond = gevent.event.AsyncResult()
        def subscriber_callback(m, r, s):
            ar_cond.set(m)
        sub = StandaloneStreamSubscriber('sub', subscriber_callback)
        self.addCleanup(sub.stop)
        sub_id = self.pubsub.create_subscription('subscription_cond',
            stream_ids=[stream_id],
            exchange_name='sub')
        self.pubsub.activate_subscription(sub_id)
        self.queue_cleanup.append(sub.xn.queue)
        sub.start()

        gevent.sleep(4)

        #---------------------------------------------------------------------------------------------
        # Publish an event. The transform has been configured to receive this event
        #---------------------------------------------------------------------------------------------

        event_publisher = EventPublisher("ExampleDetectableEvent")
        event_publisher.publish_event(origin = 'fake_origin', voltage = '5', temperature = '273')

        # Assert that the transform processed the event and published data on the output stream
        result_cond = ar_cond.get(timeout=10)
        self.assertTrue(result_cond)
Code Example #13
File: alerts.py Project: ednad/coi-services
    def publish_alert(self):
        """
        Publishes the alert to ION.
        """
        event_data = self.make_event_data()
        log.trace("publishing alert: %s", event_data)
        pub = EventPublisher()
        pub.publish_event(**event_data)
Code Example #14
File: transform.py Project: swarbhanu/pyon
    def call_process(self, packet, headers=None):
        try:
            self.process(packet)
        except Exception as e:
            log.exception('Unhandled exception caught in transform process')
            event_publisher = EventPublisher()
            event_publisher.publish_event(origin=self._transform_id, event_type='ExceptionEvent',
                exception_type=str(type(e)), exception_message=e.message)
Code Example #15
File: shell_utils.py Project: ednad/coi-services
def publish_link_event(up_down, terrestrial_remote=2):
    """
    """
    status = TelemetryStatusType.AVAILABLE if up_down \
        else TelemetryStatusType.UNAVAILABLE
    platform_id = tcaa_args['terrestrial_platform_id'] if terrestrial_remote \
        else tcaa_args['remote_platform_id']
    
    pub = EventPublisher()
    if terrestrial_remote == 0:
        pub.publish_event(
            event_type='PlatformTelemetryEvent',
            origin=tcaa_args['terrestrial_platform_id'],
            status = status)
        
    elif terrestrial_remote == 1:
        pub.publish_event(
            event_type='PlatformTelemetryEvent',
            origin=tcaa_args['remote_platform_id'],
            status = status)
        
    elif terrestrial_remote == 2:
        pub.publish_event(
            event_type='PlatformTelemetryEvent',
            origin=tcaa_args['terrestrial_platform_id'],
            status = status)
        pub.publish_event(
            event_type='PlatformTelemetryEvent',
            origin=tcaa_args['remote_platform_id'],
            status = status)
        
    else:
        raise ValueError('terrestrial_remote must be in range [0,2].')
Code Example #16
def validate_salinity_array(a, context={}):
    from pyon.agent.agent import ResourceAgentState
    from pyon.event.event import  EventPublisher
    from pyon.public import OT

    stream_id = context['stream_id']
    dataprocess_id = context['dataprocess_id']

    event_publisher = EventPublisher(OT.DeviceStatusAlertEvent)

    event_publisher.publish_event(  origin = stream_id, values=[dataprocess_id], description="Invalid value for salinity")
Code Example #17
class ProcessDispatcherSimpleAPIClient(object):

    # State to use when state returned from PD is None
    unknown_state = "400-PENDING"

    state_map = {
        ProcessStateEnum.SPAWN: '500-RUNNING',
        ProcessStateEnum.TERMINATE: '700-TERMINATED',
        ProcessStateEnum.ERROR: '850-FAILED'
    }

    def __init__(self, name, **kwargs):
        self.real_client = ProcessDispatcherServiceClient(to_name=name, **kwargs)
        self.event_pub = EventPublisher()

    def dispatch_process(self, upid, spec, subscribers, constraints=None,
                         immediate=False):

        name = spec.get('name')
        self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
            origin=name, origin_type="DispatchedHAProcess",
            state=ProcessStateEnum.SPAWN)
        process_def = ProcessDefinition(name=name)
        process_def.executable = {'module': spec.get('module'),
                'class': spec.get('class')}

        process_def_id = self.real_client.create_process_definition(process_def)

        pid = self.real_client.create_process(process_def_id)

        process_schedule = ProcessSchedule()

        sched_pid = self.real_client.schedule_process(process_def_id,
                process_schedule, configuration={}, process_id=pid)

        proc = self.real_client.read_process(sched_pid)
        dict_proc = {'upid': proc.process_id,
                'state': self.state_map.get(proc.process_state, self.unknown_state),
                }
        return dict_proc

    def terminate_process(self, pid):
        return self.real_client.cancel_process(pid)

    def describe_processes(self):
        procs = self.real_client.list_processes()
        dict_procs = []
        for proc in procs:
            dict_proc = {'upid': proc.process_id,
                    'state': self.state_map.get(proc.process_state, self.unknown_state),
                    }
            dict_procs.append(dict_proc)
        return dict_procs
Code Example #18
File: transforma.py Project: swarbhanu/pyon
class TransformEventPublisher(TransformEventProcess):

    def on_start(self):
        event_type = self.CFG.get_safe('process.event_type', '')

        self.publisher = EventPublisher(event_type=event_type)

    def publish_event(self, *args, **kwargs):
        raise NotImplementedError('Method publish_event not implemented')

    def on_quit(self):
        self.publisher.close()
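publish_event is left abstract here, so a concrete subclass supplies the actual event payload. A minimal, hypothetical subclass could simply forward its keyword arguments to the EventPublisher created in on_start:

# Sketch (assumption): a concrete subclass that forwards its kwargs to the publisher
# created in on_start(); the class name and fields are illustrative only.
class ExampleEventPublisher(TransformEventPublisher):

    def publish_event(self, *args, **kwargs):
        # kwargs might carry origin='...', description='...', etc.
        self.publisher.publish_event(**kwargs)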
Code Example #19
File: test_event.py Project: oldpatricka/pyon
    def test_pub_and_sub(self):
        ar = event.AsyncResult()
        def cb(*args, **kwargs):
            ar.set(args)
        sub = EventSubscriber(event_type="ResourceEvent", callback=cb, origin="specific")
        pub = EventPublisher(event_type="ResourceEvent")

        self._listen(sub)
        pub.publish_event(origin="specific", description="hello")

        evmsg, evheaders = ar.get(timeout=5)

        self.assertEquals(evmsg.description, "hello")
        self.assertAlmostEquals(int(evmsg.ts_created), int(get_ion_ts()), delta=5000)
Code Example #20
File: test_alarms.py Project: swarbhanu/coi-services
    def test_greater_than_interval(self):
        """
        test_greater_than_interval
        Test interval alarm and alarm event publishing for a greater than
        interval.
        """

        kwargs = {
            'name' : 'current_warning_interval',
            'stream_name' : 'fakestreamname',
            'value_id' : 'port_current',
            'message' : 'Current is above normal range.',
            'type' : StreamAlarmType.WARNING,
            'lower_bound' : 10.5,
            'lower_rel_op' : '<'
        }

        if TEST_ION_OBJECTS:
            # Create alarm object.
            alarm = IonObject('IntervalAlarmDef', **kwargs)
            alarm = construct_alarm_expression(alarm)
        else:
            alarm = IntervalAlarm(**kwargs)

        # This sequence will produce 5 alarms:
        # All clear on the first value,
        # Warning on the first 30,
        # All clear on the following 5.5,
        # Warning on the 15.1,
        # All clear on the following 3.3.
        self._event_count = 5
        test_vals = [5.5, 5.4, 5.5, 5.6, 30, 30.4, 5.5, 5.6, 15.1, 15.2,
                     15.3, 3.3, 3.4]

        pub = EventPublisher(event_type="StreamAlarmEvent",
            node=self.container.node)

        for x in test_vals:
            if TEST_ION_OBJECTS:
                (alarm, event_data) = eval_alarm(alarm, x)
                
            else:
                event_data = alarm.eval_alarm(x)

            if event_data:
                pub.publish_event(origin=self._resource_id, **event_data)
        
        self._async_event_result.get(timeout=30)
        
        """
Code Example #21
File: test_alarms.py Project: newbrough/coi-services
    def test_two_sided_interval(self):
        """
        test_two_sided_interval
        Test interval alarm and alarm event publishing for a closed
        interval.
        """

        kwargs = {
            "name": "temp_high_warning",
            "stream_name": "fakestreamname",
            "value_id": "temp",
            "message": "Temperature is above normal range.",
            "type": StreamAlarmType.WARNING,
            "lower_bound": 10.0,
            "lower_rel_op": "<",
            "upper_bound": 20.0,
            "upper_rel_op": "<",
        }

        # Create alarm object.
        alarm = IonObject("IntervalAlarmDef", **kwargs)
        alarm = construct_alarm_expression(alarm)

        # This sequence will produce 5 alarms.
        # 5.5 warning
        # 10.2 all clear
        # 23.3 warning
        # 17.5 all clear
        # 8.8 warning
        self._event_count = 5
        test_vals = [5.5, 5.5, 5.4, 4.6, 4.5, 10.2, 10.3, 10.5, 15.5, 23.3, 23.3, 24.8, 17.5, 16.5, 12.5, 8.8, 7.7]

        pub = EventPublisher(event_type="StreamAlarmEvent", node=self.container.node)

        for x in test_vals:
            event_data = None
            eval_alarm(alarm, x)
            if alarm.first_time == 1:
                event_data = make_event_data(alarm)

            elif alarm.first_time > 1:
                if alarm.status != alarm.old_status:
                    event_data = make_event_data(alarm)

            if event_data:
                pub.publish_event(origin=self._resource_id, **event_data)

        self._async_event_result.get(timeout=30)

        """ 
Code Example #22
File: test_alarms.py Project: swarbhanu/coi-services
    def test_two_sided_interval(self):
        """
        test_two_sided_interval
        Test interval alarm and alarm event publishing for a closed
        interval.
        """

        kwargs = {
            'name' : 'temp_high_warning',
            'stream_name' : 'fakestreamname',
            'value_id' : 'temp',
            'message' : 'Temperature is above normal range.',
            'type' : StreamAlarmType.WARNING,
            'lower_bound' : 10.0,
            'lower_rel_op' : '<',
            'upper_bound' : 20.0,
            'upper_rel_op' : '<'            
        }

        if TEST_ION_OBJECTS:
            # Create alarm object.
            alarm = IonObject('IntervalAlarmDef', **kwargs)
            alarm = construct_alarm_expression(alarm)
        else:
            alarm = IntervalAlarm(**kwargs)

        # This sequence will produce 5 alarms.
        self._event_count = 5
        test_vals = [5.5, 5.5, 5.4, 4.6, 4.5, 10.2, 10.3, 10.5, 15.5,
                     23.3, 23.3, 24.8, 17.5, 16.5, 12.5, 8.8, 7.7]

        pub = EventPublisher(event_type="StreamAlarmEvent",
            node=self.container.node)

        for x in test_vals:
            if TEST_ION_OBJECTS:
                (alarm, event_data) = eval_alarm(alarm, x)
                
            else:
                event_data = alarm.eval_alarm(x)

            if event_data:
                pub.publish_event(origin=self._resource_id, **event_data)
        
        self._async_event_result.get(timeout=30)
 

 
        
Code Example #23
class EventAlertTransform(TransformEventListener):
    def on_start(self):
        log.warn("EventAlertTransform.on_start()")
        super(EventAlertTransform, self).on_start()

        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # get the algorithm to use
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

        self.max_count = self.CFG.get_safe("process.max_count", 1)
        self.time_window = self.CFG.get_safe("process.time_window", 0)

        self.counter = 0
        self.event_times = []

        # -------------------------------------------------------------------------------------
        # Create the publisher that will publish the Alert message
        # -------------------------------------------------------------------------------------

        self.event_publisher = EventPublisher()

    def process_event(self, msg, headers):
        """
        The callback method.
        If the events satisfy the criteria, publish an alert event.
        """

        self.counter += 1

        self.event_times.append(msg.ts_created)

        if self.counter == self.max_count:

            time_diff = self.event_times[self.max_count - 1] - self.event_times[0]

            if time_diff <= self.time_window:

                self.publish()
                self.counter = 0
                self.event_times = []

    def publish(self):

        # -------------------------------------------------------------------------------------
        # publish an alert event
        # -------------------------------------------------------------------------------------
        self.event_publisher.publish_event(
            event_type="DeviceEvent", origin="EventAlertTransform", description="An alert event being published."
        )
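To observe the alert, a caller can subscribe for the DeviceEvent that publish() emits, using the same AsyncResult pattern as the pyon tests above. This is an illustrative sketch only; it assumes the transform is already running and receiving its input events.

# Sketch (assumption): catch the alert emitted by EventAlertTransform.publish().
from gevent.event import AsyncResult
from pyon.event.event import EventSubscriber

alert_received = AsyncResult()

def on_alert(event, headers):
    alert_received.set(event)

alert_sub = EventSubscriber(event_type='DeviceEvent',
                            origin='EventAlertTransform',
                            callback=on_alert)
alert_sub.start()

# ... publish max_count qualifying events within time_window ...

alert = alert_received.get(timeout=10)
assert alert.description == 'An alert event being published.'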
Code Example #24
class SystemManagementService(BaseSystemManagementService):
    """ container management requests are handled by the event listener
        ion.processes.event.container_manager.ContainerManager
        which must be running on each container.
    """
    def on_start(self,*a,**b):
        super(SystemManagementService,self).on_start(*a,**b)
        self.sender = EventPublisher()
    def on_quit(self,*a,**b):
        self.sender.close()
    def perform_action(self, predicate, action):
        userid = None # get from context
        self.sender.publish_event(event_type="ContainerManagementRequest", origin=userid, predicate=predicate, action=action)
    def set_log_level(self, logger='', level='', recursive=False):
        self.perform_action(ALL_CONTAINERS_INSTANCE, IonObject('ChangeLogLevel', logger=logger, level=level, recursive=recursive))
Code Example #25
File: test_event.py Project: pkediyal/pyon
    def test_pub_and_sub(self):
        ar = event.AsyncResult()
        gq = queue.Queue()
        self.count = 0

        def cb(*args, **kwargs):
            self.count += 1
            gq.put(args[0])
            if self.count == 2:
                ar.set()

        sub = EventSubscriber(event_type="ResourceEvent", callback=cb, origin="specific")
        pub = EventPublisher(event_type="ResourceEvent")

        self._listen(sub)
        pub.publish_event(origin="specific", description="hello")

        event_obj = bootstrap.IonObject('ResourceEvent', origin='specific', description='more testing')
        self.assertEqual(event_obj, pub.publish_event_object(event_obj))

        with self.assertRaises(BadRequest) as cm:
            event_obj = bootstrap.IonObject('ResourceEvent', origin='specific', description='more testing', ts_created='2423')
            pub.publish_event_object(event_obj)
        self.assertIn( 'The ts_created value is not a valid timestamp',cm.exception.message)

        with self.assertRaises(BadRequest) as cm:
            event_obj = bootstrap.IonObject('ResourceEvent', origin='specific', description='more testing', ts_created='1000494978462')
            pub.publish_event_object(event_obj)
        self.assertIn( 'This ts_created value is too old',cm.exception.message)

        with self.assertRaises(BadRequest) as cm:
            event_obj = bootstrap.IonObject('ResourceEvent', origin='specific', description='more testing')
            event_obj._id = '343434'
            pub.publish_event_object(event_obj)
        self.assertIn( 'The event object cannot contain a _id field',cm.exception.message)

        ar.get(timeout=5)

        res = []
        for x in xrange(self.count):
            res.append(gq.get(timeout=5))

        self.assertEquals(len(res), self.count)
        self.assertEquals(res[0].description, "hello")
        self.assertAlmostEquals(int(res[0].ts_created), int(get_ion_ts()), delta=5000)

        self.assertEquals(res[1].description, "more testing")
        self.assertAlmostEquals(int(res[1].ts_created), int(get_ion_ts()), delta=5000)
Code Example #26
File: test_alarms.py Project: newbrough/coi-services
    def test_greater_than_interval(self):
        """
        test_greater_than_interval
        Test interval alarm and alarm event publishing for a greater than
        interval.
        """

        kwargs = {
            "name": "current_warning_interval",
            "stream_name": "fakestreamname",
            "value_id": "port_current",
            "message": "Current is above normal range.",
            "type": StreamAlarmType.WARNING,
            "lower_bound": 10.5,
            "lower_rel_op": "<",
        }

        # Create alarm object.
        alarm = IonObject("IntervalAlarmDef", **kwargs)
        alarm = construct_alarm_expression(alarm)

        # This sequence will produce 5 alarms:
        # Warning on the first value,
        # All clear on 30,
        # Warning on 5.5
        # All clear on 15.1
        # Warning on 3.3
        self._event_count = 5
        test_vals = [5.5, 5.4, 5.5, 5.6, 30, 30.4, 5.5, 5.6, 15.1, 15.2, 15.3, 3.3, 3.4]

        pub = EventPublisher(event_type="StreamAlarmEvent", node=self.container.node)

        for x in test_vals:
            event_data = None
            eval_alarm(alarm, x)
            if alarm.first_time == 1:
                event_data = make_event_data(alarm)

            elif alarm.first_time > 1:
                if alarm.status != alarm.old_status:
                    event_data = make_event_data(alarm)

            if event_data:
                pub.publish_event(origin=self._resource_id, **event_data)

        self._async_event_result.get(timeout=30)

        """
Code Example #27
File: test_alarms.py Project: newbrough/coi-services
    def test_less_than_interval(self):
        """
        test_less_than_interval
        Test interval alarm and alarm event publishing for a less than
        interval.
        """

        kwargs = {
            "name": "reserve_power_warning",
            "stream_name": "fakestreamname",
            "value_id": "battery_level",
            "message": "Battery is below normal range.",
            "type": StreamAlarmType.WARNING,
            "upper_bound": 4.0,
            "upper_rel_op": "<",
        }

        # Create alarm object.
        alarm = IonObject("IntervalAlarmDef", **kwargs)
        alarm = construct_alarm_expression(alarm)

        # This sequence will produce 5 alarms:
        # 5.5 warning
        # 3.3 all clear
        # 4.5 warning
        # 3.3 all clear
        # 4.8 warning
        self._event_count = 5
        test_vals = [5.5, 5.5, 5.4, 4.6, 4.5, 3.3, 3.3, 4.5, 4.5, 3.3, 3.3, 4.8]

        pub = EventPublisher(event_type="StreamAlarmEvent", node=self.container.node)

        for x in test_vals:
            event_data = None
            eval_alarm(alarm, x)
            if alarm.first_time == 1:
                event_data = make_event_data(alarm)

            elif alarm.first_time > 1:
                if alarm.status != alarm.old_status:
                    event_data = make_event_data(alarm)

            if event_data:
                pub.publish_event(origin=self._resource_id, **event_data)

        self._async_event_result.get(timeout=30)

        """        
Code Example #28
    def on_start(self):

        #---------------------------------------------------------------------------------------------------
        # Get the event Repository
        #---------------------------------------------------------------------------------------------------

        self.event_repo = self.container.instance.event_repository

        self.smtp_client = setting_up_smtp_client()

        self.ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'

        #---------------------------------------------------------------------------------------------------
        # Create an event processor
        #---------------------------------------------------------------------------------------------------

        self.event_processor = EmailEventProcessor(self.smtp_client)

        #---------------------------------------------------------------------------------------------------
        # load event originators, types, and table
        #---------------------------------------------------------------------------------------------------

        self.event_types = CFG.event.types
        self.event_table = {}

        #---------------------------------------------------------------------------------------------------
        # Get the clients
        #---------------------------------------------------------------------------------------------------

        self.discovery = DiscoveryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.datastore_manager = DatastoreManager()

        self.event_publisher = EventPublisher()
        self.scheduler_service = SchedulerService()
Code Example #29
    def on_start(self):

        #---------------------------------------------------------------------------------------------------
        # Get the event Repository
        #---------------------------------------------------------------------------------------------------

        self.event_repo = self.container.instance.event_repository

        self.smtp_client = setting_up_smtp_client()

        self.ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'

        #---------------------------------------------------------------------------------------------------
        # Create an event processor
        #---------------------------------------------------------------------------------------------------

        self.event_processor = EmailEventProcessor(self.smtp_client)

        #---------------------------------------------------------------------------------------------------
        # load event originators, types, and table
        #---------------------------------------------------------------------------------------------------

        self.notifications = {}

        #---------------------------------------------------------------------------------------------------
        # Get the clients
        #---------------------------------------------------------------------------------------------------

        self.discovery = DiscoveryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.event_publisher = EventPublisher()

        self.start_time = UserNotificationService.makeEpochTime(self.__now())
Code Example #30
    def on_start(self):
        log.debug('EventAlertTransform.on_start()')
        super(EventAlertTransform, self).on_start()

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # get the algorithm to use
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

        self.timer_origin = self.CFG.get_safe('process.timer_origin', 'Interval Timer')
        self.instrument_origin = self.CFG.get_safe('process.instrument_origin', '')

        self.counter = 0
        self.event_times = []

        #-------------------------------------------------------------------------------------
        # Set up a listener for instrument events
        #-------------------------------------------------------------------------------------

        self.instrument_event_queue = gevent.queue.Queue()

        def instrument_event_received(message, headers):
            log.debug("EventAlertTransform received an instrument event here::: %s" % message)
            self.instrument_event_queue.put(message)

        self.instrument_event_subscriber = EventSubscriber(origin = self.instrument_origin,
            callback=instrument_event_received)

        self.instrument_event_subscriber.start()

        #-------------------------------------------------------------------------------------
        # Create the publisher that will publish the Alert message
        #-------------------------------------------------------------------------------------

        self.event_publisher = EventPublisher()
Code Example #31
    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()
        #container = Container()
        #print 'starting container'
        #container.start()
        #print 'started container'

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        self.RR = ResourceRegistryServiceClient(node=self.container.node)
        self.RR2 = EnhancedResourceRegistryClient(self.RR)
        self.OMS = ObservatoryManagementServiceClient(node=self.container.node)
        self.org_management_service = OrgManagementServiceClient(node=self.container.node)
        self.IMS =  InstrumentManagementServiceClient(node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        #print 'TestObservatoryManagementServiceIntegration: started services'

        self.event_publisher = EventPublisher()
Code Example #32
    def on_start(self):

        ImmediateProcess.on_start(self)

        # necessary arguments, passed in via the configuration kwarg to schedule_process; kept in the 'process' namespace to avoid collisions
        fuc_id = self.CFG.get_safe('process.fuc_id',
                                   None)  # FileUploadContext ID

        # Clients
        self.object_store = self.container.object_store
        self.resource_registry = self.container.resource_registry
        self.event_publisher = EventPublisher(OT.ResetQCEvent)
        self.data_product_management = DataProductManagementServiceProcessClient(
            process=self)
        self.create_map()

        # run process
        if fuc_id:
            self.process(fuc_id)

        # cleanup
        self.event_publisher.close()
Code Example #33
    def __init__(self, name, real_client=None, **kwargs):
        self.container = kwargs.get('container')
        if self.container:
            del(kwargs['container'])
        self.service_id = kwargs.get('service_id')
        if self.service_id:
            del(kwargs['service_id'])

        if real_client is not None:
            self.real_client = real_client
        else:
            self.real_client = ProcessDispatcherServiceClient(to_name=name, **kwargs)
        self.event_pub = EventPublisher()
Code Example #34
    def mixin_on_init(self):
        """
        """
        self._server = None
        self._client = None
        self._other_host = self.CFG.other_host
        self._other_port = self.CFG.other_port
        self._this_port = self.CFG.this_port
        self._platform_resource_id = self.CFG.platform_resource_id
        self._link_status = TelemetryStatusType.UNAVAILABLE
        self._event_subscriber = None
        self._server_greenlet = None
        self._publisher = EventPublisher()
Code Example #35
    def _acquire_sample(cls, config, publisher, unlock_new_data_callback, update_new_data_check_attachment):
        """
        Ensures required keys (such as stream_id) are available from config, configures the publisher and then calls:
             BaseDataHandler._constraints_for_new_request (only if config does not contain 'constraints')
             BaseDataHandler._publish_data passing BaseDataHandler._get_data as a parameter
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        @param publisher the publisher used to publish data
        @param unlock_new_data_callback BaseDataHandler callback function to allow conditional unlocking of the BaseDataHandler._semaphore
        @param update_new_data_check_attachment classmethod to update the external dataset resources file list attachment
        @throws InstrumentParameterException if the data constraints are not a dictionary
        @retval None
        """
        log.debug('start _acquire_sample: config={0}'.format(config))

        cls._init_acquisition_cycle(config)

        constraints = get_safe(config, 'constraints')
        if not constraints:
            gevent.getcurrent().link(unlock_new_data_callback)
            try:
                constraints = cls._constraints_for_new_request(config)
            except NoNewDataWarning:
                #log.info(nndw.message)
                if get_safe(config, 'TESTING'):
                    #log.debug('Publish TestingFinished event')
                    pub = EventPublisher('DeviceCommonLifecycleEvent')
                    pub.publish_event(origin='BaseDataHandler._acquire_sample', description='TestingFinished')
                return

            if constraints is None:
                raise InstrumentParameterException("Data constraints returned from _constraints_for_new_request cannot be None")
            config['constraints'] = constraints
        elif isinstance(constraints, dict):
            addnl_constr = cls._constraints_for_historical_request(config)
            if not addnl_constr is None and isinstance(addnl_constr, dict):
                constraints.update(addnl_constr)
        else:
            raise InstrumentParameterException('Data constraints must be of type \'dict\':  {0}'.format(constraints))

        cls._publish_data(publisher, cls._get_data(config))

        if 'set_new_data_check' in config:
            update_new_data_check_attachment(config['external_dataset_res_id'], config['set_new_data_check'])

        # Publish a 'TestingFinished' event
        if get_safe(config, 'TESTING'):
            #log.debug('Publish TestingFinished event')
            pub = EventPublisher('DeviceCommonLifecycleEvent')
            pub.publish_event(origin='BaseDataHandler._acquire_sample', description='TestingFinished')
Code Example #36
    def on_start(self):
        log.debug('EventAlertTransform.on_start()')
        super(EventAlertTransform, self).on_start()

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # get the algorithm to use
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

        self.timer_origin = self.CFG.get_safe('process.timer_origin',
                                              'Interval Timer')
        self.instrument_origin = self.CFG.get_safe('process.instrument_origin',
                                                   '')

        self.event_times = []

        #-------------------------------------------------------------------------------------
        # Set up a listener for instrument events
        #-------------------------------------------------------------------------------------

        self.instrument_event_queue = gevent.queue.Queue()

        def instrument_event_received(message, headers):
            log.debug(
                "EventAlertTransform received an instrument event here::: %s" %
                message)
            self.instrument_event_queue.put(message)

        self.instrument_event_subscriber = EventSubscriber(
            origin=self.instrument_origin, callback=instrument_event_received)

        self.instrument_event_subscriber.start()

        #-------------------------------------------------------------------------------------
        # Create the publisher that will publish the Alert message
        #-------------------------------------------------------------------------------------

        self.event_publisher = EventPublisher()
Code Example #37
File: test_event.py Project: ooici-dm/pyon
    def test_subscriber_listening_for_specific_origin(self):
        ar = event.AsyncResult()
        self.count = 0
        def cb(*args, **kwargs):
            self.count += 1
            ar.set(args[0])

        sub = EventSubscriber(event_type="ResourceEvent", origin="specific", callback=cb)
        pub = EventPublisher(event_type="ResourceEvent", node=self.container.node)

        self._listen(sub)

        pub.publish_event(origin="notspecific", description="1")
        pub.publish_event(origin="notspecific", description="2")
        pub.publish_event(origin="specific", description="3")
        pub.publish_event(origin="notspecific", description="4")

        evmsg = ar.get(timeout=5)
        self.assertEquals(self.count, 1)
        self.assertEquals(evmsg.description, "3")
Code Example #38
class PDBridgeBackend(object):
    """Scheduling backend to PD that bridges to external CEI Process Dispatcher
    """
    def __init__(self, conf):
        self.dashi = None
        self.consumer_thread = None

        # grab config parameters used to connect to backend Process Dispatcher
        try:
            self.uri = conf.uri
            self.topic = conf.topic
            self.exchange = conf.exchange
        except AttributeError, e:
            log.warn("Needed Process Dispatcher config not found: %s", e)
            raise

        self.dashi_name = self.topic + "_bridge"
        self.pd_process_subscribers = [(self.dashi_name, "process_state")]

        self.event_pub = EventPublisher()
Code Example #39
    def test_send_notification_emails(self):
        # create user with email address in RR
        user_identity_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {
            "name": "user1_info",
            "contact": {
                "email": '*****@*****.**'
            }
        })
        self.imc.create_user_info(user_id, user_info_object)

        # create first notification
        notification_object = IonObject(
            RT.NotificationRequest, {
                "name": "notification1",
                "origin_list": ['Some_Resource_Agent_ID1'],
                "events_list": ['ResourceLifecycleEvent']
            })
        self.unsc.create_notification(notification_object, user_id)
        # create second notification
        notification_object = IonObject(
            RT.NotificationRequest, {
                "name": "notification2",
                "origin_list": ['Some_Resource_Agent_ID2'],
                "events_list": ['DataEvent']
            })
        self.unsc.create_notification(notification_object, user_id)

        # publish an event for each notification to generate the emails
        # this can't be easily checked in SW so need to check for these at the [email protected] account
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event")
        de_publisher = EventPublisher("DataEvent")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2',
                                   description="DE test event")
        gevent.sleep(1)
Code Example #40
    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()
        config.bootstrap.use_es = True

        self._start_container()
        self.addCleanup(TestActivateInstrumentIntegration.es_cleanup)

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        #setup listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()
Code Example #41
    def setUp(self):
        # Start container
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()

        #setup listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()

        self.egg_url_good = "http://sddevrepo.oceanobservatories.org/releases/seabird_sbe37smb_ooicore-0.0.1a-py2.7.egg"
        self.egg_url_bad = "http://sddevrepo.oceanobservatories.org/releases/seabird_sbe37smb_ooicore-0.1a-py2.7.egg"
        self.egg_url_404 = "http://sddevrepo.oceanobservatories.org/releases/completely_made_up_404.egg"
Code Example #42
class NotificationSentScanner(object):
    def __init__(self, container=None):

        self.container = container or bootstrap.container_instance
        self.object_store = self.container.object_store
        self.resource_registry = self.container.resource_registry
        self.event_publisher = EventPublisher()

        # next_midnight is used to flush the counts (see NOTE in method)
        self.next_midnight = self._midnight(days=1)

        self.persist_interval = 300  # interval in seconds to persist/reload counts TODO: use CFG
        self.time_last_persist = 0

        # initialize volatile counts (memory only, should be routinely persisted)
        self._initialize_counts()

    def process_events(self, event_list):
        notifications = set(
        )  # set() of notifications to disable, _disable_notifications can happen >1 depending on len(event_list)
        for e in event_list:
            # skip if not a NotificationEvent
            if e.type_ not in NOTIFICATION_EVENTS:
                continue
            user_id = e.user_id
            notification_id = e.notification_id
            notification_max = e.notification_max  # default value is zero indicating no max
            # initialize user_id if necessary
            if user_id not in self.counts:
                self.counts[user_id] = Counter()
            # increment counts (user_id key to allow ALL to be counted)
            self.counts[user_id][
                'all'] += 1  # tracks total notifications by user
            self.counts[user_id][notification_id] += 1
            self.counts_updated_since_persist = True
            # disable notification if notification_max reached
            if notification_max:
                if self.counts[user_id][notification_id] >= notification_max:
                    # TODO: this could be a dict so the key could be checked before insertion
                    notifications.add(self._disable_notification(notification_id))
        # update notifications that have been disabled
        if notifications:
            self._update_notifications(notifications)
        # only attempt to persist counts if there was an update
        if self.counts_updated_since_persist:
            if time.time() > (self.time_last_persist + self.persist_interval):
                self._persist_counts()
        # reset counts once the next midnight boundary has passed
        if time.time() > self.next_midnight:
            self._reset_counts()

    # NOTE: ObjectStore 'objects' contain '_id' and '_rev'
    def _initialize_counts(self):
        """ initialize the volatile (memory only) counts from ObjectStore if available """
        try:
            self.counts_obj = self.object_store.read_doc('notification_counts')
            # persisted as standard dicts, convert to Counter objects ignoring the ObjectStore '_id' and '_rev'
            self.counts = {
                k: Counter(v)
                for k, v in self.counts_obj.items()
                if not (k == '_id' or k == '_rev')
            }
        except NotFound:
            self.counts = {}
        self._persist_counts()

    def _persist_counts(self):
        """ persist the counts to ObjectStore """
        try:
            self.counts_obj = self.object_store.read_doc('notification_counts')
        except NotFound:
            self.object_store.create_doc({}, 'notification_counts')
            self.counts_obj = self.object_store.read_doc('notification_counts')
        # Counter objects cannot be persisted, convert to standard dicts (leaves '_id', '_rev' untouched)
        self.counts_obj.update({k: dict(v) for k, v in self.counts.items()})
        self.object_store.update_doc(self.counts_obj)
        self.time_last_persist = time.time()
        self.counts_updated_since_persist = False  # flag marking whether counts changed since the last persist

    def _reset_counts(self):
        """ clears the persisted counts """
        self.object_store.delete_doc('notification_counts')
        self._initialize_counts()  # NOTE: the NotificationRequest boolean disabled_by_system is reset by UNS
        self.next_midnight = self._midnight(days=1)

    def _disable_notification(self, notification_id):
        """ set the disabled_by_system boolean to True """
        notification = self.resource_registry.read(notification_id)
        notification.disabled_by_system = True
        return notification

    def _update_notifications(self, notifications):
        """ updates notifications then publishes ReloadUserInfoEvent """
        for n in notifications:
            self.resource_registry.update(n)
        self.event_publisher.publish_event(event_type=OT.ReloadUserInfoEvent)

    def _midnight(self, days=0):
        """ NOTE: this is midnight PDT (+0700) """
        dt = datetime.combine(date.today(), datetime.min.time()) + timedelta(
            days=days, hours=7)
        return (dt - datetime.utcfromtimestamp(0)).total_seconds()
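
To make the counting flow in process_events above concrete, here is a minimal standalone sketch of the per-user counting and max-threshold check. The tuples below are hypothetical stand-ins for the NotificationEvent attributes (user_id, notification_id, notification_max) handled by the scanner; only the counting logic is reproduced.

from collections import Counter

counts = {}          # user_id -> Counter mapping notification_id (and 'all') to a sent count
to_disable = set()   # notification ids that reached their configured maximum

# hypothetical (user_id, notification_id, notification_max) tuples standing in for NotificationEvents
events = [("u1", "n1", 2), ("u1", "n1", 2), ("u1", "n2", 0)]

for user_id, notification_id, notification_max in events:
    user_counts = counts.setdefault(user_id, Counter())
    user_counts['all'] += 1                  # total notifications sent to this user
    user_counts[notification_id] += 1
    if notification_max and user_counts[notification_id] >= notification_max:
        to_disable.add(notification_id)      # the scanner would disable and persist this notification

assert to_disable == set(["n1"])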
コード例 #43
0
 def on_start(self, *a, **b):
     super(SystemManagementService, self).on_start(*a, **b)
     self.sender = EventPublisher(process=self)
コード例 #44
0
    def test_event_detection(self):

        proc1 = self.container.proc_manager.procs_by_name['user_notification']

        # Create a user and get the user_id
        user = UserInfo(name='new_user')
        user_id, _ = self.rrc.create(user)

        # Create detection notification
        dfilt = DetectionFilterConfig()

        dfilt.processing['condition'] = 5
        dfilt.processing['comparator'] = '>'
        dfilt.processing['filter_field'] = 'voltage'

        dfilt.delivery['message'] = 'I got my detection event!'

        notification_id = self.unsc.create_detection_filter(
            event_type='ExampleDetectableEvent',
            event_subtype=None,
            origin='Some_Resource_Agent_ID1',
            origin_type=None,
            user_id=user_id,
            filter_config=dfilt)

        #---------------------------------------------------------------------------------
        # Create event subscription for resulting detection event
        #---------------------------------------------------------------------------------

        # Create an email notification so that when the DetectionEventProcessor
        # detects an event and fires its own output event, this will be caught by an
        # EmailEventProcessor and an email will be sent to the user

        notification_id_2 = self.unsc.create_email(
            event_type='DetectionEvent',
            event_subtype=None,
            origin='DetectionEventProcessor',
            origin_type=None,
            user_id=user_id,
            email='*****@*****.**',
            mode=DeliveryMode.UNFILTERED,
            message_header='Detection event',
            parser='parser',
            period=1)

        # Send event that is not detected
        # publish an event for each notification to generate the emails
        rle_publisher = EventPublisher("ExampleDetectableEvent")

        # since the voltage field in this event is less than 5, it will not be detected
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event",
                                    voltage=3)

        # Check at the end of the test to make sure this event never triggered a Detectable!

        # Send Event that is detected
        # publish an event for each notification to generate the emails

        # since the voltage field in this event is greater than 5, it WILL be detected
        rle_publisher = EventPublisher("ExampleDetectableEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event",
                                    voltage=10)

        #-------------------------------------------------------
        # make assertions
        #-------------------------------------------------------

        msg_tuple = proc1.event_processors[
            notification_id_2].smtp_client.sentmail.get(timeout=4)

        # The first event never triggered an email because the voltage was less than 5, so the queue is now empty
        self.assertTrue(proc1.event_processors[notification_id_2].smtp_client.
                        sentmail.empty())

        self.assertEquals(msg_tuple[1], '*****@*****.**')
        #self.assertEquals(msg_tuple[0], ION_NOTIFICATION_EMAIL_ADDRESS)

        # parse the message body
        message = msg_tuple[2]
        list_lines = message.split("\n")

        message_dict = {}
        for line in list_lines:
            key_item = line.split(": ")
            if key_item[0] == 'Subject':
                message_dict['Subject'] = key_item[1] + key_item[2]
            else:
                try:
                    message_dict[key_item[0]] = key_item[1]
                except IndexError as exc:
                    # these IndexError exceptions happen only because the message sometimes
                    # has successive \r\n (i.e. blank lines) and, therefore,
                    # the indexing goes out of range; such lines
                    # can safely be ignored, so we ignore the exceptions here.
                    pass

        #self.assertEquals(message_dict['From'], ION_NOTIFICATION_EMAIL_ADDRESS)
        self.assertEquals(message_dict['To'], '*****@*****.**')
        self.assertEquals(message_dict['Event'].rstrip('\r'), 'DetectionEvent')
        self.assertEquals(message_dict['Originator'].rstrip('\r'),
                          'DetectionEventProcessor')
        self.assertEquals(message_dict['Description'].rstrip('\r'),
                          'Event was detected by DetectionEventProcessor')
コード例 #45
0
    def test_sms(self):

        proc1 = self.container.proc_manager.procs_by_name['user_notification']

        # Create a user and get the user_id
        user = UserInfo(name='new_user')
        user_id, _ = self.rrc.create(user)

        # set up....
        notification_id = self.unsc.create_sms(
            event_type='ResourceLifecycleEvent',
            event_subtype=None,
            origin='Some_Resource_Agent_ID1',
            origin_type=None,
            user_id=user_id,
            phone='401-XXX-XXXX',
            provider='T-Mobile',
            message_header='message_header',
            parser='parser',
        )

        #------------------------------------------------------------------------------------------------------
        # Setup so as to be able to get the message and headers going into the
        # subscription callback method of the EmailEventProcessor
        #------------------------------------------------------------------------------------------------------

        # publish an event for each notification to generate the emails
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event")

        msg_tuple = proc1.event_processors[
            notification_id].smtp_client.sentmail.get(timeout=4)

        self.assertTrue(proc1.event_processors[notification_id].smtp_client.
                        sentmail.empty())

        message = msg_tuple[2]
        list_lines = message.split("\n")

        #-------------------------------------------------------
        # parse the message body
        #-------------------------------------------------------

        message_dict = {}
        for line in list_lines:
            key_item = line.split(": ")
            if key_item[0] == 'Subject':
                message_dict['Subject'] = key_item[1] + key_item[2]
            else:
                try:
                    message_dict[key_item[0]] = key_item[1]
                except IndexError as exc:
                    # these IndexError exceptions happen only because the message sometimes
                    # has successive \r\n (i.e. blank lines) and, therefore,
                    # the indexing goes out of range; such lines
                    # can safely be ignored, so we ignore the exceptions here.
                    pass

        #-------------------------------------------------------
        # make assertions
        #-------------------------------------------------------

        self.assertEquals(msg_tuple[1], '*****@*****.**')
        #self.assertEquals(msg_tuple[0], ION_NOTIFICATION_EMAIL_ADDRESS)
        self.assertEquals(message_dict['Description'].rstrip('\r'),
                          'RLE test event')
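
Both tests above parse the email body with the same ad-hoc loop, special-casing 'Subject' and swallowing IndexError for blank lines. A more compact way to build the same header dict, shown here only as a sketch (it assumes the body is a series of 'Key: value' lines, possibly separated by blank lines):

def parse_message_headers(message):
    """ Parse 'Key: value' lines into a dict, skipping blank lines. """
    headers = {}
    for line in message.split("\n"):
        line = line.rstrip("\r")
        if not line or ": " not in line:
            continue  # skip the blank lines produced by successive \r\n
        key, _, value = line.partition(": ")
        headers[key] = value  # partition keeps any further ': ' inside the value (e.g. in Subject)
    return headers

# e.g. parse_message_headers("To: user@example.com\r\nEvent: DetectionEvent\r\n")
# returns {'To': 'user@example.com', 'Event': 'DetectionEvent'}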
コード例 #46
0
    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Self-register with Directory
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self.directory.register("/Containers/%s" % self.id, "Processes")
        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()

        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.container.get('sflow', {}).get('enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name, listeners=[rsvc], service=self)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started    = True
        self._status        = "RUNNING"

        log.info("Container started, OK.")
コード例 #47
0
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the system.
    """

    # Singleton static variables
    #node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        self._capabilities = []

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        from pyon.core import bootstrap
        bootstrap.container_instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Instantiate Directory and self-register
        # Has the additional side effect of either
        # bootstrapping the configuration into the
        # directory or reading the configuration, based
        # on the value of the auto_bootstrap setting
        self.directory = Directory()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")

    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Self-register with Directory
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self.directory.register("/Containers/%s" % self.id, "Processes")
        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()

        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.container.get('sflow', {}).get('enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name, listeners=[rsvc], service=self)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started    = True
        self._status        = "RUNNING"

        log.info("Container started, OK.")

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.

        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node

        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")
        
        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')
                watch_parent = CFG.system.get('watch_parent', None)
                if watch_parent:
                    watch_parent.kill()
            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """
        Returns the internal status.
        """
        return self._status
            
    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception as e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
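
The _push_status context manager above temporarily swaps the container's internal status and restores it afterwards, even on error. A minimal usage sketch, assuming container is a started Container instance (the 'START_REL' label is illustrative, not a status the container defines):

# report a temporary status while a long-running start-up step executes
with container._push_status("START_REL"):
    container.start_rel_from_url('res/deploy/r2deploy.yml')

# after the with-block the previous status (e.g. "RUNNING") is restored,
# so container.status() reports the normal running state again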
コード例 #48
0
class IngestionWorker(TransformDataProcess):
    """
    Instances of this class act as Ingestion Workers. They receive packets and send them to the CouchDB datastore or
    HDF storage according to the policy in the data stream, or the default policy of the ingestion configuration.
    """
    def dataset_configs_event_test_hook(self, msg, headers):
        pass

    def ingest_process_test_hook(self, msg, headers):
        pass

    def on_init(self):
        self.event_pub = EventPublisher()

    def on_start(self):
        super(IngestionWorker, self).on_start()
        #----------------------------------------------
        # Start up couch
        #----------------------------------------------

        self.couch_config = self.CFG.get('couch_storage')
        self.hdf_storage = self.CFG.get('hdf_storage')

        self.number_of_workers = self.CFG.get('number_of_workers')
        self.description = self.CFG.get('description')

        self.ingest_config_id = self.CFG.get('configuration_id')

        self.datastore_name = self.couch_config.get('datastore_name',
                                                    None) or 'dm_datastore'
        try:
            self.datastore_profile = getattr(
                DataStore.DS_PROFILE,
                self.couch_config.get('datastore_profile', 'SCIDATA'))
        except AttributeError:
            log.exception(
                'Invalid datastore profile passed to ingestion worker. Defaulting to SCIDATA'
            )

            self.datastore_profile = DataStore.DS_PROFILE.SCIDATA
        log.debug('datastore_profile %s' % self.datastore_profile)
        self.db = self.container.datastore_manager.get_datastore(
            ds_name=self.datastore_name,
            profile=self.datastore_profile,
            config=self.CFG)

        self.resource_reg_client = ResourceRegistryServiceClient(
            node=self.container.node)

        self.dataset_configs = {}

        # update the policy
        def receive_dataset_config_event(event_msg, headers):
            log.info('Updating dataset config in ingestion worker: %s',
                     event_msg)

            if event_msg.type != DatasetIngestionTypeEnum.DATASETINGESTIONBYSTREAM:
                raise IngestionWorkerException(
                    'Received invalid type in dataset config event.')

            stream_id = event_msg.configuration.stream_id

            if event_msg.deleted:
                try:
                    del self.dataset_configs[stream_id]
                except KeyError:
                    log.info(
                        'Tried to remove dataset config that does not exist!')
            else:
                self.dataset_configs[stream_id] = event_msg

            # Hook to override just before processing is complete
            self.dataset_configs_event_test_hook(event_msg, headers)

        #Start the event subscriber - really - what a mess!
        self.event_subscriber = EventSubscriber(
            event_type="DatasetIngestionConfigurationEvent",
            origin=self.ingest_config_id,
            callback=receive_dataset_config_event)

        self.gl = spawn(self.event_subscriber.listen)
        self.event_subscriber._ready_event.wait(timeout=5)

        log.info(str(self.db))

    def process(self, packet):
        """Process incoming data!!!!
        """

        if isinstance(packet, (Granule, CompoundGranule)):
            log.info("Received new granule, but ingestion doesn't work yet!")
            return

        # Reject any packet that is not a stream granule!
        if not isinstance(packet, StreamGranuleContainer):
            raise IngestionWorkerException(
                'Received invalid message type: "%s"', type(packet))

        # Get the dataset config for this stream
        dset_config = self.get_dataset_config(packet)

        # Process the packet

        ingest_attributes = self.process_stream(packet, dset_config)

        #@todo - get this data from the dataset config...
        if dset_config:
            dataset_id = dset_config.dataset_id
            stream_id = dset_config.stream_id

            self.event_pub.publish_event(event_type="GranuleIngestedEvent",
                                         sub_type="DatasetIngest",
                                         origin=dataset_id,
                                         status=200,
                                         ingest_attributes=ingest_attributes,
                                         stream_id=stream_id)

            headers = ''
            # Hook to override just before processing is complete
            self.ingest_process_test_hook(packet, headers)

    def persist_immutable(self, obj):
        """
        This method is not functional yet - the doc object is Python-specific; the sha1 must be computed in a language-independent form.
        """
        doc = self.db._ion_object_to_persistence_dict(obj)
        sha1 = sha1hex(doc)

        try:
            self.db.create_doc(doc, object_id=sha1)
            log.debug('Persisted document %s', type(obj))
        except BadRequest:
            # Deduplication in action!
            #@TODO why are we getting so many duplicate comments?
            log.exception('Failed to write packet!\n%s' % obj)

        # Does the id or revision have a purpose? Do we need a return value?

    def process_stream(self, packet, dset_config):
        """
        Accepts a stream packet and an instruction (a dset_config). According to the received dset_config it processes the
        stream, e.g. storing it in hdf_storage and/or couch_storage.
        @param packet The incoming data stream packet.
        @param dset_config The dset_config telling this method what to do with the incoming data stream.
        """

        ingestion_attributes = {
            'variables': [],
            'number_of_records': -1,
            'updated_metadata': False,
            'updated_data': False
        }

        if dset_config is None:
            log.info('No dataset config for this stream!')
            return

        values_string = ''
        sha1 = ''
        encoding_type = ''

        for key, value in packet.identifiables.iteritems():
            if isinstance(value, DataStream):
                values_string = value.values
                value.values = ''

            elif isinstance(value, Encoding):
                sha1 = value.sha1
                encoding_type = value.encoding_type

            elif isinstance(value, Coverage):
                ingestion_attributes['variables'].append(key)

            elif isinstance(value, CountElement):
                ingestion_attributes['number_of_records'] = value.value

        if dset_config.archive_metadata is True:
            log.debug("Persisting data....")
            ingestion_attributes['updated_metadata'] = True
            self.persist_immutable(packet)

        if dset_config.archive_data is True:
            #@todo - grab the filepath to save the hdf string somewhere..

            ingestion_attributes['updated_data'] = True
            if values_string:

                calculated_sha1 = hashlib.sha1(
                    values_string).hexdigest().upper()

                filename = FileSystem.get_hierarchical_url(
                    FS.CACHE, calculated_sha1, ".%s" % encoding_type)

                if sha1 != calculated_sha1:
                    raise IngestionWorkerException(
                        'The stored sha1 is different from the one calculated from the received hdf_string'
                    )

                #log.warn('writing to filename: %s' % filename)

                with open(filename, mode='wb') as f:
                    f.write(values_string)
                    f.close()
            else:
                log.warn("Nothing to write!")

        return ingestion_attributes

    def on_stop(self):
        TransformDataProcess.on_stop(self)

        # close event subscriber safely
        self.event_subscriber.close()
        self.gl.join(timeout=5)
        self.gl.kill()

        self.db.close()

    def on_quit(self):
        TransformDataProcess.on_quit(self)

        # close event subscriber safely
        self.event_subscriber.close()
        self.gl.join(timeout=5)
        self.gl.kill()

        self.db.close()

    def get_dataset_config(self, incoming_packet):
        """
        Gets the dset_config for the data stream
        """

        try:
            stream_id = incoming_packet.stream_resource_id
        except AttributeError:
            log.info(
                'Packet does not have a data_stream_id: using default policy')
            return None

        dset_config = self.dataset_configs.get(stream_id, None)

        configuration = None
        if dset_config is None:
            log.info('No config found for stream id: %s ' % stream_id)
        else:
            log.info('Got config: %s for stream id: %s' %
                     (dset_config, stream_id))
            configuration = dset_config.configuration

        # return the extracted instruction
        return configuration
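
persist_immutable above content-addresses each document: the sha1 of the serialized document becomes its object id, so writing identical content twice raises BadRequest and is treated as deduplication. A minimal sketch of the same idea, with hashlib and a plain dict standing in for the datastore:

import hashlib
import json

store = {}  # object_id -> document, standing in for the CouchDB datastore

def persist_immutable(doc):
    # content-address the document: identical content always maps to the same id
    object_id = hashlib.sha1(json.dumps(doc, sort_keys=True).encode('utf-8')).hexdigest().upper()
    if object_id in store:
        return object_id       # deduplication in action: already persisted, nothing to do
    store[object_id] = doc
    return object_id

first = persist_immutable({"voltage": 10})
second = persist_immutable({"voltage": 10})
assert first == second and len(store) == 1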
コード例 #49
0
 def on_start(self):
     if CFG.get_safe("process.start_mode") == "RESTART":
         self.on_system_restart()
     self.pub = EventPublisher(event_type="ResourceEvent")
コード例 #50
0
ファイル: resregistry.py プロジェクト: swarbhanu/pyon
class ResourceRegistry(object):
    """
    Class that uses a data store to provide a resource registry.
    """

    def __init__(self, datastore_manager=None):

        # Get an instance of datastore configured as resource registry.
        # May be persistent or mock, forced clean, with indexes
        datastore_manager = datastore_manager or bootstrap.container_instance.datastore_manager
        self.rr_store = datastore_manager.get_datastore("resources", DataStore.DS_PROFILE.RESOURCES)

        self.event_pub = EventPublisher()

    def close(self):
        """
        Pass-through method to close the underlying datastore.
        """
        self.rr_store.close()

    def create(self, object=None, actor_id=None):
        if object is None:
            raise BadRequest("Object not present")
        if not isinstance(object, IonObjectBase):
            raise BadRequest("Object is not an IonObject")
        if not is_resource(object):
            raise BadRequest("Object is not a Resource")

        lcsm = get_restype_lcsm(object._get_type())
        object.lcstate = lcsm.initial_state if lcsm else "DEPLOYED_AVAILABLE"
        cur_time = get_ion_ts()
        object.ts_created = cur_time
        object.ts_updated = cur_time
        new_res_id = create_unique_resource_id()
        res = self.rr_store.create(object, new_res_id)
        res_id, rev = res

        if actor_id and actor_id != 'anonymous':
            log.debug("Associate resource_id=%s with owner=%s" % (res_id, actor_id))
            self.rr_store.create_association(res_id, PRED.hasOwner, actor_id)

        self.event_pub.publish_event(event_type="ResourceModifiedEvent",
                                     origin=res_id, origin_type=object._get_type(),
                                     sub_type="CREATE",
                                     mod_type=ResourceModificationType.CREATE)

        return res

    def _create_mult(self, res_list):
        cur_time = get_ion_ts()
        for resobj in res_list:
            lcsm = get_restype_lcsm(resobj._get_type())
            resobj.lcstate = lcsm.initial_state if lcsm else "DEPLOYED_AVAILABLE"
            resobj.ts_created = cur_time
            resobj.ts_updated = cur_time

        id_list = [create_unique_resource_id() for i in xrange(len(res_list))]
        res = self.rr_store.create_mult(res_list, id_list)
        res_list = [(rid,rrv) for success,rid,rrv in res]

        # TODO: Publish events (skipped, because publishing one by one is inefficient for a large list)
#        for rid,rrv in res_list:
#            self.event_pub.publish_event(event_type="ResourceModifiedEvent",
#                origin=res_id, origin_type=object._get_type(),
#                mod_type=ResourceModificationType.CREATE)

        return res_list

    def read(self, object_id='', rev_id=''):
        if not object_id:
            raise BadRequest("The object_id parameter is an empty string")

        return self.rr_store.read(object_id, rev_id)

    def read_mult(self, object_ids=None):
        if not object_ids:
            raise BadRequest("The object_ids parameter is empty")
        return self.rr_store.read_mult(object_ids)

    def update(self, object):
        if object is None:
            raise BadRequest("Object not present")
        if not hasattr(object, "_id") or not hasattr(object, "_rev"):
            raise BadRequest("Object does not have required '_id' or '_rev' attribute")
        # Do a check whether LCS has been modified
        res_obj = self.read(object._id)

        object.ts_updated = get_ion_ts()
        if res_obj.lcstate != object.lcstate:
            log.warn("Cannot modify life cycle state in update current=%s given=%s. DO NOT REUSE THE SAME OBJECT IN CREATE THEN UPDATE" % (
                res_obj.lcstate, object.lcstate))
            object.lcstate = res_obj.lcstate

        self.event_pub.publish_event(event_type="ResourceModifiedEvent",
                                     origin=object._id, origin_type=object._get_type(),
                                     sub_type="UPDATE",
                                     mod_type=ResourceModificationType.UPDATE)

        return self.rr_store.update(object)

    def retire(self, resource_id):
        """
        This is the official "delete" for resource objects: they are set to RETIRED lcstate
        """
        self.set_lifecycle_state(resource_id, LCS.RETIRED)

    def delete(self, object_id='', del_associations=False):
        res_obj = self.read(object_id)
        if not res_obj:
            raise NotFound("Resource %s does not exist" % object_id)

        if not del_associations:
            self._delete_owners(object_id)

        res_obj.lcstate = LCS.RETIRED
        self.rr_store.update(res_obj)
        res = self.rr_store.delete(object_id, del_associations=del_associations)

        self.event_pub.publish_event(event_type="ResourceModifiedEvent",
                                     origin=res_obj._id, origin_type=res_obj._get_type(),
                                     sub_type="DELETE",
                                     mod_type=ResourceModificationType.DELETE)

        return res

    def _delete_owners(self, resource_id):
        # Delete all hasOwner associations for this resource.
        owners,assocs = self.rr_store.find_objects(resource_id, PRED.hasOwner, RT.ActorIdentity, id_only=True)
        for aid in assocs:
            self.rr_store.delete_association(aid)

    def execute_lifecycle_transition(self, resource_id='', transition_event=''):
        res_obj = self.read(resource_id)

        restype = res_obj._get_type()
        restype_workflow = get_restype_lcsm(restype)
        if not restype_workflow:
            raise BadRequest("Resource id=%s type=%s has no lifecycle" % (resource_id, restype))

        old_state = res_obj.lcstate
        new_state = restype_workflow.get_successor(old_state, transition_event)
        if not new_state:
            raise BadRequest("Resource id=%s, type=%s, lcstate=%s has no transition for event %s" % (
                resource_id, restype, res_obj.lcstate, transition_event))

        res_obj.lcstate = new_state
        res_obj.ts_updated = get_ion_ts()
        updres = self.rr_store.update(res_obj)

        self.event_pub.publish_event(event_type="ResourceLifecycleEvent",
                                     origin=res_obj._id, origin_type=res_obj._get_type(),
                                     sub_type=new_state,
                                     old_state=old_state, new_state=new_state, transition_event=transition_event)

        return new_state

    def set_lifecycle_state(self, resource_id='', target_lcstate=''):
        if not target_lcstate or target_lcstate not in LCS:
            raise BadRequest("Unknown life-cycle state %s" % target_lcstate)

        res_obj = self.read(resource_id)
        old_state = res_obj.lcstate
        if target_lcstate != LCS.RETIRED:
            restype = res_obj._get_type()
            restype_workflow = get_restype_lcsm(restype)
            if not restype_workflow:
                raise BadRequest("Resource id=%s type=%s has no lifecycle" % (resource_id, restype))

            # Check that target state is allowed
            if not target_lcstate in restype_workflow.get_successors(res_obj.lcstate).values():
                raise BadRequest("Target state %s not reachable for resource in state %s" % (target_lcstate, res_obj.lcstate))

        res_obj.lcstate = target_lcstate
        res_obj.ts_updated = get_ion_ts()

        updres = self.rr_store.update(res_obj)

        self.event_pub.publish_event(event_type="ResourceLifecycleEvent",
                                     origin=res_obj._id, origin_type=res_obj._get_type(),
                                     sub_type=target_lcstate,
                                     old_state=old_state, new_state=target_lcstate)

    def create_attachment(self, resource_id='', attachment=None):
        if attachment is None:
            raise BadRequest("Object not present")
        if not isinstance(attachment, Attachment):
            raise BadRequest("Object is not an Attachment")

        attachment.object_id = resource_id if resource_id else ""

        if attachment.attachment_type == AttachmentType.BLOB:
            if type(attachment.content) is not str:
                raise BadRequest("Attachment content must be str")
            attachment.content = base64.encodestring(attachment.content)
        elif attachment.attachment_type == AttachmentType.ASCII:
            if type(attachment.content) is not str:
                raise BadRequest("Attachment content must be str")
        elif attachment.attachment_type == AttachmentType.OBJECT:
            pass
        else:
            raise BadRequest("Unknown attachment-type: %s" % attachment.attachment_type)

        att_id,_ = self.create(attachment)

        if resource_id:
            self.rr_store.create_association(resource_id, PRED.hasAttachment, att_id)

        return att_id

    def read_attachment(self, attachment_id=''):
        attachment = self.read(attachment_id)
        if not isinstance(attachment, Attachment):
            raise Inconsistent("Object in datastore must be Attachment, not %s" % type(attachment))

        if attachment.attachment_type == AttachmentType.BLOB:
            if type(attachment.content) is not str:
                raise BadRequest("Attachment content must be str")
            attachment.content = base64.decodestring(attachment.content)

        return attachment

    def delete_attachment(self, attachment_id=''):
        return self.rr_store.delete(attachment_id, del_associations=True)

    def find_attachments(self, resource_id='', limit=0, descending=False, include_content=False, id_only=True):
        key = [resource_id]
        att_res = self.rr_store.find_by_view("attachment", "by_resource", start_key=key, end_key=list(key),
            descending=descending, limit=limit, id_only=id_only)

        if id_only:
            att_ids = [att[0] for att in att_res]
            return att_ids
        else:
            atts = [att[2] for att in att_res]
            if not include_content:
                # strip the (potentially large) content unless it was explicitly requested
                for att in atts:
                    att.content = None
            return atts

    def create_association(self, subject=None, predicate=None, object=None, assoc_type=None):
        return self.rr_store.create_association(subject, predicate, object, assoc_type)

    def delete_association(self, association=''):
        return self.rr_store.delete_association(association)

    def find(self, **kwargs):
        raise NotImplementedError("Do not use find. Use a specific find operation instead.")

    def read_object(self, subject="", predicate="", object_type="", assoc="", id_only=False):
        if assoc:
            if type(assoc) is str:
                assoc = self.read(assoc)
            return assoc.o if id_only else self.read(assoc.o)
        else:
            obj_list, assoc_list = self.find_objects(subject=subject, predicate=predicate, object_type=object_type, id_only=True)
            if not obj_list:
                raise NotFound("No object found for subject=%s, predicate=%s, object_type=%s" % (subject, predicate, object_type))
            elif len(obj_list) > 1:
                raise Inconsistent("More than one object found for subject=%s, predicate=%s, object_type=%s: count=%s" % (
                    subject, predicate, object_type, len(obj_list)))
            return obj_list[0] if id_only else self.read(obj_list[0])

    def read_subject(self, subject_type="", predicate="", object="", assoc="", id_only=False):
        if assoc:
            if type(assoc) is str:
                assoc = self.read(assoc)
            return assoc.s if id_only else self.read(assoc.s)
        else:
            sub_list, assoc_list = self.find_subjects(subject_type=subject_type, predicate=predicate, object=object, id_only=True)
            if not sub_list:
                raise NotFound("No subject found for subject_type=%s, predicate=%s, object=%s" % (subject_type, predicate, object))
            elif len(sub_list) > 1:
                raise Inconsistent("More than one subject found for subject_type=%s, predicate=%s, object=%s: count=%s" % (
                    subject_type, predicate, object, len(sub_list)))
            return sub_list[0] if id_only else self.read(sub_list[0])

    def find_objects(self, subject="", predicate="", object_type="", id_only=False):
        return self.rr_store.find_objects(subject, predicate, object_type, id_only=id_only)

    def find_subjects(self, subject_type="", predicate="", object="", id_only=False):
        return self.rr_store.find_subjects(subject_type, predicate, object, id_only=id_only)

    def find_associations(self, subject="", predicate="", object="", assoc_type=None, id_only=False):
        return self.rr_store.find_associations(subject, predicate, object, assoc_type, id_only=id_only)

    def find_associations_mult(self, subjects=[], id_only=False):
        return self.rr_store.find_associations_mult(subjects=subjects, id_only=id_only)

    def get_association(self, subject="", predicate="", object="", assoc_type=None, id_only=False):
        if predicate:
            assoc_type = assoc_type or AT.H2H
        assoc = self.rr_store.find_associations(subject, predicate, object, assoc_type, id_only=id_only)
        if not assoc:
            raise NotFound("Association for subject/predicate/object/type %s/%s/%s/%s not found" % (
                str(subject),str(predicate),str(object),str(assoc_type)))
        elif len(assoc) > 1:
            raise Inconsistent("Duplicate associations found for subject/predicate/object/type %s/%s/%s/%s" % (
                str(subject),str(predicate),str(object),str(assoc_type)))
        return assoc[0]

    def find_resources(self, restype="", lcstate="", name="", id_only=False):
        return self.rr_store.find_resources(restype, lcstate, name, id_only=id_only)
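
Every mutating call in the registry above publishes a ResourceModifiedEvent or ResourceLifecycleEvent with the resource id as origin. A minimal sketch of listening for the CREATE notifications, using the same EventSubscriber pattern as the other examples (the import path shown is the usual pyon one and is an assumption here):

from pyon.event.event import EventSubscriber

created_ids = []

def on_resource_created(event, *args, **kwargs):
    # event.origin is the new resource id, event.origin_type the resource type
    created_ids.append(event.origin)

sub = EventSubscriber(event_type="ResourceModifiedEvent",
                      sub_type="CREATE",
                      callback=on_resource_created)
sub.activate()
# ... resources created through ResourceRegistry.create() will now show up in created_ids
sub.deactivate()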
コード例 #51
0
    def test_find_events(self):
        # publish some events for the event repository
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        de_publisher = EventPublisher("DataEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event1")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event2")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event3")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2',
                                   description="DE test event1")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2',
                                   description="DE test event2")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2',
                                   description="DE test event3")

        # find all events for the originator 'Some_Resource_Agent_ID1'
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID1')
        if len(events) != 3:
            self.fail("failed to find all events")
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'Some_Resource_Agent_ID1':
                self.fail("failed to find correct events")

        # find all events of type 'DataEvent'
        events = self.unsc.find_events(type='DataEvent')
        if len(events) != 3:
            self.fail("failed to find all events")
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'DataEvent':
                self.fail("failed to find correct events")

        # find 2 events for the originator 'Some_Resource_Agent_ID2'
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID2',
                                       limit=2)
        if len(events) != 2:
            self.fail("failed to find all events")
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'Some_Resource_Agent_ID2':
                self.fail("failed to find correct events")

        # find all events for the originator 'Some_Resource_Agent_ID1' in reverse time order
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID1',
                                       descending=True)
        if len(events) != 3:
            self.fail("failed to find all events")
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'Some_Resource_Agent_ID1':
                self.fail("failed to find correct events")
コード例 #52
0
class TestActivateInstrumentIntegration(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()
        config.bootstrap.use_es = True

        self._start_container()
        self.addCleanup(TestActivateInstrumentIntegration.es_cleanup)

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        # set up listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()

    @staticmethod
    def es_cleanup():
        es_host = CFG.get_safe('server.elasticsearch.host', 'localhost')
        es_port = CFG.get_safe('server.elasticsearch.port', '9200')
        es = ep.ElasticSearch(host=es_host, port=es_port, timeout=10)
        indexes = STD_INDEXES.keys()
        indexes.append('%s_resources_index' % get_sys_name().lower())
        indexes.append('%s_events_index' % get_sys_name().lower())

        for index in indexes:
            IndexManagementService._es_call(es.river_couchdb_delete, index)
            IndexManagementService._es_call(es.index_delete, index)

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name + '_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(
            process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id,
            configuration=configuration)

        return pid

    def _create_notification(self,
                             user_name='',
                             instrument_id='',
                             product_id=''):
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------

        notification_request_1 = NotificationRequest(
            name='notification_1',
            origin=instrument_id,
            origin_type="instrument",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest(
            name='notification_2',
            origin=product_id,
            origin_type="data product",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------

        user = UserInfo()
        user.name = user_name
        user.contact.email = '*****@*****.**' % user_name

        user_id, _ = self.rrclient.create(user)

        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------

        self.usernotificationclient.create_notification(
            notification=notification_request_1, user_id=user_id)
        self.usernotificationclient.create_notification(
            notification=notification_request_2, user_id=user_id)
        log.debug(
            "test_activateInstrumentSample: create_user_notifications user_id %s",
            str(user_id))

        return user_id

    def get_datastore(self, dataset_id):
        dataset = self.datasetclient.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(
            datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def _check_computed_attributes_of_extended_instrument(
            self, expected_instrument_device_id='', extended_instrument=None):

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_instrument.computed.firmware_version,
                              ComputedFloatValue)
        self.assertIsInstance(
            extended_instrument.computed.last_data_received_datetime,
            ComputedFloatValue)
        self.assertIsInstance(
            extended_instrument.computed.last_calibration_datetime,
            ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.uptime,
                              ComputedStringValue)

        self.assertIsInstance(
            extended_instrument.computed.power_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.communications_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.data_status_roll_up,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.location_status_roll_up,
            ComputedIntValue)

        # the following assert will not work without elasticsearch.
        #self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )
        self.assertEqual(
            extended_instrument.computed.communications_status_roll_up.value,
            StatusType.STATUS_WARNING)
        self.assertEqual(
            extended_instrument.computed.data_status_roll_up.value,
            StatusType.STATUS_OK)
        self.assertEqual(
            extended_instrument.computed.power_status_roll_up.value,
            StatusType.STATUS_WARNING)

        # Verify the computed attribute for user notification requests
        self.assertEqual(
            1,
            len(extended_instrument.computed.user_notification_requests.value))
        notifications = extended_instrument.computed.user_notification_requests.value
        notification = notifications[0]
        self.assertEqual(notification.origin, expected_instrument_device_id)
        self.assertEqual(notification.origin_type, "instrument")
        self.assertEqual(notification.event_type, 'ResourceLifecycleEvent')

    def _check_computed_attributes_of_extended_product(
            self, expected_data_product_id='', extended_data_product=None):

        self.assertEqual(expected_data_product_id, extended_data_product._id)
        log.debug("extended_data_product.computed: %s",
                  extended_data_product.computed)

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(
            extended_data_product.computed.product_download_size_estimated,
            ComputedIntValue)
        self.assertIsInstance(
            extended_data_product.computed.number_active_subscriptions,
            ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_url,
                              ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.stored_data_size,
                              ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.recent_granules,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.parameters,
                              ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.recent_events,
                              ComputedEventListValue)

        self.assertIsInstance(extended_data_product.computed.provenance,
                              ComputedDictValue)
        self.assertIsInstance(
            extended_data_product.computed.user_notification_requests,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.active_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.past_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.last_granule,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.is_persisted,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_data_product.computed.data_contents_updated,
            ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.data_datetime,
                              ComputedListValue)

        # exact text here keeps changing to fit UI capabilities.  keep assertion general...
        self.assertTrue('ok' in extended_data_product.computed.last_granule.
                        value['quality_flag'])
        self.assertEqual(
            2, len(extended_data_product.computed.data_datetime.value))

        notifications = extended_data_product.computed.user_notification_requests.value

        notification = notifications[0]
        self.assertEqual(notification.origin, expected_data_product_id)
        self.assertEqual(notification.origin_type, "data product")
        self.assertEqual(notification.event_type, 'DetectionEvent')

    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    @patch.dict(CFG, {'endpoint': {'receive': {'timeout': 60}}})
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug('new InstrumentModel id = %s ', instModel_id)

        #Create stream alarms
        """
        test_two_sided_interval
        Test interval alarm and alarm event publishing for a closed
        interval.
        """

        #        kwargs = {
        #            'name' : 'test_sim_warning',
        #            'stream_name' : 'parsed',
        #            'value_id' : 'temp',
        #            'message' : 'Temperature is above test range of 5.0.',
        #            'type' : StreamAlarmType.WARNING,
        #            'upper_bound' : 5.0,
        #            'upper_rel_op' : '<'
        #        }

        kwargs = {
            'name': 'temperature_warning_interval',
            'stream_name': 'parsed',
            'value_id': 'temp',
            'message':
            'Temperature is below the normal range of 50.0 and above.',
            'type': StreamAlarmType.WARNING,
            'lower_bound': 50.0,
            'lower_rel_op': '<'
        }

        # Create alarm object.
        alarm = {}
        alarm['type'] = 'IntervalAlarmDef'
        alarm['kwargs'] = kwargs
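        # With lower_bound=50.0 and lower_rel_op='<', any 'temp' value below 50.0
        # on the 'parsed' stream is expected to raise a StreamWarningAlarmEvent;
        # the event subscriber and the out-of-range sample count later in this
        # test rely on that behaviour.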

        raw_config = StreamConfiguration(
            stream_name='raw',
            parameter_dictionary_name='ctd_raw_param_dict',
            records_per_granule=2,
            granule_publish_rate=5)
        parsed_config = StreamConfiguration(
            stream_name='parsed',
            parameter_dictionary_name='ctd_parsed_param_dict',
            records_per_granule=2,
            granule_publish_rate=5,
            alarms=[alarm])

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name='agent007',
            description="SBE37IMAgent",
            driver_uri=
            "http://sddevrepo.oceanobservatories.org/releases/seabird_sbe37smb_ooicore-0.0.1a-py2.7.egg",
            stream_configurations=[raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(
            instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            'test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) '
        )
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345")
        instDevice_id = self.imsclient.create_instrument_device(
            instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(
            instModel_id, instDevice_id)

        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) ",
            instDevice_id)

        port_agent_config = {
            'device_addr': CFG.device.sbe37.host,
            'device_port': CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance,
                                          name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config)

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()

        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(
            name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_raw_param_dict', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(
            name='raw', parameter_dictionary_id=raw_pdict_id)

        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='the parsed data',
                           description='ctd stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id1 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug('new dp_id = %s', data_product_id1)
        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the Dataset for the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s', dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        dp_obj = IonObject(RT.DataProduct,
                           name='the raw data',
                           description='raw stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id2 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification(user_name='user_1',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification(user_name='user_2',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s', str(stream_ids))

        # Retrieve the id of the Dataset for the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s', dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        #elastic search debug
        es_indexes, _ = self.container.resource_registry.find_resources(
            restype='ElasticSearchIndex')
        log.debug('ElasticSearch indexes: %s', [i.name for i in es_indexes])
        log.debug('Bootstrap %s', CFG.bootstrap.use_es)

        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(
                instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])

        #setup a subscriber to alarm events from the device
        self._events_received = []
        self._event_count = 0
        self._samples_out_of_range = 0
        self._samples_complete = False
        self._async_sample_result = AsyncResult()

        def consume_event(*args, **kwargs):
            log.debug(
                'TestActivateInstrument received ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))
            self._events_received.append(args[0])
            self._event_count = len(self._events_received)
            self._async_sample_result.set()

        self._event_subscriber = EventSubscriber(
            event_type='StreamWarningAlarmEvent',  # alternatively: StreamAlarmEvent
            callback=consume_event,
            origin=instDevice_id)
        self._event_subscriber.start()

        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        def stop_subscriber():
            self._event_subscriber.stop()
            self._event_subscriber = None

        self.addCleanup(stop_subscriber)

        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(
            instAgentInstance_id)
        gate = ProcessStateGate(self.processdispatchclient.read_process,
                                inst_agent_instance_obj.agent_process_id,
                                ProcessStateEnum.RUNNING)
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" %
            inst_agent_instance_obj.agent_process_id)

        log.debug('Instrument agent instance obj: = %s',
                  str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(
            instDevice_id,
            to_name=inst_agent_instance_obj.agent_process_id,
            process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s",
                  str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s", str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s",
                  str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "(L4-CI-SA-RQ-334): current state after sending go_active command %s",
            str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s", str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.STOPPED)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
        for i in xrange(10):
            retval = self._ia_client.execute_resource(cmd)
            log.debug("test_activateInstrumentSample: return from sample %s",
                      str(retval))

        log.debug("test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s",
                  str(reply))

        self._samples_complete = True

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.dataretrieverclient.retrieve(self.parsed_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("test_activateInstrumentSample: RDT parsed: %s",
                  str(rdt.pretty_print()))
        temp_vals = rdt['temp']
        self.assertEquals(len(temp_vals), 10)
        log.debug("test_activateInstrumentSample: all temp_vals: %s",
                  temp_vals)

        #out_of_range_temp_vals = [i for i in temp_vals if i > 5]
        out_of_range_temp_vals = [i for i in temp_vals if i < 50.0]
        log.debug("test_activateInstrumentSample: Out_of_range_temp_vals: %s",
                  out_of_range_temp_vals)
        self._samples_out_of_range = len(out_of_range_temp_vals)

        # if no bad values were produced, then do not wait for an event
        if self._samples_out_of_range == 0:
            self._async_sample_result.set()

        log.debug("test_activateInstrumentSample: _events_received: %s",
                  self._events_received)
        log.debug("test_activateInstrumentSample: _event_count: %s",
                  self._event_count)

        self._async_sample_result.get(timeout=CFG.endpoint.receive.timeout)

        replay_data = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("RDT raw: %s", str(rdt.pretty_print()))

        raw_vals = rdt['raw']
        self.assertEquals(len(raw_vals), 10)

        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """

        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id1, user_id=user_id_1)

        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.
                       value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id1,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        #put some events into the eventsdb to test - this should set the comms and data status to WARNING
        #--------------------------------------------------------------------------------

        t = get_ion_ts()
        self.event_publisher.publish_event(ts_created=t,
                                           event_type='DeviceStatusEvent',
                                           origin=instDevice_id,
                                           state=DeviceStatusType.OUT_OF_RANGE,
                                           values=[200])
        self.event_publisher.publish_event(
            ts_created=t,
            event_type='DeviceCommsEvent',
            origin=instDevice_id,
            state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
            lapse_interval_seconds=20)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_1)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id2,
            extended_data_product=extended_product)

        #---------- Put some events into the eventsdb to test - this should set the comms and data status to WARNING  ---------

        t = get_ion_ts()
        self.event_publisher.publish_event(ts_created=t,
                                           event_type='DeviceStatusEvent',
                                           origin=instDevice_id,
                                           state=DeviceStatusType.OUT_OF_RANGE,
                                           values=[200])
        self.event_publisher.publish_event(
            ts_created=t,
            event_type='DeviceCommsEvent',
            origin=instDevice_id,
            state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
            lapse_interval_seconds=20)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
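
For reference, the agent command/state sequence exercised by the test above can be summarized in a small table-driven sketch. This is illustrative only: it reuses the AgentCommand, ResourceAgentEvent and ResourceAgentState names from the test and assumes an already-started resource agent client.

    # Illustrative sketch (not part of the original test): drive the instrument
    # agent through the same lifecycle transitions as test_activateInstrumentSample
    # and verify the expected state after each command.
    AGENT_SEQUENCE = [
        (ResourceAgentEvent.INITIALIZE, ResourceAgentState.INACTIVE),
        (ResourceAgentEvent.GO_ACTIVE, ResourceAgentState.IDLE),
        (ResourceAgentEvent.RUN, ResourceAgentState.COMMAND),
        (ResourceAgentEvent.PAUSE, ResourceAgentState.STOPPED),
        (ResourceAgentEvent.RESUME, ResourceAgentState.COMMAND),
        (ResourceAgentEvent.CLEAR, ResourceAgentState.IDLE),
        (ResourceAgentEvent.RUN, ResourceAgentState.COMMAND),
    ]

    def run_agent_sequence(ia_client):
        for command, expected_state in AGENT_SEQUENCE:
            ia_client.execute_agent(AgentCommand(command=command))
            assert ia_client.get_agent_state() == expected_state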
コード例 #53
0
    def test_pub_on_different_subtypes(self):
        ar = event.AsyncResult()
        gq = queue.Queue()
        self.count = 0

        def cb(event, *args, **kwargs):
            self.count += 1
            gq.put(event)
            if event.description == "end":
                ar.set()

        sub = EventSubscriber(event_type="ResourceModifiedEvent",
                              sub_type="st1",
                              callback=cb)
        sub.activate()

        pub1 = EventPublisher(event_type="ResourceModifiedEvent")
        pub2 = EventPublisher(event_type="ContainerLifecycleEvent")

        pub1.publish_event(origin="two", sub_type="st2", description="2")
        pub2.publish_event(origin="three", sub_type="st1", description="3")
        pub1.publish_event(origin="one", sub_type="st1", description="1")
        pub1.publish_event(origin="four", sub_type="st1", description="end")

        ar.get(timeout=5)
        sub.deactivate()

        res = []
        for x in xrange(self.count):
            res.append(gq.get(timeout=5))

        self.assertEquals(len(res), 2)
        self.assertEquals(res[0].description, "1")
コード例 #54
0
    def setUp(self):
        """
        Start fake terrestrial components and add cleanup.
        Start terrestrial server and retrieve port.
        Set internal variables.
        Start container.
        Start deployment.
        Start container agent.
        Spawn remote endpoint process.
        Create remote endpoint client and retrieve remote server port.
        Create event publisher.
        """
        
        self._terrestrial_server = R3PCServer(self.consume_req, self.terrestrial_server_close)
        self._terrestrial_client = R3PCClient(self.consume_ack, self.terrestrial_client_close)
        self.addCleanup(self._terrestrial_server.stop)
        self.addCleanup(self._terrestrial_client.stop)
        self._other_port = self._terrestrial_server.start('*', 0)
        log.debug('Terrestrial server binding to *:%i', self._other_port)
        
        self._other_host = 'localhost'
        self._platform_resource_id = 'abc123'
        self._resource_id = 'fake_id'
        self._no_requests = 10
        self._requests_sent = {}
        self._results_recv = {}
        self._no_telem_events = 0
        self._done_evt = AsyncResult()
        self._done_telem_evts = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        
        # Start container.
        log.debug('Starting capability container.')
        self._start_container()
        
        # Bring up services in a deploy file (no need to message).
        log.info('Starting deploy services.')
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Create a container client.
        log.debug('Creating container client.')
        container_client = ContainerAgentClient(node=self.container.node,
            name=self.container.name)

        # Create agent config.
        endpoint_config = {
            'other_host' : self._other_host,
            'other_port' : self._other_port,
            'this_port' : 0,
            'platform_resource_id' : self._platform_resource_id
        }
        
        # Spawn the remote endpoint process.
        log.debug('Spawning remote endpoint process.')
        re_pid = container_client.spawn_process(
            name='remote_endpoint_1',
            module='ion.services.sa.tcaa.remote_endpoint',
            cls='RemoteEndpoint',
            config=endpoint_config)
        log.debug('Endpoint pid=%s.', str(re_pid))

        # Create an endpoint client.
        self.re_client = RemoteEndpointClient(
            process=FakeProcess(),
            to_name=re_pid)
        log.debug('Got re client %s.', str(self.re_client))
        
        # Remember the remote port.
        self._this_port = self.re_client.get_port()
        log.debug('The remote port is: %i.', self._this_port)
        
        # Start the event publisher.
        self._event_publisher = EventPublisher()
コード例 #55
0
class SystemManagementService(BaseSystemManagementService):
    """ container management requests are handled by the event listener
        ion.processes.event.container_manager.ContainerManager
        which must be running on each container.
    """
    def on_start(self, *a, **b):
        super(SystemManagementService, self).on_start(*a, **b)
        self.sender = EventPublisher(process=self)

    def on_quit(self, *a, **b):
        self.sender.close()

    def perform_action(self, predicate, action):
        userid = None  # get from context
        self.sender.publish_event(event_type=OT.ContainerManagementRequest,
                                  origin=userid,
                                  predicate=predicate,
                                  action=action)

    def set_log_level(self, logger='', level='', recursive=False):
        self.perform_action(
            ALL_CONTAINERS_INSTANCE,
            IonObject(OT.ChangeLogLevel,
                      logger=logger,
                      level=level,
                      recursive=recursive))

    def reset_policy_cache(self, headers=None, timeout=None):
        """Clears and reloads the policy caches in all of the containers.

        @throws BadRequest    None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE,
                            IonObject(OT.ResetPolicyCache))

    def trigger_garbage_collection(self):
        """Triggers a garbage collection in all containers

        @throws BadRequest    None
        """
        self.perform_action(ALL_CONTAINERS_INSTANCE,
                            IonObject(OT.TriggerGarbageCollection))

    def trigger_container_snapshot(self,
                                   snapshot_id='',
                                   include_snapshots=None,
                                   exclude_snapshots=None,
                                   take_at_time='',
                                   clear_all=False,
                                   persist_snapshot=True,
                                   snapshot_kwargs=None):

        if not snapshot_id:
            snapshot_id = get_ion_ts()
        if not snapshot_kwargs:
            snapshot_kwargs = {}

        self.perform_action(
            ALL_CONTAINERS_INSTANCE,
            IonObject(OT.TriggerContainerSnapshot,
                      snapshot_id=snapshot_id,
                      include_snapshots=include_snapshots,
                      exclude_snapshots=exclude_snapshots,
                      take_at_time=take_at_time,
                      clear_all=clear_all,
                      persist_snapshot=persist_snapshot,
                      snapshot_kwargs=snapshot_kwargs))
        log.info("Event to trigger container snapshots sent. snapshot_id=%s" %
                 snapshot_id)

    def start_gevent_block(self, alarm_mode=False):
        self.perform_action(
            ALL_CONTAINERS_INSTANCE,
            IonObject(OT.StartGeventBlock, alarm_mode=alarm_mode))

    def stop_gevent_block(self):
        self.perform_action(ALL_CONTAINERS_INSTANCE,
                            IonObject(OT.StopGeventBlock))

    def prepare_system_shutdown(self, mode=''):
        self.perform_action(ALL_CONTAINERS_INSTANCE,
                            IonObject(OT.PrepareSystemShutdown, mode=mode))
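
The service above turns each public operation into a ContainerManagementRequest event that the per-container listener acts on. A rough usage sketch follows; the client class and its import path are assumed (conventional generated-interface names, not shown in this snippet).

    # Hypothetical usage sketch; SystemManagementServiceClient and its import
    # path are assumed here, not taken from the code above.
    from interface.services.coi.isystem_management_service import SystemManagementServiceClient

    sms = SystemManagementServiceClient()
    # Ask every container to raise logging detail for one logger subtree.
    sms.set_log_level(logger='pyon.net', level='DEBUG', recursive=True)
    # Ask every container to clear and reload its policy cache.
    sms.reset_policy_cache()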
コード例 #56
0
 def on_init(self):
     self.event_pub = EventPublisher()
コード例 #57
0
class TestRemoteEndpoint(IonIntegrationTestCase):
    """
    Test cases for 2CAA terrestrial endpoint.
    """
    def setUp(self):
        """
        Start fake terrestrial components and add cleanup.
        Start terrestrial server and retrieve port.
        Set internal variables.
        Start container.
        Start deployment.
        Start container agent.
        Spawn remote endpoint process.
        Create remote endpoint client and retrieve remote server port.
        Create event publisher.
        """
        
        self._terrestrial_server = R3PCServer(self.consume_req, self.terrestrial_server_close)
        self._terrestrial_client = R3PCClient(self.consume_ack, self.terrestrial_client_close)
        self.addCleanup(self._terrestrial_server.stop)
        self.addCleanup(self._terrestrial_client.stop)
        self._other_port = self._terrestrial_server.start('*', 0)
        log.debug('Terrestrial server binding to *:%i', self._other_port)
        
        self._other_host = 'localhost'
        self._platform_resource_id = 'abc123'
        self._resource_id = 'fake_id'
        self._no_requests = 10
        self._requests_sent = {}
        self._results_recv = {}
        self._no_telem_events = 0
        self._done_evt = AsyncResult()
        self._done_telem_evts = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        
        # Start container.
        log.debug('Starting capability container.')
        self._start_container()
        
        # Bring up services in a deploy file (no need to message).
        log.info('Starting deploy services.')
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Create a container client.
        log.debug('Creating container client.')
        container_client = ContainerAgentClient(node=self.container.node,
            name=self.container.name)

        # Create agent config.
        endpoint_config = {
            'other_host' : self._other_host,
            'other_port' : self._other_port,
            'this_port' : 0,
            'platform_resource_id' : self._platform_resource_id
        }
        
        # Spawn the remote endpoint process.
        log.debug('Spawning remote endpoint process.')
        re_pid = container_client.spawn_process(
            name='remote_endpoint_1',
            module='ion.services.sa.tcaa.remote_endpoint',
            cls='RemoteEndpoint',
            config=endpoint_config)
        log.debug('Endpoint pid=%s.', str(re_pid))

        # Create an endpoint client.
        self.re_client = RemoteEndpointClient(
            process=FakeProcess(),
            to_name=re_pid)
        log.debug('Got re client %s.', str(self.re_client))
        
        # Remember the remote port.
        self._this_port = self.re_client.get_port()
        log.debug('The remote port is: %i.', self._this_port)
        
        # Start the event publisher.
        self._event_publisher = EventPublisher()
      
    ######################################################################    
    # Helpers.
    ######################################################################    

    def on_link_up(self):
        """
        Called by a test to simulate turning the link on.
        """
        log.debug('Terrestrial client connecting to localhost:%i.',
                 self._this_port)
        self._terrestrial_client.start('localhost', self._this_port)
        # Publish a link up event to be caught by the endpoint.
        log.debug('Publishing telemetry event.')
        self._event_publisher.publish_event(
                            event_type='PlatformTelemetryEvent',
                            origin=self._platform_resource_id,
                            status = TelemetryStatusType.AVAILABLE)
    
    def on_link_down(self):
        """
        Called by a test to simulate turning the link off.
        """
        self._terrestrial_client.stop()
        # Publish a link down event to be caught by the endpoint.
        log.debug('Publishing telemetry event.')
        self._event_publisher.publish_event(
                            event_type='PlatformTelemetryEvent',
                            origin=self._platform_resource_id,
                            status = TelemetryStatusType.UNAVAILABLE)    
    
    def consume_req(self, res):
        """
        Consume a terrestrial request setting async event when necessary.
        """
        command_id = res['command_id']
        self._results_recv[command_id] = res
        if len(self._results_recv) == self._no_requests:
            self._done_evt.set()
    
    def consume_ack(self, cmd):
        """
        Consume terrestrial ack setting async event when necessary.
        """
        self._requests_sent[cmd.command_id] = cmd
        if len(self._requests_sent) == self._no_requests:
            self._cmd_tx_evt.set()
        
    def terrestrial_server_close(self):
        """
        Callback when terrestrial server closes.
        """
        pass
    
    def terrestrial_client_close(self):
        """
        Callback when terrestrial client closes.
        """
        pass
    
    def make_fake_command(self, no):
        """
        Build a fake command for use in tests.
        """
            
        cmdstr = 'fake_cmd_%i' % no
        cmd = IonObject('RemoteCommand',
                             resource_id=self._resource_id,
                             command=cmdstr,
                             args=['arg1', 23],
                             kwargs={'worktime':3},
                             command_id = str(uuid.uuid4()))
        return cmd

    def start_agent(self):
        """
        Start an instrument agent and client.
        """
        
        log.info('Creating driver integration test support:')
        log.info('driver module: %s', DRV_MOD)
        log.info('driver class: %s', DRV_CLS)
        log.info('device address: %s', DEV_ADDR)
        log.info('device port: %s', DEV_PORT)
        log.info('log delimiter: %s', DELIM)
        log.info('work dir: %s', WORK_DIR)        
        self._support = DriverIntegrationTestSupport(DRV_MOD,
                                                     DRV_CLS,
                                                     DEV_ADDR,
                                                     DEV_PORT,
                                                     DATA_PORT,
                                                     CMD_PORT,
                                                     PA_BINARY,
                                                     DELIM,
                                                     WORK_DIR)
        
        # Start port agent, add stop to cleanup.
        port = self._support.start_pagent()
        log.info('Port agent started at port %i',port)
        
        # Configure driver to use port agent port number.
        DVR_CONFIG['comms_config'] = {
            'addr' : 'localhost',
            'port' : port,
            'cmd_port' : CMD_PORT
        }
        self.addCleanup(self._support.stop_pagent)    
                        
        # Create agent config.
        agent_config = {
            'driver_config' : DVR_CONFIG,
            'stream_config' : {},
            'agent'         : {'resource_id': IA_RESOURCE_ID},
            'test_mode' : True
        }
    
        # Start instrument agent.
        log.debug("Starting IA.")
        container_client = ContainerAgentClient(node=self.container.node,
            name=self.container.name)
    
        ia_pid = container_client.spawn_process(name=IA_NAME,
            module=IA_MOD,
            cls=IA_CLS,
            config=agent_config)
    
        log.info('Agent pid=%s.', str(ia_pid))
    
        # Start a resource agent client to talk with the instrument agent.
    
        self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
        log.info('Got ia client %s.', str(self._ia_client))                
                
    ######################################################################    
    # Tests.
    ######################################################################    

    def test_process_queued(self):
        """
        test_process_queued
        Test that queued commands are forwarded to and handled by
        remote endpoint when link comes up.
        """        
        
        # Create and enqueue some requests.
        for i in range(self._no_requests):
            cmd = self.make_fake_command(i)
            self._terrestrial_client.enqueue(cmd)

        # Publish a telemetry available event.
        # This will cause the endpoint clients to wake up and connect.
        self.on_link_up()

        # Wait for all the enqueued commands to be acked.
        # Wait for all the responses to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Publish a telemetry unavailable event.
        # This will cause the endpoint clients to disconnect and go to sleep.
        self.on_link_down()

        # Confirm the results match the commands sent.
        self.assertItemsEqual(self._requests_sent.keys(),
                                  self._results_recv.keys())
    
    def test_process_online(self):
        """
        test_process_online
        Test commands are forwarded and handled while link is up.
        """        
        
        # Publish a telemetry available event.
        # This will cause the endpoint clients to wake up and connect.
        self.on_link_up()

        # Wait for the link to be up.
        # The remote side does not publish public telemetry events
        # so we can't wait for that.
        gevent.sleep(1)

        # Create and enqueue some requests.
        for i in range(self._no_requests):
            cmd = self.make_fake_command(i)
            self._terrestrial_client.enqueue(cmd)

        # Wait for all the enqueued commands to be acked.
        # Wait for all the responses to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Publish a telemetry unavailable event.
        # This will cause the endpoint clients to disconnect and go to sleep.
        self.on_link_down()

        # Confirm the results match the commands sent.
        self.assertItemsEqual(self._requests_sent.keys(),
                                  self._results_recv.keys())

    def test_terrestrial_late(self):
        """
        test_terrestrial_late
        Test queued commands are forwarded and handled by remote endpoint
        when terrestrial side is late to come up.
        """        
        
        # Publish a telemetry available event.
        # This will cause the endpoint clients to wake up and connect.
        self.on_link_up()

        # Wait for the link to be up.
        # The remote side does not publish public telemetry events
        # so we can't wait for that.
        gevent.sleep(1)

        # Manually stop the terrestrial endpoint.
        # This will cause it to be unavailable when commands are queued
        # to simulate stability during asynchronous wake ups.
        self._terrestrial_server.stop()
        self._terrestrial_client.stop()

        # Create and enqueue some requests.
        for i in range(self._no_requests):
            cmd = self.make_fake_command(i)
            self._terrestrial_client.enqueue(cmd)

        # Remote side awaits the terrestrial waking up.
        gevent.sleep(3)

        # Terrestrial endpoint eventually wakes up and starts transmitting.
        self._terrestrial_client.start('localhost', self._this_port)
        self._terrestrial_server.start('*', self._other_port)
    
        # Wait for all the enqueued commands to be acked.
        # Wait for all the responses to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Publish a telemetry unavailable event.
        # This will cause the endpoint clients to disconnect and go to sleep.
        self.on_link_down()

        # Confirm the results match the commands sent.
        self.assertItemsEqual(self._requests_sent.keys(),
                                  self._results_recv.keys())

    def test_service_commands(self):
        """
        test_service_commands
        Test that real service commands are handled by the remote endpoint.
        """
        
        # Publish a telemetry available event.
        # This will cause the endpoint clients to wake up and connect.
        self.on_link_up()

        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}
        
        # Create user object.
        obj = IonObject("UserInfo", name="some_name")
        cmd = IonObject('RemoteCommand',
                             resource_id='',
                             svc_name='resource_registry',
                             command='create',
                             args=[obj],
                             kwargs='',
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)
        
        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
        
        # Returns obj_id, obj_rev.
        obj_id, obj_rev = self._results_recv[cmd.command_id]['result']
        
        # Confirm the results are valid.
        """
        Result is a tuple of strings.
        {'result': ['ad183ff26bae4f329ddd85fd69d160a9',
        '1-00a308c45fff459c7cda1db9a7314de6'],
        'command_id': 'cc2ae00d-40b0-47d2-af61-8ffb87f1aca2'}
        """
        self.assertIsInstance(obj_id, str)
        self.assertNotEqual(obj_id, '')
        self.assertIsInstance(obj_rev, str)
        self.assertNotEqual(obj_rev, '')
        
        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}

        # Read user object.
        cmd = IonObject('RemoteCommand',
                             resource_id='',
                             svc_name='resource_registry',
                             command='read',
                             args=[obj_id],
                             kwargs='',
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)

        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Returns read_obj.
        read_obj = self._results_recv[cmd.command_id]['result']
        
        # Confirm the results are valid.
        """
        Result is a user info object with the name set.
        {'lcstate': 'DEPLOYED_AVAILABLE',
        '_rev': '1-851f067bac3c34b2238c0188b3340d0f',
        'description': '',
        'ts_updated': '1349213207638',
        'type_': 'UserInfo',
        'contact': <interface.objects.ContactInformation object at 0x10d7df590>,
        '_id': '27832d93f4cd4535a75ac75c06e00a7e',
        'ts_created': '1349213207638',
        'variables': [{'name': '', 'value': ''}],
        'name': 'some_name'}
        """
        self.assertIsInstance(read_obj, UserInfo)
        self.assertEquals(read_obj.name, 'some_name')
        
        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}

        # Update user object.
        read_obj.name = 'some_other_name'
        cmd = IonObject('RemoteCommand',
                             resource_id='',
                             svc_name='resource_registry',
                             command='update',
                             args=[read_obj],
                             kwargs='',
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)

        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Returns nothing.
        
        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}

        # Read user object.
        cmd = IonObject('RemoteCommand',
                             resource_id='',
                             svc_name='resource_registry',
                             command='read',
                             args=[obj_id],
                             kwargs='',
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)        

        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Returns read_obj.
        read_obj = self._results_recv[cmd.command_id]['result']
        
        self.assertIsInstance(read_obj, UserInfo)
        self.assertEquals(read_obj.name, 'some_other_name')
        
        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}
        
        # Delete user object.
        cmd = IonObject('RemoteCommand',
                             resource_id='',
                             svc_name='resource_registry',
                             command='delete',
                             args=[obj_id],
                             kwargs='',
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)        

        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Returns nothing.
            
        # Publish a telemetry unavailable event.
        # This will cause the endpoint clients to disconnect and go to sleep.
        self.on_link_down()

        gevent.sleep(1)
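
    # Illustrative sketch (not part of the original test class), assuming the
    # attributes initialized in setUp(): test_service_commands above repeats the
    # same "reset bookkeeping, enqueue a RemoteCommand, wait for the ack and the
    # result" steps for every exchange; a helper could factor that out.
    def _round_trip_sketch(self, cmd):
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}
        self._terrestrial_client.enqueue(cmd)
        # Wait for the command to be acked, then for its result to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
        return self._results_recv[cmd.command_id]['result']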
        
    def test_resource_commands(self):
        """
        test_resource_commands
        Test that real resource commands are handled by the remote endpoint.
        """
        
        # Start the IA and check it's out there and behaving.
        self.start_agent()
        
        state = self._ia_client.get_agent_state()
        log.debug('Agent state is: %s', state)
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        retval = self._ia_client.ping_agent()
        log.debug('Agent ping is: %s', str(retval))
        self.assertIn('ping from InstrumentAgent', retval)

        # Publish a telemetry available event.
        # This will cause the endpoint clients to wake up and connect.
        self.on_link_up()

        # Wait for the link to be up.
        # The remote side does not publish public telemetry events
        # so we can't wait for that.
        gevent.sleep(1)

        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}

        # Get agent state via remote endpoint.        
        cmd = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='get_agent_state',
                             args=[],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)
        
        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
        
        # Returns agent state.
        state = self._results_recv[cmd.command_id]['result']
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}

        # Ping agent via remote endpoint. 
        cmd = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='ping_agent',
                             args=[],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)
        
        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
        
        # Returns the ping result.
        ping = self._results_recv[cmd.command_id]['result']
        self.assertIn('ping from InstrumentAgent', ping)
        
        # Publish a telemetry unavailable event.
        # This will cause the endpoint clients to disconnect and go to sleep.
        self.on_link_down()

        gevent.sleep(1)

    def test_bad_service_name_resource_id(self):
        """
        test_bad_service_name_resource_id
        Test for proper exception behavior when a bad service name or
        resource id is used in a command forwarded to the remote endpoint.
        """
        
        # Publish a telemetry available event.
        # This will cause the endpoint clients to wake up and connect.
        self.on_link_up()

        # Wait for the link to be up.
        # The remote side does not publish public telemetry events
        # so we can't wait for that.
        gevent.sleep(1)

        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}
        
        # Create user object.
        obj = IonObject("UserInfo", name="some_name")
        cmd = IonObject('RemoteCommand',
                             resource_id='',
                             svc_name='bogus_service',
                             command='create',
                             args=[obj],
                             kwargs='',
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)
        
        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
        
        # Returns NotFound.
        result = self._results_recv[cmd.command_id]['result']
        self.assertIsInstance(result, NotFound)

        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}

        # Get agent state via remote endpoint.        
        cmd = IonObject('RemoteCommand',
                             resource_id='bogus_resource_id',
                             svc_name='',
                             command='get_agent_state',
                             args=[],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)
        
        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Returns NotFound.
        result = self._results_recv[cmd.command_id]['result']
        self.assertIsInstance(result, NotFound)

        # Publish a telemetry unavailable event.
        # This will cause the endpoint clients to disconnect and go to sleep.
        self.on_link_down()

        gevent.sleep(1)

    def test_bad_commands(self):
        """
        test_bad_commands
        Test for correct exception behavior if a bad command name is forwarded
        to a remote service or resource.
        """
        
        # Start the IA and check it's out there and behaving.
        self.start_agent()
        
        state = self._ia_client.get_agent_state()
        log.debug('Agent state is: %s', state)
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        retval = self._ia_client.ping_agent()
        log.debug('Agent ping is: %s', str(retval))
        self.assertIn('ping from InstrumentAgent', retval)
        
        # Publish a telemetry available event.
        # This will cause the endpoint clients to wake up and connect.
        self.on_link_up()

        # Wait for the link to be up.
        # The remote side does not publish public telemetry events
        # so we can't wait for that.
        gevent.sleep(1)

        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}
        
        # Create user object.
        obj = IonObject("UserInfo", name="some_name")
        cmd = IonObject('RemoteCommand',
                             resource_id='',
                             svc_name='resource_registry',
                             command='what_the_flunk',
                             args=[obj],
                             kwargs='',
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)
        
        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
        
        # Returns BadRequest.
        result = self._results_recv[cmd.command_id]['result']
        self.assertIsInstance(result, BadRequest)

        # Send commands one at a time.
        # Reset queues and events.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}

        # Get agent state via remote endpoint.        
        cmd = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='what_the_flunk',
                             args=[],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)
        
        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Returns BadRequest.
        result = self._results_recv[cmd.command_id]['result']
        self.assertIsInstance(result, BadRequest)
        
        # Publish a telemetry unavailable event.
        # This will cause the endpoint clients to disconnect and go to sleep.
        self.on_link_down()

        gevent.sleep(1)

    def test_resource_command_sequence(self):
        """
        test_resource_command_sequence
        Test for successful completion of a properly ordered sequence of
        resource commands queued for forwarding to the remote endpoint.
        """
        # Start the IA and check it's out there and behaving.
        self.start_agent()
        
        state = self._ia_client.get_agent_state()
        log.debug('Agent state is: %s', state)
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        retval = self._ia_client.ping_agent()
        log.debug('Agent ping is: %s', str(retval))
        self.assertIn('ping from InstrumentAgent', retval)

        # We execute a sequence of twelve consecutive commands.
        self._no_requests = 12

        # Get agent state.
        cmd1 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='get_agent_state',
                             args=[],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd1)
        
        # Initialize agent.
        cmd2 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='execute_agent',
                             args=[AgentCommand(command=ResourceAgentEvent.INITIALIZE)],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd2)
        
        # Get agent state.
        cmd3 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='get_agent_state',
                             args=[],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd3)
        
        # Go active.
        cmd4 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='execute_agent',
                             args=[AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd4)
        
        # Get agent state.
        cmd5 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='get_agent_state',
                             args=[],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd5)
        
        # Run.
        cmd6 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='execute_agent',
                             args=[AgentCommand(command=ResourceAgentEvent.RUN)],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd6)
        
        # Get agent state.
        cmd7 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='get_agent_state',
                             args=[],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd7)
        
        # Acquire sample.
        cmd8 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='execute_resource',
                             args=[AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd8)
        
        # Acquire sample
        cmd9 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='execute_resource',
                             args=[AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd9)
        
        # Acquire sample.
        cmd10 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='execute_resource',
                             args=[AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd10)
        
        # Reset.
        cmd11 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='execute_agent',
                             args=[AgentCommand(command=ResourceAgentEvent.RESET)],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd11)
        
        # Get agent state.
        cmd12 = IonObject('RemoteCommand',
                             resource_id=IA_RESOURCE_ID,
                             svc_name='',
                             command='get_agent_state',
                             args=[],
                             kwargs={},
                             command_id = str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd12)

        
        # Publish a telemetry available event.
        # This will cause the endpoint clients to wake up and connect.
        self.on_link_up()
        
        # Wait for command request to be acked.
        # Wait for response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

        # Check results of command sequence.
        """
        0ccf1e10-eeca-400d-aefe-f9d6888ec963   {'result': 'RESOURCE_AGENT_STATE_INACTIVE', 'command_id': '0ccf1e10-eeca-400d-aefe-f9d6888ec963'}
        92531bdf-c2c8-4aa8-817d-5107c7311b37   {'result': <interface.objects.AgentCommandResult object at 0x10d7f11d0>, 'command_id': '92531bdf-c2c8-4aa8-817d-5107c7311b37'}
        509934a1-5038-40d8-8014-591e2d8042b6   {'result': 'RESOURCE_AGENT_STATE_COMMAND', 'command_id': '509934a1-5038-40d8-8014-591e2d8042b6'}
        88bacbb7-5366-4d27-9ecf-fff2bec34b2c   {'result': <interface.objects.AgentCommandResult object at 0x10d389190>, 'command_id': '88bacbb7-5366-4d27-9ecf-fff2bec34b2c'}
        f8b4d3fa-a249-439b-8bd4-ac212b6100aa   {'result': <interface.objects.AgentCommandResult object at 0x10d3893d0>, 'command_id': 'f8b4d3fa-a249-439b-8bd4-ac212b6100aa'}
        8ae98e39-fdb3-4218-ad8f-584620397d9f   {'result': <interface.objects.AgentCommandResult object at 0x10d739990>, 'command_id': '8ae98e39-fdb3-4218-ad8f-584620397d9f'}
        746364a1-c4c7-400f-96d4-ee36df5dc1a4   {'result': BadRequest('Execute argument "command" not set.',), 'command_id': '746364a1-c4c7-400f-96d4-ee36df5dc1a4'}
        d516d3d9-e4f9-4ea5-80e0-34639a6377b5   {'result': <interface.objects.AgentCommandResult object at 0x10d3b2350>, 'command_id': 'd516d3d9-e4f9-4ea5-80e0-34639a6377b5'}
        c7da03f5-59bc-420a-9e10-0a7794266599   {'result': 'RESOURCE_AGENT_STATE_IDLE', 'command_id': 'c7da03f5-59bc-420a-9e10-0a7794266599'}
        678d870a-bf18-424a-afb0-f80ecf3277e2   {'result': <interface.objects.AgentCommandResult object at 0x10d739590>, 'command_id': '678d870a-bf18-424a-afb0-f80ecf3277e2'}
        750c6a30-56eb-4535-99c2-a81fefab1b1f   {'result': 'RESOURCE_AGENT_STATE_COMMAND', 'command_id': '750c6a30-56eb-4535-99c2-a81fefab1b1f'}
        c17bd658-3775-4aa3-8844-02df70a0e3c0   {'result': 'RESOURCE_AGENT_STATE_UNINITIALIZED', 'command_id': 'c17bd658-3775-4aa3-8844-02df70a0e3c0'}
        """        
        
        # First result is a state string.
        result1 = self._results_recv[cmd1.command_id]['result']
        self.assertEqual(result1, ResourceAgentState.UNINITIALIZED)
        
        # Second result is an empty AgentCommandResult.
        result2 = self._results_recv[cmd2.command_id]['result']

        # Third result is a state string.
        result3 = self._results_recv[cmd3.command_id]['result']
        self.assertEqual(result3, ResourceAgentState.INACTIVE)
        
        # Fourth result is an empty AgentCommandResult.
        result4 = self._results_recv[cmd4.command_id]['result']

        # Fifth result is a state string.
        result5 = self._results_recv[cmd5.command_id]['result']
        self.assertEqual(result5, ResourceAgentState.IDLE)

        # Sixth result is an empty AgentCommandResult.
        result6 = self._results_recv[cmd6.command_id]['result']

        # Seventh result is a state string.
        result7 = self._results_recv[cmd7.command_id]['result']
        self.assertEqual(result7, ResourceAgentState.COMMAND)
        
        """
        {'raw': {'quality_flag': 'ok', 'preferred_timestamp': 'driver_timestamp',
        'stream_name': 'raw', 'pkt_format_id': 'JSON_Data',
        'pkt_version': 1, '
        values': [{'binary': True, 'value_id': 'raw',
        'value': 'NzkuNDM3MywxNy4yMDU2NCwgNzYxLjg4NSwgICA2LjIxOTgsIDE1MDYuMzk3LCAwMSBGZWIgMjAwMSwgMDE6MDE6MDA='}],
        'driver_timestamp': 3558286748.8039923},
        'parsed': {'quality_flag': 'ok', 'preferred_timestamp': 'driver_timestamp',
        'stream_name': 'parsed', 'pkt_format_id': 'JSON_Data', 'pkt_version': 1,
        'values': [{'value_id': 'temp', 'value': 79.4373},
        {'value_id': 'conductivity', 'value': 17.20564},
        {'value_id': 'pressure', 'value': 761.885}],
        'driver_timestamp': 3558286748.8039923}}
        """
        
        # Eighth result is an AgentCommandResult containing a sample.
        result8 = self._results_recv[cmd8.command_id]['result']
        self.assertIn('parsed', result8.result)
        
        # Ninth result is an AgentCommandResult containing a sample.
        result9 = self._results_recv[cmd9.command_id]['result']
        self.assertIn('parsed', result9.result)

        # Tenth result is an AgentCommandResult containing a sample.
        result10 = self._results_recv[cmd10.command_id]['result']
        self.assertIn('parsed', result10.result)

        # Eleventh result is an empty AgentCommandResult.
        result11 = self._results_recv[cmd11.command_id]['result']

        # Twelfth result is a state string.
        result12 = self._results_recv[cmd12.command_id]['result']
        self.assertEqual(result12, ResourceAgentState.UNINITIALIZED)
        
        # Publish a telemetry unavailable event.
        # This will cause the endpoint clients to disconnect and go to sleep.
        self.on_link_down()

        gevent.sleep(1)
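
Each test above repeats the same enqueue-and-wait boilerplate. A minimal refactoring sketch of that pattern (the helper name _send_remote_command is hypothetical; it reuses the same test attributes and the CFG.endpoint.receive.timeout shown above):

    def _send_remote_command(self, resource_id='', svc_name='', command='',
                             args=None, kwargs=None):
        # Reset queues and events for a single request.
        self._no_requests = 1
        self._done_evt = AsyncResult()
        self._cmd_tx_evt = AsyncResult()
        self._requests_sent = {}
        self._results_recv = {}

        cmd = IonObject('RemoteCommand',
                        resource_id=resource_id,
                        svc_name=svc_name,
                        command=command,
                        args=args or [],
                        kwargs=kwargs or {},
                        command_id=str(uuid.uuid4()))
        self._terrestrial_client.enqueue(cmd)

        # Wait for the command request to be acked, then for the response to arrive.
        self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
        self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
        return self._results_recv[cmd.command_id]['result']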
コード例 #58
class SchedulerService(BaseSchedulerService):
    def __init__(self, *args, **kwargs):
        BaseSchedulerService.__init__(self, *args, **kwargs)

        self.schedule_entries = {}
        self._no_reschedule = False

    def on_start(self):
        if CFG.get_safe("process.start_mode") == "RESTART":
            self.on_system_restart()
        self.pub = EventPublisher(event_type="ResourceEvent")

    def on_quit(self):
        self.pub.close()

        # throw killswitch on future reschedules
        self._no_reschedule = True

        # terminate any pending spawns
        self._stop_pending_timers()

    def __notify(self, task, id, index):
        log.debug("SchedulerService:__notify: - " + task.event_origin +
                  " - Time: " + str(self.__now()) + " - ID: " + id +
                  " -Index:" + str(index))
        self.pub.publish_event(origin=task.event_origin)

    def __now(self):
        return datetime.utcnow()

    def __now_posix(self, now):
        return time.mktime(now.timetuple())

    def _expire_callback(self, id, index):
        task = self.__get_entry(id)
        self.__notify(task, id, index)
        if not self.__reschedule(id, index):
            self.__delete(id, index)

    def __calculate_next_interval(self, task, current_time):
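        # Seconds from current_time until the next firing: if start_time has already
        # passed, advance from start_time in whole intervals until past current_time;
        # otherwise the first firing is one interval after start_time.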
        if task.start_time < current_time:
            next_interval = task.start_time
            while (next_interval < current_time):
                next_interval = next_interval + task.interval
            return (next_interval - current_time)
        else:
            return (task.start_time - current_time) + task.interval

    def __get_expire_time(self, task):
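        # Returns a list of seconds-until-expiry: one entry per time_of_day for a
        # TimeOfDayTimer, or a single entry for an IntervalTimer that has not yet ended.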
        now = self.__now()
        now_posix = self.__now_posix(now)
        expires_in = []
        if type(task) == TimeOfDayTimer:
            for time_of_day in task.times_of_day:
                expire_time = datetime(now.year, now.month, now.day,
                                       time_of_day['hour'],
                                       time_of_day['minute'],
                                       time_of_day['second'])
                expires_in.append(ceil((expire_time - now).total_seconds()))
        elif type(task) == IntervalTimer and (task.end_time == -1 or (
            (now_posix + task.interval) <= task.end_time)):
            expires_in = [(self.__calculate_next_interval(task, now_posix))]
        return expires_in

    def __get_reschedule_expire_time(self, task, index):
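        # Seconds until the next firing when an entry is rescheduled after firing,
        # or False if the timer has expired and should not be rescheduled.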
        expires_in = False
        now = self.__now()
        now_posix = self.__now_posix(now)
        if type(task) == TimeOfDayTimer:
            if task.expires > now_posix:
                time_of_day = task.times_of_day[index]
                tomorrow = now + timedelta(days=1)
                expire_time = datetime(tomorrow.year, tomorrow.month,
                                       tomorrow.day, time_of_day['hour'],
                                       time_of_day['minute'],
                                       time_of_day['second'])
                expires_in = (ceil((expire_time - now).total_seconds()))
            else:
                expires_in = False
        elif type(task) == IntervalTimer and (task.end_time == -1 or (
            (now_posix + task.interval) <= task.end_time)):
            if task.start_time <= now_posix:
                expires_in = (task.interval)
            else:
                expires_in = ((task.start_time - now_posix) + task.interval)

        return expires_in

    def __validate_expire_times(self, expire_times):
        for index, expire_time in enumerate(expire_times):
            if expire_time < 0:
                return False
        return True

    def __schedule(self, scheduler_entry, id=False):
        # if "id" is set, it means scheduler_entry is already in Resource Regsitry. This can occur during a sytsem restart
        spawns = []
        task = scheduler_entry.entry
        expire_times = self.__get_expire_time(task)
        if not self.__validate_expire_times(expire_times):
            log.error(
                "SchedulerService:__schedule: scheduling: expire time is less than zero: "
            )
            return False

        if not id:
            id, _ = self.clients.resource_registry.create(scheduler_entry)
        self.__create_entry(task, spawns, id)
        for index, expire_time in enumerate(expire_times):
            log.debug("SchedulerService:__schedule: scheduling: - " +
                      task.event_origin + " - Now: " + str(self.__now()) +
                      " - Expire: " + str(expire_time) + " - ID: " + id +
                      " - Index:" + str(index))
            spawn = gevent.spawn_later(expire_time, self._expire_callback, id,
                                       index)
            spawns.append(spawn)
        return id

    def __reschedule(self, id, index):
        if self._no_reschedule:
            log.debug(
                "SchedulerService:__reschedule: process quitting, refusing to reschedule %s",
                id)
            return False

        task = self.__get_entry(id)
        expire_time = self.__get_reschedule_expire_time(task, index)
        if expire_time:
            log.debug("SchedulerService:__reschedule: rescheduling: - " +
                      task.event_origin + " - Now: " + str(self.__now()) +
                      " - Expire: " + str(expire_time) + " - ID: " + id +
                      " -Index:" + str(index))
            spawn = gevent.spawn_later(expire_time, self._expire_callback, id,
                                       index)
            self.__update_entry(id=id, index=index, spawn=spawn)

            return True
        else:
            log.debug(
                "SchedulerService:__reschedule: timer expired. Removed from RR  : - "
                + task.event_origin + " - Now: " + str(self.__now()) +
                " - Expire: " + str(expire_time) + " - ID: " + id +
                " -Index:" + str(index))
        return False

    def __create_entry(self, task, spawns, id):
        self.schedule_entries[id] = {"task": task, "spawns": spawns}

    def __update_entry(self, id, index, spawn=None, interval=None):
        if spawn is not None:
            self.schedule_entries[id]["spawns"][index] = spawn
        if interval is not None:
            self.schedule_entries[id]["task"].interval = interval

    def __get_entry_all(self, id):
        return self.schedule_entries[id]

    def __get_spawns(self, id):
        return self.schedule_entries[id]["spawns"]

    def __get_entry(self, id):
        return self.schedule_entries[id]["task"]

    def __delete(self, id, index, force=False):
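        # force=True removes a TimeOfDayTimer entry (and its registry resource) immediately.
        # Otherwise a TimeOfDayTimer is removed only once all of its times_of_day have
        # expired; other timer types are removed right away.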
        if id in self.schedule_entries:
            task = self.__get_entry(id)
            if force and type(task) == TimeOfDayTimer:
                log.debug("SchedulerService:__delete: entry deleted " + id +
                          " -Index:" + str(index))
                del self.schedule_entries[id]
                self.clients.resource_registry.delete(id)
            elif type(task) == TimeOfDayTimer:
                task = self.__get_entry(id)
                task.times_of_day[index] = None
                # Delete if all the timers are set to none
                are_all_timers_expired = True
                for time_of_day in task.times_of_day:
                    if time_of_day is not None:
                        are_all_timers_expired = False
                        break
                if are_all_timers_expired:
                    log.debug("SchedulerService:__delete: entry deleted " +
                              id + " -Index:" + str(index))
                    del self.schedule_entries[id]
                    self.clients.resource_registry.delete(id)
            else:
                log.debug("SchedulerService:__delete: entry deleted " + id +
                          " -Index:" + str(index))
                del self.schedule_entries[id]
                self.clients.resource_registry.delete(id)
            return True
        return False

    def __is_timer_valid(self, task):
        # Validate event_origin is set
        if not task.event_origin:
            log.error(
                "SchedulerService.__is_timer_valid: event_origin is not set")
            return False
        # Validate the timer is set correctly
        if type(task) == IntervalTimer:
            if (task.end_time != -1
                    and (self.__now_posix(self.__now()) >= task.end_time)):
                log.error(
                    "SchedulerService.__is_timer_valid: IntervalTimer is set to incorrect value"
                )
                return False
        elif type(task) == TimeOfDayTimer:
            for time_of_day in task.times_of_day:
                time_of_day['hour'] = int(time_of_day['hour'])
                time_of_day['minute'] = int(time_of_day['minute'])
                time_of_day['second'] = int(time_of_day['second'])
                if ((time_of_day['hour'] < 0 or time_of_day['hour'] > 23) or
                    (time_of_day['minute'] < 0 or time_of_day['minute'] > 59)
                        or
                    (time_of_day['second'] < 0 or time_of_day['second'] > 61)):
                    log.error(
                        "SchedulerService.__is_timer_valid: TimeOfDayTimer is set to incorrect value"
                    )
                    return False
        else:
            return False

        return True

    def _stop_pending_timers(self):
        """
        Safely stops all pending and active timers.

        For all timers still waiting to run, calls kill on them. For active timers, let
        them exit naturally and prevent the reschedule by setting the _no_reschedule flag.
        """
        # prevent reschedules
        self._no_reschedule = True

        gls = []
        for timer_id in self.schedule_entries:
            spawns = self.__get_spawns(timer_id)

            for spawn in spawns:
                gls.append(spawn)
                # only kill spawns that haven't started yet
                if spawn._start_event is not None:
                    spawn.kill()

            log.debug("_stop_pending_timers: timer %s deleted", timer_id)

        self.schedule_entries.clear()

        # wait for running gls to finish up
        gevent.joinall(gls, timeout=10)

        # allow reschedules from here on out
        self._no_reschedule = False

    def on_system_restart(self):
        '''
        On system restart, get timer data from the Resource Registry and restore the Scheduler state
        '''
        # Remove all active timers.
        # When this method is called there should not be any active timers, but if it is called from a test
        # this removes the currently active timers before restoring them from the Resource Registry.
        self._stop_pending_timers()

        # Restore the timer from Resource Registry
        scheduler_entries, _ = self.clients.resource_registry.find_resources(
            RT.SchedulerEntry, id_only=False)
        for scheduler_entry in scheduler_entries:
            self.__schedule(scheduler_entry, scheduler_entry._id)
            log.debug("SchedulerService:on_system_restart: timer restored: " +
                      scheduler_entry._id)

    def create_timer(self, scheduler_entry=None):
        """
        Create a timer which will send TimerEvents as requested for a given schedule.
        The schedule request is expressed through a specific subtype of TimerSchedulerEntry.
        The task is delivered as a TimerEvent to which processes can subscribe. The creator
        defines the fields of the task. A GUID-based id prefixed by a readable process name
        is recommended for the origin. Because the delivery of the task is via the ION Exchange,
        there is potential for a small deviation in precision.
        Returns a timer_id which can be used to cancel the timer.

        @param scheduler_entry    TimerSchedulerEntry
        @retval timer_id    str
        @throws BadRequest    if the timer is malformed and cannot be scheduled
        """
        ##scheduler_entry = scheduler_entry.entry
        status = self.__is_timer_valid(scheduler_entry.entry)
        if not status:
            raise BadRequest
        id = self.__schedule(scheduler_entry)
        if not id:
            raise BadRequest
        return id

    def cancel_timer(self, timer_id=''):
        """
        Cancels an existing timer which has not reached its expire time.

        @param timer_id    str
        @throws BadRequest    if timer_id doesn't exist
        """
        try:
            spawns = self.__get_spawns(timer_id)
            for spawn in spawns:
                spawn.kill()
            log.debug("SchedulerService: cancel_timer: id: " + str(timer_id))
            self.__delete(id=timer_id, index=None, force=True)
        except:
            log.error(
                "SchedulerService: cancel_timer: timer id doesn't exist: " +
                str(timer_id))
            raise BadRequest

    def create_interval_timer(self,
                              start_time="",
                              interval=0,
                              end_time="",
                              event_origin="",
                              event_subtype=""):
        if (end_time != -1 and
            (self.__now_posix(self.__now()) >= end_time)) or not event_origin:
            log.error(
                "SchedulerService.create_interval_timer: event_origin is not set or end_time has already passed"
            )
            raise BadRequest
        if start_time == "now":
            start_time = self.__now_posix(self.__now())
        log.debug(
            "SchedulerService:create_interval_timer start_time: %s interval: %s end_time: %s event_origin: %s"
            % (start_time, interval, end_time, event_origin))
        interval_timer = IonObject(
            "IntervalTimer", {
                "start_time": start_time,
                "interval": interval,
                "end_time": end_time,
                "event_origin": event_origin,
                "event_subtype": event_subtype
            })
        se = IonObject(RT.SchedulerEntry, {"entry": interval_timer})
        return self.create_timer(se)

    def create_time_of_day_timer(self,
                                 times_of_day=None,
                                 expires='',
                                 event_origin='',
                                 event_subtype=''):
        # Validate the timer
        if not event_origin:
            log.error(
                "SchedulerService.create_time_of_day_timer: event_origin is set to invalid value"
            )
            raise BadRequest
        for time_of_day in times_of_day:
            time_of_day['hour'] = int(time_of_day['hour'])
            time_of_day['minute'] = int(time_of_day['minute'])
            time_of_day['second'] = int(time_of_day['second'])
            log.debug(
                "SchedulerService:create_time_of_day_timer - hour: %d minute: %d second: %d expires: %s event_origin: %s"
                % (time_of_day['hour'], time_of_day['minute'],
                   time_of_day['second'], expires, event_origin))
            if ((time_of_day['hour'] < 0 or time_of_day['hour'] > 23) or
                (time_of_day['minute'] < 0 or time_of_day['minute'] > 59) or
                (time_of_day['second'] < 0 or time_of_day['second'] > 61)):
                log.error(
                    "SchedulerService:create_time_of_day_timer: TimeOfDayTimer is set to invalid value"
                )
                raise BadRequest

        time_of_day_timer = IonObject(
            "TimeOfDayTimer", {
                "times_of_day": times_of_day,
                "expires": expires,
                "event_origin": event_origin,
                "event_subtype": event_subtype
            })

        se = IonObject(RT.SchedulerEntry, {"entry": time_of_day_timer})
        return self.create_timer(se)
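
A minimal usage sketch of the timer API above (the scheduler_client handle and the import path are assumptions; event_type="ResourceEvent" matches the publisher configured in on_start(), and start_time="now" / end_time=-1 follow the conventions in create_interval_timer()):

import uuid
from pyon.event.event import EventSubscriber

def timer_fired(event, headers):
    # Called once per interval with the event published by __notify().
    print "timer fired, origin=%s" % event.origin

origin = "my_process-" + str(uuid.uuid4())   # readable prefix + GUID, as recommended in create_timer()

sub = EventSubscriber(event_type="ResourceEvent", origin=origin, callback=timer_fired)
sub.start()

# Fire every 10 seconds, starting now, with no end time.
timer_id = scheduler_client.create_interval_timer(start_time="now", interval=10,
                                                  end_time=-1, event_origin=origin)
# ... later ...
scheduler_client.cancel_timer(timer_id)
sub.stop()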
コード例 #59
class EventAlertTransform(TransformEventListener):
    def on_start(self):
        log.debug('EventAlertTransform.on_start()')
        super(EventAlertTransform, self).on_start()

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # get the algorithm to use
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

        self.timer_origin = self.CFG.get_safe('process.timer_origin',
                                              'Interval Timer')
        self.instrument_origin = self.CFG.get_safe('process.instrument_origin',
                                                   '')

        self.event_times = []

        #-------------------------------------------------------------------------------------
        # Set up a listener for instrument events
        #-------------------------------------------------------------------------------------

        self.instrument_event_queue = gevent.queue.Queue()

        def instrument_event_received(message, headers):
            log.debug(
                "EventAlertTransform received an instrument event here::: %s" %
                message)
            self.instrument_event_queue.put(message)

        self.instrument_event_subscriber = EventSubscriber(
            origin=self.instrument_origin, callback=instrument_event_received)

        self.instrument_event_subscriber.start()

        #-------------------------------------------------------------------------------------
        # Create the publisher that will publish the Alert message
        #-------------------------------------------------------------------------------------

        self.event_publisher = EventPublisher()

    def on_quit(self):
        self.instrument_event_subscriber.stop()
        super(EventAlertTransform, self).on_quit()

    def process_event(self, msg, headers):
        '''
        The callback method.
        If the events satisfy the criteria, publish an alert event.
        '''

        if msg.origin == self.timer_origin:
            if self.instrument_event_queue.empty():
                log.debug(
                    "no event received from the instrument. Publishing an alarm event!"
                )
                self.publish()
            else:
                log.debug(
                    "Events were received from the instrument in between timer events. Instrument working normally."
                )
                self.instrument_event_queue.queue.clear()

    def publish(self):

        #-------------------------------------------------------------------------------------
        # publish an alert event
        #-------------------------------------------------------------------------------------
        self.event_publisher.publish_event(
            event_type="DeviceEvent",
            origin="EventAlertTransform",
            description="An alert event being published.")
コード例 #60
    def test_pub_on_different_subsubtypes(self):
        res_list = [
            DotDict(ar=event.AsyncResult(), gq=queue.Queue(), count=0)
            for i in xrange(4)
        ]

        def cb_gen(num):
            def cb(event, *args, **kwargs):
                res_list[num].count += 1
                res_list[num].gq.put(event)
                if event.description == "end":
                    res_list[num].ar.set()

            return cb

        sub0 = EventSubscriber(event_type="ResourceModifiedEvent",
                               sub_type="st1.*",
                               callback=cb_gen(0))
        sub0.activate()

        sub1 = EventSubscriber(event_type="ResourceModifiedEvent",
                               sub_type="st1.a",
                               callback=cb_gen(1))
        sub1.activate()

        sub2 = EventSubscriber(event_type="ResourceModifiedEvent",
                               sub_type="*.a",
                               callback=cb_gen(2))
        sub2.activate()

        sub3 = EventSubscriber(event_type="ResourceModifiedEvent",
                               sub_type="st1",
                               callback=cb_gen(3))
        sub3.activate()

        pub1 = EventPublisher(event_type="ResourceModifiedEvent")

        pub1.publish_event(origin="one", sub_type="st1.a", description="1")
        pub1.publish_event(origin="two", sub_type="st1", description="2")
        pub1.publish_event(origin="three", sub_type="st1.b", description="3")

        pub1.publish_event(origin="four", sub_type="st2.a", description="4")
        pub1.publish_event(origin="five", sub_type="st2", description="5")

        pub1.publish_event(origin="six", sub_type="a", description="6")
        pub1.publish_event(origin="seven", sub_type="", description="7")

        pub1.publish_event(origin="end", sub_type="st1.a", description="end")
        pub1.publish_event(origin="end", sub_type="st1", description="end")

        [res_list[i].ar.get(timeout=5) for i in xrange(3)]

        sub0.deactivate()
        sub1.deactivate()
        sub2.deactivate()
        sub3.deactivate()

        for i in xrange(4):
            res_list[i].res = []
            for x in xrange(res_list[i].count):
                res_list[i].res.append(res_list[i].gq.get(timeout=5))

        self.assertEquals(len(res_list[0].res), 3)
        self.assertEquals(res_list[0].res[0].description, "1")

        self.assertEquals(len(res_list[1].res), 2)
        self.assertEquals(res_list[1].res[0].description, "1")

        self.assertEquals(len(res_list[2].res), 3)
        self.assertEquals(res_list[2].res[0].description, "1")

        self.assertEquals(len(res_list[3].res), 2)
        self.assertEquals(res_list[3].res[0].description, "2")