Example #1
    def test_polling(self):
        with self.assertRaises(gevent.timeout.Timeout):
            def callback_bad():
                return False

            poll(callback_bad, timeout=0.5)

        def callback_good():
            self.counter += 1
            return (self.counter == 2)

        self.assertTrue(poll(callback_good, timeout=0.5))
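Every example on this page leans on a poll() helper with the same observable semantics: it keeps invoking the callback until the callback returns something truthy, and a gevent.timeout.Timeout escapes if that never happens before the deadline. A minimal sketch of that assumed contract (not the project's actual implementation; the wait interval is a hypothetical knob):

import gevent

def poll(callback, *args, **kwargs):
    # Retry `callback` until it returns a truthy value; gevent raises
    # Timeout out of this block once `timeout` seconds have elapsed.
    timeout = kwargs.pop('timeout', 10)
    wait = kwargs.pop('wait', 0.1)
    with gevent.Timeout(timeout):
        while True:
            retval = callback(*args, **kwargs)
            if retval:
                return retval
            gevent.sleep(wait)

Later examples rely on the positional-argument forwarding as well, e.g. poll(poller, extended_product, timeout=30).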
Example #2
    def test_polling(self):
        with self.assertRaises(gevent.timeout.Timeout):

            def callback_bad():
                return False

            poll(callback_bad, timeout=0.5)

        def callback_good():
            self.counter += 1
            return (self.counter == 2)

        self.assertTrue(poll(callback_good, timeout=0.5))
Example #3
    def test_last_granule(self):
        #--------------------------------------------------------------------------------
        # Create the necessary configurations for the test
        #--------------------------------------------------------------------------------
        pdict_id          = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        stream_def_id     = self.pubsub_management.create_stream_definition('ctd parsed', parameter_dictionary_id=pdict_id)
        stream_id, route  = self.pubsub_management.create_stream('last_granule', exchange_point=self.exchange_point_name, stream_definition_id=stream_def_id)
        config_id         = self.get_ingestion_config()
        dataset_id        = self.create_dataset(pdict_id)

        self.ingestion_management.persist_data_stream(stream_id=stream_id, ingestion_configuration_id=config_id, dataset_id=dataset_id)
        #--------------------------------------------------------------------------------
        # Create the datastore first
        #--------------------------------------------------------------------------------
        self.get_datastore(dataset_id)

        self.publish_hifi(stream_id, route, 0)
        self.publish_hifi(stream_id, route, 1)

        self.wait_until_we_have_enough_granules(dataset_id, 2)  # I just need two


        success = False

        def verifier():
            replay_granule = self.data_retriever.retrieve_last_granule(dataset_id)

            rdt = RecordDictionaryTool.load_from_granule(replay_granule)

            comp = rdt['time'] == np.arange(10) + 10
            if not isinstance(comp, bool):
                return comp.all()
            return False

        success = poll(verifier)

        self.assertTrue(success)

        success = False

        def verify_points():
            replay_granule = self.data_retriever.retrieve_last_data_points(dataset_id, 5)

            rdt = RecordDictionaryTool.load_from_granule(replay_granule)

            comp = rdt['time'] == np.arange(15, 20)
            if not isinstance(comp, bool):
                return comp.all()
            return False

        success = poll(verify_points)

        self.assertTrue(success)
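The verifier idiom above leans on a NumPy behaviour: an elementwise == comparison yields a boolean ndarray when the shapes line up, but (in the NumPy of this era) it returns the plain scalar False on a shape mismatch, hence the isinstance(comp, bool) guard before reducing with .all(). A tiny illustration:

import numpy as np

comp = np.arange(10, 20) == np.arange(10) + 10  # matching shapes -> bool ndarray
assert not isinstance(comp, bool)
assert comp.all()                               # every element compared equal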
Example #4
    def test_last_granule(self):
        stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset()
        self.start_ingestion(stream_id, dataset_id)

        self.publish_hifi(stream_id, route, 0)
        self.publish_hifi(stream_id, route, 1)

        self.wait_until_we_have_enough_granules(dataset_id, 20)  # I just need two

        success = False

        def verifier():
            replay_granule = self.data_retriever.retrieve_last_data_points(
                dataset_id, 10)

            rdt = RecordDictionaryTool.load_from_granule(replay_granule)

            comp = rdt['time'] == np.arange(10) + 10
            if not isinstance(comp, bool):
                return comp.all()
            return False

        success = poll(verifier)

        self.assertTrue(success)

        success = False

        def verify_points():
            replay_granule = self.data_retriever.retrieve_last_data_points(
                dataset_id, 5)

            rdt = RecordDictionaryTool.load_from_granule(replay_granule)

            comp = rdt['time'] == np.arange(15, 20)
            if not isinstance(comp, bool):
                return comp.all()
            return False

        success = poll(verify_points)

        self.assertTrue(success)
        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)
Example #5
    def test_last_granule(self):
        stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset()
        self.start_ingestion(stream_id, dataset_id)

        self.publish_hifi(stream_id, route, 0)
        self.publish_hifi(stream_id, route, 1)

        self.wait_until_we_have_enough_granules(dataset_id, 20)  # I just need two

        success = False

        def verifier():
            replay_granule = self.data_retriever.retrieve_last_data_points(dataset_id, 10)

            rdt = RecordDictionaryTool.load_from_granule(replay_granule)

            comp = rdt["time"] == np.arange(10) + 10
            if not isinstance(comp, bool):
                return comp.all()
            return False

        success = poll(verifier)

        self.assertTrue(success)

        success = False

        def verify_points():
            replay_granule = self.data_retriever.retrieve_last_data_points(dataset_id, 5)

            rdt = RecordDictionaryTool.load_from_granule(replay_granule)

            comp = rdt["time"] == np.arange(15, 20)
            if not isinstance(comp, bool):
                return comp.all()
            return False

        success = poll(verify_points)

        self.assertTrue(success)
        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)
Example #6

    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug('new InstrumentModel id = %s', instModel_id)

        raw_config = StreamConfiguration(stream_name='raw', parameter_dictionary_name='raw')
        parsed_config = StreamConfiguration(stream_name='parsed', parameter_dictionary_name='ctd_parsed_param_dict')

        # Create InstrumentAgent
        instAgent_obj = IonObject(RT.InstrumentAgent,
                                  name='agent007',
                                  description="SBE37IMAgent",
                                  driver_uri=DRV_URI_GOOD,
                                  stream_configurations=[raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug('test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241)')
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345")
        instDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, instDevice_id)
        log.debug("test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241)", instDevice_id)

        port_agent_config = {
            'device_addr':  CFG.device.sbe37.host,
            'device_port':  CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance, name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config,
                                          alerts=[])

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(instAgentInstance_obj,
                                                                               instAgent_id,
                                                                               instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()

        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('raw', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(name='raw', parameter_dictionary_id=raw_pdict_id)

        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
            name='the parsed data',
            description='ctd stream test',
            temporal_domain=tdom,
            spatial_domain=sdom)

        data_product_id1 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug('new dp_id = %s', data_product_id1)
        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s', dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        dp_obj = IonObject(RT.DataProduct,
            name='the raw data',
            description='raw stream test',
            temporal_domain=tdom,
            spatial_domain=sdom)

        data_product_id2 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification(user_name='user_1', instrument_id=instDevice_id, product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification(user_name='user_2', instrument_id=instDevice_id, product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s', str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s', dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])


        # cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        # wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(instAgentInstance_id)
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     instDevice_id,
                                     ProcessStateEnum.RUNNING)
        self.assertTrue(gate.await(30), "The instrument agent instance (%s) did not spawn in 30 seconds" %
                                        gate.process_id)

        #log.trace('Instrument agent instance obj: = %s' , str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=gate.process_id,
                                              process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s" , str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s" , str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.INACTIVE, state)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("(L4-CI-SA-RQ-334): current state after sending go_active command %s" , str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.STOPPED, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        for i in xrange(10):
            monitor = DatasetMonitor(dataset_id=self.parsed_dataset)
            self._ia_client.execute_resource(AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE))
            if not monitor.wait():
                raise AssertionError('Failed on granule %i' % i)
            monitor.stop()


#        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
#        for i in xrange(10):
#            retval = self._ia_client.execute_resource(cmd)
#            log.debug("test_activateInstrumentSample: return from sample %s" , str(retval))

        log.debug( "test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s" , str(reply))


        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data_raw = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data_raw, Granule)
        rdt_raw = RecordDictionaryTool.load_from_granule(replay_data_raw)
        log.debug("RDT raw: %s", str(rdt_raw.pretty_print()) )

        self.assertIn('raw', rdt_raw)
        raw_vals = rdt_raw['raw']

        all_raw = "".join(raw_vals)

        # look for 't' entered after a prompt -- ">t"
        t_commands = all_raw.count(">t")

        if 10 != t_commands:
            log.error("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals):
                log.error("raw val %s: %s", i, [r])
            self.fail("Expected 10 't' strings in raw_vals, got %s" % t_commands)
        else:
            log.debug("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals):
                log.debug("raw val %s: %s", i, [r])

        replay_data_parsed = self.dataretrieverclient.retrieve(self.parsed_dataset)
        self.assertIsInstance(replay_data_parsed, Granule)
        rdt_parsed = RecordDictionaryTool.load_from_granule(replay_data_parsed)
        log.debug("test_activateInstrumentSample: RDT parsed: %s", str(rdt_parsed.pretty_print()) )
        self.assertIn('temp', rdt_parsed)
        temp_vals = rdt_parsed['temp']
        pressure_vals  = rdt_parsed['pressure']
        if 10 != len(temp_vals):
            log.error("%s temp_vals: %s", len(temp_vals), temp_vals)
            self.fail("Expected 10 temp_vals, got %s" % len(temp_vals))


        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """


        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id1, user_id=user_id_1)
        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product(expected_data_product_id=data_product_id1, extended_data_product=extended_product)


        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_1)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(expected_data_product_id=data_product_id2, extended_data_product=extended_product)


        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(expected_instrument_device_id=instDevice_id, extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
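The command sequence in this example walks the agent through its full state machine. Restating the transitions that the asserts above pin down (derived from this test itself, not from separate agent documentation):

# Agent state asserted after each command in the walkthrough above;
# the names mirror ResourceAgentEvent and ResourceAgentState members.
EXPECTED_TRANSITIONS = [
    ('INITIALIZE', 'INACTIVE'),
    ('GO_ACTIVE', 'IDLE'),
    ('RUN', 'COMMAND'),
    ('PAUSE', 'STOPPED'),
    ('RESUME', 'COMMAND'),
    ('CLEAR', 'IDLE'),
    ('RUN', 'COMMAND'),
]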
Example #7
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug('new InstrumentModel id = %s ', instModel_id)

        # Create stream alarms
        """
        test_two_sided_interval
        Test interval alarm and alarm event publishing for a closed
        interval.
        """

        #        kwargs = {
        #            'name' : 'test_sim_warning',
        #            'stream_name' : 'parsed',
        #            'value_id' : 'temp',
        #            'message' : 'Temperature is above test range of 5.0.',
        #            'type' : StreamAlarmType.WARNING,
        #            'upper_bound' : 5.0,
        #            'upper_rel_op' : '<'
        #        }

        kwargs = {
            'name': 'temperature_warning_interval',
            'stream_name': 'parsed',
            'value_id': 'temp',
            'message': 'Temperature is below the normal range of 50.0 and above.',
            'type': StreamAlarmType.WARNING,
            'lower_bound': 50.0,
            'lower_rel_op': '<'
        }

        # Create alarm object.
        alarm = {}
        alarm['type'] = 'IntervalAlarmDef'
        alarm['kwargs'] = kwargs

        raw_config = StreamConfiguration(
            stream_name='raw',
            parameter_dictionary_name='ctd_raw_param_dict',
            records_per_granule=2,
            granule_publish_rate=5)
        parsed_config = StreamConfiguration(
            stream_name='parsed',
            parameter_dictionary_name='ctd_parsed_param_dict',
            records_per_granule=2,
            granule_publish_rate=5,
            alarms=[alarm])

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name='agent007',
            description="SBE37IMAgent",
            driver_uri="http://sddevrepo.oceanobservatories.org/releases/seabird_sbe37smb_ooicore-0.0.1a-py2.7.egg",
            stream_configurations=[raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(
            instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            'test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) '
        )
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345")
        instDevice_id = self.imsclient.create_instrument_device(
            instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(
            instModel_id, instDevice_id)

        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) ",
            instDevice_id)

        port_agent_config = {
            'device_addr': CFG.device.sbe37.host,
            'device_port': CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance,
                                          name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config)

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()

        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(
            name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_raw_param_dict', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(
            name='raw', parameter_dictionary_id=raw_pdict_id)

        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='the parsed data',
                           description='ctd stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id1 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug('new dp_id = %s', data_product_id1)
        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s', dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        dp_obj = IonObject(RT.DataProduct,
                           name='the raw data',
                           description='raw stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id2 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification(user_name='user_1',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification(user_name='user_2',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s', str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s', dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        #elastic search debug
        es_indexes, _ = self.container.resource_registry.find_resources(
            restype='ElasticSearchIndex')
        log.debug('ElasticSearch indexes: %s', [i.name for i in es_indexes])
        log.debug('Bootstrap %s', CFG.bootstrap.use_es)

        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(
                instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])

        #setup a subscriber to alarm events from the device
        self._events_received = []
        self._event_count = 0
        self._samples_out_of_range = 0
        self._samples_complete = False
        self._async_sample_result = AsyncResult()

        def consume_event(*args, **kwargs):
            log.debug(
                'TestActivateInstrument received ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))
            self._events_received.append(args[0])
            self._event_count = len(self._events_received)
            self._async_sample_result.set()

        self._event_subscriber = EventSubscriber(
            event_type='StreamWarningAlarmEvent',
            callback=consume_event,
            origin=instDevice_id)
        self._event_subscriber.start()

        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        def stop_subscriber():
            self._event_subscriber.stop()
            self._event_subscriber = None

        self.addCleanup(stop_subscriber)

        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(
            instAgentInstance_id)
        gate = ProcessStateGate(self.processdispatchclient.read_process,
                                inst_agent_instance_obj.agent_process_id,
                                ProcessStateEnum.RUNNING)
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" %
            inst_agent_instance_obj.agent_process_id)

        log.debug('Instrument agent instance obj: = %s',
                  str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(
            instDevice_id,
            to_name=inst_agent_instance_obj.agent_process_id,
            process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s",
                  str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s", str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s",
                  str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "(L4-CI-SA-RQ-334): current state after sending go_active command %s",
            str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s", str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.STOPPED)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
        for i in xrange(10):
            retval = self._ia_client.execute_resource(cmd)
            log.debug("test_activateInstrumentSample: return from sample %s",
                      str(retval))

        log.debug("test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s",
                  str(reply))

        self._samples_complete = True

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.dataretrieverclient.retrieve(self.parsed_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("test_activateInstrumentSample: RDT parsed: %s",
                  str(rdt.pretty_print()))
        temp_vals = rdt['temp']
        self.assertEquals(len(temp_vals), 10)
        log.debug("test_activateInstrumentSample: all temp_vals: %s",
                  temp_vals)

        #out_of_range_temp_vals = [i for i in temp_vals if i > 5]
        out_of_range_temp_vals = [i for i in temp_vals if i < 50.0]
        log.debug("test_activateInstrumentSample: Out_of_range_temp_vals: %s",
                  out_of_range_temp_vals)
        self._samples_out_of_range = len(out_of_range_temp_vals)

        # if no bad values were produced, then do not wait for an event
        if self._samples_out_of_range == 0:
            self._async_sample_result.set()

        log.debug("test_activateInstrumentSample: _events_received: %s",
                  self._events_received)
        log.debug("test_activateInstrumentSample: _event_count: %s",
                  self._event_count)

        self._async_sample_result.get(timeout=CFG.endpoint.receive.timeout)

        replay_data = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("RDT raw: %s", str(rdt.pretty_print()))

        raw_vals = rdt['raw']
        self.assertEquals(len(raw_vals), 10)

        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """

        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id1, user_id=user_id_1)

        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id1,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        #put some events into the eventsdb to test - this should set the comms and data status to WARNING
        #--------------------------------------------------------------------------------

        t = get_ion_ts()
        self.event_publisher.publish_event(ts_created=t,
                                           event_type='DeviceStatusEvent',
                                           origin=instDevice_id,
                                           state=DeviceStatusType.OUT_OF_RANGE,
                                           values=[200])
        self.event_publisher.publish_event(
            ts_created=t,
            event_type='DeviceCommsEvent',
            origin=instDevice_id,
            state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
            lapse_interval_seconds=20)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_1)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id2,
            extended_data_product=extended_product)

        #---------- Put some events into the eventsdb to test - this should set the comms and data status to WARNING  ---------

        t = get_ion_ts()
        self.event_publisher.publish_event(ts_created=t,
                                           event_type='DeviceStatusEvent',
                                           origin=instDevice_id,
                                           state=DeviceStatusType.OUT_OF_RANGE,
                                           values=[200])
        self.event_publisher.publish_event(
            ts_created=t,
            event_type='DeviceCommsEvent',
            origin=instDevice_id,
            state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
            lapse_interval_seconds=20)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
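Example #7 configures its alarm as a plain dict naming an IntervalAlarmDef, with lower_bound/lower_rel_op kwargs. A minimal sketch of how such a one-sided interval check could evaluate a sample (assumed semantics of those kwargs, not the actual alarm class):

import operator

# Map the rel-op strings used in the alarm kwargs to comparison functions.
REL_OPS = {'<': operator.lt, '<=': operator.le}

def breaches_lower_bound(value, lower_bound=50.0, lower_rel_op='<'):
    # True when the sample falls below the configured bound, i.e. the
    # condition that would raise the WARNING alarm configured above.
    return REL_OPS[lower_rel_op](value, lower_bound)

assert breaches_lower_bound(42.0) is True
assert breaches_lower_bound(55.0) is False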
Example #8

    def test_demo_stream_granules_processing(self):
        """
        Test that the Demo Stream Alert Transform is functioning. The transform coordinates with the scheduler.
        It is configured to listen to a source that publishes granules. It publishes a DeviceStatusEvent if it
        receives a granule with bad data or a DeviceCommsEvent if no granule has arrived between two timer events.

        The transform is configured at launch using a config dictionary.
        """
        #-------------------------------------------------------------------------------------
        # Start a subscriber to listen for an alert event from the Stream Alert Transform
        #-------------------------------------------------------------------------------------

        queue_bad_data = gevent.queue.Queue()
        queue_no_data = gevent.queue.Queue()

        def bad_data(message, headers):
            log.debug("Got a BAD data event: %s" % message)
            if message.type_ == "DeviceStatusEvent":
                queue_bad_data.put(message)

        def no_data(message, headers):
            log.debug("Got a NO data event: %s" % message)
            queue_no_data.put(message)

        event_subscriber_bad_data = EventSubscriber(origin="instrument_1",
            event_type="DeviceStatusEvent",
            callback=bad_data)

        event_subscriber_no_data = EventSubscriber(origin="instrument_1",
            event_type="DeviceCommsEvent",
            callback=no_data)

        event_subscriber_bad_data.start()
        event_subscriber_no_data.start()

        self.addCleanup(event_subscriber_bad_data.stop)
        self.addCleanup(event_subscriber_no_data.stop)

        #-------------------------------------------------------------------------------------
        # The configuration for the Stream Alert Transform... set up the event types to listen to
        #-------------------------------------------------------------------------------------
        self.valid_values = [-100, 100]
        self.timer_interval = 5
        self.queue_name = 'a_queue'

        config = {
            'process':{
                'timer_interval': self.timer_interval,
                'queue_name': self.queue_name,
                'variable_name': 'input_voltage',
                'time_field_name': 'preferred_timestamp',
                'valid_values': self.valid_values,
                'timer_origin': 'Interval Timer',
                'event_origin': 'instrument_1'
            }
        }

        #-------------------------------------------------------------------------------------
        # Create the process
        #-------------------------------------------------------------------------------------
        pid = TransformPrototypeIntTest.create_process(name='DemoStreamAlertTransform',
            module='ion.processes.data.transforms.event_alert_transform',
            class_name='DemoStreamAlertTransform',
            configuration=config)
        self.addCleanup(self.process_dispatcher.cancel_process, pid)
        self.assertIsNotNone(pid)

        #-------------------------------------------------------------------------------------
        # Publish streams and make assertions about alerts
        #-------------------------------------------------------------------------------------

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(name='platform_eng_parsed', id_only=True)

        stream_def_id = self.pubsub_management.create_stream_definition('demo_stream', parameter_dictionary_id=pdict_id)
        stream_id, stream_route = self.pubsub_management.create_stream(name='test_demo_alert',
            exchange_point='exch_point_1',
            stream_definition_id=stream_def_id)

        sub_1 = self.pubsub_management.create_subscription(name='sub_1', stream_ids=[stream_id], exchange_points=['exch_point_1'], exchange_name=self.queue_name)
        self.pubsub_management.activate_subscription(sub_1)
        self.exchange_names.append('sub_1')
        self.exchange_points.append('exch_point_1')

        #-------------------------------------------------------------------------------------
        # publish a *GOOD* granule
        #-------------------------------------------------------------------------------------
        self.length = 2
        val = numpy.array([random.uniform(0, 50) for l in xrange(self.length)])
        self._publish_granules(stream_id=stream_id, stream_route=stream_route, number=1, values=val)

        self.assertTrue(queue_bad_data.empty())

        #-------------------------------------------------------------------------------------
        # publish a few *BAD* granules
        #-------------------------------------------------------------------------------------
        self.number = 2
        val = numpy.array([(110 + l) for l in xrange(self.length)])
        self._publish_granules(stream_id=stream_id, stream_route=stream_route, number=self.number, values=val)

        for number in xrange(self.number):
            event = queue_bad_data.get(timeout=40)
            self.assertEquals(event.type_, "DeviceStatusEvent")
            self.assertEquals(event.origin, "instrument_1")
            self.assertEquals(event.state, DeviceStatusType.STATUS_WARNING)
            self.assertEquals(event.valid_values, self.valid_values)
            self.assertEquals(event.sub_type, 'input_voltage')
            self.assertTrue(set(event.values) == set(val))

            s = set(event.time_stamps)
            cond = s in [set(numpy.array([1 for l in xrange(self.length)]).tolist()),
                         set(numpy.array([2 for l in xrange(self.length)]).tolist())]
            self.assertTrue(cond)

        # Ensure that only the bad values generated alert events; the queue should be empty now
        self.assertEquals(queue_bad_data.qsize(), 0)

        #-------------------------------------------------------------------------------------
        # Do not publish any granules for some time. This should generate a DeviceCommsEvent for the communication status
        #-------------------------------------------------------------------------------------
        event = queue_no_data.get(timeout=15)

        self.assertEquals(event.type_, "DeviceCommsEvent")
        self.assertEquals(event.origin, "instrument_1")
        self.assertEquals(event.origin_type, "PlatformDevice")
        self.assertEquals(event.state, DeviceCommsType.DATA_DELIVERY_INTERRUPTION)
        self.assertEquals(event.sub_type, 'input_voltage')

        #-------------------------------------------------------------------------------------
        # Empty the queues and repeat tests
        #-------------------------------------------------------------------------------------
        queue_bad_data.queue.clear()
        queue_no_data.queue.clear()

        #-------------------------------------------------------------------------------------
        # publish a *GOOD* granule again
        #-------------------------------------------------------------------------------------
        val = numpy.array([(l + 20) for l in xrange(self.length)])
        self._publish_granules(stream_id=stream_id, stream_route=stream_route, number=1, values=val)

        self.assertTrue(queue_bad_data.empty())

        #-------------------------------------------------------------------------------------
        # Again do not publish any granules for some time. This should generate a DeviceCommsEvent for the communication status
        #-------------------------------------------------------------------------------------

        event = queue_no_data.get(timeout=20)

        self.assertEquals(event.type_, "DeviceCommsEvent")
        self.assertEquals(event.origin, "instrument_1")
        self.assertEquals(event.origin_type, "PlatformDevice")
        self.assertEquals(event.state, DeviceCommsType.DATA_DELIVERY_INTERRUPTION)
        self.assertEquals(event.sub_type, 'input_voltage')

        #-------------------------------------------------------------------------------------
        # Poll the events database until the events published for this instrument show up there
        #-------------------------------------------------------------------------------------

        ar = gevent.event.AsyncResult()
        def poller(ar, method, *args):
            events_in_db = method(*args)
            if len(events_in_db) > 0:
                ar.set(events_in_db)
                return True
            else:
                return False

        poll(poller, ar, self.user_notification.find_events, 'instrument_1')

#        events_in_db = self.user_notification.find_events(origin='instrument_1')

        events_in_db = ar.get(10)
        log.debug("events::: %s" % events_in_db)

        bad_data_events = []
        no_data_events = []

        for event in events_in_db:
            if event.type_ == 'DeviceStatusEvent':
                bad_data_events.append(event)
                self.assertEquals(event.origin, "instrument_1")
                self.assertEquals(event.status, DeviceStatusType.STATUS_WARNING)
                self.assertEquals(event.valid_values, self.valid_values)
                self.assertEquals(event.sub_type, 'input_voltage')
            elif event.type_ == 'DeviceCommsEvent':
                no_data_events.append(event)
                self.assertEquals(event.origin, "instrument_1")
                self.assertEquals(event.origin_type, "PlatformDevice")
                self.assertEquals(event.status, DeviceCommsType.DATA_DELIVERY_INTERRUPTION)
                self.assertEquals(event.sub_type, 'input_voltage')

        self.assertTrue(len(bad_data_events) > 0)
        self.assertTrue(len(no_data_events) > 0)

        log.debug("This satisfies L4-CI-SA-RQ-114 : 'Marine facility shall monitor marine infrastructure usage by instruments.'"
                  " The req is satisfied because the stream alert transform"
                  "is able to send device status and communication events over selected time intervals. This capability will be "
                  "augmented in the future.")
Example #9
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug('new InstrumentModel id = %s ', instModel_id)

        raw_config = StreamConfiguration(stream_name='raw',
                                         parameter_dictionary_name='raw')
        parsed_config = StreamConfiguration(
            stream_name='parsed',
            parameter_dictionary_name='ctd_parsed_param_dict')

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name='agent007',
            description="SBE37IMAgent",
            driver_uri=DRV_URI_GOOD,
            stream_configurations=[raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(
            instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            'test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) '
        )
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345")
        instDevice_id = self.imsclient.create_instrument_device(
            instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(
            instModel_id, instDevice_id)
        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) ",
            instDevice_id)

        port_agent_config = {
            'device_addr': CFG.device.sbe37.host,
            'device_port': CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance,
                                          name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config,
                                          alerts=[])

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()

        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(
            name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'raw', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(
            name='raw', parameter_dictionary_id=raw_pdict_id)

        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='the parsed data',
                           description='ctd stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id1 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug('new dp_id = %s', data_product_id1)
        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s', dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        dp_obj = IonObject(RT.DataProduct,
                           name='the raw data',
                           description='raw stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id2 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification(user_name='user_1',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification(user_name='user_2',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s', str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s', dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        #elastic search debug
        es_indexes, _ = self.container.resource_registry.find_resources(
            restype='ElasticSearchIndex')
        log.debug('ElasticSearch indexes: %s', [i.name for i in es_indexes])
        log.debug('Bootstrap %s', CFG.bootstrap.use_es)

        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(
                instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])

        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(
            instAgentInstance_id)
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     instDevice_id, ProcessStateEnum.RUNNING)
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" %
            gate.process_id)

        #log.trace('Instrument agent instance obj: = %s' , str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=gate.process_id,
                                              process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s",
                  str(self._ia_client))

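        # Walk the resource agent state machine (INACTIVE -> IDLE -> COMMAND ->
        # STOPPED -> COMMAND -> IDLE -> COMMAND) via INITIALIZE, GO_ACTIVE, RUN,
        # PAUSE, RESUME, CLEAR and RUN, asserting the state after each command.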
        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s", str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.INACTIVE, state)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s",
                  str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "(L4-CI-SA-RQ-334): current state after sending go_active command %s",
            str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s", str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.STOPPED, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
        for i in xrange(10):
            retval = self._ia_client.execute_resource(cmd)
            log.debug("test_activateInstrumentSample: return from sample %s",
                      str(retval))

        log.debug("test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s",
                  str(reply))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data_raw = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data_raw, Granule)
        rdt_raw = RecordDictionaryTool.load_from_granule(replay_data_raw)
        log.debug("RDT raw: %s", str(rdt_raw.pretty_print()))

        self.assertIn('raw', rdt_raw)
        raw_vals = rdt_raw['raw']

        all_raw = "".join(raw_vals)

        # look for 't' entered after a prompt -- ">t"
        t_commands = all_raw.count(">t")

        if 10 != t_commands:
            log.error("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals):
                log.error("raw val %s: %s", i, [r])
            self.fail("Expected 10 't' strings in raw_vals, got %s" %
                      t_commands)
        else:
            log.debug("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals):
                log.debug("raw val %s: %s", i, [r])

        replay_data_parsed = self.dataretrieverclient.retrieve(
            self.parsed_dataset)
        self.assertIsInstance(replay_data_parsed, Granule)
        rdt_parsed = RecordDictionaryTool.load_from_granule(replay_data_parsed)
        log.debug("test_activateInstrumentSample: RDT parsed: %s",
                  str(rdt_parsed.pretty_print()))
        self.assertIn('temp', rdt_parsed)
        temp_vals = rdt_parsed['temp']
        pressure_vals = rdt_parsed['pressure']
        if 10 != len(temp_vals):
            log.error("%s temp_vals: %s", len(temp_vals), temp_vals)
            self.fail("Expected 10 temp_vals, got %s" % len(temp_vals))

        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """

        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id1, user_id=user_id_1)

        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.
                       value) == 1

        poll(poller, extended_product, timeout=30)
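
        # poll() retries its callback until it returns a truthy value or the
        # timeout elapses. A minimal sketch of a compatible helper, assuming
        # gevent semantics (gevent.Timeout raised on expiry); the real helper
        # is imported by these tests, so this nested sketch is illustrative only:
        def _poll_sketch(callback, *args, **kwargs):
            timeout = kwargs.pop('timeout', 10)
            with gevent.Timeout(timeout):
                while True:
                    if callback(*args, **kwargs):
                        return True
                    gevent.sleep(0.2)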

        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id1,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_1)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id2,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)

    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug( 'new InstrumentModel id = %s ', instModel_id)



        #Create stream alarms
        """
        test_two_sided_interval
        Test interval alarm and alarm event publishing for a closed
        interval.
        """

        #        kwargs = {
        #            'name' : 'test_sim_warning',
        #            'stream_name' : 'parsed',
        #            'value_id' : 'temp',
        #            'message' : 'Temperature is above test range of 5.0.',
        #            'type' : StreamAlarmType.WARNING,
        #            'upper_bound' : 5.0,
        #            'upper_rel_op' : '<'
        #        }


        kwargs = {
            'name' : 'temperature_warning_interval',
            'stream_name' : 'parsed',
            'value_id' : 'temp',
            'message' : 'Temperature is below the normal range of 50.0 and above.',
            'type' : StreamAlarmType.WARNING,
            'lower_bound' : 50.0,
            'lower_rel_op' : '<'
        }

        # Create alarm object.
        alarm = {}
        alarm['type'] = 'IntervalAlarmDef'
        alarm['kwargs'] = kwargs
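
        # A hedged sketch of how an IntervalAlarmDef with these kwargs is
        # plausibly evaluated (the operator mapping and helper below are
        # illustrative assumptions, not the actual ION alarm framework):
        # a sample is in range while `lower_bound lower_rel_op value` holds,
        # so with lower_bound=50.0 and lower_rel_op='<' the warning fires
        # once temp drops to 50.0 or below.
        import operator
        _rel_ops = {'<': operator.lt, '<=': operator.le}
        def _alarm_fires(value, lower_bound=50.0, lower_rel_op='<'):
            return not _rel_ops[lower_rel_op](lower_bound, value)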

        raw_config = StreamConfiguration(stream_name='raw', parameter_dictionary_name='ctd_raw_param_dict', records_per_granule=2, granule_publish_rate=5 )
        parsed_config = StreamConfiguration(stream_name='parsed', parameter_dictionary_name='ctd_parsed_param_dict', records_per_granule=2, granule_publish_rate=5, alarms=[alarm] )


        # Create InstrumentAgent
        instAgent_obj = IonObject(RT.InstrumentAgent,
                                  name='agent007',
                                  description="SBE37IMAgent",
                                  driver_uri="http://sddevrepo.oceanobservatories.org/releases/seabird_sbe37smb_ooicore-0.0.1a-py2.7.egg",
                                  stream_configurations = [raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug('test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) ')
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345" )
        instDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, instDevice_id)

        log.debug("test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) " , instDevice_id)


        port_agent_config = {
            'device_addr':  CFG.device.sbe37.host,
            'device_port':  CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance, name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config = port_agent_config)


        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(instAgentInstance_obj,
                                                                               instAgent_id,
                                                                               instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()


        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_raw_param_dict', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(name='raw', parameter_dictionary_id=raw_pdict_id)


        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
            name='the parsed data',
            description='ctd stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id1 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug( 'new dp_id = %s' , data_product_id1)
        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id1)



        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s' , dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]


        pid = self.create_logger('ctd_parsed', stream_ids[0] )
        self.loggerpids.append(pid)


        dp_obj = IonObject(RT.DataProduct,
            name='the raw data',
            description='raw stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id2 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification( user_name='user_1', instrument_id=instDevice_id, product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification( user_name='user_2', instrument_id=instDevice_id, product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s' , str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s' , dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        #elastic search debug
        es_indexes, _ = self.container.resource_registry.find_resources(restype='ElasticSearchIndex')
        log.debug('ElasticSearch indexes: %s', [i.name for i in es_indexes])
        log.debug('Bootstrap %s', CFG.bootstrap.use_es)


        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])


        #setup a subscriber to alarm events from the device
        self._events_received= []
        self._event_count = 0
        self._samples_out_of_range = 0
        self._samples_complete = False
        self._async_sample_result = AsyncResult()

        def consume_event(*args, **kwargs):
            log.debug('TestActivateInstrument received ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))
            self._events_received.append(args[0])
            self._event_count = len(self._events_received)
            self._async_sample_result.set()

        self._event_subscriber = EventSubscriber(
            event_type= 'StreamWarningAlarmEvent',   #'StreamWarningAlarmEvent', #  StreamAlarmEvent
            callback=consume_event,
            origin=instDevice_id)
        self._event_subscriber.start()


        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        def stop_subscriber():
            self._event_subscriber.stop()
            self._event_subscriber = None

        self.addCleanup(stop_subscriber)


        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(instAgentInstance_id)
        gate = ProcessStateGate(self.processdispatchclient.read_process,
                                inst_agent_instance_obj.agent_process_id,
                                ProcessStateEnum.RUNNING)
        self.assertTrue(gate.await(30), "The instrument agent instance (%s) did not spawn in 30 seconds" %
                                        inst_agent_instance_obj.agent_process_id)

        log.debug('Instrument agent instance obj: = %s' , str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=inst_agent_instance_obj.agent_process_id,
                                              process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s" , str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s" , str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("(L4-CI-SA-RQ-334): current state after sending go_active command %s" , str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.STOPPED)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
        for i in xrange(10):
            retval = self._ia_client.execute_resource(cmd)
            log.debug("test_activateInstrumentSample: return from sample %s" , str(retval))

        log.debug( "test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s" , str(reply))

        self._samples_complete = True

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.dataretrieverclient.retrieve(self.parsed_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("test_activateInstrumentSample: RDT parsed: %s", str(rdt.pretty_print()) )
        temp_vals = rdt['temp']
        self.assertEquals(len(temp_vals) , 10)
        log.debug("test_activateInstrumentSample: all temp_vals: %s", temp_vals )

        #out_of_range_temp_vals = [i for i in temp_vals if i > 5]
        out_of_range_temp_vals = [i for i in temp_vals if i < 50.0]
        log.debug("test_activateInstrumentSample: Out_of_range_temp_vals: %s", out_of_range_temp_vals )
        self._samples_out_of_range = len(out_of_range_temp_vals)

        # if no bad values were produced, then do not wait for an event
        if self._samples_out_of_range == 0:
            self._async_sample_result.set()


        log.debug("test_activateInstrumentSample: _events_received: %s", self._events_received )
        log.debug("test_activateInstrumentSample: _event_count: %s", self._event_count )

        self._async_sample_result.get(timeout=CFG.endpoint.receive.timeout)

        replay_data = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("RDT raw: %s", str(rdt.pretty_print()) )

        raw_vals = rdt['raw']
        self.assertEquals(len(raw_vals) , 10)


        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """


        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id1, user_id=user_id_1)
        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product( expected_data_product_id = data_product_id1, extended_data_product = extended_product)

        #--------------------------------------------------------------------------------
        #put some events into the eventsdb to test - this should set the comms and data status to WARNING
        #--------------------------------------------------------------------------------

        t = get_ion_ts()
        self.event_publisher.publish_event(  ts_created= t,  event_type = 'DeviceStatusEvent',
            origin = instDevice_id, state=DeviceStatusType.OUT_OF_RANGE, values = [200] )
        self.event_publisher.publish_event( ts_created= t,   event_type = 'DeviceCommsEvent',
            origin = instDevice_id, state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION, lapse_interval_seconds = 20 )

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_1)
        self._check_computed_attributes_of_extended_instrument(expected_instrument_device_id = instDevice_id, extended_instrument = extended_instrument)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(expected_data_product_id = data_product_id2, extended_data_product = extended_product)

        #---------- Put some events into the eventsdb to test - this should set the comms and data status to WARNING  ---------

        t = get_ion_ts()
        self.event_publisher.publish_event(  ts_created= t,  event_type = 'DeviceStatusEvent',
            origin = instDevice_id, state=DeviceStatusType.OUT_OF_RANGE, values = [200] )
        self.event_publisher.publish_event( ts_created= t,   event_type = 'DeviceCommsEvent',
            origin = instDevice_id, state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION, lapse_interval_seconds = 20 )

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(expected_instrument_device_id = instDevice_id, extended_instrument = extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
Example #11
0
    def test_demo_stream_granules_processing(self):
        """
        Test that the Demo Stream Alert Transform is functioning. The transform coordinates with the scheduler.
        It is configured to listen to a source that publishes granules. It publishes a DeviceStatusEvent if it
        receives a granule with bad data or a DeviceCommsEvent if no granule has arrived between two timer events.

        The transform is configured at launch using a config dictionary.
        """
        #-------------------------------------------------------------------------------------
        # Start a subscriber to listen for an alert event from the Stream Alert Transform
        #-------------------------------------------------------------------------------------

        queue_bad_data = gevent.queue.Queue()
        queue_no_data = gevent.queue.Queue()

        def bad_data(message, headers):
            log.debug("Got a BAD data event: %s" % message)
            if message.type_ == "DeviceStatusEvent":
                queue_bad_data.put(message)

        def no_data(message, headers):
            log.debug("Got a NO data event: %s" % message)
            queue_no_data.put(message)

        event_subscriber_bad_data = EventSubscriber( origin="instrument_1",
            event_type="DeviceStatusEvent",
            callback=bad_data)

        event_subscriber_no_data = EventSubscriber( origin="instrument_1",
            event_type="DeviceCommsEvent",
            callback=no_data)

        event_subscriber_bad_data.start()
        event_subscriber_no_data.start()

        self.addCleanup(event_subscriber_bad_data.stop)
        self.addCleanup(event_subscriber_no_data.stop)

        #-------------------------------------------------------------------------------------
        # The configuration for the Stream Alert Transform: the variable to monitor, its valid range, and the timer/event origins
        #-------------------------------------------------------------------------------------
        self.valid_values = [-100, 100]
        self.timer_interval = 5
        self.queue_name = 'a_queue'

        config = {
            'process':{
                'timer_interval': self.timer_interval,
                'queue_name': self.queue_name,
                'variable_name': 'input_voltage',
                'time_field_name': 'preferred_timestamp',
                'valid_values': self.valid_values,
                'timer_origin': 'Interval Timer',
                'event_origin': 'instrument_1'
            }
        }
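        # valid_values is the closed [-100, 100] acceptance interval for the
        # 'input_voltage' variable; timer_interval (seconds) paces the scheduler
        # ticks that bound the "no granule arrived" detection window.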

        #-------------------------------------------------------------------------------------
        # Create the process
        #-------------------------------------------------------------------------------------
        pid = TransformPrototypeIntTest.create_process( name= 'DemoStreamAlertTransform',
            module='ion.processes.data.transforms.event_alert_transform',
            class_name='DemoStreamAlertTransform',
            configuration= config)
        self.addCleanup(self.process_dispatcher.cancel_process, pid)
        self.assertIsNotNone(pid)

        #-------------------------------------------------------------------------------------
        # Publish streams and make assertions about alerts
        #-------------------------------------------------------------------------------------

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(name= 'platform_eng_parsed', id_only=True)

        stream_def_id = self.pubsub_management.create_stream_definition('demo_stream', parameter_dictionary_id=pdict_id)
        stream_id, stream_route = self.pubsub_management.create_stream( name='test_demo_alert',
            exchange_point='exch_point_1',
            stream_definition_id=stream_def_id)

        sub_1 = self.pubsub_management.create_subscription(name='sub_1', stream_ids=[stream_id], exchange_points=['exch_point_1'], exchange_name=self.queue_name)
        self.pubsub_management.activate_subscription(sub_1)
        self.exchange_names.append('sub_1')
        self.exchange_points.append('exch_point_1')

        #-------------------------------------------------------------------------------------
        # publish a *GOOD* granule
        #-------------------------------------------------------------------------------------
        self.length = 2
        val = numpy.array([random.uniform(0,50)  for l in xrange(self.length)])
        self._publish_granules(stream_id= stream_id, stream_route= stream_route, number=1, values=val)

        self.assertTrue(queue_bad_data.empty())

        #-------------------------------------------------------------------------------------
        # publish a few *BAD* granules
        #-------------------------------------------------------------------------------------
        self.number = 2
        val = numpy.array([(110 + l)  for l in xrange(self.length)])
        self._publish_granules(stream_id= stream_id, stream_route= stream_route, number= self.number, values=val)

        for number in xrange(self.number):
            event = queue_bad_data.get(timeout=40)
            self.assertEquals(event.type_, "DeviceStatusEvent")
            self.assertEquals(event.origin, "instrument_1")
            self.assertEquals(event.state, DeviceStatusType.STATUS_WARNING)
            self.assertEquals(event.valid_values, self.valid_values)
            self.assertEquals(event.sub_type, 'input_voltage')
            self.assertTrue(set(event.values) ==  set(val))

            s = set(event.time_stamps)
            cond = s in [set(numpy.array([1 for l in xrange(self.length)]).tolist()),
                         set(numpy.array([2 for l in xrange(self.length)]).tolist())]
            self.assertTrue(cond)

        # Ensure that only the bad values generated the alert events; the queue should be empty now
        self.assertEquals(queue_bad_data.qsize(), 0)

        #-------------------------------------------------------------------------------------
        # Do not publish any granules for some time. This should generate a DeviceCommsEvent for the communication status
        #-------------------------------------------------------------------------------------
        event = queue_no_data.get(timeout=15)

        self.assertEquals(event.type_, "DeviceCommsEvent")
        self.assertEquals(event.origin, "instrument_1")
        self.assertEquals(event.origin_type, "PlatformDevice")
        self.assertEquals(event.state, DeviceCommsType.DATA_DELIVERY_INTERRUPTION)
        self.assertEquals(event.sub_type, 'input_voltage')

        #-------------------------------------------------------------------------------------
        # Empty the queues and repeat tests
        #-------------------------------------------------------------------------------------
        queue_bad_data.queue.clear()
        queue_no_data.queue.clear()

        #-------------------------------------------------------------------------------------
        # publish a *GOOD* granule again
        #-------------------------------------------------------------------------------------
        val = numpy.array([(l + 20)  for l in xrange(self.length)])
        self._publish_granules(stream_id= stream_id, stream_route= stream_route, number=1, values=val)

        self.assertTrue(queue_bad_data.empty())

        #-------------------------------------------------------------------------------------
        # Again do not publish any granules for some time. This should generate a DeviceCommsEvent for the communication status
        #-------------------------------------------------------------------------------------

        event = queue_no_data.get(timeout=20)

        self.assertEquals(event.type_, "DeviceCommsEvent")
        self.assertEquals(event.origin, "instrument_1")
        self.assertEquals(event.origin_type, "PlatformDevice")
        self.assertEquals(event.state, DeviceCommsType.DATA_DELIVERY_INTERRUPTION)
        self.assertEquals(event.sub_type, 'input_voltage')

        #-------------------------------------------------------------------------------------
        # Poll the events datastore until the alert events published by the transform show up
        #-------------------------------------------------------------------------------------

        ar = gevent.event.AsyncResult()
        def poller(ar, method, *args):
            events_in_db = method(*args)
            if len(events_in_db) > 0:
                ar.set(events_in_db)
                return True
            else:
                return False

        poll(poller, ar, self.user_notification.find_events, 'instrument_1')
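        # poll() retries the poller until find_events returns a non-empty list;
        # the AsyncResult then hands the found events back to this greenlet.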

#        events_in_db = self.user_notification.find_events(origin='instrument_1')

        events_in_db = ar.get(10)
        log.debug("events::: %s" % events_in_db)

        bad_data_events = []
        no_data_events = []

        for event in events_in_db:
            if event.type_ == 'DeviceStatusEvent':
                bad_data_events.append(event)
                self.assertEquals(event.origin, "instrument_1")
                self.assertEquals(event.status, DeviceStatusType.STATUS_WARNING)
                self.assertEquals(event.valid_values, self.valid_values)
                self.assertEquals(event.sub_type, 'input_voltage')
            elif event.type_ == 'DeviceCommsEvent':
                no_data_events.append(event)
                self.assertEquals(event.origin, "instrument_1")
                self.assertEquals(event.origin_type, "PlatformDevice")
                self.assertEquals(event.status, DeviceCommsType.DATA_DELIVERY_INTERRUPTION)
                self.assertEquals(event.sub_type, 'input_voltage')

        self.assertTrue(len(bad_data_events) > 0)
        self.assertTrue(len(no_data_events) > 0)

        log.debug("This satisfies L4-CI-SA-RQ-114 : 'Marine facility shall monitor marine infrastructure usage by instruments.'"
                  " The req is satisfied because the stream alert transform"
                  "is able to send device status and communication events over selected time intervals. This capability will be "
                  "augmented in the future.")