Example #1
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2dm.yml')

        self.unsc = UserNotificationServiceClient(node=self.container.node)
        self.rrc = ResourceRegistryServiceClient(node=self.container.node)
        self.imc = IdentityManagementServiceClient(node=self.container.node)
Example #2
    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli = DataProductManagementServiceClient()
        self.rrclient = ResourceRegistryServiceClient()
        self.damsclient = DataAcquisitionManagementServiceClient()
        self.pubsubcli = PubsubManagementServiceClient()
        self.ingestclient = IngestionManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()
        self.identcli = IdentityManagementServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        self.stream_def_id = self.pubsubcli.create_stream_definition(
            name='SBE37_CDM')

        self.process_definitions = {}
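        # Register a process definition for the ingestion worker; it is scheduled further below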
        ingestion_worker_definition = ProcessDefinition(
            name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':
            'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class': 'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(
            process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space = 'science_granule_ingestion'
        self.exchange_point = 'science_data'
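        # DotDict allows nested, attribute-style config (process.datastore_name, process.queue_name)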
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

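        # Launch the ingestion worker via the process dispatcher and track its pid for teardown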
        pid = self.process_dispatcher.schedule_process(
            self.process_definitions['ingestion_worker'], configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)
Example #3
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2dm.yml')

        self.unsc = UserNotificationServiceClient(node=self.container.node)
        self.rrc = ResourceRegistryServiceClient(node=self.container.node)
        self.imc = IdentityManagementServiceClient(node=self.container.node)
Example #4
    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()

        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(node=self.container.node)
        self.datasetclient =  DatasetManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        # set up listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()
Example #5
    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli = DataProductManagementServiceClient(node=self.container.node)
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
        self.process_dispatcher   = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(datastore_name)
        self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

        self.process_definitions  = {}
        ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class' :'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space       = 'science_granule_ingestion'
        self.exchange_point       = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'],configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)
Example #6
    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()
        config.bootstrap.use_es = True

        self._start_container()
        self.addCleanup(TestActivateInstrumentIntegration.es_cleanup)

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        # set up listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()
Example #7
    def setUp(self):
        super(TransformPrototypeIntTest, self).setUp()

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.rrc = ResourceRegistryServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.pubsub_management = PubsubManagementServiceClient()
        self.ssclient = SchedulerServiceClient()
        self.event_publisher = EventPublisher()
        self.user_notification = UserNotificationServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()

        self.exchange_names = []
        self.exchange_points = []
Example #8
    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        log.debug("Start rel from url")
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.DPMS = DataProductManagementServiceClient()
        self.RR = ResourceRegistryServiceClient()
        self.RR2 = EnhancedResourceRegistryClient(self.RR)
        self.DAMS = DataAcquisitionManagementServiceClient()
        self.PSMS = PubsubManagementServiceClient()
        self.ingestclient = IngestionManagementServiceClient()
        self.PD = ProcessDispatcherServiceClient()
        self.DSMS = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------
        log.debug("get datastore")
        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(
            datastore_name)
        self.stream_def_id = self.PSMS.create_stream_definition(
            name='SBE37_CDM')

        self.process_definitions = {}
        ingestion_worker_definition = ProcessDefinition(
            name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':
            'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class': 'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.PD.create_process_definition(
            process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        self.addCleanup(self.cleaning_up)
Example #9
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        self.resource_registry = self.container.resource_registry
        self.RR2 = EnhancedResourceRegistryClient(self.resource_registry)
        self.data_acquisition_management = DataAcquisitionManagementServiceClient()
        self.pubsub_management = PubsubManagementServiceClient()
        self.instrument_management = InstrumentManagementServiceClient()
        self.data_product_management = DataProductManagementServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.data_process_management = DataProcessManagementServiceClient()
        self.data_product_management = DataProductManagementServiceClient()
        self.data_retriever = DataRetrieverServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.user_notification = UserNotificationServiceClient()
        self.workflow_management = WorkflowManagementServiceClient()
        self.visualization = VisualizationServiceClient()
Example #10
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        self.resource_registry = self.container.resource_registry
        self.RR2 = EnhancedResourceRegistryClient(self.resource_registry)
        self.data_acquisition_management = DataAcquisitionManagementServiceClient()
        self.pubsub_management = PubsubManagementServiceClient()
        self.instrument_management = InstrumentManagementServiceClient()
        self.discovery = DiscoveryServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.data_process_management = DataProcessManagementServiceClient()
        self.data_product_management = DataProductManagementServiceClient()
        self.data_retriever = DataRetrieverServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.user_notification = UserNotificationServiceClient()
        self.observatory_management = ObservatoryManagementServiceClient()
        self.visualization = VisualizationServiceClient()
        self.ph = ParameterHelper(self.dataset_management, self.addCleanup)
        self.ctd_count = 0
Example #11
class TestDataProductManagementServiceIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli           = DataProductManagementServiceClient()
        self.rrclient           = ResourceRegistryServiceClient()
        self.damsclient         = DataAcquisitionManagementServiceClient()
        self.pubsubcli          = PubsubManagementServiceClient()
        self.ingestclient       = IngestionManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc               = UserNotificationServiceClient()
        self.data_retriever     = DataRetrieverServiceClient()
        self.identcli           = IdentityManagementServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

        self.process_definitions  = {}
        ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class' :'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space       = 'science_granule_ingestion'
        self.exchange_point       = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'],configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)

    def cleaning_up(self):
        log.debug("number of pids to be terminated: %s", len(self.pids))
        for pid in self.pids:
            try:
                self.process_dispatcher.cancel_process(pid)
                log.debug("Terminated the process: %s", pid)
            except Exception:
                log.debug("could not terminate the process id: %s", pid)
        IngestionManagementIntTest.clean_subscriptions()

        for xn in self.exchange_names:
            xni = self.container.ex_manager.create_xn_queue(xn)
            xni.delete()
        for xp in self.exchange_points:
            xpi = self.container.ex_manager.create_xp(xp)
            xpi.delete()

    def get_datastore(self, dataset_id):
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore


    @attr('EXT')
    @attr('PREP')
    def test_create_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict')
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=parameter_dictionary._id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------




        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp')

        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 10.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = -10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = -10.0
        dp_obj.ooi_product_name = "PRODNAME"

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product( data_product= dp_obj,
                                            stream_definition_id=ctd_stream_def_id)
        # Assert that the data product has an associated stream at this stage
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, RT.Stream, True)
        self.assertNotEquals(len(stream_ids), 0)

        # Assert that the data product has an associated stream def at this stage
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStreamDefinition, RT.StreamDefinition, True)
        self.assertNotEquals(len(stream_ids), 0)

        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)
        self.assertEquals(dp_obj.geospatial_point_center.lat, 0.0)
        log.debug('Created data product %s', dp_obj)
        #------------------------------------------------------------------------------------------------
        # test creating a new data product with  a stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('Creating new data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
            name='DP2',
            description='some new dp')

        dp_id2 = self.dpsc_cli.create_data_product(dp_obj, ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id2)
        log.debug('new dp_id = %s' % dp_id2)

        #------------------------------------------------------------------------------------------------
        #make sure data product is associated with stream def
        #------------------------------------------------------------------------------------------------
        streamdefs = []
        streams, _ = self.rrclient.find_objects(dp_id2, PRED.hasStream, RT.Stream, True)
        for s in streams:
            log.debug("Checking stream %s" % s)
            sdefs, _ = self.rrclient.find_objects(s, PRED.hasStreamDefinition, RT.StreamDefinition, True)
            for sd in sdefs:
                log.debug("Checking streamdef %s" % sd)
                streamdefs.append(sd)
        self.assertIn(ctd_stream_def_id, streamdefs)

        group_names = self.dpsc_cli.get_data_product_group_list()
        self.assertIn("PRODNAME", group_names)


        #----------------------------------------------------------------------------------------
        # Create users then notifications to this data product for each user
        #----------------------------------------------------------------------------------------

        # user_1
        user_1 = UserInfo()
        user_1.name = 'user_1'
        user_1.contact.email = '*****@*****.**'

        # user_2
        user_2 = UserInfo()
        user_2.name = 'user_2'
        user_2.contact.email = '*****@*****.**'
        # user_1 is a complete user: an ActorIdentity with registered credentials plus a UserInfo; user_2 is only a bare UserInfo resource
        self.subject = "/DC=org/DC=cilogon/C=US/O=ProtectNetwork/CN=Roger Unwin A254"
        actor_identity_obj = IonObject("ActorIdentity", {"name": self.subject})
        actor_id = self.identcli.create_actor_identity(actor_identity_obj)

        user_credentials_obj = IonObject("UserCredentials", {"name": self.subject})
        self.identcli.register_user_credentials(actor_id, user_credentials_obj)
        user_id_1 = self.identcli.create_user_info(actor_id, user_1)
        user_id_2, _ = self.rrclient.create(user_2)

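        # Each notification carries two email delivery configurations, both in batch mode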
        delivery_config1a = IonObject(OT.DeliveryConfiguration, email='*****@*****.**', mode=DeliveryModeEnum.EMAIL, frequency=NotificationFrequencyEnum.BATCH)
        delivery_config1b = IonObject(OT.DeliveryConfiguration, email='*****@*****.**', mode=DeliveryModeEnum.EMAIL, frequency=NotificationFrequencyEnum.BATCH)
        notification_request_1 = NotificationRequest(   name = "notification_1",
            origin=dp_id,
            origin_type="type_1",
            event_type=OT.ResourceLifecycleEvent,
            disabled_by_system = False,
            delivery_configurations=[delivery_config1a, delivery_config1b])

        delivery_config2a = IonObject(OT.DeliveryConfiguration, email='*****@*****.**', mode=DeliveryModeEnum.EMAIL, frequency=NotificationFrequencyEnum.BATCH)
        delivery_config2b = IonObject(OT.DeliveryConfiguration, email='*****@*****.**', mode=DeliveryModeEnum.EMAIL, frequency=NotificationFrequencyEnum.BATCH)
        notification_request_2 = NotificationRequest(   name = "notification_2",
            origin=dp_id,
            origin_type="type_2",
            disabled_by_system = False,
            event_type=OT.DetectionEvent,
            delivery_configurations=[delivery_config2a, delivery_config2b])

        notification_request_1_id = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_1)
        notification_request_2_id = self.unsc.create_notification(notification=notification_request_2, user_id=user_id_2)
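        # Delete the first notification so the data product ends up with one active and one past subscription (asserted below)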
        self.unsc.delete_notification(notification_request_1_id)



        # test reading a non-existent data product
        log.debug('reading non-existent data product')

        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product('some_fake_id')

        # update a data product (tests read also)
        log.debug('Updating data product')
        # first get the existing dp object
        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now tweak the object
        dp_obj.description = 'the very first dp'
        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 20.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = -20.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 20.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = -20.0
        # now write the dp back to the registry
        update_result = self.dpsc_cli.update_data_product(dp_obj)


        # now get the dp back to see if it was updated
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertEquals(dp_obj.description,'the very first dp')
        self.assertEquals(dp_obj.geospatial_point_center.lat, 0.0)
        log.debug('Updated data product %s', dp_obj)

        #test extension
        extended_product = self.dpsc_cli.get_data_product_extension(dp_id)
        #validate that there is one active and one retired user notification for this data product
        self.assertEqual(1, len(extended_product.computed.active_user_subscriptions.value))
        self.assertEqual(1, len(extended_product.computed.past_user_subscriptions.value))

        self.assertEqual(dp_id, extended_product._id)
        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.product_download_size_estimated.status)
        self.assertEqual(0, extended_product.computed.product_download_size_estimated.value)

        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.parameters.status)
        #log.debug("test_create_data_product: parameters %s" % extended_product.computed.parameters.value)


        def ion_object_encoder(obj):
            return obj.__dict__


        #test prepare for create
        data_product_data = self.dpsc_cli.prepare_data_product_support()

        #print simplejson.dumps(data_product_data, default=ion_object_encoder, indent= 2)

        self.assertEqual(data_product_data._id, "")
        self.assertEqual(data_product_data.type_, OT.DataProductPrepareSupport)
        self.assertEqual(len(data_product_data.associations['StreamDefinition'].resources), 2)
        self.assertEqual(len(data_product_data.associations['Dataset'].resources), 0)
        self.assertEqual(len(data_product_data.associations['StreamDefinition'].associated_resources), 0)
        self.assertEqual(len(data_product_data.associations['Dataset'].associated_resources), 0)

        #test prepare for update
        data_product_data = self.dpsc_cli.prepare_data_product_support(dp_id)

        #print simplejson.dumps(data_product_data, default=ion_object_encoder, indent= 2)

        self.assertEqual(data_product_data._id, dp_id)
        self.assertEqual(data_product_data.type_, OT.DataProductPrepareSupport)
        self.assertEqual(len(data_product_data.associations['StreamDefinition'].resources), 2)

        self.assertEqual(len(data_product_data.associations['Dataset'].resources), 1)

        self.assertEqual(len(data_product_data.associations['StreamDefinition'].associated_resources), 1)
        self.assertEqual(data_product_data.associations['StreamDefinition'].associated_resources[0].s, dp_id)

        self.assertEqual(len(data_product_data.associations['Dataset'].associated_resources), 1)
        self.assertEqual(data_product_data.associations['Dataset'].associated_resources[0].s, dp_id)

        # now 'delete' the data product
        log.debug("deleting data product: %s" % dp_id)
        self.dpsc_cli.delete_data_product(dp_id)

        # Assert that there are no associated streams leftover after deleting the data product
        stream_ids, assoc_ids = self.rrclient.find_objects(dp_id, PRED.hasStream, RT.Stream, True)
        self.assertEquals(len(stream_ids), 0)
        self.assertEquals(len(assoc_ids), 0)

        self.dpsc_cli.force_delete_data_product(dp_id)

        # now try to get the deleted dp object
        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # Get the events corresponding to the data product
        ret = self.unsc.get_recent_events(resource_id=dp_id)
        events = ret.value

        for event in events:
            log.debug("event time: %s" % event.ts_created)

        self.assertTrue(len(events) > 0)

    def test_data_product_stream_def(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)


        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp')
        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        stream_def_id = self.dpsc_cli.get_data_product_stream_definition(dp_id)
        self.assertEquals(ctd_stream_def_id, stream_def_id)


    def test_derived_data_product(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='ctd parsed', parameter_dictionary_id=pdict_id)
        self.addCleanup(self.pubsubcli.delete_stream_definition, ctd_stream_def_id)


        dp = DataProduct(name='Instrument DP')
        dp_id = self.dpsc_cli.create_data_product(dp, stream_definition_id=ctd_stream_def_id)
        self.addCleanup(self.dpsc_cli.force_delete_data_product, dp_id)

        self.dpsc_cli.activate_data_product_persistence(dp_id)
        self.addCleanup(self.dpsc_cli.suspend_data_product_persistence, dp_id)


        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        dataset_id = dataset_ids[0]
        
        # Make the derived data product
        simple_stream_def_id = self.pubsubcli.create_stream_definition(name='TEMPWAT stream def', parameter_dictionary_id=pdict_id, available_fields=['time','temp'])
        tempwat_dp = DataProduct(name='TEMPWAT', category=DataProductTypeEnum.DERIVED)
        tempwat_dp_id = self.dpsc_cli.create_data_product(tempwat_dp, stream_definition_id=simple_stream_def_id, parent_data_product_id=dp_id)
        self.addCleanup(self.dpsc_cli.delete_data_product, tempwat_dp_id)
        # Check that the streams associated with the data product are persisted with
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

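        # Publish a granule containing time, temp and pressure values on the parent product's stream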
        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)
        rdt['pressure'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id,route)
        
        dataset_modified = Event()
        def cb(*args, **kwargs):
            dataset_modified.set()
        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id, auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        tempwat_dataset_ids, _ = self.rrclient.find_objects(tempwat_dp_id, PRED.hasDataset, id_only=True)
        tempwat_dataset_id = tempwat_dataset_ids[0]
        granule = self.data_retriever.retrieve(tempwat_dataset_id, delivery_format=simple_stream_def_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_equal(rdt['time'], np.arange(20))
        self.assertEquals(set(rdt.fields), set(['time','temp']))


    def test_activate_suspend_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------
        # Construct temporal and spatial Coordinate Reference System objects

        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp')

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # Subscribe to persist events
        #------------------------------------------------------------------------------------------------
        queue = gevent.queue.Queue()

        def info_event_received(message, headers):
            queue.put(message)

        es = EventSubscriber(event_type=OT.InformationContentStatusEvent, callback=info_event_received, origin=dp_id, auto_delete=True)
        es.start()
        self.addCleanup(es.stop)


        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)
        
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        dataset_id = dataset_ids[0]


        # Check that the streams associated with the data product are persisted with
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id,route)
        
        dataset_modified = Event()
        def cb(*args, **kwargs):
            dataset_modified.set()
        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id, auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_ids[0])
        self.assertIsInstance(replay_data, Granule)

        log.debug("The data retriever was able to replay the dataset that was attached to the data product "
                  "we wanted to be persisted. Therefore the data product was indeed persisted with "
                  "otherwise we could not have retrieved its dataset using the data retriever. Therefore "
                  "this demonstration shows that L4-CI-SA-RQ-267 is satisfied: 'Data product management shall persist data products'")

        data_product_object = self.rrclient.read(dp_id)
        self.assertEquals(data_product_object.name,'DP1')
        self.assertEquals(data_product_object.description,'some new dp')

        log.debug("Towards L4-CI-SA-RQ-308: 'Data product management shall persist data product metadata'. "
                  " Attributes in create for the data product obj, name= '%s', description='%s', match those of object from the "
                  "resource registry, name='%s', desc='%s'" % (dp_obj.name, dp_obj.description,data_product_object.name,
                                                           data_product_object.description))

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)


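        # With persistence suspended, a published granule should not trigger a DatasetModified event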
        dataset_modified.clear()

        rdt['time'] = np.arange(20,40)

        publisher.publish(rdt.to_granule())
        self.assertFalse(dataset_modified.wait(2))

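        # Re-activate persistence and publish again; the 20-39 granule is now ingested, giving 40 samples in total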
        self.dpsc_cli.activate_data_product_persistence(dp_id)
        dataset_modified.clear()

        publisher.publish(rdt.to_granule())
        self.assertTrue(dataset_modified.wait(30))

        granule = self.data_retriever.retrieve(dataset_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_almost_equal(rdt['time'], np.arange(40))


        dataset_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasDataset, id_only=True)
        self.assertEquals(len(dataset_ids), 1)

        self.dpsc_cli.suspend_data_product_persistence(dp_id)
        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object

        with self.assertRaises(NotFound):
            dp_obj = self.rrclient.read(dp_id)


        info_event_counter = 0
        runtime = 0
        starttime = time.time()
        caught_events = []

        # check that four InformationContentStatusEvents were received
        while info_event_counter < 4 and runtime < 60 :
            a = queue.get(timeout=60)
            caught_events.append(a)
            info_event_counter += 1
            runtime = time.time() - starttime

        self.assertEquals(info_event_counter, 4)
class TestActivateInstrumentIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()

        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(node=self.container.node)
        self.datasetclient =  DatasetManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        # set up listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()


    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module':'ion.processes.data.stream_granule_logger',
            'class':'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process':{
                'stream_id':stream_id,
                }
        }
        pid = self.processdispatchclient.schedule_process(process_definition_id=logger_procdef_id,
                                                            configuration=configuration)

        return pid

    def _create_notification(self, user_name = '', instrument_id='', product_id=''):
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------

        notification_request_1 = NotificationRequest(   name= 'notification_1',
            origin=instrument_id,
            origin_type="instrument",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest(   name='notification_2',
            origin=product_id,
            origin_type="data product",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------

        user = UserInfo()
        user.name = user_name
        user.contact.email = '*****@*****.**' % user_name

        user_id, _ = self.rrclient.create(user)

        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------

        self.usernotificationclient.create_notification(notification=notification_request_1, user_id=user_id)
        self.usernotificationclient.create_notification(notification=notification_request_2, user_id=user_id)
        log.debug( "test_activateInstrumentSample: create_user_notifications user_id %s", str(user_id) )

        return user_id

    def get_datastore(self, dataset_id):
        dataset = self.datasetclient.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def _check_computed_attributes_of_extended_instrument(self, expected_instrument_device_id = '',extended_instrument = None):

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_instrument.computed.last_data_received_datetime, ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.uptime, ComputedStringValue)

        self.assertIsInstance(extended_instrument.computed.power_status_roll_up, ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.communications_status_roll_up, ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.data_status_roll_up, ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.location_status_roll_up, ComputedIntValue)

        # the following assert will not work without elasticsearch.
        #self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )

        # Verify the computed attribute for user notification requests
        self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )
        notifications = extended_instrument.computed.user_notification_requests.value
        notification = notifications[0]
        self.assertEqual(expected_instrument_device_id, notification.origin)
        self.assertEqual("instrument", notification.origin_type)
        self.assertEqual('ResourceLifecycleEvent', notification.event_type)


    def _check_computed_attributes_of_extended_product(self, expected_data_product_id = '', extended_data_product = None):

        self.assertEqual(expected_data_product_id, extended_data_product._id)
        log.debug("extended_data_product.computed: %s", extended_data_product.computed)

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_data_product.computed.product_download_size_estimated, ComputedFloatValue)
        self.assertIsInstance(extended_data_product.computed.number_active_subscriptions, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_url, ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.stored_data_size, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.recent_granules, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.parameters, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.recent_events, ComputedEventListValue)

        self.assertIsInstance(extended_data_product.computed.provenance, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.user_notification_requests, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.active_user_subscriptions, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.past_user_subscriptions, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.last_granule, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.is_persisted, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_contents_updated, ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.data_datetime, ComputedListValue)

        # exact text here keeps changing to fit UI capabilities.  keep assertion general...
        self.assertEqual( 2, len(extended_data_product.computed.data_datetime.value) )

        notifications = extended_data_product.computed.user_notification_requests.value

        notification = notifications[0]
        self.assertEqual(expected_data_product_id, notification.origin)
        self.assertEqual("data product", notification.origin_type)
        self.assertEqual('DetectionEvent', notification.event_type)


    @attr('LOCOINT')
    #@unittest.skip('refactoring')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    @patch.dict(CFG, {'endpoint':{'receive':{'timeout': 90}}})
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug( 'new InstrumentModel id = %s ', instModel_id)




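        # Define the raw and parsed stream configurations, each bound to a parameter dictionary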
        raw_config = StreamConfiguration(stream_name='raw', parameter_dictionary_name='raw')
        parsed_config = StreamConfiguration(stream_name='parsed', parameter_dictionary_name='ctd_parsed_param_dict')


        # Create InstrumentAgent
        instAgent_obj = IonObject(RT.InstrumentAgent,
                                  name='agent007',
                                  description="SBE37IMAgent",
                                  driver_uri=DRV_URI_GOOD,
                                  stream_configurations = [raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug('test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) ')
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345" )
        instDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, instDevice_id)
        log.debug("test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) " , instDevice_id)


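        # Port agent configuration for the SBE37 simulator; host and ports come from CFG.device.sbe37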
        port_agent_config = {
            'device_addr':  CFG.device.sbe37.host,
            'device_port':  CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance, name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config = port_agent_config,
                                            alerts= [])


        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(instAgentInstance_obj,
                                                                               instAgent_id,
                                                                               instDevice_id)


        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()


        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('raw', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(name='raw', parameter_dictionary_id=raw_pdict_id)


        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
            name='the parsed data',
            description='ctd stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id1 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug( 'new dp_id = %s' , data_product_id1)
        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id1)



        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s' , dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]


        pid = self.create_logger('ctd_parsed', stream_ids[0] )
        self.loggerpids.append(pid)


        dp_obj = IonObject(RT.DataProduct,
            name='the raw data',
            description='raw stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id2 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification( user_name='user_1', instrument_id=instDevice_id, product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification( user_name='user_2', instrument_id=instDevice_id, product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s' , str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s' , dataset_ids[0])
        self.raw_dataset = dataset_ids[0]


        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)

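        # Start the agent instance in a greenlet and block until the start call returns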
        gevent.joinall([gevent.spawn(start_instrument_agent)])


        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)


        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(instAgentInstance_id)
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     instDevice_id,
                                     ProcessStateEnum.RUNNING)
        self.assertTrue(gate.await(30), "The instrument agent instance (%s) did not spawn in 30 seconds" %
                                        gate.process_id)

        #log.trace('Instrument agent instance obj: = %s' , str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=gate.process_id,
                                              process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s" , str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s" , str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.INACTIVE, state)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("(L4-CI-SA-RQ-334): current state after sending go_active command %s" , str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.STOPPED, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)
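
        # Illustrative helper (not part of the original test): the repeated
        # "execute an agent command, then assert the resulting agent state"
        # pattern above could be captured once, using only the
        # ResourceAgentClient calls already exercised in this test.
        def agent_step(event, expected_state):
            self._ia_client.execute_agent(AgentCommand(command=event))
            self.assertEqual(expected_state, self._ia_client.get_agent_state())
        # e.g. agent_step(ResourceAgentEvent.PAUSE, ResourceAgentState.STOPPED)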

        for i in xrange(10):
            monitor = DatasetMonitor(dataset_id=self.parsed_dataset)
            self._ia_client.execute_resource(AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE))
            if not monitor.wait():
                raise AssertionError('Failed on granule %d' % i)
            monitor.stop()

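        # A minimal sketch (an assumption, not the actual DatasetMonitor
        # implementation) of what the monitor used above could look like, built
        # from the same EventSubscriber / gevent Event pattern used elsewhere
        # in this file:
        #
        #   class SimpleDatasetMonitor(object):
        #       def __init__(self, dataset_id, timeout=30):
        #           self.timeout = timeout
        #           self.event = Event()
        #           self.es = EventSubscriber(event_type=OT.DatasetModified,
        #                                     origin=dataset_id,
        #                                     callback=lambda *a, **kw: self.event.set(),
        #                                     auto_delete=True)
        #           self.es.start()
        #       def wait(self):
        #           return self.event.wait(self.timeout)
        #       def stop(self):
        #           self.es.stop()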

#        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
#        for i in xrange(10):
#            retval = self._ia_client.execute_resource(cmd)
#            log.debug("test_activateInstrumentSample: return from sample %s" , str(retval))

        log.debug( "test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s" , str(reply))


        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever
        #--------------------------------------------------------------------------------

        replay_data_raw = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data_raw, Granule)
        rdt_raw = RecordDictionaryTool.load_from_granule(replay_data_raw)
        log.debug("RDT raw: %s", str(rdt_raw.pretty_print()) )

        self.assertIn('raw', rdt_raw)
        raw_vals = rdt_raw['raw']

        all_raw = "".join(raw_vals)

        # look for 't' entered after a prompt -- ">t"
        t_commands = all_raw.count(">t")

        if 10 != t_commands:
            log.error("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals): log.error("raw val %s: %s", i, [r])
            self.fail("Expected 10 't' strings in raw_vals, got %s" % t_commands)
        else:
            log.debug("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals): log.debug("raw val %s: %s", i, [r])

        replay_data_parsed = self.dataretrieverclient.retrieve(self.parsed_dataset)
        self.assertIsInstance(replay_data_parsed, Granule)
        rdt_parsed = RecordDictionaryTool.load_from_granule(replay_data_parsed)
        log.debug("test_activateInstrumentSample: RDT parsed: %s", str(rdt_parsed.pretty_print()) )
        self.assertIn('temp', rdt_parsed)
        temp_vals = rdt_parsed['temp']
        pressure_vals  = rdt_parsed['pressure']
        if 10 != len(temp_vals):
            log.error("%s temp_vals: %s", len(temp_vals), temp_vals)
            self.fail("Expected 10 temp_vals, got %s" % len(temp_vals))


        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """


        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id1, user_id=user_id_1)
        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.value) == 1

        poll(poller, extended_product, timeout=30)
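        # For reference, a minimal sketch of the kind of poll helper assumed
        # above (the real utility may differ): call the predicate with the
        # given arguments until it returns True or the timeout elapses.
        #
        #   def poll(callback, *args, **kwargs):
        #       timeout = kwargs.pop('timeout', 30)
        #       deadline = time.time() + timeout
        #       while time.time() < deadline:
        #           if callback(*args, **kwargs):
        #               return True
        #           gevent.sleep(1)
        #       return False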

        self._check_computed_attributes_of_extended_product( expected_data_product_id = data_product_id1, extended_data_product = extended_product)


        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_1)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(expected_data_product_id = data_product_id2, extended_data_product = extended_product)


        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(expected_instrument_device_id = instDevice_id, extended_instrument = extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
Example #13
0
class TestDataProductManagementServiceIntegration(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli = DataProductManagementServiceClient()
        self.rrclient = ResourceRegistryServiceClient()
        self.damsclient = DataAcquisitionManagementServiceClient()
        self.pubsubcli = PubsubManagementServiceClient()
        self.ingestclient = IngestionManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        self.stream_def_id = self.pubsubcli.create_stream_definition(
            name='SBE37_CDM')

        self.process_definitions = {}
        ingestion_worker_definition = ProcessDefinition(
            name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':
            'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class': 'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(
            process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space = 'science_granule_ingestion'
        self.exchange_point = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(
            self.process_definitions['ingestion_worker'], configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)

    def cleaning_up(self):
        for pid in self.pids:
            log.debug("number of pids to be terminated: %s", len(self.pids))
            try:
                self.process_dispatcher.cancel_process(pid)
                log.debug("Terminated the process: %s", pid)
            except:
                log.debug("could not terminate the process id: %s" % pid)
        IngestionManagementIntTest.clean_subscriptions()

        for xn in self.exchange_names:
            xni = self.container.ex_manager.create_xn_queue(xn)
            xni.delete()
        for xp in self.exchange_points:
            xpi = self.container.ex_manager.create_xp(xp)
            xpi.delete()

    def get_datastore(self, dataset_id):
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(
            datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    @attr('EXT')
    @attr('PREP')
    def test_create_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict')
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(
            name='Simulated CTD data',
            parameter_dictionary_id=parameter_dictionary._id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product (DP1) with geospatial bounds and an OOI product name
        #------------------------------------------------------------------------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='DP1',
                           description='some new dp')

        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 10.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = -10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = -10.0
        dp_obj.ooi_product_name = "PRODNAME"

        #------------------------------------------------------------------------------------------------
        # Create the data product, referencing the stream definition (and through it the parameter dictionary) created above
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(
            data_product=dp_obj, stream_definition_id=ctd_stream_def_id)
        # Assert that the data product has an associated stream at this stage
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                   RT.Stream, True)
        self.assertNotEquals(len(stream_ids), 0)

        # Assert that the data product has an associated stream def at this stage
        stream_ids, _ = self.rrclient.find_objects(dp_id,
                                                   PRED.hasStreamDefinition,
                                                   RT.StreamDefinition, True)
        self.assertNotEquals(len(stream_ids), 0)

        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)
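        # (the symmetric +/-10 degree geospatial bounds set above are expected
        #  to yield a computed point center latitude of 0.0)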
        self.assertEquals(dp_obj.geospatial_point_center.lat, 0.0)
        log.debug('Created data product %s', dp_obj)
        #------------------------------------------------------------------------------------------------
        # test creating a new data product with  a stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('Creating new data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
                           name='DP2',
                           description='some new dp')

        dp_id2 = self.dpsc_cli.create_data_product(dp_obj, ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id2)
        log.debug('new dp_id = %s' % dp_id2)

        #------------------------------------------------------------------------------------------------
        #make sure data product is associated with stream def
        #------------------------------------------------------------------------------------------------
        streamdefs = []
        streams, _ = self.rrclient.find_objects(dp_id2, PRED.hasStream,
                                                RT.Stream, True)
        for s in streams:
            log.debug("Checking stream %s" % s)
            sdefs, _ = self.rrclient.find_objects(s, PRED.hasStreamDefinition,
                                                  RT.StreamDefinition, True)
            for sd in sdefs:
                log.debug("Checking streamdef %s" % sd)
                streamdefs.append(sd)
        self.assertIn(ctd_stream_def_id, streamdefs)

        group_names = self.dpsc_cli.get_data_product_group_list()
        self.assertIn("PRODNAME", group_names)

        # test reading a non-existent data product
        log.debug('reading non-existent data product')

        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product('some_fake_id')

        # update a data product (tests read also)
        log.debug('Updating data product')
        # first get the existing dp object
        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now tweak the object
        dp_obj.description = 'the very first dp'
        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 20.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = -20.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 20.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = -20.0
        # now write the dp back to the registry
        update_result = self.dpsc_cli.update_data_product(dp_obj)

        # now get the dp back to see if it was updated
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertEquals(dp_obj.description, 'the very first dp')
        self.assertEquals(dp_obj.geospatial_point_center.lat, 0.0)
        log.debug('Updated data product %s', dp_obj)

        #test extension
        extended_product = self.dpsc_cli.get_data_product_extension(dp_id)
        self.assertEqual(dp_id, extended_product._id)
        self.assertEqual(
            ComputedValueAvailability.PROVIDED,
            extended_product.computed.product_download_size_estimated.status)
        self.assertEqual(
            0, extended_product.computed.product_download_size_estimated.value)

        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.parameters.status)

        #log.debug("test_create_data_product: parameters %s" % extended_product.computed.parameters.value)

        def ion_object_encoder(obj):
            return obj.__dict__

        #test prepare for create
        data_product_data = self.dpsc_cli.prepare_data_product_support()

        #print simplejson.dumps(data_product_data, default=ion_object_encoder, indent= 2)

        self.assertEqual(data_product_data._id, "")
        self.assertEqual(data_product_data.type_, OT.DataProductPrepareSupport)
        self.assertEqual(
            len(data_product_data.associations['StreamDefinition'].resources),
            2)
        self.assertEqual(
            len(data_product_data.associations['Dataset'].resources), 0)
        self.assertEqual(
            len(data_product_data.associations['StreamDefinition'].
                associated_resources), 0)
        self.assertEqual(
            len(data_product_data.associations['Dataset'].associated_resources
                ), 0)

        #test prepare for update
        data_product_data = self.dpsc_cli.prepare_data_product_support(dp_id)

        #print simplejson.dumps(data_product_data, default=ion_object_encoder, indent= 2)

        self.assertEqual(data_product_data._id, dp_id)
        self.assertEqual(data_product_data.type_, OT.DataProductPrepareSupport)
        self.assertEqual(
            len(data_product_data.associations['StreamDefinition'].resources),
            2)

        self.assertEqual(
            len(data_product_data.associations['Dataset'].resources), 1)

        self.assertEqual(
            len(data_product_data.associations['StreamDefinition'].
                associated_resources), 1)
        self.assertEqual(
            data_product_data.associations['StreamDefinition'].
            associated_resources[0].s, dp_id)

        self.assertEqual(
            len(data_product_data.associations['Dataset'].associated_resources
                ), 1)
        self.assertEqual(
            data_product_data.associations['Dataset'].associated_resources[0].
            s, dp_id)

        # now 'delete' the data product
        log.debug("deleting data product: %s" % dp_id)
        self.dpsc_cli.delete_data_product(dp_id)

        # Assert that there are no associated streams leftover after deleting the data product
        stream_ids, assoc_ids = self.rrclient.find_objects(
            dp_id, PRED.hasStream, RT.Stream, True)
        self.assertEquals(len(stream_ids), 0)
        self.assertEquals(len(assoc_ids), 0)

        self.dpsc_cli.force_delete_data_product(dp_id)

        # now try to get the deleted dp object
        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # Get the events corresponding to the data product
        ret = self.unsc.get_recent_events(resource_id=dp_id)
        events = ret.value

        for event in events:
            log.debug("event time: %s" % event.ts_created)

        self.assertTrue(len(events) > 0)

    def test_data_product_stream_def(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(
            name='Simulated CTD data', parameter_dictionary_id=pdict_id)

        dp_obj = IonObject(RT.DataProduct,
                           name='DP1',
                           description='some new dp')
        dp_id = self.dpsc_cli.create_data_product(
            data_product=dp_obj, stream_definition_id=ctd_stream_def_id)

        stream_def_id = self.dpsc_cli.get_data_product_stream_definition(dp_id)
        self.assertEquals(ctd_stream_def_id, stream_def_id)

    def test_derived_data_product(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(
            name='ctd parsed', parameter_dictionary_id=pdict_id)
        self.addCleanup(self.pubsubcli.delete_stream_definition,
                        ctd_stream_def_id)

        dp = DataProduct(name='Instrument DP')
        dp_id = self.dpsc_cli.create_data_product(
            dp, stream_definition_id=ctd_stream_def_id)
        self.addCleanup(self.dpsc_cli.force_delete_data_product, dp_id)

        self.dpsc_cli.activate_data_product_persistence(dp_id)
        self.addCleanup(self.dpsc_cli.suspend_data_product_persistence, dp_id)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id,
                                                    predicate=PRED.hasDataset,
                                                    id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" %
                           str(dp_id))
        dataset_id = dataset_ids[0]

        # Make the derived data product
        simple_stream_def_id = self.pubsubcli.create_stream_definition(
            name='TEMPWAT stream def',
            parameter_dictionary_id=pdict_id,
            available_fields=['time', 'temp'])
        tempwat_dp = DataProduct(name='TEMPWAT',
                                 category=DataProductTypeEnum.DERIVED)
        tempwat_dp_id = self.dpsc_cli.create_data_product(
            tempwat_dp,
            stream_definition_id=simple_stream_def_id,
            parent_data_product_id=dp_id)
        self.addCleanup(self.dpsc_cli.delete_data_product, tempwat_dp_id)
        # Check that the streams associated with the data product are persisted by the ingestion service
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                   RT.Stream, True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)
        rdt['pressure'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id, route)

        dataset_modified = Event()

        def cb(*args, **kwargs):
            dataset_modified.set()

        es = EventSubscriber(event_type=OT.DatasetModified,
                             callback=cb,
                             origin=dataset_id,
                             auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        tempwat_dataset_ids, _ = self.rrclient.find_objects(tempwat_dp_id,
                                                            PRED.hasDataset,
                                                            id_only=True)
        tempwat_dataset_id = tempwat_dataset_ids[0]
        granule = self.data_retriever.retrieve(
            tempwat_dataset_id, delivery_format=simple_stream_def_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_equal(rdt['time'], np.arange(20))
        self.assertEquals(set(rdt.fields), set(['time', 'temp']))

    def test_activate_suspend_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(
            name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # create a data product using the CTD stream definition
        #------------------------------------------------------------------------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='DP1',
                           description='some new dp')

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create the data product, referencing the stream definition (and parameter dictionary) created above
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(
            data_product=dp_obj, stream_definition_id=ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # Subscribe to persist events
        #------------------------------------------------------------------------------------------------
        queue = gevent.queue.Queue()

        def info_event_received(message, headers):
            queue.put(message)

        es = EventSubscriber(event_type=OT.InformationContentStatusEvent,
                             callback=info_event_received,
                             origin=dp_id,
                             auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id,
                                                    predicate=PRED.hasDataset,
                                                    id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" %
                           str(dp_id))
        dataset_id = dataset_ids[0]

        # Check that the streams associated with the data product are persisted by the ingestion service
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                   RT.Stream, True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id, route)

        dataset_modified = Event()

        def cb(*args, **kwargs):
            dataset_modified.set()

        es = EventSubscriber(event_type=OT.DatasetModified,
                             callback=cb,
                             origin=dataset_id,
                             auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_ids[0])
        self.assertIsInstance(replay_data, Granule)

        log.debug(
            "The data retriever was able to replay the dataset attached to the data product "
            "we wanted persisted. The data product was therefore indeed persisted; otherwise "
            "its dataset could not have been retrieved. This demonstrates that L4-CI-SA-RQ-267 "
            "is satisfied: 'Data product management shall persist data products'"
        )

        data_product_object = self.rrclient.read(dp_id)
        self.assertEquals(data_product_object.name, 'DP1')
        self.assertEquals(data_product_object.description, 'some new dp')

        log.debug(
            "Towards L4-CI-SA-RQ-308: 'Data product management shall persist data product metadata'. "
            "Attributes of the created data product object, name='%s', description='%s', match those of the "
            "object read back from the resource registry, name='%s', desc='%s'" %
            (dp_obj.name, dp_obj.description, data_product_object.name,
             data_product_object.description))

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)

        dataset_modified.clear()

        rdt['time'] = np.arange(20, 40)

        publisher.publish(rdt.to_granule())
        self.assertFalse(dataset_modified.wait(2))

        self.dpsc_cli.activate_data_product_persistence(dp_id)
        dataset_modified.clear()

        publisher.publish(rdt.to_granule())
        self.assertTrue(dataset_modified.wait(30))

        granule = self.data_retriever.retrieve(dataset_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
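        # expect time values 0-39: 0-19 were persisted before suspension and
        # 20-39 were republished after persistence was reactivated; the granule
        # published while persistence was suspended was not stored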
        np.testing.assert_array_almost_equal(rdt['time'], np.arange(40))

        dataset_ids, _ = self.rrclient.find_objects(dp_id,
                                                    PRED.hasDataset,
                                                    id_only=True)
        self.assertEquals(len(dataset_ids), 1)

        self.dpsc_cli.suspend_data_product_persistence(dp_id)
        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object

        with self.assertRaises(NotFound):
            dp_obj = self.rrclient.read(dp_id)

        info_event_counter = 0
        runtime = 0
        starttime = time.time()
        caught_events = []

        # check that four InformationContentStatusEvents were received
        # (presumably one for each of the two activate and two suspend persistence calls above)
        while info_event_counter < 4 and runtime < 60:
            a = queue.get(timeout=60)
            caught_events.append(a)
            info_event_counter += 1
            runtime = time.time() - starttime

        self.assertEquals(info_event_counter, 4)
class RealTimeNotificationTestCase(IonIntegrationTestCase):

    def setUp(self):

        self._start_container()

        # patch the CFG service.user_notification.max_daily_notifications value so we only test 10
        original_CFG_max = CFG.get_safe("service.user_notification.max_daily_notifications", 1000)
        CFG['service']['user_notification']['max_daily_notifications'] = 10

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        self.object_store = self.container.object_store
        self.resource_registry = self.container.resource_registry
        self.user_notification = UserNotificationServiceClient()
        self.event_publisher = EventPublisher()

        # create UserInfo object (user)
        user = UserInfo()
        user.name = 'Iceman'
        user.contact.email = '*****@*****.**'
        user_id, _ = self.resource_registry.create(user)
        self.user = self.resource_registry.read(user_id)

        # create NotificationRequest objects (notifications)
        # 4 notifications are created:
        #     REAL_TIME, EMAIL(user default via UserInfo)
        #     REAL_TIME, EMAIL(in DeliveryConfiguration)
        #     DISABLED, EMAIL(in DeliveryConfiguration)
        #     REAL_TIME, SMS(in DeliveryConfiguration)

        # REAL_TIME, EMAIL(user default via UserInfo)
        delivery_configuration = IonObject(OT.DeliveryConfiguration,
                                 mode=DeliveryModeEnum.EMAIL,
                                 frequency=NotificationFrequencyEnum.REAL_TIME)
        notification_request = IonObject(OT.NotificationRequest,
                               name='REAL_TIME to default UserInfo email',
                               type=NotificationTypeEnum.SIMPLE,
                               origin='Miramar',
                               event_type=OT.ResourceLifecycleEvent,
                               delivery_configurations=[delivery_configuration])
        # store this notification_id to check disabled_by_system status later
        self.notification_id = self.user_notification.create_notification(notification=notification_request, user_id=self.user._id)
        
        # REAL_TIME, EMAIL(in DeliveryConfiguration), 10 notifications/day max
        delivery_configuration = IonObject(OT.DeliveryConfiguration,
                                 email='*****@*****.**',
                                 mode=DeliveryModeEnum.EMAIL,
                                 frequency=NotificationFrequencyEnum.REAL_TIME)
        notification_request = IonObject(OT.NotificationRequest,
                               name='REAL_TIME to alternate email, 10 notifications/day max',
                               type=NotificationTypeEnum.SIMPLE,
                               origin="Miramar",
                               event_type=OT.ResourceLifecycleEvent,
                               delivery_configurations=[delivery_configuration])
        self.user_notification.create_notification(notification=notification_request, user_id=self.user._id)

        # DISABLED, EMAIL(in DeliveryConfiguration)
        delivery_configuration = IonObject(OT.DeliveryConfiguration,
                                 email='*****@*****.**',
                                 mode=DeliveryModeEnum.EMAIL,
                                 frequency=NotificationFrequencyEnum.DISABLED)
        notification_request = IonObject(OT.NotificationRequest,
                               name='DISABLED to alternate email',
                               type=NotificationTypeEnum.SIMPLE,
                               origin="Miramar",
                               event_type=OT.ResourceLifecycleEvent,
                               delivery_configurations=[delivery_configuration])
        self.user_notification.create_notification(notification=notification_request, user_id=self.user._id)

        # REAL_TIME, SMS(in DeliveryConfiguration)
        delivery_configuration = IonObject(OT.DeliveryConfiguration,
                                 email='*****@*****.**',
                                 mode=DeliveryModeEnum.SMS,
                                 frequency=NotificationFrequencyEnum.REAL_TIME)
        notification_request = IonObject(OT.NotificationRequest,
                               name='SMS to alternate email',
                               type=NotificationTypeEnum.SIMPLE,
                               origin="Miramar",
                               event_type=OT.ResourceLifecycleEvent,
                               delivery_configurations=[delivery_configuration])
        self.user_notification.create_notification(notification=notification_request, user_id=self.user._id)

    def test_realtime_notifications(self):

        # monkey patch smtplib.SMTP to capture sent emails
        original_SMTP = smtplib.SMTP # store original for restoration
        global outbox
        outbox = []  # captures sent mail as (from_addr, to_addrs, msg, timestamp) tuples

        class MonkeyPatchSMTP(object):
            def __init__(self, address, host):
                self.address = address
                self.host = host
            def login(self,username,password):
                self.username = username
                self.password = password
            def sendmail(self,from_addr, to_addrs, msg):
                global outbox
                outbox.append((from_addr, to_addrs, msg,time.time()))
                return []
            def quit(self):
                pass

        smtplib.SMTP=MonkeyPatchSMTP

        # patch the CFG service.user_notification.max_daily_notifications value so we only test 10
        original_CFG_max = CFG.get_safe("service.user_notification.max_daily_notifications", 1000)
        CFG['service']['user_notification']['max_daily_notifications'] = 10

        # publish event(s) - one should trigger notifications, the other not
        self.event_publisher.publish_event(origin='Miramar', event_type=OT.ResourceLifecycleEvent)
        self.event_publisher.publish_event(origin='Hong Kong', event_type=OT.ResourceLifecycleEvent)
        time.sleep(1) # wait some non-trivial time for events to be processed by NotificationWorker

        # outbox now contains tuple(from_addr, to_addrs, msg,time.time()) for each message sent

        # verifies the alternate email requirement (i.e. an email specified in the DeliveryConfiguration)
        self.assertEqual(len([t[1] for t in outbox if t[1] == '*****@*****.**']), 1) # 1 message to Iceman
        self.assertEqual(len([t[1] for t in outbox if t[1] == '*****@*****.**']), 1) # 1 message to Slider
        self.assertEqual(len([t[1] for t in outbox if t[1] == '*****@*****.**']), 0 ) # no messages to Charlie (CIV)

        # check SMS sent
        self.assertEqual(len([t[1] for t in outbox if t[1] == '*****@*****.**']), 1 )
        # check SMS <= 140 characters
        self.assertLessEqual(len([(t[2]) for t in outbox if t[1] == '*****@*****.**'][0]), 140)

        # publish 9 more events (already have 1), notification should be disabled at number 10
        for x in xrange(9):
            self.event_publisher.publish_event(origin='Miramar', event_type=OT.ResourceLifecycleEvent)
        time.sleep(2) # give system time to update NotificationRequest (1 sec saw occasional fail)
        # publish 1 more event, should NOT trigger any additional notifications
        self.event_publisher.publish_event(origin='Miramar', event_type=OT.ResourceLifecycleEvent)
        time.sleep(1) # wait some non-trivial time for events to be processed by NotificationWorker

        # check there are 10 for Iceman, not 11 even though 11 have been published
        # verifies the max_daily notification limit
        self.assertEquals(len([t[1] for t in outbox if t[1] == '*****@*****.**']), 10) # 10 to Iceman (reached limit)

        # check NotificationRequest has been disabled_by_system
        notification = self.resource_registry.read(self.notification_id)
        self.assertTrue(notification.disabled_by_system)

        # MONKEY PATCH time.time() fast forward to trigger NotificationSentScanner to persist counts
        CONSTANT_TIME = time.time() + 600 # forward 10 minutes (from now FREEZES time.time())
        def new_time():
            return CONSTANT_TIME
        old_time = time.time
        time.time = new_time

        # sleep 5s longer than interval taken from ion/processes/event/event_persister.py
        time.sleep(float(CFG.get_safe("process.event_persister.persist_interval", 1.0))+5)

        # get notification_counts from ObjectStore
        notification_counts_obj = self.object_store.read('notification_counts')
        # persisted as standard dicts, convert to Counter objects
        notification_counts = {k:Counter(v) for k,v in notification_counts_obj.items() if not (k == '_id' or k == '_rev') }
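        # (the persisted document is assumed to look roughly like
        #  {'_id': 'notification_counts', '_rev': ..., <user_id>: {'all': 30, ...}},
        #  i.e. one dict of counts per user, keyed by user id)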
        self.assertEqual(int(notification_counts.get(self.user._id).get('all')), 30) # 3 DeliveryConfigurations * 10 notifications

        # restore MONKEY PATCHed time (so we can go ahead from actual wall time)
        time.time = old_time

        # MONKEY PATCH time.time() fast forward to trigger NotificationSentScanner to flush/reset counts
        CONSTANT_TIME = time.time() + 86400 # forward 1 day (from now FREEZES time.time())
        def new_time():
            return CONSTANT_TIME
        old_time = time.time
        time.time = new_time

        # sleep 5s longer than interval taken from ion/processes/event/event_persister.py
        time.sleep(float(CFG.get_safe("process.event_persister.persist_interval", 1.0))+5)

        # get notification_counts from ObjectStore
        notification_counts_obj = self.object_store.read('notification_counts')
        # persisted as standard dicts, convert to Counter objects
        notification_counts = {k:Counter(v) for k,v in notification_counts_obj.items() if not (k == '_id' or k == '_rev') }
        self.assertEqual(notification_counts.get(self.user._id, None), None)

        # restore MONKEY PATCHed time
        time.time = old_time

        # restore original smtplib.SMTP and CFG value
        smtplib.SMTP = original_SMTP
        CFG['service']['user_notification']['max_daily_notifications'] = original_CFG_max
class UserNotificationIntTest(IonIntegrationTestCase):
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2dm.yml')

        self.unsc = UserNotificationServiceClient(node=self.container.node)
        self.rrc = ResourceRegistryServiceClient(node=self.container.node)
        self.imc = IdentityManagementServiceClient(node=self.container.node)
        
    def xtest_find_event_types_for_resource(self):
        dataset_object = IonObject(RT.DataSet, name="dataset1")
        dataset_id, version = self.rrc.create(dataset_object)
        events = self.unsc.find_event_types_for_resource(dataset_id)
        log.debug("dataset events = " + str(events))
        try:
            self.unsc.find_event_types_for_resource("bogus_id")
        except Exception:
            pass
        else:
            self.fail("failed to detect non-existent resource")
        
    def test_create_two_user_notifications(self):
        user_identity_object = IonObject(RT.UserIdentity, name="user1")
        user_id = self.imc.create_user_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['resource_lifecycle']})
        self.unsc.create_notification(notification_object, user_id)
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification2",
                                                                 "origin_list":['Some_Resource_Agent_ID2'],
                                                                 "events_list":['data']})
        self.unsc.create_notification(notification_object, user_id)

    def test_delete_user_notifications(self):
        user_identity_object = IonObject(RT.UserIdentity, name="user1")
        user_id = self.imc.create_user_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)
        notification_object1 = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['resource_lifecycle']})
        notification1_id = self.unsc.create_notification(notification_object1, user_id)
        notification_object2 = IonObject(RT.NotificationRequest, {"name":"notification2",
                                                                 "origin_list":['Some_Resource_Agent_ID2'],
                                                                 "events_list":['data']})
        notification2_id = self.unsc.create_notification(notification_object2, user_id)
        self.unsc.delete_notification(notification1_id)
        self.unsc.delete_notification(notification2_id)

    def test_find_user_notifications(self):
        user_identity_object = IonObject(RT.UserIdentity, name="user1")
        user_id = self.imc.create_user_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['resource_lifecycle']})
        self.unsc.create_notification(notification_object, user_id)
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification2",
                                                                 "origin_list":['Some_Resource_Agent_ID2'],
                                                                 "events_list":['data']})
        self.unsc.create_notification(notification_object, user_id)
        notifications = self.unsc.find_notifications_by_user(user_id)
        for n in notifications:
            log.debug("n = " +str(n))

    def test_update_user_notification(self):
        user_identity_object = IonObject(RT.UserIdentity, name="user1")
        user_id = self.imc.create_user_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['resource_lifecycle']})
        notification_id = self.unsc.create_notification(notification_object, user_id)
        notification = self.rrc.read(notification_id)
        notification.origin_list = ['Some_Resource_Agent_ID5']
        self.unsc.update_notification(notification)

    def test_send_notification_emails(self):
        user_identity_object = IonObject(RT.UserIdentity, name="user1")
        user_id = self.imc.create_user_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['resource_lifecycle']})
        self.unsc.create_notification(notification_object, user_id)
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification2",
                                                                 "origin_list":['Some_Resource_Agent_ID2'],
                                                                 "events_list":['data']})
        self.unsc.create_notification(notification_object, user_id)
        rle_publisher = ResourceLifecycleEventPublisher()
        rle_publisher.create_and_publish_event(origin='Some_Resource_Agent_ID1', description="RLE test event")
        de_publisher = DataEventPublisher()
        de_publisher.create_and_publish_event(origin='Some_Resource_Agent_ID2', description="DE test event")
        gevent.sleep(1)

    def test_find_events(self):
        rle_publisher = ResourceLifecycleEventPublisher(event_repo=self.container.event_repository)
        rle_publisher.create_and_publish_event(origin='Some_Resource_Agent_ID1', description="RLE test event1")
        rle_publisher.create_and_publish_event(origin='Some_Resource_Agent_ID1', description="RLE test event2")
        de_publisher = DataEventPublisher(event_repo=self.container.event_repository)
        de_publisher.create_and_publish_event(origin='Some_Resource_Agent_ID2', description="DE test event1")
        de_publisher.create_and_publish_event(origin='Some_Resource_Agent_ID2', description="DE test event2")
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID1')
        for event in events:
            log.debug("event=" + str(event))
        events = self.unsc.find_events(type='DataEvent')
        for event in events:
            log.debug("event=" + str(event))
Example #17
0
class TestDataProductManagementServiceIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli = DataProductManagementServiceClient(node=self.container.node)
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
        self.process_dispatcher   = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(datastore_name)
        self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

        self.process_definitions  = {}
        ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class' :'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space       = 'science_granule_ingestion'
        self.exchange_point       = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'],configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)
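        # addCleanup registers cleaning_up() to run after each test: it cancels the
        # ingestion worker processes started above and removes the exchange queues and
        # points recorded in self.exchange_names / self.exchange_points.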

    def cleaning_up(self):
        log.debug("number of pids to be terminated: %s", len(self.pids))
        for pid in self.pids:
            try:
                self.process_dispatcher.cancel_process(pid)
                log.debug("Terminated the process: %s", pid)
            except Exception:
                log.debug("could not terminate the process id: %s", pid)
        IngestionManagementIntTest.clean_subscriptions()

        for xn in self.exchange_names:
            xni = self.container.ex_manager.create_xn_queue(xn)
            xni.delete()
        for xp in self.exchange_points:
            xpi = self.container.ex_manager.create_xp(xp)
            xpi.delete()

    def get_datastore(self, dataset_id):
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore
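    # Example use of the helper above (sketch mirroring the tests below): look up the
    # dataset associated with a data product and open its datastore.
    #     dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
    #     datastore = self.get_datastore(dataset_ids[0])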


    def test_create_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict')
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=parameter_dictionary_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product (DP1) using the stream definition
        #------------------------------------------------------------------------------------------------

        # Generic time-series data domain creation
        tdom, sdom = time_series_domain()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom.dump(), 
            spatial_domain = sdom.dump())

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create the data product; its parameters come from the parameter dictionary referenced by the
        # stream definition above (no ParameterContext objects are built by hand in this test)
        #------------------------------------------------------------------------------------------------
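        # (Sketch only, not executed here: building a ParameterDictionary by hand would look
        # roughly like the following, assuming the coverage_model API used by this codebase --
        #     from coverage_model.parameter import ParameterContext, ParameterDictionary
        #     from coverage_model.parameter_types import QuantityType
        #     pdict = ParameterDictionary()
        #     pdict.add_context(ParameterContext('temp', param_type=QuantityType()))
        # -- this test instead relies on the predefined 'ctd_parsed_param_dict'.)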

        dp_id = self.dpsc_cli.create_data_product( data_product= dp_obj,
                                            stream_definition_id=ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        #------------------------------------------------------------------------------------------------
        # test creating a second data product (DP2) with the same stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('Creating new data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
            name='DP2',
            description='some new dp',
            temporal_domain = tdom.dump(),
            spatial_domain = sdom.dump())

        dp_id2 = self.dpsc_cli.create_data_product(dp_obj, ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id2)
        log.debug('new dp_id = %s' % dp_id2)

        #------------------------------------------------------------------------------------------------
        #make sure data product is associated with stream def
        #------------------------------------------------------------------------------------------------
        streamdefs = []
        streams, _ = self.rrclient.find_objects(dp_id2, PRED.hasStream, RT.Stream, True)
        for s in streams:
            log.debug("Checking stream %s" % s)
            sdefs, _ = self.rrclient.find_objects(s, PRED.hasStreamDefinition, RT.StreamDefinition, True)
            for sd in sdefs:
                log.debug("Checking streamdef %s" % sd)
                streamdefs.append(sd)
        self.assertIn(ctd_stream_def_id, streamdefs)


        # test reading a non-existent data product
        log.debug('reading non-existent data product')

        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product('some_fake_id')

        # update a data product (tests read also)
        log.debug('Updating data product')
        # first get the existing dp object
        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now tweak the object
        dp_obj.description = 'the very first dp'
        # now write the dp back to the registry
        update_result = self.dpsc_cli.update_data_product(dp_obj)

        # now get the dp back to see if it was updated
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertEquals(dp_obj.description,'the very first dp')

        #test extension
        extended_product = self.dpsc_cli.get_data_product_extension(dp_id)
        self.assertEqual(dp_id, extended_product._id)
        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.product_download_size_estimated.status)
        self.assertEqual(0, extended_product.computed.product_download_size_estimated.value)

        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.parameters.status)
        #log.debug("test_create_data_product: parameters %s" % extended_product.computed.parameters.value)

        # now 'delete' the data product
        log.debug("deleting data product: %s" % dp_id)
        self.dpsc_cli.delete_data_product(dp_id)
        self.dpsc_cli.force_delete_data_product(dp_id)

        # now try to get the deleted dp object
        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # Get the events corresponding to the data product
        ret = self.unsc.get_recent_events(resource_id=dp_id)
        events = ret.value

        for event in events:
            log.debug("event time: %s" % event.ts_created)

#        self.assertTrue(len(events) > 0)

    def test_data_product_stream_def(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)

        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)
        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        stream_def_id = self.dpsc_cli.get_data_product_stream_definition(dp_id)
        self.assertEquals(ctd_stream_def_id, stream_def_id)



    def test_activate_suspend_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product using the stream definition
        #------------------------------------------------------------------------------------------------
        # Construct generic temporal and spatial domain objects for a time series
        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create the data product; its parameters come from the parameter dictionary referenced by the
        # stream definition above (no ParameterContext objects are built by hand in this test)
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)
        
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        self.get_datastore(dataset_ids[0])


        # Check that the streams associated with the data product are being persisted
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever's retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_ids[0])
        self.assertIsInstance(replay_data, Granule)

        log.debug("The data retriever was able to replay the dataset that was attached to the data product "
                  "we wanted to be persisted. Therefore the data product was indeed persisted with "
                  "otherwise we could not have retrieved its dataset using the data retriever. Therefore "
                  "this demonstration shows that L4-CI-SA-RQ-267 is satisfied: 'Data product management shall persist data products'")

        data_product_object = self.rrclient.read(dp_id)
        self.assertEquals(data_product_object.name,'DP1')
        self.assertEquals(data_product_object.description,'some new dp')

        log.debug("Towards L4-CI-SA-RQ-308: 'Data product management shall persist data product metadata'. "
                  " Attributes in create for the data product obj, name= '%s', description='%s', match those of object from the "
                  "resource registry, name='%s', desc='%s'" % (dp_obj.name, dp_obj.description,data_product_object.name,
                                                           data_product_object.description))

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)

        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object

        with self.assertRaises(NotFound):
            dp_obj = self.rrclient.read(dp_id)
class UserNotificationIntTest(IonIntegrationTestCase):
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2dm.yml')

        self.unsc = UserNotificationServiceClient(node=self.container.node)
        self.rrc = ResourceRegistryServiceClient(node=self.container.node)
        self.imc = IdentityManagementServiceClient(node=self.container.node)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_email(self):

        proc1 = self.container.proc_manager.procs_by_name['user_notification']

        # Create a user and get the user_id
        user = UserInfo(name = 'new_user')
        user_id, _ = self.rrc.create(user)

        # set up....
        notification_id = self.unsc.create_email(event_type='ResourceLifecycleEvent',
            event_subtype=None,
            origin='Some_Resource_Agent_ID1',
            origin_type=None,
            user_id=user_id,
            email='*****@*****.**',
            mode = DeliveryMode.DIGEST,
            message_header='message_header',
            parser='parser',
            period=1)

        #------------------------------------------------------------------------------------------------------
        # Setup so as to be able to get the message and headers going into the
        # subscription callback method of the EmailEventProcessor
        #------------------------------------------------------------------------------------------------------

        # publish an event for each notification to generate the emails
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1', description="RLE test event")


        msg_tuple = proc1.event_processors[notification_id].smtp_client.sentmail.get(timeout=4)

        self.assertTrue(proc1.event_processors[notification_id].smtp_client.sentmail.empty())

        message = msg_tuple[2]
        list_lines = message.split("\n")

        #-------------------------------------------------------
        # parse the message body
        #-------------------------------------------------------

        message_dict = {}
        for line in list_lines:
            key_item = line.split(": ")
            if key_item[0] == 'Subject':
                message_dict['Subject'] = key_item[1] + key_item[2]
            else:
                try:
                    message_dict[key_item[0]] = key_item[1]
                except IndexError:
                    # these IndexError exceptions happen only because the message sometimes
                    # has successive \r\n sequences (i.e. blank lines), so the indexing
                    # goes out of range. These blank lines can safely be ignored, so we
                    # ignore the exceptions here.
                    pass
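
        # A sketch of a more robust alternative (not used by this test): let the stdlib
        # email package parse the headers instead of splitting each line on ": ", which
        # avoids the IndexError handling above.
        #     import email as email_lib
        #     parsed = email_lib.message_from_string(message)
        #     subject = parsed['Subject']
        #     originator = parsed['Originator']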

        #-------------------------------------------------------
        # make assertions
        #-------------------------------------------------------

        self.assertEquals(msg_tuple[1], '*****@*****.**' )
        #self.assertEquals(msg_tuple[0], ION_NOTIFICATION_EMAIL_ADDRESS)

        #self.assertEquals(message_dict['From'], ION_NOTIFICATION_EMAIL_ADDRESS)
        self.assertEquals(message_dict['To'], '*****@*****.**')
        self.assertEquals(message_dict['Event'].rstrip('\r'), 'ResourceLifecycleEvent')
        self.assertEquals(message_dict['Originator'].rstrip('\r'), 'Some_Resource_Agent_ID1')
        self.assertEquals(message_dict['Description'].rstrip('\r'), 'RLE test event')

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_sms(self):

        proc1 = self.container.proc_manager.procs_by_name['user_notification']

        # Create a user and get the user_id
        user = UserInfo(name = 'new_user')
        user_id, _ = self.rrc.create(user)

        # set up....
        notification_id = self.unsc.create_sms(event_type='ResourceLifecycleEvent',
            event_subtype=None,
            origin='Some_Resource_Agent_ID1',
            origin_type=None,
            user_id=user_id,
            phone = '401-XXX-XXXX',
            provider='T-Mobile',
            message_header='message_header',
            parser='parser',
            )

        #------------------------------------------------------------------------------------------------------
        # Setup so as to be able to get the message and headers going into the
        # subscription callback method of the EmailEventProcessor
        #------------------------------------------------------------------------------------------------------

        # publish an event for each notification to generate the emails
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1', description="RLE test event")


        msg_tuple = proc1.event_processors[notification_id].smtp_client.sentmail.get(timeout=4)

        self.assertTrue(proc1.event_processors[notification_id].smtp_client.sentmail.empty())

        message = msg_tuple[2]
        list_lines = message.split("\n")

        #-------------------------------------------------------
        # parse the message body
        #-------------------------------------------------------

        message_dict = {}
        for line in list_lines:
            key_item = line.split(": ")
            if key_item[0] == 'Subject':
                message_dict['Subject'] = key_item[1] + key_item[2]
            else:
                try:
                    message_dict[key_item[0]] = key_item[1]
                except IndexError:
                    # these IndexError exceptions happen only because the message sometimes
                    # has successive \r\n sequences (i.e. blank lines), so the indexing
                    # goes out of range. These blank lines can safely be ignored, so we
                    # ignore the exceptions here.
                    pass

        #-------------------------------------------------------
        # make assertions
        #-------------------------------------------------------

        self.assertEquals(msg_tuple[1], '*****@*****.**' )
        #self.assertEquals(msg_tuple[0], ION_NOTIFICATION_EMAIL_ADDRESS)
        self.assertEquals(message_dict['Description'].rstrip('\r'), 'RLE test event')

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_event_detection(self):

        proc1 = self.container.proc_manager.procs_by_name['user_notification']

        # Create a user and get the user_id
        user = UserInfo(name = 'new_user')
        user_id, _ = self.rrc.create(user)

        # Create detection notification
        dfilt = DetectionFilterConfig()

        dfilt.processing['condition'] = 5
        dfilt.processing['comparator'] = '>'
        dfilt.processing['filter_field'] = 'voltage'

        dfilt.delivery['message'] = 'I got my detection event!'
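        # i.e. fire a DetectionEvent whenever an incoming ExampleDetectableEvent carries
        # a 'voltage' field greater than 5 (see the two publish_event calls below)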

        notification_id = self.unsc.create_detection_filter(event_type='ExampleDetectableEvent',
            event_subtype=None,
            origin='Some_Resource_Agent_ID1',
            origin_type=None,
            user_id=user_id,
            filter_config=dfilt
            )

        #---------------------------------------------------------------------------------
        # Create event subscription for resulting detection event
        #---------------------------------------------------------------------------------

        # Create an email notification so that when the DetectionEventProcessor
        # detects an event and fires its own output event, it will be caught by an
        # EmailEventProcessor and an email will be sent to the user

        notification_id_2 = self.unsc.create_email(event_type='DetectionEvent',
            event_subtype=None,
            origin='DetectionEventProcessor',
            origin_type=None,
            user_id=user_id,
            email='*****@*****.**',
            mode = DeliveryMode.UNFILTERED,
            message_header='Detection event',
            parser='parser',
            period=1)


        # Send event that is not detected
        # publish an event for each notification to generate the emails
        rle_publisher = EventPublisher("ExampleDetectableEvent")

        # since the voltage field in this event is less than 5, it will not be detected
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event",
                                    voltage = 3)

        # Check at the end of the test to make sure this event never triggered a Detectable!

        # Send Event that is detected
        # publish an event for each notification to generate the emails

        # since the voltage field in this event is greater than 5, it WILL be detected
        rle_publisher = EventPublisher("ExampleDetectableEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event",
                                    voltage = 10)

        #-------------------------------------------------------
        # make assertions
        #-------------------------------------------------------

        msg_tuple = proc1.event_processors[notification_id_2].smtp_client.sentmail.get(timeout=4)

        # The first event never triggered an email because its voltage was less than 5, so the queue is now empty
        self.assertTrue(proc1.event_processors[notification_id_2].smtp_client.sentmail.empty())

        self.assertEquals(msg_tuple[1], '*****@*****.**' )
        #self.assertEquals(msg_tuple[0], ION_NOTIFICATION_EMAIL_ADDRESS)

        # parse the message body
        message = msg_tuple[2]
        list_lines = message.split("\n")

        message_dict = {}
        for line in list_lines:
            key_item = line.split(": ")
            if key_item[0] == 'Subject':
                message_dict['Subject'] = key_item[1] + key_item[2]
            else:
                try:
                    message_dict[key_item[0]] = key_item[1]
                except IndexError:
                    # these IndexError exceptions happen only because the message sometimes
                    # has successive \r\n sequences (i.e. blank lines), so the indexing
                    # goes out of range. These blank lines can safely be ignored, so we
                    # ignore the exceptions here.
                    pass

        #self.assertEquals(message_dict['From'], ION_NOTIFICATION_EMAIL_ADDRESS)
        self.assertEquals(message_dict['To'], '*****@*****.**')
        self.assertEquals(message_dict['Event'].rstrip('\r'), 'DetectionEvent')
        self.assertEquals(message_dict['Originator'].rstrip('\r'), 'DetectionEventProcessor')
        self.assertEquals(message_dict['Description'].rstrip('\r'), 'Event was detected by DetectionEventProcessor')

    @unittest.skip('interface has changed!')
    def test_find_event_types_for_resource(self):
        # create a dataset object in the RR to pass into the UNS method
        dataset_object = IonObject(RT.DataSet, name="dataset1")
        dataset_id, version = self.rrc.create(dataset_object)
        
        # get the list of event types for the dataset
        events = self.unsc.find_event_types_for_resource(dataset_id)
        log.debug("dataset events = " + str(events))
        if not events == ['dataset_supplement_added', 'dataset_change']:
            self.fail("failed to return correct list of event types")
            
        # try to pass in an id of a resource that doesn't exist (should fail)
        try:
            events = self.unsc.find_event_types_for_resource("bogus_id")
            self.fail("failed to detect non-existant resource")
        except:
            pass

    @unittest.skip('interface has changed!')
    def test_create_two_user_notifications(self):
        # create user with email address in RR
        user_identity_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)
        
        # create first notification
        notification_object1 = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['ResourceLifecycleEvent']})
        notification_id1 = self.unsc.create_notification(notification_object1, user_id)
        # create second notification
        notification_object2 = IonObject(RT.NotificationRequest, {"name":"notification2",
                                                                 "origin_list":['Some_Resource_Agent_ID2'],
                                                                 "events_list":['DataEvent']})
        notification_id2 = self.unsc.create_notification(notification_object2, user_id)
        
        # read the notifications back and check that they are correct
        n1 = self.unsc.read_notification(notification_id1)
        if n1.name != notification_object1.name or \
           n1.origin_list != notification_object1.origin_list or \
           n1.events_list != notification_object1.events_list:
            self.fail("notification was not correct")
        n2 = self.unsc.read_notification(notification_id2)
        if n2.name != notification_object2.name or \
           n2.origin_list != notification_object2.origin_list or \
           n2.events_list != notification_object2.events_list:
            self.fail("notification was not correct")

    @unittest.skip('interface has changed!')
    def test_delete_user_notifications(self):
        # create user with email address in RR
        user_identity_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)
        
        # create first notification
        notification_object1 = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['ResourceLifecycleEvent']})
        notification1_id = self.unsc.create_notification(notification_object1, user_id)
        # create second notification
        notification_object2 = IonObject(RT.NotificationRequest, {"name":"notification2",
                                                                 "origin_list":['Some_Resource_Agent_ID2'],
                                                                 "events_list":['DataEvent']})
        notification2_id = self.unsc.create_notification(notification_object2, user_id)
        
        # delete both notifications
        self.unsc.delete_notification(notification1_id)
        self.unsc.delete_notification(notification2_id)
        
        # check that the notifications are not there
        try:
            n1 = self.unsc.read_notification(notification1_id)
        except:
            try:
                n2 = self.unsc.read_notification(notification2_id)
            except:
                return
        self.fail("failed to delete notifications")

    @unittest.skip('interface has changed!')
    def test_find_user_notifications(self):
        # create user with email address in RR
        user_identity_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)

        # create first notification
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['ResourceLifecycleEvent']})

        self.unsc.create_notification(notification_object, user_id)
        # create second notification
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification2",
                                                                 "origin_list":['Some_Resource_Agent_ID2'],
                                                                 "events_list":['DataEvent']})
        self.unsc.create_notification(notification_object, user_id)
        
        # try to find all notifications for user
        notifications = self.unsc.find_notifications_by_user(user_id)
        if len(notifications) != 2:
            self.fail("failed to find all notifications")

    @unittest.skip('interface has changed!')
    def test_update_user_notification(self):
        # create user with email address in RR
        user_identity_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)

        # create a notification
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['ResourceLifecycleEvent']})
        notification_id = self.unsc.create_notification(notification_object, user_id)
        
        # read back the notification and change it
        notification = self.unsc.read_notification(notification_id)
        notification.origin_list = ['Some_Resource_Agent_ID5']
        self.unsc.update_notification(notification)
        
        # read back the notification and check that it got changed
        notification = self.unsc.read_notification(notification_id)
        if notification.origin_list != ['Some_Resource_Agent_ID5']:
            self.fail("failed to change notification")

    @unittest.skip('interface has changed!')
    def test_send_notification_emails(self):
        # create user with email address in RR
        user_identity_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identity_object)
        user_info_object = IonObject(RT.UserInfo, {"name":"user1_info", "contact":{"email":'*****@*****.**'}})
        self.imc.create_user_info(user_id, user_info_object)

        # create first notification
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification1",
                                                                 "origin_list":['Some_Resource_Agent_ID1'],
                                                                 "events_list":['ResourceLifecycleEvent']})
        self.unsc.create_notification(notification_object, user_id)
        # create second notification
        notification_object = IonObject(RT.NotificationRequest, {"name":"notification2",
                                                                 "origin_list":['Some_Resource_Agent_ID2'],
                                                                 "events_list":['DataEvent']})
        self.unsc.create_notification(notification_object, user_id)
        
        # publish an event for each notification to generate the emails
        # this can't easily be checked in software, so these need to be checked at the [email protected] account
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1', description="RLE test event")
        de_publisher = EventPublisher("DataEvent")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2', description="DE test event")
        gevent.sleep(1)

    @unittest.skip('interface has changed!')
    def test_find_events(self):
        # publish some events for the event repository
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        de_publisher = EventPublisher("DataEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1', description="RLE test event1")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1', description="RLE test event2")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1', description="RLE test event3")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2', description="DE test event1")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2', description="DE test event2")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2', description="DE test event3")
        
        # find all events for the originator 'Some_Resource_Agent_ID1'
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID1')
        if len(events) != 3:
            self.fail("failed to find all events")  
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'Some_Resource_Agent_ID1':
                self.fail("failed to find correct events")
                  
        # find all events of type 'DataEvent'
        events = self.unsc.find_events(type='DataEvent')
        if len(events) != 3:
            self.fail("failed to find all events")  
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'DataEvent':
                self.fail("failed to find correct events") 
                 
        # find 2 events for the originator 'Some_Resource_Agent_ID2'
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID2', limit=2)
        if len(events) != 2:
            self.fail("failed to find all events")  
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'Some_Resource_Agent_ID2':
                self.fail("failed to find correct events")
            
        # find all events for the originator 'Some_Resource_Agent_ID1' in reverse time order
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID1', descending=True)
        if len(events) != 3:
            self.fail("failed to find all events")  
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'Some_Resource_Agent_ID1':
                self.fail("failed to find correct events")
Exemple #19
class RealTimeNotificationTestCase(IonIntegrationTestCase):
    def setUp(self):

        self._start_container()

        # patch the CFG service.user_notification.max_daily_notifications value so we only test 10
        original_CFG_max = CFG.get_safe(
            "service.user_notification.max_daily_notifications", 1000)
        CFG['service']['user_notification']['max_daily_notifications'] = 10

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        self.object_store = self.container.object_store
        self.resource_registry = self.container.resource_registry
        self.user_notification = UserNotificationServiceClient()
        self.event_publisher = EventPublisher()

        # create UserInfo object (user)
        user = UserInfo()
        user.name = 'Iceman'
        user.contact.email = '*****@*****.**'
        user_id, _ = self.resource_registry.create(user)
        self.user = self.resource_registry.read(user_id)

        # create NotificationRequest objects (notifications)
        # 4 notifications are created:
        #     REAL_TIME, EMAIL(user default via UserInfo)
        #     REAL_TIME, EMAIL(in DeliveryConfiguration)
        #     DISABLED, EMAIL(in DeliveryConfiguration)
        #     REAL_TIME, SMS(in DeliveryConfiguration)

        # REAL_TIME, EMAIL(user default via UserInfo)
        delivery_configuration = IonObject(
            OT.DeliveryConfiguration,
            mode=DeliveryModeEnum.EMAIL,
            frequency=NotificationFrequencyEnum.REAL_TIME)
        notification_request = IonObject(
            OT.NotificationRequest,
            name='REAL_TIME to default UserInfo email',
            type=NotificationTypeEnum.SIMPLE,
            origin='Miramar',
            event_type=OT.ResourceLifecycleEvent,
            delivery_configurations=[delivery_configuration])
        # store this notification_id to check disabled_by_system status later
        self.notification_id = self.user_notification.create_notification(
            notification=notification_request, user_id=self.user._id)

        # REAL_TIME, EMAIL(in DeliveryConfiguration), 10 notifications/day max
        delivery_configuration = IonObject(
            OT.DeliveryConfiguration,
            email='*****@*****.**',
            mode=DeliveryModeEnum.EMAIL,
            frequency=NotificationFrequencyEnum.REAL_TIME)
        notification_request = IonObject(
            OT.NotificationRequest,
            name='REAL_TIME to alternate email, 10 notifications/day max',
            type=NotificationTypeEnum.SIMPLE,
            origin="Miramar",
            event_type=OT.ResourceLifecycleEvent,
            delivery_configurations=[delivery_configuration])
        self.user_notification.create_notification(
            notification=notification_request, user_id=self.user._id)

        # DISABLED, EMAIL(in DeliveryConfiguration)
        delivery_configuration = IonObject(
            OT.DeliveryConfiguration,
            email='*****@*****.**',
            mode=DeliveryModeEnum.EMAIL,
            frequency=NotificationFrequencyEnum.DISABLED)
        notification_request = IonObject(
            OT.NotificationRequest,
            name='DISABLED to alternate email',
            type=NotificationTypeEnum.SIMPLE,
            origin="Miramar",
            event_type=OT.ResourceLifecycleEvent,
            delivery_configurations=[delivery_configuration])
        self.user_notification.create_notification(
            notification=notification_request, user_id=self.user._id)

        # REAL_TIME, SMS(in DeliveryConfiguration)
        delivery_configuration = IonObject(
            OT.DeliveryConfiguration,
            email='*****@*****.**',
            mode=DeliveryModeEnum.SMS,
            frequency=NotificationFrequencyEnum.REAL_TIME)
        notification_request = IonObject(
            OT.NotificationRequest,
            name='SMS to alternate email',
            type=NotificationTypeEnum.SIMPLE,
            origin="Miramar",
            event_type=OT.ResourceLifecycleEvent,
            delivery_configurations=[delivery_configuration])
        self.user_notification.create_notification(
            notification=notification_request, user_id=self.user._id)

    def test_realtime_notifications(self):

        # monkey patch smtplib.SMTP to capture sent emails
        original_SMTP = smtplib.SMTP  # store original for restoration

        class MonkeyPatchSMTP(object):
            def __init__(self, address, host):
                self.address = address
                self.host = host

            def login(self, username, password):
                self.username = username
                self.password = password

            def sendmail(self, from_addr, to_addrs, msg):
                global outbox
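                # 'outbox' is assumed to be a module-level list initialized elsewhere in
                # this test module; every message "sent" is captured here instead of
                # actually going over SMTP.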
                outbox.append((from_addr, to_addrs, msg, time.time()))
                return []

            def quit(self):
                pass

        smtplib.SMTP = MonkeyPatchSMTP

        # patch the CFG service.user_notification.max_daily_notifications value so we only test 10
        original_CFG_max = CFG.get_safe(
            "service.user_notification.max_daily_notifications", 1000)
        CFG['service']['user_notification']['max_daily_notifications'] = 10

        # publish event(s) - one should trigger notifications, the other not
        self.event_publisher.publish_event(
            origin='Miramar', event_type=OT.ResourceLifecycleEvent)
        self.event_publisher.publish_event(
            origin='Hong Kong', event_type=OT.ResourceLifecycleEvent)
        time.sleep(1)  # wait some non-trivial time for events to be processed by NotificationWorker

        # outbox now contains tuple(from_addr, to_addrs, msg,time.time()) for each message sent

        # verifies the alternate email requirement (eg. email specified in DeliveryConfiguration)
        self.assertEqual(
            len([t[1] for t in outbox if t[1] == '*****@*****.**']),
            1)  # 1 message to Iceman
        self.assertEqual(
            len([t[1] for t in outbox if t[1] == '*****@*****.**']),
            1)  # 1 message to Slider
        self.assertEqual(
            len([t[1] for t in outbox if t[1] == '*****@*****.**']),
            0)  # no messages to Charlie (CIV)

        # check SMS sent
        self.assertEqual(
            len([
                t[1] for t in outbox if t[1] == '*****@*****.**'
            ]), 1)
        # check SMS <= 140 characters
        self.assertLessEqual(
            len([(t[2]) for t in outbox
                 if t[1] == '*****@*****.**'][0]), 140)

        # publish 9 more events (already have 1), notification should be disabled at number 10
        for x in xrange(9):
            self.event_publisher.publish_event(
                origin='Miramar', event_type=OT.ResourceLifecycleEvent)
        time.sleep(2)  # give system time to update NotificationRequest (1 sec saw occasional fail)
        # publish 1 more event, should NOT trigger any additional notifications
        self.event_publisher.publish_event(
            origin='Miramar', event_type=OT.ResourceLifecycleEvent)
        time.sleep(1)  # wait some non-trivial time for events to be processed by NotificationWorker

        # check there are 10 for Iceman, not 11 even though 11 have been published
        # verifies the max_daily notification limit
        self.assertEquals(
            len([t[1] for t in outbox if t[1] == '*****@*****.**']),
            10)  # 10 to Iceman (reached limit)

        # check NotificationRequest has been disabled_by_system
        notification = self.resource_registry.read(self.notification_id)
        self.assertTrue(notification.disabled_by_system)

        # MONKEY PATCH time.time() fast forward to trigger NotificationSentScanner to persist counts
        CONSTANT_TIME = time.time() + 600  # forward 10 minutes (FREEZES time.time() from now on)

        def new_time():
            return CONSTANT_TIME

        old_time = time.time
        time.time = new_time

        # sleep 5s longer than interval taken from ion/processes/event/event_persister.py
        time.sleep(float(CFG.get_safe("process.event_persister.persist_interval", 1.0)) + 5)

        # get notification_counts from ObjectStore
        notification_counts_obj = self.object_store.read('notification_counts')
        # persisted as standard dicts, convert to Counter objects
        notification_counts = {
            k: Counter(v)
            for k, v in notification_counts_obj.items()
            if not (k == '_id' or k == '_rev')
        }
        # 3 DeliveryConfigurations * 10 notifications
        self.assertEqual(int(notification_counts.get(self.user._id).get('all')), 30)

        # restore MONKEY PATCHed time (so we can go ahead from actual wall time)
        time.time = old_time

        # MONKEY PATCH time.time() fast forward to trigger NotificationSentScanner to flush/reset counts
        CONSTANT_TIME = time.time() + 86400  # forward 1 day (FREEZES time.time() from now on)

        def new_time():
            return CONSTANT_TIME

        old_time = time.time
        time.time = new_time

        # sleep 5s longer than interval taken from ion/processes/event/event_persister.py
        time.sleep(float(CFG.get_safe("process.event_persister.persist_interval", 1.0)) + 5)

        # get notification_counts from ObjectStore
        notification_counts_obj = self.object_store.read('notification_counts')
        # persisted as standard dicts, convert to Counter objects
        notification_counts = {
            k: Counter(v)
            for k, v in notification_counts_obj.items()
            if not (k == '_id' or k == '_rev')
        }
        self.assertEqual(notification_counts.get(self.user._id, None), None)

        # restore MONKEY PATCHed time
        time.time = old_time

        # restore original smtplib.SMTP and CFG value
        smtplib.SMTP = original_SMTP
        CFG['service']['user_notification']['max_daily_notifications'] = original_CFG_max
Exemple #20
    def setUp(self):

        self._start_container()

        # patch the CFG service.user_notification.max_daily_notifications value so we only test 10
        original_CFG_max = CFG.get_safe(
            "service.user_notification.max_daily_notifications", 1000)
        CFG['service']['user_notification']['max_daily_notifications'] = 10

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        self.object_store = self.container.object_store
        self.resource_registry = self.container.resource_registry
        self.user_notification = UserNotificationServiceClient()
        self.event_publisher = EventPublisher()

        # create UserInfo object (user)
        user = UserInfo()
        user.name = 'Iceman'
        user.contact.email = '*****@*****.**'
        user_id, _ = self.resource_registry.create(user)
        self.user = self.resource_registry.read(user_id)

        # create NotificationRequest objects (notifications)
        # 4 notifications are created:
        #     REAL_TIME, EMAIL(user default via UserInfo)
        #     REAL_TIME, EMAIL(in DeliveryConfiguration)
        #     DISABLED, EMAIL(in DeliveryConfiguration)
        #     REAL_TIME, SMS(in DeliveryConfiguration)

        # REAL_TIME, EMAIL(user default via UserInfo)
        delivery_configuration = IonObject(
            OT.DeliveryConfiguration,
            mode=DeliveryModeEnum.EMAIL,
            frequency=NotificationFrequencyEnum.REAL_TIME)
        notification_request = IonObject(
            OT.NotificationRequest,
            name='REAL_TIME to default UserInfo email',
            type=NotificationTypeEnum.SIMPLE,
            origin='Miramar',
            event_type=OT.ResourceLifecycleEvent,
            delivery_configurations=[delivery_configuration])
        # store this notification_id to check disabled_by_system status later
        self.notification_id = self.user_notification.create_notification(
            notification=notification_request, user_id=self.user._id)

        # REAL_TIME, EMAIL(in DeliveryConfiguration), 10 notifications/day max
        delivery_configuration = IonObject(
            OT.DeliveryConfiguration,
            email='*****@*****.**',
            mode=DeliveryModeEnum.EMAIL,
            frequency=NotificationFrequencyEnum.REAL_TIME)
        notification_request = IonObject(
            OT.NotificationRequest,
            name='REAL_TIME to alternate email, 10 notifications/day max',
            type=NotificationTypeEnum.SIMPLE,
            origin="Miramar",
            event_type=OT.ResourceLifecycleEvent,
            delivery_configurations=[delivery_configuration])
        self.user_notification.create_notification(
            notification=notification_request, user_id=self.user._id)

        # DISABLED, EMAIL(in DeliveryConfiguration)
        delivery_configuration = IonObject(
            OT.DeliveryConfiguration,
            email='*****@*****.**',
            mode=DeliveryModeEnum.EMAIL,
            frequency=NotificationFrequencyEnum.DISABLED)
        notification_request = IonObject(
            OT.NotificationRequest,
            name='DISABLED to alternate email',
            type=NotificationTypeEnum.SIMPLE,
            origin="Miramar",
            event_type=OT.ResourceLifecycleEvent,
            delivery_configurations=[delivery_configuration])
        self.user_notification.create_notification(
            notification=notification_request, user_id=self.user._id)

        # REAL_TIME, SMS(in DeliveryConfiguration)
        delivery_configuration = IonObject(
            OT.DeliveryConfiguration,
            email='*****@*****.**',
            mode=DeliveryModeEnum.SMS,
            frequency=NotificationFrequencyEnum.REAL_TIME)
        notification_request = IonObject(
            OT.NotificationRequest,
            name='SMS to alternate email',
            type=NotificationTypeEnum.SIMPLE,
            origin="Miramar",
            event_type=OT.ResourceLifecycleEvent,
            delivery_configurations=[delivery_configuration])
        self.user_notification.create_notification(
            notification=notification_request, user_id=self.user._id)
class TestDataProductManagementServiceIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli = DataProductManagementServiceClient(node=self.container.node)
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
        self.process_dispatcher   = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(datastore_name)
        self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

        self.process_definitions  = {}
        ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class' :'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id


        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space       = 'science_granule_ingestion'
        self.exchange_point       = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'],configuration=config)

    def get_datastore(self, dataset_id):
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore


    @unittest.skip('OBE')
    def test_get_last_update(self):



        # Construct temporal and spatial Coordinate Reference System objects
        tcrs = CRS([AxisTypeEnum.TIME])
        scrs = CRS([AxisTypeEnum.LON, AxisTypeEnum.LAT])

        # Construct temporal and spatial Domain objects
        tdom = GridDomain(GridShape('temporal', [0]), tcrs, MutabilityEnum.EXTENSIBLE) # 1d (timeline)
        sdom = GridDomain(GridShape('spatial', [0]), scrs, MutabilityEnum.IMMUTABLE) # 1d spatial topology (station/trajectory)

        sdom = sdom.dump()
        tdom = tdom.dump()

        #@TODO: DO NOT DO THIS, WHEN THIS TEST IS REWRITTEN GET RID OF THIS, IT WILL FAIL, thanks -Luke
        parameter_dictionary = get_param_dict('ctd_parsed_param_dict')
        parameter_dictionary = parameter_dictionary.dump()

        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id = self.dpsc_cli.create_data_product(data_product=dp_obj, stream_definition_id=self.stream_def_id, parameter_dictionary=parameter_dictionary)
        stream_ids, garbage = self.rrclient.find_objects(data_product_id, PRED.hasStream, id_only=True)
        stream_id = stream_ids[0]

        fake_lu = LastUpdate()
        fake_lu_doc = self.db._ion_object_to_persistence_dict(fake_lu)
        self.db.create_doc(fake_lu_doc, object_id=stream_id)

        #------------------------------------------
        # Now execute
        #------------------------------------------
        res = self.dpsc_cli.get_last_update(data_product_id=data_product_id)
        self.assertTrue(isinstance(res[stream_id], LastUpdate), 'retrieving documents failed')

    def test_create_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict')
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=parameter_dictionary_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product (DP1) using the stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('test_createDataProduct: Creating new data product using the stream definition (L4-CI-SA-RQ-308)')

        # Generic time-series data domain creation
        tdom, sdom = time_series_domain()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom.dump(), 
            spatial_domain = sdom.dump())

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create the data product; its parameters come from the parameter dictionary referenced by the
        # stream definition above (no ParameterContext objects are built by hand in this test)
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product( data_product= dp_obj,
                                            stream_definition_id=ctd_stream_def_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        log.debug('new dp_id = %s' % dp_id)
        log.debug("test_createDataProduct: Data product info from registry %s (L4-CI-SA-RQ-308)", str(dp_obj))


        #------------------------------------------------------------------------------------------------
        # test creating a second data product (DP2) with the same stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('Creating new data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
            name='DP2',
            description='some new dp',
            temporal_domain = tdom.dump(),
            spatial_domain = sdom.dump())

        dp_id2 = self.dpsc_cli.create_data_product(dp_obj, ctd_stream_def_id)
        log.debug('new dp_id = %s' % dp_id2)

        #------------------------------------------------------------------------------------------------
        #make sure data product is associated with stream def
        #------------------------------------------------------------------------------------------------
        streamdefs = []
        streams, _ = self.rrclient.find_objects(dp_id2, PRED.hasStream, RT.Stream, True)
        for s in streams:
            log.debug("Checking stream %s" % s)
            sdefs, _ = self.rrclient.find_objects(s, PRED.hasStreamDefinition, RT.StreamDefinition, True)
            for sd in sdefs:
                log.debug("Checking streamdef %s" % sd)
                streamdefs.append(sd)
        self.assertIn(ctd_stream_def_id, streamdefs)


        # test reading a non-existent data product
        log.debug('reading non-existent data product')

        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product('some_fake_id')

        # update a data product (tests read also)
        log.debug('Updating data product')
        # first get the existing dp object
        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now tweak the object
        dp_obj.description = 'the very first dp'
        # now write the dp back to the registry
        update_result = self.dpsc_cli.update_data_product(dp_obj)

        # now get the dp back to see if it was updated
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertEquals(dp_obj.description,'the very first dp')

        #test extension
        extended_product = self.dpsc_cli.get_data_product_extension(dp_id)
        self.assertEqual(dp_id, extended_product._id)
        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.product_download_size_estimated.status)
        self.assertEqual(0, extended_product.computed.product_download_size_estimated.value)

        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.parameters.status)
        #log.debug("test_create_data_product: parameters %s" % extended_product.computed.parameters.value)

        # now 'delete' the data product
        log.debug("deleting data product: %s" % dp_id)
        self.dpsc_cli.delete_data_product(dp_id)

        self.dpsc_cli.force_delete_data_product(dp_id)

        # now try to get the deleted dp object
        try:
            dp_obj = self.dpsc_cli.read_data_product(dp_id)
        except NotFound as ex:
            pass
        else:
            self.fail("force deleted data product was found during read")

        # Get the events corresponding to the data product
        ret = self.unsc.get_recent_events(resource_id=dp_id)
        events = ret.value

        for event in events:
            log.debug("event time: %s" % event.ts_created)


        # now try to get the deleted dp object

        #todo: the RR should perhaps not return retired data products
        #    dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now try to delete the already deleted dp object
        #    log.debug("deleting non-existing data product")
        #    self.dpsc_cli.delete_data_product(dp_id)

        # Shut down container
        #    container.stop()


    def test_activate_suspend_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('test_createDataProduct: Creating new data product w/o a stream definition (L4-CI-SA-RQ-308)')

        # Construct temporal and spatial domain objects
        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create the data product, referencing the stream definition created above
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        log.debug('new dp_id = %s' % dp_id)
        log.debug("test_createDataProduct: Data product info from registry %s (L4-CI-SA-RQ-308)", str(dp_obj))

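        # Verify that a dataset was associated with the new data product and
        # look up the datastore that backs it.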
        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        self.get_datastore(dataset_ids[0])

        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)

#        pid = self.container.spawn_process(name='dummy_process_for_test',
#            module='pyon.ion.process',
#            cls='SimpleProcess',
#            config={})
#        dummy_process = self.container.proc_manager.procs[pid]

        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object
        try:
            dp_obj = self.rrclient.read(dp_id)
        except NotFound as ex:
            pass
        else:
            self.fail("force_deleted data product was found during read")


class TestActivateInstrumentIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()
        config.bootstrap.use_es = True

        self._start_container()
        self.addCleanup(TestActivateInstrumentIntegration.es_cleanup)

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(node=self.container.node)
        self.datasetclient =  DatasetManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        #setup listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()

    @staticmethod
    def es_cleanup():
        es_host = CFG.get_safe('server.elasticsearch.host', 'localhost')
        es_port = CFG.get_safe('server.elasticsearch.port', '9200')
        es = ep.ElasticSearch(
            host=es_host,
            port=es_port,
            timeout=10
        )
        indexes = STD_INDEXES.keys()
        indexes.append('%s_resources_index' % get_sys_name().lower())
        indexes.append('%s_events_index' % get_sys_name().lower())

        for index in indexes:
            IndexManagementService._es_call(es.river_couchdb_delete,index)
            IndexManagementService._es_call(es.index_delete,index)

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module':'ion.processes.data.stream_granule_logger',
            'class':'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process':{
                'stream_id':stream_id,
                }
        }
        pid = self.processdispatchclient.schedule_process(process_definition_id=logger_procdef_id,
                                                            configuration=configuration)

        return pid

    def _create_notification(self, user_name = '', instrument_id='', product_id=''):
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------

        notification_request_1 = NotificationRequest(   name= 'notification_1',
            origin=instrument_id,
            origin_type="instrument",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest(   name='notification_2',
            origin=product_id,
            origin_type="data product",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------

        user = UserInfo()
        user.name = user_name
        user.contact.email = '*****@*****.**' % user_name

        user_id, _ = self.rrclient.create(user)

        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------

        self.usernotificationclient.create_notification(notification=notification_request_1, user_id=user_id)
        self.usernotificationclient.create_notification(notification=notification_request_2, user_id=user_id)
        log.debug( "test_activateInstrumentSample: create_user_notifications user_id %s", str(user_id) )

        return user_id

    def get_datastore(self, dataset_id):
        dataset = self.datasetclient.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def _check_computed_attributes_of_extended_instrument(self, expected_instrument_device_id = '',extended_instrument = None):

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_instrument.computed.firmware_version, ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.last_data_received_datetime, ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.last_calibration_datetime, ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.uptime, ComputedStringValue)

        self.assertIsInstance(extended_instrument.computed.power_status_roll_up, ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.communications_status_roll_up, ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.data_status_roll_up, ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.location_status_roll_up, ComputedIntValue)

        # the following assert will not work without elasticsearch.
        #self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )
        self.assertEqual(extended_instrument.computed.communications_status_roll_up.value, StatusType.STATUS_WARNING)
        self.assertEqual(extended_instrument.computed.data_status_roll_up.value, StatusType.STATUS_OK)
        self.assertEqual(extended_instrument.computed.power_status_roll_up.value, StatusType.STATUS_WARNING)

        # Verify the computed attribute for user notification requests
        self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )
        notifications = extended_instrument.computed.user_notification_requests.value
        notification = notifications[0]
        self.assertEqual(notification.origin, expected_instrument_device_id)
        self.assertEqual(notification.origin_type, "instrument")
        self.assertEqual(notification.event_type, 'ResourceLifecycleEvent')


    def _check_computed_attributes_of_extended_product(self, expected_data_product_id = '', extended_data_product = None):

        self.assertEqual(expected_data_product_id, extended_data_product._id)
        log.debug("extended_data_product.computed: %s", extended_data_product.computed)

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_data_product.computed.product_download_size_estimated, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.number_active_subscriptions, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_url, ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.stored_data_size, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.recent_granules, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.parameters, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.recent_events, ComputedEventListValue)

        self.assertIsInstance(extended_data_product.computed.provenance, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.user_notification_requests, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.active_user_subscriptions, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.past_user_subscriptions, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.last_granule, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.is_persisted, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_contents_updated, ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.data_datetime, ComputedListValue)

        # exact text here keeps changing to fit UI capabilities.  keep assertion general...
        self.assertTrue( 'ok' in extended_data_product.computed.last_granule.value['quality_flag'] )
        self.assertEqual( 2, len(extended_data_product.computed.data_datetime.value) )

        notifications = extended_data_product.computed.user_notification_requests.value

        notification = notifications[0]
        self.assertEqual(notification.origin, expected_data_product_id)
        self.assertEqual(notification.origin_type, "data product")
        self.assertEqual(notification.event_type, 'DetectionEvent')


    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    @patch.dict(CFG, {'endpoint':{'receive':{'timeout': 60}}})
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug( 'new InstrumentModel id = %s ', instModel_id)



        #Create stream alarms
        """
        test_two_sided_interval
        Test interval alarm and alarm event publishing for a closed
        interval.
        """

        #        kwargs = {
        #            'name' : 'test_sim_warning',
        #            'stream_name' : 'parsed',
        #            'value_id' : 'temp',
        #            'message' : 'Temperature is above test range of 5.0.',
        #            'type' : StreamAlarmType.WARNING,
        #            'upper_bound' : 5.0,
        #            'upper_rel_op' : '<'
        #        }


        kwargs = {
            'name' : 'temperature_warning_interval',
            'stream_name' : 'parsed',
            'value_id' : 'temp',
            'message' : 'Temperature is below the normal range of 50.0 and above.',
            'type' : StreamAlarmType.WARNING,
            'lower_bound' : 50.0,
            'lower_rel_op' : '<'
        }

        # Create alarm object.
        alarm = {}
        alarm['type'] = 'IntervalAlarmDef'
        alarm['kwargs'] = kwargs

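        # The alarm definition is attached to the 'parsed' stream configuration
        # below; the instrument agent is expected to publish a
        # StreamWarningAlarmEvent whenever 'temp' drops below the 50.0 lower bound.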
        raw_config = StreamConfiguration(stream_name='raw', parameter_dictionary_name='ctd_raw_param_dict', records_per_granule=2, granule_publish_rate=5 )
        parsed_config = StreamConfiguration(stream_name='parsed', parameter_dictionary_name='ctd_parsed_param_dict', records_per_granule=2, granule_publish_rate=5, alarms=[alarm] )


        # Create InstrumentAgent
        instAgent_obj = IonObject(RT.InstrumentAgent,
                                  name='agent007',
                                  description="SBE37IMAgent",
                                  driver_uri="http://sddevrepo.oceanobservatories.org/releases/seabird_sbe37smb_ooicore-0.0.1a-py2.7.egg",
                                  stream_configurations = [raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug('test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) ')
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345" )
        instDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, instDevice_id)

        log.debug("test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) " , instDevice_id)


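        # Port agent configuration: connect to the SBE37 device at the host/port
        # taken from CFG (typically a simulated SBE37 in this test setup).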
        port_agent_config = {
            'device_addr':  CFG.device.sbe37.host,
            'device_port':  CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance, name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config = port_agent_config)


        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(instAgentInstance_obj,
                                                                               instAgent_id,
                                                                               instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()


        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_raw_param_dict', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(name='raw', parameter_dictionary_id=raw_pdict_id)


        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
            name='the parsed data',
            description='ctd stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id1 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug( 'new dp_id = %s' , data_product_id1)
        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id1)



        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s' , dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]


        pid = self.create_logger('ctd_parsed', stream_ids[0] )
        self.loggerpids.append(pid)


        dp_obj = IonObject(RT.DataProduct,
            name='the raw data',
            description='raw stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id2 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification( user_name='user_1', instrument_id=instDevice_id, product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification( user_name='user_2', instrument_id=instDevice_id, product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s' , str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s' , dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        #elastic search debug
        es_indexes, _ = self.container.resource_registry.find_resources(restype='ElasticSearchIndex')
        log.debug('ElasticSearch indexes: %s', [i.name for i in es_indexes])
        log.debug('Bootstrap %s', CFG.bootstrap.use_es)


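        # Start the instrument agent instance; run the (blocking) call in a
        # greenlet and wait for it to finish before continuing.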
        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])


        #setup a subscriber to alarm events from the device
        self._events_received= []
        self._event_count = 0
        self._samples_out_of_range = 0
        self._samples_complete = False
        self._async_sample_result = AsyncResult()

        def consume_event(*args, **kwargs):
            log.debug('TestActivateInstrument received ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))
            self._events_received.append(args[0])
            self._event_count = len(self._events_received)
            self._async_sample_result.set()

        self._event_subscriber = EventSubscriber(
            event_type= 'StreamWarningAlarmEvent',   #'StreamWarningAlarmEvent', #  StreamAlarmEvent
            callback=consume_event,
            origin=instDevice_id)
        self._event_subscriber.start()


        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        def stop_subscriber():
            self._event_subscriber.stop()
            self._event_subscriber = None

        self.addCleanup(stop_subscriber)


        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(instAgentInstance_id)
        gate = ProcessStateGate(self.processdispatchclient.read_process,
                                inst_agent_instance_obj.agent_process_id,
                                ProcessStateEnum.RUNNING)
        self.assertTrue(gate.await(30), "The instrument agent instance (%s) did not spawn in 30 seconds" %
                                        inst_agent_instance_obj.agent_process_id)

        log.debug('Instrument agent instance obj: = %s' , str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=inst_agent_instance_obj.agent_process_id,
                                              process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s" , str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s" , str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("(L4-CI-SA-RQ-334): current state after sending go_active command %s" , str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.STOPPED)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
        for i in xrange(10):
            retval = self._ia_client.execute_resource(cmd)
            log.debug("test_activateInstrumentSample: return from sample %s" , str(retval))

        log.debug( "test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s" , str(reply))

        self._samples_complete = True

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.dataretrieverclient.retrieve(self.parsed_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("test_activateInstrumentSample: RDT parsed: %s", str(rdt.pretty_print()) )
        temp_vals = rdt['temp']
        self.assertEquals(len(temp_vals) , 10)
        log.debug("test_activateInstrumentSample: all temp_vals: %s", temp_vals )

        #out_of_range_temp_vals = [i for i in temp_vals if i > 5]
        out_of_range_temp_vals = [i for i in temp_vals if i < 50.0]
        log.debug("test_activateInstrumentSample: Out_of_range_temp_vals: %s", out_of_range_temp_vals )
        self._samples_out_of_range = len(out_of_range_temp_vals)

        # if no bad values were produced, then do not wait for an event
        if self._samples_out_of_range == 0:
            self._async_sample_result.set()


        log.debug("test_activateInstrumentSample: _events_received: %s", self._events_received )
        log.debug("test_activateInstrumentSample: _event_count: %s", self._event_count )

        self._async_sample_result.get(timeout=CFG.endpoint.receive.timeout)

        replay_data = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("RDT raw: %s", str(rdt.pretty_print()) )

        raw_vals = rdt['raw']
        self.assertEquals(len(raw_vals) , 10)


        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """


        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id1, user_id=user_id_1)
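        # Poll (up to 30 seconds) until the notification request created earlier
        # shows up in the extended product's computed attributes.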
        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product( expected_data_product_id = data_product_id1, extended_data_product = extended_product)

        #--------------------------------------------------------------------------------
        #put some events into the eventsdb to test - this should set the comms and data status to WARNING
        #--------------------------------------------------------------------------------

        t = get_ion_ts()
        self.event_publisher.publish_event(  ts_created= t,  event_type = 'DeviceStatusEvent',
            origin = instDevice_id, state=DeviceStatusType.OUT_OF_RANGE, values = [200] )
        self.event_publisher.publish_event( ts_created= t,   event_type = 'DeviceCommsEvent',
            origin = instDevice_id, state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION, lapse_interval_seconds = 20 )

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_1)
        self._check_computed_attributes_of_extended_instrument(expected_instrument_device_id = instDevice_id, extended_instrument = extended_instrument)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(expected_data_product_id = data_product_id2, extended_data_product = extended_product)

        #---------- Put some events into the eventsdb to test - this should set the comms and data status to WARNING  ---------

        t = get_ion_ts()
        self.event_publisher.publish_event(  ts_created= t,  event_type = 'DeviceStatusEvent',
            origin = instDevice_id, state=DeviceStatusType.OUT_OF_RANGE, values = [200] )
        self.event_publisher.publish_event( ts_created= t,   event_type = 'DeviceCommsEvent',
            origin = instDevice_id, state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION, lapse_interval_seconds = 20 )

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(expected_instrument_device_id = instDevice_id, extended_instrument = extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
Exemple #23
0
class TestActivateInstrumentIntegration(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()
        config.bootstrap.use_es = True

        self._start_container()
        self.addCleanup(TestActivateInstrumentIntegration.es_cleanup)

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        #setup listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()

    @staticmethod
    def es_cleanup():
        es_host = CFG.get_safe('server.elasticsearch.host', 'localhost')
        es_port = CFG.get_safe('server.elasticsearch.port', '9200')
        es = ep.ElasticSearch(host=es_host, port=es_port, timeout=10)
        indexes = STD_INDEXES.keys()
        indexes.append('%s_resources_index' % get_sys_name().lower())
        indexes.append('%s_events_index' % get_sys_name().lower())

        for index in indexes:
            IndexManagementService._es_call(es.river_couchdb_delete, index)
            IndexManagementService._es_call(es.index_delete, index)

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name + '_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(
            process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id,
            configuration=configuration)

        return pid

    def _create_notification(self,
                             user_name='',
                             instrument_id='',
                             product_id=''):
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------

        notification_request_1 = NotificationRequest(
            name='notification_1',
            origin=instrument_id,
            origin_type="instrument",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest(
            name='notification_2',
            origin=product_id,
            origin_type="data product",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------

        user = UserInfo()
        user.name = user_name
        user.contact.email = '*****@*****.**' % user_name

        user_id, _ = self.rrclient.create(user)

        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------

        self.usernotificationclient.create_notification(
            notification=notification_request_1, user_id=user_id)
        self.usernotificationclient.create_notification(
            notification=notification_request_2, user_id=user_id)
        log.debug(
            "test_activateInstrumentSample: create_user_notifications user_id %s",
            str(user_id))

        return user_id

    def get_datastore(self, dataset_id):
        dataset = self.datasetclient.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(
            datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def _check_computed_attributes_of_extended_instrument(
            self, expected_instrument_device_id='', extended_instrument=None):

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(
            extended_instrument.computed.last_data_received_datetime,
            ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.uptime,
                              ComputedStringValue)

        self.assertIsInstance(
            extended_instrument.computed.power_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.communications_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.data_status_roll_up,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.location_status_roll_up,
            ComputedIntValue)

        # the following assert will not work without elasticsearch.
        #self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )

        # Verify the computed attribute for user notification requests
        self.assertEqual(
            1,
            len(extended_instrument.computed.user_notification_requests.value))
        notifications = extended_instrument.computed.user_notification_requests.value
        notification = notifications[0]
        self.assertEqual(expected_instrument_device_id, notification.origin)
        self.assertEqual("instrument", notification.origin_type)
        self.assertEqual('ResourceLifecycleEvent', notification.event_type)

    def _check_computed_attributes_of_extended_product(
            self, expected_data_product_id='', extended_data_product=None):

        self.assertEqual(expected_data_product_id, extended_data_product._id)
        log.debug("extended_data_product.computed: %s",
                  extended_data_product.computed)

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(
            extended_data_product.computed.product_download_size_estimated,
            ComputedFloatValue)
        self.assertIsInstance(
            extended_data_product.computed.number_active_subscriptions,
            ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_url,
                              ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.stored_data_size,
                              ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.recent_granules,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.parameters,
                              ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.recent_events,
                              ComputedEventListValue)

        self.assertIsInstance(extended_data_product.computed.provenance,
                              ComputedDictValue)
        self.assertIsInstance(
            extended_data_product.computed.user_notification_requests,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.active_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.past_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.last_granule,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.is_persisted,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_data_product.computed.data_contents_updated,
            ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.data_datetime,
                              ComputedListValue)

        # exact text here keeps changing to fit UI capabilities.  keep assertion general...
        self.assertIn(
            'ok',
            extended_data_product.computed.last_granule.value['quality_flag'])
        self.assertEqual(
            2, len(extended_data_product.computed.data_datetime.value))

        notifications = extended_data_product.computed.user_notification_requests.value

        notification = notifications[0]
        self.assertEqual(expected_data_product_id, notification.origin)
        self.assertEqual("data product", notification.origin_type)
        self.assertEqual('DetectionEvent', notification.event_type)

    @attr('LOCOINT')
    #@unittest.skip('refactoring')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    @patch.dict(CFG, {'endpoint': {'receive': {'timeout': 90}}})
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug('new InstrumentModel id = %s ', instModel_id)

        raw_config = StreamConfiguration(stream_name='raw',
                                         parameter_dictionary_name='raw')
        parsed_config = StreamConfiguration(
            stream_name='parsed',
            parameter_dictionary_name='ctd_parsed_param_dict')

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name='agent007',
            description="SBE37IMAgent",
            driver_uri=DRV_URI_GOOD,
            stream_configurations=[raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(
            instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            'test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) '
        )
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345")
        instDevice_id = self.imsclient.create_instrument_device(
            instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(
            instModel_id, instDevice_id)
        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) ",
            instDevice_id)

        port_agent_config = {
            'device_addr': CFG.device.sbe37.host,
            'device_port': CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance,
                                          name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config,
                                          alerts=[])

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()

        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(
            name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'raw', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(
            name='raw', parameter_dictionary_id=raw_pdict_id)

        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='the parsed data',
                           description='ctd stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id1 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug('new dp_id = %s', data_product_id1)
        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s', dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        dp_obj = IonObject(RT.DataProduct,
                           name='the raw data',
                           description='raw stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id2 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification(user_name='user_1',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification(user_name='user_2',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s', str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s', dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        #elastic search debug
        es_indexes, _ = self.container.resource_registry.find_resources(
            restype='ElasticSearchIndex')
        log.debug('ElasticSearch indexes: %s', [i.name for i in es_indexes])
        log.debug('Bootstrap %s', CFG.bootstrap.use_es)

        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(
                instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])

        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(
            instAgentInstance_id)
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     instDevice_id, ProcessStateEnum.RUNNING)
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" %
            gate.process_id)

        #log.trace('Instrument agent instance obj: = %s' , str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=gate.process_id,
                                              process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s",
                  str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s", str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.INACTIVE, state)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s",
                  str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "(L4-CI-SA-RQ-334): current state after sending go_active command %s",
            str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s", str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.STOPPED, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
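        # Issue the sample command ten times; each acquisition shows up in the raw stream as a ">t" prompt
        # and as one row in the parsed stream (both checked below).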
        for i in xrange(10):
            retval = self._ia_client.execute_resource(cmd)
            log.debug("test_activateInstrumentSample: return from sample %s",
                      str(retval))

        log.debug("test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s",
                  str(reply))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever
        #--------------------------------------------------------------------------------

        replay_data_raw = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data_raw, Granule)
        rdt_raw = RecordDictionaryTool.load_from_granule(replay_data_raw)
        log.debug("RDT raw: %s", str(rdt_raw.pretty_print()))

        self.assertIn('raw', rdt_raw)
        raw_vals = rdt_raw['raw']

        all_raw = "".join(raw_vals)

        # look for 't' entered after a prompt -- ">t"
        t_commands = all_raw.count(">t")

        if 10 != t_commands:
            log.error("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals):
                log.error("raw val %s: %s", i, [r])
            self.fail("Expected 10 't' strings in raw_vals, got %s" %
                      t_commands)
        else:
            log.debug("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals):
                log.debug("raw val %s: %s", i, [r])

        replay_data_parsed = self.dataretrieverclient.retrieve(
            self.parsed_dataset)
        self.assertIsInstance(replay_data_parsed, Granule)
        rdt_parsed = RecordDictionaryTool.load_from_granule(replay_data_parsed)
        log.debug("test_activateInstrumentSample: RDT parsed: %s",
                  str(rdt_parsed.pretty_print()))
        self.assertIn('temp', rdt_parsed)
        temp_vals = rdt_parsed['temp']
        pressure_vals = rdt_parsed['pressure']
        if 10 != len(temp_vals):
            log.error("%s temp_vals: %s", len(temp_vals), temp_vals)
            self.fail("Expected 10 temp_vals, got %s" % len(temp_vals))

        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """

        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id1, user_id=user_id_1)

        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.
                       value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id1,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_1)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id2,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
class TestDataProductManagementServiceIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli           = DataProductManagementServiceClient()
        self.rrclient           = ResourceRegistryServiceClient()
        self.damsclient         = DataAcquisitionManagementServiceClient()
        self.pubsubcli          = PubsubManagementServiceClient()
        self.ingestclient       = IngestionManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc               = UserNotificationServiceClient()
        self.data_retriever     = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(datastore_name)
        self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

        self.process_definitions  = {}
        ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class' :'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space       = 'science_granule_ingestion'
        self.exchange_point       = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'],configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)

    def cleaning_up(self):
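        # Cancel the ingestion worker processes started in setUp, clean up ingestion subscriptions,
        # and delete the exchange queues and exchange points that were created.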
        for pid in self.pids:
            log.debug("number of pids to be terminated: %s", len(self.pids))
            try:
                self.process_dispatcher.cancel_process(pid)
                log.debug("Terminated the process: %s", pid)
            except:
                log.debug("could not terminate the process id: %s" % pid)
        IngestionManagementIntTest.clean_subscriptions()

        for xn in self.exchange_names:
            xni = self.container.ex_manager.create_xn_queue(xn)
            xni.delete()
        for xp in self.exchange_points:
            xpi = self.container.ex_manager.create_xp(xp)
            xpi.delete()

    def get_datastore(self, dataset_id):
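        # Look up the dataset's datastore (science-data profile) through the container's datastore manager.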
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore


    @attr('EXT')
    @attr('PREP')
    def test_create_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict')
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=parameter_dictionary._id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------

        # Generic time-series data domain creation
        tdom, sdom = time_series_domain()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom.dump(), 
            spatial_domain = sdom.dump())
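        # The bounds below describe a simple +/-10 degree box; geospatial_point_center is derived from them
        # (lat 0.0, asserted after creation).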

        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 10.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = -10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = -10.0
        dp_obj.ooi_product_name = "PRODNAME"

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product( data_product= dp_obj,
                                            stream_definition_id=ctd_stream_def_id)
        # Assert that the data product has an associated stream at this stage
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, RT.Stream, True)
        self.assertNotEquals(len(stream_ids), 0)

        # Assert that the data product has an associated stream def at this stage
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStreamDefinition, RT.StreamDefinition, True)
        self.assertNotEquals(len(stream_ids), 0)

        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)
        self.assertEquals(dp_obj.geospatial_point_center.lat, 0.0)
        log.debug('Created data product %s', dp_obj)
        #------------------------------------------------------------------------------------------------
        # test creating a new data product with  a stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('Creating new data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
            name='DP2',
            description='some new dp',
            temporal_domain = tdom.dump(),
            spatial_domain = sdom.dump())

        dp_id2 = self.dpsc_cli.create_data_product(dp_obj, ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id2)
        log.debug('new dp_id = %s' % dp_id2)

        #------------------------------------------------------------------------------------------------
        #make sure data product is associated with stream def
        #------------------------------------------------------------------------------------------------
        streamdefs = []
        streams, _ = self.rrclient.find_objects(dp_id2, PRED.hasStream, RT.Stream, True)
        for s in streams:
            log.debug("Checking stream %s" % s)
            sdefs, _ = self.rrclient.find_objects(s, PRED.hasStreamDefinition, RT.StreamDefinition, True)
            for sd in sdefs:
                log.debug("Checking streamdef %s" % sd)
                streamdefs.append(sd)
        self.assertIn(ctd_stream_def_id, streamdefs)

        group_names = self.dpsc_cli.get_data_product_group_list()
        self.assertIn("PRODNAME", group_names)


        # test reading a non-existent data product
        log.debug('reading non-existent data product')

        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product('some_fake_id')

        # update a data product (tests read also)
        log.debug('Updating data product')
        # first get the existing dp object
        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now tweak the object
        dp_obj.description = 'the very first dp'
        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 20.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = -20.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 20.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = -20.0
        # now write the dp back to the registry
        update_result = self.dpsc_cli.update_data_product(dp_obj)


        # now get the dp back to see if it was updated
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertEquals(dp_obj.description,'the very first dp')
        self.assertEquals(dp_obj.geospatial_point_center.lat, 0.0)
        log.debug('Updated data product %s', dp_obj)

        #test extension
        extended_product = self.dpsc_cli.get_data_product_extension(dp_id)
        self.assertEqual(dp_id, extended_product._id)
        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.product_download_size_estimated.status)
        self.assertEqual(0, extended_product.computed.product_download_size_estimated.value)

        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.parameters.status)
        #log.debug("test_create_data_product: parameters %s" % extended_product.computed.parameters.value)


        def ion_object_encoder(obj):
            return obj.__dict__


        #test prepare for create
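        # Without a data product id, the prepare object has an empty _id and only lists the available
        # stream definitions (no dataset associations yet).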
        data_product_data = self.dpsc_cli.prepare_data_product_support()

        #print simplejson.dumps(data_product_data, default=ion_object_encoder, indent= 2)

        self.assertEqual(data_product_data._id, "")
        self.assertEqual(data_product_data.type_, OT.DataProductPrepareSupport)
        self.assertEqual(len(data_product_data.associations['StreamDefinition'].resources), 2)
        self.assertEqual(len(data_product_data.associations['Dataset'].resources), 0)
        self.assertEqual(len(data_product_data.associations['StreamDefinition'].associated_resources), 0)
        self.assertEqual(len(data_product_data.associations['Dataset'].associated_resources), 0)

        #test prepare for update
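        # With dp_id supplied, the prepare object also reports the product's existing StreamDefinition and Dataset associations.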
        data_product_data = self.dpsc_cli.prepare_data_product_support(dp_id)

        #print simplejson.dumps(data_product_data, default=ion_object_encoder, indent= 2)

        self.assertEqual(data_product_data._id, dp_id)
        self.assertEqual(data_product_data.type_, OT.DataProductPrepareSupport)
        self.assertEqual(len(data_product_data.associations['StreamDefinition'].resources), 2)

        self.assertEqual(len(data_product_data.associations['Dataset'].resources), 1)

        self.assertEqual(len(data_product_data.associations['StreamDefinition'].associated_resources), 1)
        self.assertEqual(data_product_data.associations['StreamDefinition'].associated_resources[0].s, dp_id)

        self.assertEqual(len(data_product_data.associations['Dataset'].associated_resources), 1)
        self.assertEqual(data_product_data.associations['Dataset'].associated_resources[0].s, dp_id)

        # now 'delete' the data product
        log.debug("deleting data product: %s" % dp_id)
        self.dpsc_cli.delete_data_product(dp_id)

        # Assert that there are no associated streams leftover after deleting the data product
        stream_ids, assoc_ids = self.rrclient.find_objects(dp_id, PRED.hasStream, RT.Stream, True)
        self.assertEquals(len(stream_ids), 0)
        self.assertEquals(len(assoc_ids), 0)

        self.dpsc_cli.force_delete_data_product(dp_id)

        # now try to get the deleted dp object
        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # Get the events corresponding to the data product
        ret = self.unsc.get_recent_events(resource_id=dp_id)
        events = ret.value

        for event in events:
            log.debug("event time: %s" % event.ts_created)

        self.assertTrue(len(events) > 0)

    def test_data_product_stream_def(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)

        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()

        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)
        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        stream_def_id = self.dpsc_cli.get_data_product_stream_definition(dp_id)
        self.assertEquals(ctd_stream_def_id, stream_def_id)


    def test_derived_data_product(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='ctd parsed', parameter_dictionary_id=pdict_id)
        self.addCleanup(self.pubsubcli.delete_stream_definition, ctd_stream_def_id)

        tdom, sdom = time_series_domain()

        dp = DataProduct(name='Instrument DP', temporal_domain=tdom.dump(), spatial_domain=sdom.dump())
        dp_id = self.dpsc_cli.create_data_product(dp, stream_definition_id=ctd_stream_def_id)
        self.addCleanup(self.dpsc_cli.force_delete_data_product, dp_id)

        self.dpsc_cli.activate_data_product_persistence(dp_id)
        self.addCleanup(self.dpsc_cli.suspend_data_product_persistence, dp_id)


        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        dataset_id = dataset_ids[0]
        
        # Make the derived data product
        simple_stream_def_id = self.pubsubcli.create_stream_definition(name='TEMPWAT stream def', parameter_dictionary_id=pdict_id, available_fields=['time','temp'])
        tempwat_dp = DataProduct(name='TEMPWAT')
        tempwat_dp_id = self.dpsc_cli.create_data_product(tempwat_dp, stream_definition_id=simple_stream_def_id, parent_data_product_id=dp_id)
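        # The derived (child) product shares the parent's data but exposes only the 'time' and 'temp' fields
        # via the TEMPWAT stream definition (verified after ingestion).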
        self.addCleanup(self.dpsc_cli.delete_data_product, tempwat_dp_id)
        # Check that the streams associated with the data product are persisted with
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)
        rdt['pressure'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id,route)
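        # Subscribe to DatasetModified events on the dataset so we know the published granule has been ingested
        # before retrieving it.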
        
        dataset_modified = Event()
        def cb(*args, **kwargs):
            dataset_modified.set()
        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id, auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        tempwat_dataset_ids, _ = self.rrclient.find_objects(tempwat_dp_id, PRED.hasDataset, id_only=True)
        tempwat_dataset_id = tempwat_dataset_ids[0]
        granule = self.data_retriever.retrieve(tempwat_dataset_id, delivery_format=simple_stream_def_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_equal(rdt['time'], np.arange(20))
        self.assertEquals(set(rdt.fields), set(['time','temp']))


    def test_activate_suspend_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------
        # Construct temporal and spatial Coordinate Reference System objects
        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()

        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)
        
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        dataset_id = dataset_ids[0]


        # Check that the streams associated with the data product are persisted with
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id,route)
        
        dataset_modified = Event()
        def cb(*args, **kwargs):
            dataset_modified.set()
        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id, auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_ids[0])
        self.assertIsInstance(replay_data, Granule)

        log.debug("The data retriever was able to replay the dataset that was attached to the data product "
                  "we wanted to be persisted. Therefore the data product was indeed persisted with "
                  "otherwise we could not have retrieved its dataset using the data retriever. Therefore "
                  "this demonstration shows that L4-CI-SA-RQ-267 is satisfied: 'Data product management shall persist data products'")

        data_product_object = self.rrclient.read(dp_id)
        self.assertEquals(data_product_object.name,'DP1')
        self.assertEquals(data_product_object.description,'some new dp')

        log.debug("Towards L4-CI-SA-RQ-308: 'Data product management shall persist data product metadata'. "
                  " Attributes in create for the data product obj, name= '%s', description='%s', match those of object from the "
                  "resource registry, name='%s', desc='%s'" % (dp_obj.name, dp_obj.description,data_product_object.name,
                                                           data_product_object.description))

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)
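        # While persistence is suspended, publishing another granule must not modify the dataset
        # (no DatasetModified event expected).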


        dataset_modified.clear()

        rdt['time'] = np.arange(20,40)

        publisher.publish(rdt.to_granule())
        self.assertFalse(dataset_modified.wait(2))

        self.dpsc_cli.activate_data_product_persistence(dp_id)
        dataset_modified.clear()

        publisher.publish(rdt.to_granule())
        self.assertTrue(dataset_modified.wait(30))

        granule = self.data_retriever.retrieve(dataset_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_almost_equal(rdt['time'], np.arange(40))


        dataset_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasDataset, id_only=True)
        self.assertEquals(len(dataset_ids), 1)

        self.dpsc_cli.suspend_data_product_persistence(dp_id)
        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object

        with self.assertRaises(NotFound):
            dp_obj = self.rrclient.read(dp_id)

    def test_lookup_values(self):
        ph = ParameterHelper(self.dataset_management, self.addCleanup)
        pdict_id = ph.create_lookups()
        stream_def_id = self.pubsubcli.create_stream_definition('lookup', parameter_dictionary_id=pdict_id)
        self.addCleanup(self.pubsubcli.delete_stream_definition, stream_def_id)

        data_product = DataProduct(name='lookup data product')
        tdom, sdom = time_series_domain()
        data_product.temporal_domain = tdom.dump()
        data_product.spatial_domain = sdom.dump()

        data_product_id = self.dpsc_cli.create_data_product(data_product, stream_definition_id=stream_def_id)
        self.addCleanup(self.dpsc_cli.delete_data_product, data_product_id)
        data_producer = DataProducer(name='producer')
        data_producer.producer_context = DataProcessProducerContext()
        data_producer.producer_context.configuration['qc_keys'] = ['offset_document']
        data_producer_id, _ = self.rrclient.create(data_producer)
        self.addCleanup(self.rrclient.delete, data_producer_id)
        assoc,_ = self.rrclient.create_association(subject=data_product_id, object=data_producer_id, predicate=PRED.hasDataProducer)
        self.addCleanup(self.rrclient.delete_association, assoc)

        document_keys = self.damsclient.list_qc_references(data_product_id)
            
        self.assertEquals(document_keys, ['offset_document'])
        svm = StoredValueManager(self.container)
        svm.stored_value_cas('offset_document', {'offset_a':2.0})
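        # The 'calibrated' parameter is derived from 'temp' plus offset_a taken from this lookup document
        # (20.0 + 2.0 == 22.0, asserted below).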
        self.dpsc_cli.activate_data_product_persistence(data_product_id)
        dataset_ids, _ = self.rrclient.find_objects(subject=data_product_id, predicate=PRED.hasDataset, id_only=True)
        dataset_id = dataset_ids[0]

        dataset_monitor = DatasetMonitor(dataset_id)
        self.addCleanup(dataset_monitor.stop)

        rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        rdt['time'] = [0]
        rdt['temp'] = [20.]
        granule = rdt.to_granule()

        stream_ids, _ = self.rrclient.find_objects(subject=data_product_id, predicate=PRED.hasStream, id_only=True)
        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        publisher = StandaloneStreamPublisher(stream_id, route)
        publisher.publish(granule)

        self.assertTrue(dataset_monitor.event.wait(10))

        granule = self.data_retriever.retrieve(dataset_id)
        rdt2 = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_equal(rdt['temp'], rdt2['temp'])
        np.testing.assert_array_almost_equal(rdt2['calibrated'], np.array([22.0]))


        svm.stored_value_cas('updated_document', {'offset_a':3.0})
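        # Publishing an ExternalReferencesUpdatedEvent (below) causes the lookup document to be reloaded,
        # so subsequent samples are calibrated with offset_a=3.0 (20.0 + 3.0 == 23.0).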
        dataset_monitor = DatasetMonitor(dataset_id)
        self.addCleanup(dataset_monitor.stop)
        ep = EventPublisher(event_type=OT.ExternalReferencesUpdatedEvent)
        ep.publish_event(origin=data_product_id, reference_keys=['updated_document'])

        rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        rdt['time'] = [1]
        rdt['temp'] = [20.]
        granule = rdt.to_granule()
        gevent.sleep(2) # Yield so that the event goes through
        publisher.publish(granule)
        self.assertTrue(dataset_monitor.event.wait(10))

        granule = self.data_retriever.retrieve(dataset_id)
        rdt2 = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_equal(rdt2['temp'],np.array([20.,20.]))
        np.testing.assert_array_almost_equal(rdt2['calibrated'], np.array([22.0,23.0]))
class TestDataProductManagementServiceIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli = DataProductManagementServiceClient(node=self.container.node)
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
        self.process_dispatcher   = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(datastore_name)
        self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

        self.process_definitions  = {}
        ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class' :'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space       = 'science_granule_ingestion'
        self.exchange_point       = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'],configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)

    def cleaning_up(self):
        for pid in self.pids:
            log.debug("number of pids to be terminated: %s", len(self.pids))
            try:
                self.process_dispatcher.cancel_process(pid)
                log.debug("Terminated the process: %s", pid)
            except:
                log.debug("could not terminate the process id: %s" % pid)
        IngestionManagementIntTest.clean_subscriptions()

        for xn in self.exchange_names:
            xni = self.container.ex_manager.create_xn_queue(xn)
            xni.delete()
        for xp in self.exchange_points:
            xpi = self.container.ex_manager.create_xp(xp)
            xpi.delete()

    def get_datastore(self, dataset_id):
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore


    def test_create_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict')
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=parameter_dictionary_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------

        # Generic time-series data domain creation
        tdom, sdom = time_series_domain()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom.dump(), 
            spatial_domain = sdom.dump())

        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 200.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = 100.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 50.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = 100.0

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product( data_product= dp_obj,
                                            stream_definition_id=ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)
        self.assertEquals(dp_obj.geospatial_point_center.lat, 150.0)
        log.debug('Created data product %s', dp_obj)
        #------------------------------------------------------------------------------------------------
        # test creating a new data product with  a stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('Creating new data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
            name='DP2',
            description='some new dp',
            temporal_domain = tdom.dump(),
            spatial_domain = sdom.dump())

        dp_id2 = self.dpsc_cli.create_data_product(dp_obj, ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id2)
        log.debug('new dp_id = %s' % dp_id2)

        #------------------------------------------------------------------------------------------------
        #make sure data product is associated with stream def
        #------------------------------------------------------------------------------------------------
        streamdefs = []
        streams, _ = self.rrclient.find_objects(dp_id2, PRED.hasStream, RT.Stream, True)
        for s in streams:
            log.debug("Checking stream %s" % s)
            sdefs, _ = self.rrclient.find_objects(s, PRED.hasStreamDefinition, RT.StreamDefinition, True)
            for sd in sdefs:
                log.debug("Checking streamdef %s" % sd)
                streamdefs.append(sd)
        self.assertIn(ctd_stream_def_id, streamdefs)


        # test reading a non-existent data product
        log.debug('reading non-existent data product')

        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product('some_fake_id')

        # update a data product (tests read also)
        log.debug('Updating data product')
        # first get the existing dp object
        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now tweak the object
        dp_obj.description = 'the very first dp'
        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 300.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = 200.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 150.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = 200.0
        # now write the dp back to the registry
        update_result = self.dpsc_cli.update_data_product(dp_obj)


        # now get the dp back to see if it was updated
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertEquals(dp_obj.description,'the very first dp')
        self.assertEquals(dp_obj.geospatial_point_center.lat, 250.0)
        log.debug('Updated data product %s', dp_obj)

        #test extension
        extended_product = self.dpsc_cli.get_data_product_extension(dp_id)
        self.assertEqual(dp_id, extended_product._id)
        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.product_download_size_estimated.status)
        self.assertEqual(0, extended_product.computed.product_download_size_estimated.value)

        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.parameters.status)
        #log.debug("test_create_data_product: parameters %s" % extended_product.computed.parameters.value)

        # now 'delete' the data product
        log.debug("deleting data product: %s" % dp_id)
        self.dpsc_cli.delete_data_product(dp_id)
        self.dpsc_cli.force_delete_data_product(dp_id)

        # now try to get the deleted dp object
        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # Get the events corresponding to the data product
        ret = self.unsc.get_recent_events(resource_id=dp_id)
        events = ret.value

        for event in events:
            log.debug("event time: %s" % event.ts_created)

        self.assertTrue(len(events) > 0)

    def test_data_product_stream_def(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)

        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)
        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        stream_def_id = self.dpsc_cli.get_data_product_stream_definition(dp_id)
        self.assertEquals(ctd_stream_def_id, stream_def_id)



    def test_activate_suspend_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------
        # Construct temporal and spatial Coordinate Reference System objects
        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)
        
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        self.get_datastore(dataset_ids[0])


        # Check that the streams associated with the data product are persisted with
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_ids[0])
        self.assertIsInstance(replay_data, Granule)

        log.debug("The data retriever was able to replay the dataset that was attached to the data product "
                  "we wanted to be persisted. Therefore the data product was indeed persisted with "
                  "otherwise we could not have retrieved its dataset using the data retriever. Therefore "
                  "this demonstration shows that L4-CI-SA-RQ-267 is satisfied: 'Data product management shall persist data products'")

        data_product_object = self.rrclient.read(dp_id)
        self.assertEquals(data_product_object.name,'DP1')
        self.assertEquals(data_product_object.description,'some new dp')

        log.debug("Towards L4-CI-SA-RQ-308: 'Data product management shall persist data product metadata'. "
                  " Attributes in create for the data product obj, name= '%s', description='%s', match those of object from the "
                  "resource registry, name='%s', desc='%s'" % (dp_obj.name, dp_obj.description,data_product_object.name,
                                                           data_product_object.description))

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)

        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object

        with self.assertRaises(NotFound):
            dp_obj = self.rrclient.read(dp_id)
Exemple #26
0
class TestActivateInstrumentIntegration(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()
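        # Enable the ElasticSearch bootstrap; the extended-resource notification checks later in the test depend on it.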
        config.bootstrap.use_es = True

        self._start_container()
        self.addCleanup(TestActivateInstrumentIntegration.es_cleanup)

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        #setup listerner vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()

    @staticmethod
    def es_cleanup():
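        # Delete the CouchDB rivers and ElasticSearch indexes for this sys_name so repeated runs start from a clean index state.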
        es_host = CFG.get_safe('server.elasticsearch.host', 'localhost')
        es_port = CFG.get_safe('server.elasticsearch.port', '9200')
        es = ep.ElasticSearch(host=es_host, port=es_port, timeout=10)
        indexes = STD_INDEXES.keys()
        indexes.append('%s_resources_index' % get_sys_name().lower())
        indexes.append('%s_events_index' % get_sys_name().lower())

        for index in indexes:
            IndexManagementService._es_call(es.river_couchdb_delete, index)
            IndexManagementService._es_call(es.index_delete, index)

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name + '_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(
            process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id,
            configuration=configuration)

        return pid

    def _create_notification(self,
                             user_name='',
                             instrument_id='',
                             product_id=''):
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------

        notification_request_1 = NotificationRequest(
            name='notification_1',
            origin=instrument_id,
            origin_type="instrument",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest(
            name='notification_2',
            origin=product_id,
            origin_type="data product",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------

        user = UserInfo()
        user.name = user_name
        user.contact.email = '*****@*****.**' % user_name

        user_id, _ = self.rrclient.create(user)

        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------

        self.usernotificationclient.create_notification(
            notification=notification_request_1, user_id=user_id)
        self.usernotificationclient.create_notification(
            notification=notification_request_2, user_id=user_id)
        log.debug(
            "test_activateInstrumentSample: create_user_notifications user_id %s",
            str(user_id))

        return user_id

    def get_datastore(self, dataset_id):
        dataset = self.datasetclient.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(
            datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def _check_computed_attributes_of_extended_instrument(
            self, expected_instrument_device_id='', extended_instrument=None):

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_instrument.computed.firmware_version,
                              ComputedFloatValue)
        self.assertIsInstance(
            extended_instrument.computed.last_data_received_datetime,
            ComputedFloatValue)
        self.assertIsInstance(
            extended_instrument.computed.last_calibration_datetime,
            ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.uptime,
                              ComputedStringValue)

        self.assertIsInstance(
            extended_instrument.computed.power_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.communications_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.data_status_roll_up,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.location_status_roll_up,
            ComputedIntValue)

        # the following assert will not work without elasticsearch.
        #self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )
        self.assertEqual(
            extended_instrument.computed.communications_status_roll_up.value,
            StatusType.STATUS_WARNING)
        self.assertEqual(
            extended_instrument.computed.data_status_roll_up.value,
            StatusType.STATUS_OK)
        self.assertEqual(
            extended_instrument.computed.power_status_roll_up.value,
            StatusType.STATUS_WARNING)

        # Verify the computed attribute for user notification requests
        self.assertEqual(
            1,
            len(extended_instrument.computed.user_notification_requests.value))
        notifications = extended_instrument.computed.user_notification_requests.value
        notification = notifications[0]
        self.assertEqual(notification.origin, expected_instrument_device_id)
        self.assertEqual(notification.origin_type, "instrument")
        self.assertEqual(notification.event_type, 'ResourceLifecycleEvent')

    def _check_computed_attributes_of_extended_product(
            self, expected_data_product_id='', extended_data_product=None):

        self.assertEqual(expected_data_product_id, extended_data_product._id)
        log.debug("extended_data_product.computed: %s",
                  extended_data_product.computed)

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(
            extended_data_product.computed.product_download_size_estimated,
            ComputedIntValue)
        self.assertIsInstance(
            extended_data_product.computed.number_active_subscriptions,
            ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_url,
                              ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.stored_data_size,
                              ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.recent_granules,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.parameters,
                              ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.recent_events,
                              ComputedEventListValue)

        self.assertIsInstance(extended_data_product.computed.provenance,
                              ComputedDictValue)
        self.assertIsInstance(
            extended_data_product.computed.user_notification_requests,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.active_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.past_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.last_granule,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.is_persisted,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_data_product.computed.data_contents_updated,
            ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.data_datetime,
                              ComputedListValue)

        # exact text here keeps changing to fit UI capabilities.  keep assertion general...
        self.assertTrue('ok' in extended_data_product.computed.last_granule.
                        value['quality_flag'])
        self.assertEqual(
            2, len(extended_data_product.computed.data_datetime.value))

        notifications = extended_data_product.computed.user_notification_requests.value

        notification = notifications[0]
        self.assertEqual(notification.origin, expected_data_product_id)
        self.assertEqual(notification.origin_type, "data product")
        self.assertEqual(notification.event_type, 'DetectionEvent')

    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    @patch.dict(CFG, {'endpoint': {'receive': {'timeout': 60}}})
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug('new InstrumentModel id = %s ', instModel_id)

        #Create stream alarms
        """
        test_two_sided_interval
        Test interval alarm and alarm event publishing for a closed
        interval.
        """

        #        kwargs = {
        #            'name' : 'test_sim_warning',
        #            'stream_name' : 'parsed',
        #            'value_id' : 'temp',
        #            'message' : 'Temperature is above test range of 5.0.',
        #            'type' : StreamAlarmType.WARNING,
        #            'upper_bound' : 5.0,
        #            'upper_rel_op' : '<'
        #        }
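
        # The active alarm below raises a WARNING whenever a parsed 'temp'
        # value falls below the 50.0 lower bound (a one-sided interval).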

        kwargs = {
            'name': 'temperature_warning_interval',
            'stream_name': 'parsed',
            'value_id': 'temp',
            'message':
            'Temperature is below the normal range of 50.0 and above.',
            'type': StreamAlarmType.WARNING,
            'lower_bound': 50.0,
            'lower_rel_op': '<'
        }

        # Create alarm object.
        alarm = {}
        alarm['type'] = 'IntervalAlarmDef'
        alarm['kwargs'] = kwargs
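
        # The stream configurations below define the agent's 'raw' and 'parsed'
        # streams; only the 'parsed' stream carries the interval alarm defined
        # above.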

        raw_config = StreamConfiguration(
            stream_name='raw',
            parameter_dictionary_name='ctd_raw_param_dict',
            records_per_granule=2,
            granule_publish_rate=5)
        parsed_config = StreamConfiguration(
            stream_name='parsed',
            parameter_dictionary_name='ctd_parsed_param_dict',
            records_per_granule=2,
            granule_publish_rate=5,
            alarms=[alarm])

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name='agent007',
            description="SBE37IMAgent",
            driver_uri=
            "http://sddevrepo.oceanobservatories.org/releases/seabird_sbe37smb_ooicore-0.0.1a-py2.7.egg",
            stream_configurations=[raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(
            instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            'test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) '
        )
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345")
        instDevice_id = self.imsclient.create_instrument_device(
            instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(
            instModel_id, instDevice_id)

        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) ",
            instDevice_id)
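
        # Port agent configuration: the port agent process bridges the driver to
        # the SBE37 (or its simulator); host and port values come from the pyon
        # CFG for this deployment.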

        port_agent_config = {
            'device_addr': CFG.device.sbe37.host,
            'device_port': CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance,
                                          name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config)

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()

        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(
            name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_raw_param_dict', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(
            name='raw', parameter_dictionary_id=raw_pdict_id)

        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='the parsed data',
                           description='ctd stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id1 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug('new dp_id = %s', data_product_id1)
        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s', dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        dp_obj = IonObject(RT.DataProduct,
                           name='the raw data',
                           description='raw stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id2 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
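        # (_create_notification is a helper defined earlier in this test class,
        # not shown here; it presumably registers the NotificationRequests whose
        # computed attributes are verified later in this test.)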
        user_id_1 = self._create_notification(user_name='user_1',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification(user_name='user_2',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s', str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s', dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        #elastic search debug
        es_indexes, _ = self.container.resource_registry.find_resources(
            restype='ElasticSearchIndex')
        log.debug('ElasticSearch indexes: %s', [i.name for i in es_indexes])
        log.debug('Bootstrap %s', CFG.bootstrap.use_es)

        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(
                instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])
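        # joinall blocks here until the spawned greenlet finishes, i.e. until
        # start_instrument_agent_instance() has returned.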

        #setup a subscriber to alarm events from the device
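        # The callback below records every alarm event received and releases the
        # AsyncResult so the test can block until at least one alarm arrives.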
        self._events_received = []
        self._event_count = 0
        self._samples_out_of_range = 0
        self._samples_complete = False
        self._async_sample_result = AsyncResult()

        def consume_event(*args, **kwargs):
            log.debug(
                'TestActivateInstrument received ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))
            self._events_received.append(args[0])
            self._event_count = len(self._events_received)
            self._async_sample_result.set()

        self._event_subscriber = EventSubscriber(
            event_type='StreamWarningAlarmEvent',  # or the more general StreamAlarmEvent
            callback=consume_event,
            origin=instDevice_id)
        self._event_subscriber.start()

        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        def stop_subscriber():
            self._event_subscriber.stop()
            self._event_subscriber = None

        self.addCleanup(stop_subscriber)

        #wait for start
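        # ProcessStateGate blocks until the process dispatcher reports the agent
        # process as RUNNING, or until the 30 second timeout below expires.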
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(
            instAgentInstance_id)
        gate = ProcessStateGate(self.processdispatchclient.read_process,
                                inst_agent_instance_obj.agent_process_id,
                                ProcessStateEnum.RUNNING)
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" %
            inst_agent_instance_obj.agent_process_id)

        log.debug('Instrument agent instance obj: = %s',
                  str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(
            instDevice_id,
            to_name=inst_agent_instance_obj.agent_process_id,
            process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s",
                  str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s", str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s",
                  str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "(L4-CI-SA-RQ-334): current state after sending go_active command %s",
            str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s", str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.STOPPED)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
        for i in xrange(10):
            retval = self._ia_client.execute_resource(cmd)
            log.debug("test_activateInstrumentSample: return from sample %s",
                      str(retval))

        log.debug("test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s",
                  str(reply))

        self._samples_complete = True

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever
        #--------------------------------------------------------------------------------
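        # retrieve() returns a Granule; RecordDictionaryTool unpacks it into
        # per-parameter value arrays (one entry per acquired sample).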

        replay_data = self.dataretrieverclient.retrieve(self.parsed_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("test_activateInstrumentSample: RDT parsed: %s",
                  str(rdt.pretty_print()))
        temp_vals = rdt['temp']
        self.assertEquals(len(temp_vals), 10)
        log.debug("test_activateInstrumentSample: all temp_vals: %s",
                  temp_vals)

        #out_of_range_temp_vals = [i for i in temp_vals if i > 5]
        out_of_range_temp_vals = [i for i in temp_vals if i < 50.0]
        log.debug("test_activateInstrumentSample: Out_of_range_temp_vals: %s",
                  out_of_range_temp_vals)
        self._samples_out_of_range = len(out_of_range_temp_vals)

        # if no bad values were produced, then do not wait for an event
        if self._samples_out_of_range == 0:
            self._async_sample_result.set()

        log.debug("test_activateInstrumentSample: _events_received: %s",
                  self._events_received)
        log.debug("test_activateInstrumentSample: _event_count: %s",
                  self._event_count)

        self._async_sample_result.get(timeout=CFG.endpoint.receive.timeout)

        replay_data = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("RDT raw: %s", str(rdt.pretty_print()))

        raw_vals = rdt['raw']
        self.assertEquals(len(raw_vals), 10)

        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """

        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
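        # Fetch the extension for user_1, then poll (up to 30 seconds) until its
        # computed user_notification_requests attribute reflects the notification
        # created above.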
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id1, user_id=user_id_1)

        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.
                       value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id1,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        #put some events into the eventsdb to test - this should set the comms and data status to WARNING
        #--------------------------------------------------------------------------------

        t = get_ion_ts()
        self.event_publisher.publish_event(ts_created=t,
                                           event_type='DeviceStatusEvent',
                                           origin=instDevice_id,
                                           state=DeviceStatusType.OUT_OF_RANGE,
                                           values=[200])
        self.event_publisher.publish_event(
            ts_created=t,
            event_type='DeviceCommsEvent',
            origin=instDevice_id,
            state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
            lapse_interval_seconds=20)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_1)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id2,
            extended_data_product=extended_product)

        #---------- Put some events into the eventsdb to test - this should set the comms and data status to WARNING  ---------

        t = get_ion_ts()
        self.event_publisher.publish_event(ts_created=t,
                                           event_type='DeviceStatusEvent',
                                           origin=instDevice_id,
                                           state=DeviceStatusType.OUT_OF_RANGE,
                                           values=[200])
        self.event_publisher.publish_event(
            ts_created=t,
            event_type='DeviceCommsEvent',
            origin=instDevice_id,
            state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
            lapse_interval_seconds=20)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
Exemple #27
0
class UserNotificationIntTest(IonIntegrationTestCase):
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2dm.yml')

        self.unsc = UserNotificationServiceClient(node=self.container.node)
        self.rrc = ResourceRegistryServiceClient(node=self.container.node)
        self.imc = IdentityManagementServiceClient(node=self.container.node)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    def test_email(self):

        proc1 = self.container.proc_manager.procs_by_name['user_notification']

        # Create a user and get the user_id
        user = UserInfo(name='new_user')
        user_id, _ = self.rrc.create(user)

        # set up....
        notification_id = self.unsc.create_email(
            event_type='ResourceLifecycleEvent',
            event_subtype=None,
            origin='Some_Resource_Agent_ID1',
            origin_type=None,
            user_id=user_id,
            email='*****@*****.**',
            mode=DeliveryMode.DIGEST,
            message_header='message_header',
            parser='parser',
            period=1)

        #------------------------------------------------------------------------------------------------------
        # Setup so as to be able to get the message and headers going into the
        # subscription callback method of the EmailEventProcessor
        #------------------------------------------------------------------------------------------------------

        # publish an event for each notification to generate the emails
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event")

        msg_tuple = proc1.event_processors[
            notification_id].smtp_client.sentmail.get(timeout=4)

        self.assertTrue(proc1.event_processors[notification_id].smtp_client.
                        sentmail.empty())

        message = msg_tuple[2]
        list_lines = message.split("\n")

        #-------------------------------------------------------
        # parse the message body
        #-------------------------------------------------------

        message_dict = {}
        for line in list_lines:
            key_item = line.split(": ")
            if key_item[0] == 'Subject':
                message_dict['Subject'] = key_item[1] + key_item[2]
            else:
                try:
                    message_dict[key_item[0]] = key_item[1]
                except IndexError as exc:
                    # These IndexError exceptions happen only because the message
                    # sometimes contains successive \r\n sequences (blank lines),
                    # so the indexing goes out of range. Such lines can safely be
                    # ignored, hence the exception is swallowed here.
                    pass

        #-------------------------------------------------------
        # make assertions
        #-------------------------------------------------------

        self.assertEquals(msg_tuple[1], '*****@*****.**')
        #self.assertEquals(msg_tuple[0], ION_NOTIFICATION_EMAIL_ADDRESS)

        #self.assertEquals(message_dict['From'], ION_NOTIFICATION_EMAIL_ADDRESS)
        self.assertEquals(message_dict['To'], '*****@*****.**')
        self.assertEquals(message_dict['Event'].rstrip('\r'),
                          'ResourceLifecycleEvent')
        self.assertEquals(message_dict['Originator'].rstrip('\r'),
                          'Some_Resource_Agent_ID1')
        self.assertEquals(message_dict['Description'].rstrip('\r'),
                          'RLE test event')
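
    # Illustrative sketch (not part of the original test): the "Key: value"
    # header-parsing loop above is repeated verbatim in test_sms and
    # test_event_detection, so it could be factored into a helper like the one
    # below. The name _parse_message_headers is hypothetical.
    def _parse_message_headers(self, message):
        headers = {}
        for line in message.split("\n"):
            parts = line.split(": ", 1)  # split only on the first ": "
            if len(parts) == 2:
                # drop the trailing '\r' left over from \r\n line endings
                headers[parts[0]] = parts[1].rstrip('\r')
        return headers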

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    def test_sms(self):

        proc1 = self.container.proc_manager.procs_by_name['user_notification']

        # Create a user and get the user_id
        user = UserInfo(name='new_user')
        user_id, _ = self.rrc.create(user)

        # set up....
        notification_id = self.unsc.create_sms(
            event_type='ResourceLifecycleEvent',
            event_subtype=None,
            origin='Some_Resource_Agent_ID1',
            origin_type=None,
            user_id=user_id,
            phone='401-XXX-XXXX',
            provider='T-Mobile',
            message_header='message_header',
            parser='parser',
        )

        #------------------------------------------------------------------------------------------------------
        # Setup so as to be able to get the message and headers going into the
        # subscription callback method of the EmailEventProcessor
        #------------------------------------------------------------------------------------------------------

        # publish an event for each notification to generate the emails
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event")

        msg_tuple = proc1.event_processors[
            notification_id].smtp_client.sentmail.get(timeout=4)

        self.assertTrue(proc1.event_processors[notification_id].smtp_client.
                        sentmail.empty())

        message = msg_tuple[2]
        list_lines = message.split("\n")

        #-------------------------------------------------------
        # parse the message body
        #-------------------------------------------------------

        message_dict = {}
        for line in list_lines:
            key_item = line.split(": ")
            if key_item[0] == 'Subject':
                message_dict['Subject'] = key_item[1] + key_item[2]
            else:
                try:
                    message_dict[key_item[0]] = key_item[1]
                except IndexError as exc:
                    # These IndexError exceptions happen only because the message
                    # sometimes contains successive \r\n sequences (blank lines),
                    # so the indexing goes out of range. Such lines can safely be
                    # ignored, hence the exception is swallowed here.
                    pass

        #-------------------------------------------------------
        # make assertions
        #-------------------------------------------------------

        self.assertEquals(msg_tuple[1], '*****@*****.**')
        #self.assertEquals(msg_tuple[0], ION_NOTIFICATION_EMAIL_ADDRESS)
        self.assertEquals(message_dict['Description'].rstrip('\r'),
                          'RLE test event')

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    def test_event_detection(self):

        proc1 = self.container.proc_manager.procs_by_name['user_notification']

        # Create a user and get the user_id
        user = UserInfo(name='new_user')
        user_id, _ = self.rrc.create(user)

        # Create detection notification
        dfilt = DetectionFilterConfig()

        dfilt.processing['condition'] = 5
        dfilt.processing['comparator'] = '>'
        dfilt.processing['filter_field'] = 'voltage'

        dfilt.delivery['message'] = 'I got my detection event!'
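
        # The detection filter fires when an ExampleDetectableEvent arrives whose
        # 'voltage' field is greater than 5; the resulting DetectionEvent is what
        # the email notification created below subscribes to.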

        notification_id = self.unsc.create_detection_filter(
            event_type='ExampleDetectableEvent',
            event_subtype=None,
            origin='Some_Resource_Agent_ID1',
            origin_type=None,
            user_id=user_id,
            filter_config=dfilt)

        #---------------------------------------------------------------------------------
        # Create event subscription for resulting detection event
        #---------------------------------------------------------------------------------

        # Create an email notification so that when the DetectionEventProcessor
        # detects an event and fires its own output event, it will be caught by an
        # EmailEventProcessor and an email will be sent to the user

        notification_id_2 = self.unsc.create_email(
            event_type='DetectionEvent',
            event_subtype=None,
            origin='DetectionEventProcessor',
            origin_type=None,
            user_id=user_id,
            email='*****@*****.**',
            mode=DeliveryMode.UNFILTERED,
            message_header='Detection event',
            parser='parser',
            period=1)

        # Send event that is not detected
        # publish an event for each notification to generate the emails
        rle_publisher = EventPublisher("ExampleDetectableEvent")

        # since the voltage field in this event is less than 5, it will not be detected
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event",
                                    voltage=3)

        # Check at the end of the test to make sure this event never triggered a Detectable!

        # Send Event that is detected
        # publish an event for each notification to generate the emails

        # since the voltage field in this event is greater than 5, it WILL be detected
        rle_publisher = EventPublisher("ExampleDetectableEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event",
                                    voltage=10)

        #-------------------------------------------------------
        # make assertions
        #-------------------------------------------------------

        msg_tuple = proc1.event_processors[
            notification_id_2].smtp_client.sentmail.get(timeout=4)

        # The first event never triggered an email because its voltage was less than 5, so the queue is now empty
        self.assertTrue(proc1.event_processors[notification_id_2].smtp_client.
                        sentmail.empty())

        self.assertEquals(msg_tuple[1], '*****@*****.**')
        #self.assertEquals(msg_tuple[0], ION_NOTIFICATION_EMAIL_ADDRESS)

        # parse the message body
        message = msg_tuple[2]
        list_lines = message.split("\n")

        message_dict = {}
        for line in list_lines:
            key_item = line.split(": ")
            if key_item[0] == 'Subject':
                message_dict['Subject'] = key_item[1] + key_item[2]
            else:
                try:
                    message_dict[key_item[0]] = key_item[1]
                except IndexError as exc:
                    # These IndexError exceptions happen only because the message
                    # sometimes contains successive \r\n sequences (blank lines),
                    # so the indexing goes out of range. Such lines can safely be
                    # ignored, hence the exception is swallowed here.
                    pass

        #self.assertEquals(message_dict['From'], ION_NOTIFICATION_EMAIL_ADDRESS)
        self.assertEquals(message_dict['To'], '*****@*****.**')
        self.assertEquals(message_dict['Event'].rstrip('\r'), 'DetectionEvent')
        self.assertEquals(message_dict['Originator'].rstrip('\r'),
                          'DetectionEventProcessor')
        self.assertEquals(message_dict['Description'].rstrip('\r'),
                          'Event was detected by DetectionEventProcessor')

    @unittest.skip('interface has changed!')
    def test_find_event_types_for_resource(self):
        # create a dataset object in the RR to pass into the UNS method
        dataset_object = IonObject(RT.DataSet, name="dataset1")
        dataset_id, version = self.rrc.create(dataset_object)

        # get the list of event types for the dataset
        events = self.unsc.find_event_types_for_resource(dataset_id)
        log.debug("dataset events = " + str(events))
        if not events == ['dataset_supplement_added', 'dataset_change']:
            self.fail("failed to return correct list of event types")

        # try to pass in an id of a resource that doesn't exist (should fail)
        try:
            events = self.unsc.find_event_types_for_resource("bogus_id")
            self.fail("failed to detect non-existant resource")
        except:
            pass

    @unittest.skip('interface has changed!')
    def test_create_two_user_notifications(self):
        # create user with email address in RR
        user_identty_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identty_object)
        user_info_object = IonObject(
            RT.UserInfo, {
                "name": "user1_info",
                "contact": {
                    "email": '*****@*****.**'
                }
            })
        self.imc.create_user_info(user_id, user_info_object)

        # create first notification
        notification_object1 = IonObject(
            RT.NotificationRequest, {
                "name": "notification1",
                "origin_list": ['Some_Resource_Agent_ID1'],
                "events_list": ['ResourceLifecycleEvent']
            })
        notification_id1 = self.unsc.create_notification(
            notification_object1, user_id)
        # create second notification
        notification_object2 = IonObject(
            RT.NotificationRequest, {
                "name": "notification2",
                "origin_list": ['Some_Resource_Agent_ID2'],
                "events_list": ['DataEvent']
            })
        notification_id2 = self.unsc.create_notification(
            notification_object2, user_id)

        # read the notifications back and check that they are correct
        n1 = self.unsc.read_notification(notification_id1)
        if n1.name != notification_object1.name or \
           n1.origin_list != notification_object1.origin_list or \
           n1.events_list != notification_object1.events_list:
            self.fail("notification was not correct")
        n2 = self.unsc.read_notification(notification_id2)
        if n2.name != notification_object2.name or \
           n2.origin_list != notification_object2.origin_list or \
           n2.events_list != notification_object2.events_list:
            self.fail("notification was not correct")

    @unittest.skip('interface has changed!')
    def test_delete_user_notifications(self):
        # create user with email address in RR
        user_identty_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identty_object)
        user_info_object = IonObject(
            RT.UserInfo, {
                "name": "user1_info",
                "contact": {
                    "email": '*****@*****.**'
                }
            })
        self.imc.create_user_info(user_id, user_info_object)

        # create first notification
        notification_object1 = IonObject(
            RT.NotificationRequest, {
                "name": "notification1",
                "origin_list": ['Some_Resource_Agent_ID1'],
                "events_list": ['ResourceLifecycleEvent']
            })
        notification1_id = self.unsc.create_notification(
            notification_object1, user_id)
        # create second notification
        notification_object2 = IonObject(
            RT.NotificationRequest, {
                "name": "notification2",
                "origin_list": ['Some_Resource_Agent_ID2'],
                "events_list": ['DataEvent']
            })
        notification2_id = self.unsc.create_notification(
            notification_object2, user_id)

        # delete both notifications
        self.unsc.delete_notification(notification1_id)
        self.unsc.delete_notification(notification2_id)

        # check that the notifications are not there
        try:
            n1 = self.unsc.read_notification(notification1_id)
        except:
            try:
                n2 = self.unsc.read_notification(notification2_id)
            except:
                return
        self.fail("failed to delete notifications")

    @unittest.skip('interface has changed!')
    def test_find_user_notifications(self):
        # create user with email address in RR
        user_identty_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identty_object)
        user_info_object = IonObject(
            RT.UserInfo, {
                "name": "user1_info",
                "contact": {
                    "email": '*****@*****.**'
                }
            })
        self.imc.create_user_info(user_id, user_info_object)

        # create first notification
        notification_object = IonObject(
            RT.NotificationRequest, {
                "name": "notification1",
                "origin_list": ['Some_Resource_Agent_ID1'],
                "events_list": ['ResourceLifecycleEvent']
            })

        self.unsc.create_notification(notification_object, user_id)
        # create second notification
        notification_object = IonObject(
            RT.NotificationRequest, {
                "name": "notification2",
                "origin_list": ['Some_Resource_Agent_ID2'],
                "events_list": ['DataEvent']
            })
        self.unsc.create_notification(notification_object, user_id)

        # try to find all notifications for user
        notifications = self.unsc.find_notifications_by_user(user_id)
        if len(notifications) != 2:
            self.fail("failed to find all notifications")

    @unittest.skip('interface has changed!')
    def test_update_user_notification(self):
        # create user with email address in RR
        user_identty_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identty_object)
        user_info_object = IonObject(
            RT.UserInfo, {
                "name": "user1_info",
                "contact": {
                    "email": '*****@*****.**'
                }
            })
        self.imc.create_user_info(user_id, user_info_object)

        # create a notification
        notification_object = IonObject(
            RT.NotificationRequest, {
                "name": "notification1",
                "origin_list": ['Some_Resource_Agent_ID1'],
                "events_list": ['ResourceLifecycleEvent']
            })
        notification_id = self.unsc.create_notification(
            notification_object, user_id)

        # read back the notification and change it
        notification = self.unsc.read_notification(notification_id)
        notification.origin_list = ['Some_Resource_Agent_ID5']
        self.unsc.update_notification(notification)

        # read back the notification and check that it got changed
        notification = self.unsc.read_notification(notification_id)
        if notification.origin_list != ['Some_Resource_Agent_ID5']:
            self.fail("failed to change notification")

    @unittest.skip('interface has changed!')
    def test_send_notification_emails(self):
        # create user with email address in RR
        user_identty_object = IonObject(RT.ActorIdentity, name="user1")
        user_id = self.imc.create_actor_identity(user_identty_object)
        user_info_object = IonObject(RT.UserInfo, {
            "name": "user1_info",
            "contact": {
                "email": '*****@*****.**'
            }
        })
        self.imc.create_user_info(user_id, user_info_object)

        # create first notification
        notification_object = IonObject(
            RT.NotificationRequest, {
                "name": "notification1",
                "origin_list": ['Some_Resource_Agent_ID1'],
                "events_list": ['ResourceLifecycleEvent']
            })
        self.unsc.create_notification(notification_object, user_id)
        # create second notification
        notification_object = IonObject(
            RT.NotificationRequest, {
                "name": "notification2",
                "origin_list": ['Some_Resource_Agent_ID2'],
                "events_list": ['DataEvent']
            })
        self.unsc.create_notification(notification_object, user_id)

        # publish an event for each notification to generate the emails
        # this can't easily be checked in software, so check for these at the [email protected] account
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event")
        de_publisher = EventPublisher("DataEvent")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2',
                                   description="DE test event")
        gevent.sleep(1)

    @unittest.skip('interface has changed!')
    def test_find_events(self):
        # publish some events for the event repository
        rle_publisher = EventPublisher("ResourceLifecycleEvent")
        de_publisher = EventPublisher("DataEvent")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event1")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event2")
        rle_publisher.publish_event(origin='Some_Resource_Agent_ID1',
                                    description="RLE test event3")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2',
                                   description="DE test event1")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2',
                                   description="DE test event2")
        de_publisher.publish_event(origin='Some_Resource_Agent_ID2',
                                   description="DE test event3")

        # find all events for the originator 'Some_Resource_Agent_ID1'
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID1')
        if len(events) != 3:
            self.fail("failed to find all events")
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'Some_Resource_Agent_ID1':
                self.fail("failed to find correct events")

        # find all events of type 'DataEvent'
        events = self.unsc.find_events(type='DataEvent')
        if len(events) != 3:
            self.fail("failed to find all events")
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'DataEvent':
                self.fail("failed to find correct events")

        # find 2 events for the originator 'Some_Resource_Agent_ID2'
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID2',
                                       limit=2)
        if len(events) != 2:
            self.fail("failed to find all events")
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'Some_Resource_Agent_ID2':
                self.fail("failed to find correct events")

        # find all events for the originator 'Some_Resource_Agent_ID1' in reverse time order
        events = self.unsc.find_events(origin='Some_Resource_Agent_ID1',
                                       descending=True)
        if len(events) != 3:
            self.fail("failed to find all events")
        for event in events:
            log.debug("event=" + str(event))
            if event[1][0] != 'Some_Resource_Agent_ID1':
                self.fail("failed to find correct events")