Example #1
    def run_reverse_transform(self):
        ''' Runs a reverse transform example and displays the results of performing the transform
        '''
        tms_cli = TransformManagementServiceClient(node=self.container.node)
        procd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        #-------------------------------
        # Process Definition
        #-------------------------------
        process_definition = IonObject(RT.ProcessDefinition,
                                       name='transform_process_definition')
        process_definition.executable = {
            'module': 'ion.processes.data.transforms.transform_example',
            'class': 'ReverseTransform'
        }

        process_definition_id = procd_cli.create_process_definition(
            process_definition)

        #-------------------------------
        # Execute Transform
        #-------------------------------
        input = [1, 2, 3, 4]
        retval = tms_cli.execute_transform(
            process_definition_id=process_definition_id,
            data=[1, 2, 3, 4],
            configuration={})
        log.debug('Transform Input: %s', input)
        log.debug('Transform Output: %s', retval)
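
The process definition above only points the Process Dispatcher at a module and class; the transform itself is not shown in this listing. Below is a minimal sketch of what a ReverseTransform-style class might look like, assuming execute_transform() hands the configured data list to an execute() method (the class name and contract here are assumptions, not taken from this listing):

class ReverseTransformSketch(object):
    ''' Hypothetical transform: reverses whatever list it is given. '''

    def execute(self, input):
        # [1, 2, 3, 4] -> [4, 3, 2, 1]
        return input[::-1]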
def upload_qc():
    upload_folder = FileSystem.get_url(FS.TEMP, 'uploads')
    try:

        object_store = Container.instance.object_store
        
        # required fields
        upload = request.files['file'] # <input type=file name="file">

        if upload:

            # upload file - run filename through werkzeug.secure_filename
            filename = secure_filename(upload.filename)
            path = os.path.join(upload_folder, filename)
            upload_time = time.time()
            upload.save(path)
            filetype = _check_magic(upload) or 'CSV' # Either going to be ZIP or CSV, probably

            # register upload
            file_upload_context = {
                'name': 'User uploaded QC file %s' % filename,
                'filename': filename,
                'filetype': filetype,  # 'ZIP' or 'CSV', as detected above
                'path': path,
                'upload_time': upload_time,
                'status': 'File uploaded to server'
            }
            fuc_id, _ = object_store.create_doc(file_upload_context)

            # client to process dispatch
            pd_client = ProcessDispatcherServiceClient()

            # create process definition
            process_definition = ProcessDefinition(
                name='upload_qc_processor',
                executable={
                    'module': 'ion.processes.data.upload.upload_qc_processing',
                    'class': 'UploadQcProcessing'
                }
            )
            process_definition_id = pd_client.create_process_definition(process_definition)
            # create process
            process_id = pd_client.create_process(process_definition_id)
            # schedule process
            config = DotDict()
            config.process.fuc_id = fuc_id
            pid = pd_client.schedule_process(process_definition_id, process_id=process_id, configuration=config)
            log.info('UploadQcProcessing process created %s' % pid)
            # response - only the FileUploadContext ID for UX display
            resp = {'fuc_id': fuc_id}
            return gateway_json_response(resp)

        raise BadRequest('Invalid Upload')

    except Exception as e:
        return build_error_response(e)
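
_check_magic() is referenced above but not defined in this listing. A minimal sketch, under the assumption that it sniffs the ZIP magic bytes from the werkzeug FileStorage stream and returns 'ZIP' or None (the caller falls back to 'CSV'):

def _check_magic(upload):
    ''' Hypothetical helper: detect a ZIP upload by its magic bytes. '''
    upload.stream.seek(0)
    header = upload.stream.read(4)
    upload.stream.seek(0)  # rewind so the stream can still be read afterwards
    return 'ZIP' if header == 'PK\x03\x04' else None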
class ProcessDispatcherSimpleAPIClient(object):

    # State to use when state returned from PD is None
    unknown_state = "400-PENDING"

    state_map = {
        ProcessStateEnum.SPAWN: '500-RUNNING',
        ProcessStateEnum.TERMINATE: '700-TERMINATED',
        ProcessStateEnum.ERROR: '850-FAILED'
    }

    def __init__(self, name, **kwargs):
        self.real_client = ProcessDispatcherServiceClient(to_name=name, **kwargs)
        self.event_pub = EventPublisher()

    def dispatch_process(self, upid, spec, subscribers, constraints=None,
                         immediate=False):

        name = spec.get('name')
        self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
            origin=name, origin_type="DispatchedHAProcess",
            state=ProcessStateEnum.SPAWN)
        process_def = ProcessDefinition(name=name)
        process_def.executable = {'module': spec.get('module'),
                'class': spec.get('class')}

        process_def_id = self.real_client.create_process_definition(process_def)

        pid = self.real_client.create_process(process_def_id)

        process_schedule = ProcessSchedule()

        sched_pid = self.real_client.schedule_process(process_def_id,
                process_schedule, configuration={}, process_id=pid)

        proc = self.real_client.read_process(sched_pid)
        dict_proc = {'upid': proc.process_id,
                'state': self.state_map.get(proc.process_state, self.unknown_state),
                }
        return dict_proc

    def terminate_process(self, pid):
        return self.real_client.cancel_process(pid)

    def describe_processes(self):
        procs = self.real_client.list_processes()
        dict_procs = []
        for proc in procs:
            dict_proc = {'upid': proc.process_id,
                    'state': self.state_map.get(proc.process_state, self.unknown_state),
                    }
            dict_procs.append(dict_proc)
        return dict_procs
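
A hedged usage sketch for the wrapper above. The spec keys mirror what dispatch_process() reads, the module path is reused from other examples in this listing, and the to_name value is a placeholder:

client = ProcessDispatcherSimpleAPIClient(name='process_dispatcher')
spec = {
    'name': 'example_transform',
    'module': 'ion.processes.data.transforms.transform_example',
    'class': 'TransformExample',
}
proc = client.dispatch_process(upid='example_transform', spec=spec, subscribers=None)
print 'dispatched %s in state %s' % (proc['upid'], proc['state'])
client.terminate_process(proc['upid'])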
Example #6
    def run_external_transform(self):
        '''
        This example script illustrates how a transform can interact with an outside process (very basic).
        It launches an external_transform example which uses the operating system command 'bc' to add 1 to the input.

        Producer -> A -> 'FS.TEMP/transform_output'
        A is an external transform that spawns an OS process to increment the input by 1
        '''
        pubsub_cli = PubsubManagementServiceClient(node=self.container.node)
        tms_cli = TransformManagementServiceClient(node=self.container.node)
        procd_cli = ProcessDispatcherServiceClient(node=self.container.node)
        
        #-------------------------------
        # Process Definition
        #-------------------------------
        process_definition = ProcessDefinition(name='external_transform_definition')
        process_definition.executable['module'] = 'ion.processes.data.transforms.transform_example'
        process_definition.executable['class'] = 'ExternalTransform'
        process_definition_id = procd_cli.create_process_definition(process_definition=process_definition)

        #-------------------------------
        # Streams
        #-------------------------------

        input_stream_id = pubsub_cli.create_stream(name='input_stream', original=True)
        
        #-------------------------------
        # Subscription
        #-------------------------------

        query = StreamQuery(stream_ids=[input_stream_id])
        input_subscription_id = pubsub_cli.create_subscription(query=query, exchange_name='input_queue')

        #-------------------------------
        # Launch Transform
        #-------------------------------

        transform_id = tms_cli.create_transform(name='external_transform', 
              in_subscription_id=input_subscription_id,
              process_definition_id=process_definition_id,
              configuration={})
        tms_cli.activate_transform(transform_id)

        #-------------------------------
        # Launch Producer
        #-------------------------------

        id_p = self.container.spawn_process('myproducer', 'ion.processes.data.transforms.transform_example', 'TransformExampleProducer', {'process':{'type':'stream_process','publish_streams':{'out_stream':input_stream_id}},'stream_producer':{'interval':4000}})
        self.container.proc_manager.procs[id_p].start()
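
The docstring above says ExternalTransform shells out to the OS command 'bc' to add 1 to its input; the transform class itself is not shown here. One hedged way such a call could be made with subprocess (the function name and wiring are assumptions):

import subprocess

def add_one_with_bc(value):
    ''' Hypothetical sketch: pipe "<value> + 1" through the OS calculator bc. '''
    proc = subprocess.Popen(['bc'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = proc.communicate('%s + 1\n' % value)
    return int(out.strip())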
Example #7
    def on_start(self):
        super(CacheLauncher, self).on_start()
        tms_cli = TransformManagementServiceClient()
        pubsub_cli = PubsubManagementServiceClient()
        pd_cli = ProcessDispatcherServiceClient()
        dname = CACHE_DATASTORE_NAME
        number_of_workers = self.CFG.get_safe('process.number_of_workers', 2)

        proc_def = ProcessDefinition(
            name='last_update_worker_process',
            description=
            'Worker process for caching the last update from a stream')
        proc_def.executable['module'] = 'ion.processes.data.last_update_cache'
        proc_def.executable['class'] = 'LastUpdateCache'
        proc_def_id = pd_cli.create_process_definition(
            process_definition=proc_def)

        xs_dot_xp = CFG.core_xps.science_data
        try:
            self.XS, xp_base = xs_dot_xp.split('.')
            self.XP = '.'.join([get_sys_name(), xp_base])
        except ValueError:
            raise StandardError(
                'Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure'
                % xs_dot_xp)

        subscription_id = pubsub_cli.create_subscription(
            query=ExchangeQuery(), exchange_name='last_update_cache')

        config = {
            'couch_storage': {
                'datastore_name': dname,
                'datastore_profile': 'SCIDATA'
            }
        }

        for i in xrange(number_of_workers):

            transform_id = tms_cli.create_transform(
                name='last_update_cache%d' % i,
                description=
                'last_update that compiles an aggregate of metadata',
                in_subscription_id=subscription_id,
                process_definition_id=proc_def_id,
                configuration=config)

        tms_cli.activate_transform(transform_id=transform_id)
Example #8
    def create_process(name='', module='', class_name='', configuration=None):
        '''
        A helper method to create a process
        '''

        producer_definition = ProcessDefinition(name=name)
        producer_definition.executable = {
            'module':module,
            'class': class_name
        }

        process_dispatcher = ProcessDispatcherServiceClient()

        procdef_id = process_dispatcher.create_process_definition(process_definition=producer_definition)
        pid = process_dispatcher.schedule_process(process_definition_id=procdef_id, configuration=configuration)

        return pid
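
A hedged usage sketch for the helper above, assuming it is available as a plain function; the producer class is reused from other examples in this listing and the configuration contents are placeholders:

pid = create_process(
    name='example_producer',
    module='ion.processes.data.transforms.transform_example',
    class_name='TransformExampleProducer',
    configuration={'process': {'type': 'stream_process'}})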
Example #10
class ProcessDispatcherServiceLocalIntTest(IonIntegrationTestCase):
    """Integration tests for the "local" mode of the PD

    In local mode processes are directly started on the local container. This
    is provided to allow use of PD functionality when launching via a single
    rel file instead of via a "real" CEI launch.
    """

    def setUp(self):

        # ensure bridge mode is disabled
        if 'process_dispatcher_bridge' in CFG:
            del CFG['process_dispatcher_bridge']

        # set up the container
        self._start_container()

        self.cc = ContainerAgentClient(node=self.container.node, name=self.container.name)

        self.cc.start_rel_from_url('res/deploy/r2cei.yml')

        self.pd_cli = ProcessDispatcherServiceClient(node=self.cc.node)

        self.process_definition = ProcessDefinition(name='basic_transform_definition')
        self.process_definition.executable = {'module': 'ion.processes.data.transforms.transform_example',
                                              'class':'TransformExample'}
        self.process_definition_id = self.pd_cli.create_process_definition(self.process_definition)

    def test_schedule_cancel(self):

        process_schedule = ProcessSchedule()

        pid = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={})

        proc = self.container.proc_manager.procs.get(pid)
        self.assertIsInstance(proc, TransformExample)

        # failures could theoretically leak processes but I don't think that
        # matters since everything gets torn down between tests
        self.pd_cli.cancel_process(pid)
        self.assertNotIn(pid, self.container.proc_manager.procs)
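
In local mode the scheduled process shows up synchronously in proc_manager.procs, as the test asserts; against a real CEI launch the state usually has to be polled instead. A hedged helper sketch built from the read_process()/process_state fields used elsewhere in this listing:

import gevent

def wait_for_state(pd_cli, pid, target_state=ProcessStateEnum.SPAWN, timeout=30):
    ''' Hypothetical helper: poll the Process Dispatcher until the process
    reaches target_state or the timeout expires. '''
    for _ in xrange(timeout):
        proc = pd_cli.read_process(pid)
        if proc.process_state == target_state:
            return proc
        gevent.sleep(1)
    raise AssertionError('process %s never reached state %s' % (pid, target_state))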
Example #11
    def on_start(self):
        super(CacheLauncher,self).on_start()
        tms_cli = TransformManagementServiceClient()
        pubsub_cli = PubsubManagementServiceClient()
        pd_cli = ProcessDispatcherServiceClient()
        dname = CACHE_DATASTORE_NAME
        number_of_workers = self.CFG.get_safe('process.number_of_workers', 2)

        proc_def = ProcessDefinition(name='last_update_worker_process',
                                     description='Worker process for caching the last update from a stream')
        proc_def.executable['module'] = 'ion.processes.data.last_update_cache'
        proc_def.executable['class'] = 'LastUpdateCache'
        proc_def_id = pd_cli.create_process_definition(process_definition=proc_def)

        xs_dot_xp = CFG.core_xps.science_data
        try:
            self.XS, xp_base = xs_dot_xp.split('.')
            self.XP = '.'.join([get_sys_name(), xp_base])
        except ValueError:
            raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)

        subscription_id = pubsub_cli.create_subscription(query=ExchangeQuery(), exchange_name='last_update_cache')

        config = {
            'couch_storage' : {
                'datastore_name' : dname,
                'datastore_profile' : 'SCIDATA'
            }
        }

        for i in xrange(number_of_workers):

            transform_id = tms_cli.create_transform(
                name='last_update_cache%d' % i,
                description='last_update that compiles an aggregate of metadata',
                in_subscription_id=subscription_id,
                process_definition_id=proc_def_id,
                configuration=config
            )

        tms_cli.activate_transform(transform_id=transform_id)
Example #12
    def test_cei_launch_mode(self):
        
        pdc = ProcessDispatcherServiceClient(node=self.container.node)
        p_def = ProcessDefinition(name='Agent007')
        p_def.executable = {
            'module' : 'ion.agents.instrument.instrument_agent',
            'class' : 'InstrumentAgent'
        }
        p_def_id = pdc.create_process_definition(p_def)
        
        pid = pdc.create_process(p_def_id)
        
        def event_callback(event, *args, **kwargs):
            print '######### proc %s in state %s' % (event.origin, ProcessStateEnum._str_map[event.state])
 
        sub = EventSubscriber(event_type='ProcessLifecycleEvent',
                              callback=event_callback,
                              origin=pid,
                              origin_type='DispatchedProcess')
         
        sub.start()

        agent_config = deepcopy(self._agent_config)
        agent_config['bootmode'] = 'restart'
        pdc.schedule_process(p_def_id, process_id=pid,
                             configuration=agent_config)
        
        gevent.sleep(5)
        
        pdc.cancel_process(pid)
        
        gevent.sleep(15)

        sub.stop()
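
Instead of printing each state, the lifecycle callback can queue what it sees so a test can assert on the sequence afterwards; a hedged variation of the subscriber wiring above (pid is the process id from the test):

from gevent.queue import Queue

observed_states = Queue()

def collecting_callback(event, *args, **kwargs):
    # queue each reported lifecycle state instead of printing it
    observed_states.put(event.state)

sub = EventSubscriber(event_type='ProcessLifecycleEvent',
                      callback=collecting_callback,
                      origin=pid,
                      origin_type='DispatchedProcess')
sub.start()
# a test can then drain observed_states and assert, for example, that
# ProcessStateEnum.TERMINATE was observed after cancel_process(pid)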
        
        
class UserNotificationService(BaseUserNotificationService):
    """
    A service that provides users with an API for CRUD operations on notifications.
    """

    def __init__(self, *args, **kwargs):
        self._schedule_ids = []
        BaseUserNotificationService.__init__(self, *args, **kwargs)

    def on_start(self):

        #---------------------------------------------------------------------------------------------------
        # Get the event Repository
        #---------------------------------------------------------------------------------------------------

#        self.ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'
        self.ION_NOTIFICATION_EMAIL_ADDRESS = CFG.get_safe('server.smtp.sender')


        #---------------------------------------------------------------------------------------------------
        # Create an event processor
        #---------------------------------------------------------------------------------------------------

        self.event_processor = EmailEventProcessor()

        #---------------------------------------------------------------------------------------------------
        # load event originators, types, and table
        #---------------------------------------------------------------------------------------------------

        self.notifications = {}

        #---------------------------------------------------------------------------------------------------
        # Dictionaries that maintain information about users and their subscribed notifications
        # The reverse_user_info is calculated from the user_info dictionary
        #---------------------------------------------------------------------------------------------------
        self.user_info = {}
        self.reverse_user_info = {}

        #---------------------------------------------------------------------------------------------------
        # Get the clients
        #---------------------------------------------------------------------------------------------------

        self.discovery = DiscoveryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.event_publisher = EventPublisher()
        self.datastore = self.container.datastore_manager.get_datastore('events')

        self.start_time = get_ion_ts()

        #------------------------------------------------------------------------------------
        # Create an event subscriber for Reload User Info events
        #------------------------------------------------------------------------------------

        def reload_user_info(event_msg, headers):
            '''
            Callback method for the subscriber to ReloadUserInfoEvent
            '''

            notification_id =  event_msg.notification_id
            log.debug("(UNS instance) Received a ReloadUserInfoEvent. The relevant notification_id is %s" % notification_id)

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info =  calculate_reverse_user_info(self.user_info)

            log.debug("(UNS instance) After a reload, the user_info: %s" % self.user_info)
            log.debug("(UNS instance) The recalculated reverse_user_info: %s" % self.reverse_user_info)

        # the subscriber for the ReloadUserInfoEvent
        self.reload_user_info_subscriber = EventSubscriber(
            event_type="ReloadUserInfoEvent",
            origin='UserNotificationService',
            callback=reload_user_info
        )
        self.add_endpoint(self.reload_user_info_subscriber)

    def on_quit(self):
        """
        Handles stop/terminate.

        Cleans up subscribers spawned here, terminates any scheduled tasks to the scheduler.
        """
        for sid in self._schedule_ids:
            try:
                self.clients.scheduler.cancel_timer(sid)
            except IonException as ex:
                log.info("Ignoring exception while cancelling schedule id (%s): %s: %s", sid, ex.__class__.__name__, ex)

        super(UserNotificationService, self).on_quit()

    def __now(self):
        """
        This method defines what the UNS uses as its "current" time
        """
        return datetime.utcnow()

    def set_process_batch_key(self, process_batch_key = ''):
        """
        This method allows an operator to set the process_batch_key, a string.
        Once this method is used by the operator, the UNS will start listening for timer events
        published by the scheduler with origin = process_batch_key.

        @param process_batch_key str
        """


        def process(event_msg, headers):
            self.end_time = get_ion_ts()

            # run the process_batch() method
            self.process_batch(start_time=self.start_time, end_time=self.end_time)
            self.start_time = self.end_time

        # the subscriber for the batch processing.
        # To trigger the batch notification, have the scheduler create a timer
        # with event_origin = process_batch_key.
        self.batch_processing_subscriber = EventSubscriber(
            event_type="TimerEvent",
            origin=process_batch_key,
            callback=process
        )
        self.add_endpoint(self.batch_processing_subscriber)

    def create_notification(self, notification=None, user_id=''):
        """
        Persists the provided NotificationRequest object for the specified Origin id and
        associates the Notification resource with the user_id string.
        The returned id is the internal id by which the NotificationRequest will be identified
        in the data store.

        @param notification        NotificationRequest
        @param user_id             str
        @retval notification_id    str
        @throws BadRequest    if object passed has _id or _rev attribute

        """

        if not user_id:
            raise BadRequest("User id not provided.")

        log.debug("Create notification called for user_id: %s, and notification: %s", user_id, notification)

        #---------------------------------------------------------------------------------------------------
        # Persist Notification object as a resource if it has already not been persisted
        #---------------------------------------------------------------------------------------------------

        # if the notification has already been registered, simply use the old id
        notification_id = self._notification_in_notifications(notification, self.notifications)

        # if it has not yet been registered, register it below and get a new id

        temporal_bounds = TemporalBounds()
        temporal_bounds.start_datetime = get_ion_ts()
        temporal_bounds.end_datetime = ''

        if not notification_id:
            notification.temporal_bounds = temporal_bounds
            notification_id, _ = self.clients.resource_registry.create(notification)
            self.notifications[notification_id] = notification
        else:
            log.debug("Notification object has already been created in resource registry before. No new id to be generated. notification_id: %s", notification_id)
            # Read the old notification already in the resource registry
            notification = self.clients.resource_registry.read(notification_id)

            # Update the temporal bounds of the old notification resource
            notification.temporal_bounds = temporal_bounds

            # Update the notification in the resource registry
            self.clients.resource_registry.update(notification)

            log.debug("The temporal bounds for this resubscribed notification object with id: %s, is: %s", notification_id,notification.temporal_bounds)


        # Link the user and the notification with a hasNotification association
        assocs= self.clients.resource_registry.find_associations(subject=user_id,
                                                                    predicate=PRED.hasNotification,
                                                                    object=notification_id,
                                                                    id_only=True)
        if assocs:
            log.debug("Got an already existing association: %s, between user_id: %s, and notification_id: %s", assocs,user_id,notification_id)
            return notification_id
        else:
            log.debug("Creating association between user_id: %s, and notification_id: %s", user_id, notification_id )
            self.clients.resource_registry.create_association(user_id, PRED.hasNotification, notification_id)

        # read the registered notification request object because this has an _id and is more useful
        notification = self.clients.resource_registry.read(notification_id)

        # Update the user info object with the notification
        self.event_processor.add_notification_for_user(new_notification=notification, user_id=user_id)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.debug("(create notification) Publishing ReloadUserInfoEvent for notification_id: %s", notification_id)

        self.event_publisher.publish_event( event_type= "ReloadUserInfoEvent",
            origin="UserNotificationService",
            description= "A notification has been created.",
            notification_id = notification_id)

        return notification_id
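
    # A hedged usage sketch for create_notification(); the NotificationRequest
    # fields below are inferred from how this service reads them (origin,
    # origin_type, event_type), and uns_client / data_product_id / user_id are
    # placeholders:
    #
    #   notification = NotificationRequest(name='my_notification',
    #                                      origin=data_product_id,
    #                                      origin_type='DataProduct',
    #                                      event_type='ResourceModifiedEvent')
    #   notification_id = uns_client.create_notification(notification=notification,
    #                                                    user_id=user_id)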

    def update_notification(self, notification=None, user_id = ''):
        """Updates the provided NotificationRequest object.  Throws NotFound exception if
        an existing version of NotificationRequest is not found.  Throws Conflict if
        the provided NotificationRequest object is not based on the latest persisted
        version of the object.

        @param notification     NotificationRequest
        @throws BadRequest      if object does not have _id or _rev attribute
        @throws NotFound        object with specified id does not exist
        @throws Conflict        object not based on latest persisted object version
        """

        raise NotImplementedError("This method needs to be worked out in terms of implementation")

#        #-------------------------------------------------------------------------------------------------------------------
#        # Get the old notification
#        #-------------------------------------------------------------------------------------------------------------------
#
#        old_notification = self.clients.resource_registry.read(notification._id)
#
#        #-------------------------------------------------------------------------------------------------------------------
#        # Update the notification in the notifications dict
#        #-------------------------------------------------------------------------------------------------------------------
#
#
#        self._update_notification_in_notifications_dict(new_notification=notification,
#                                                        notifications=self.notifications)
#        #-------------------------------------------------------------------------------------------------------------------
#        # Update the notification in the registry
#        #-------------------------------------------------------------------------------------------------------------------
#        '''
#        Since one user should not be able to update the notification request resource without the knowledge of other users
#        who have subscribed to the same notification request, we do not update the resource in the resource registry
#        '''
#
##        self.clients.resource_registry.update(notification)
#
#        #-------------------------------------------------------------------------------------------------------------------
#        # reading up the notification object to make sure we have the newly registered notification request object
#        #-------------------------------------------------------------------------------------------------------------------
#
#        notification_id = notification._id
#        notification = self.clients.resource_registry.read(notification_id)
#
#        #------------------------------------------------------------------------------------
#        # Update the UserInfo object
#        #------------------------------------------------------------------------------------
#
#        user = self.update_user_info_object(user_id, notification)
#
#        #-------------------------------------------------------------------------------------------------------------------
#        # Generate an event that can be picked by notification workers so that they can update their user_info dictionary
#        #-------------------------------------------------------------------------------------------------------------------
#        log.info("(update notification) Publishing ReloadUserInfoEvent for updated notification")
#
#        self.event_publisher.publish_event( event_type= "ReloadUserInfoEvent",
#            origin="UserNotificationService",
#            description= "A notification has been updated.",
#            notification_id = notification_id
#        )

    def read_notification(self, notification_id=''):
        """Returns the NotificationRequest object for the specified notification id.
        Throws exception if id does not match any persisted NotificationRequest
        objects.

        @param notification_id    str
        @retval notification    NotificationRequest
        @throws NotFound    object with specified id does not exist
        """
        notification = self.clients.resource_registry.read(notification_id)

        return notification

    def delete_notification(self, notification_id=''):
        """For now, permanently deletes NotificationRequest object with the specified
        id. Throws exception if id does not match any persisted NotificationRequest.

        @param notification_id    str
        @throws NotFound    object with specified id does not exist
        """

        #-------------------------------------------------------------------------------------------------------------------
        # Stop the event subscriber for the notification
        #-------------------------------------------------------------------------------------------------------------------
        notification_request = self.clients.resource_registry.read(notification_id)

        #-------------------------------------------------------------------------------------------------------------------
        # Update the resource registry
        #-------------------------------------------------------------------------------------------------------------------

        notification_request.temporal_bounds.end_datetime = get_ion_ts()

        self.clients.resource_registry.update(notification_request)

        #-------------------------------------------------------------------------------------------------------------------
        # Find users who are interested in the notification and update the notification in the list maintained by the UserInfo object
        #-------------------------------------------------------------------------------------------------------------------
        user_ids, _ = self.clients.resource_registry.find_subjects(RT.UserInfo, PRED.hasNotification, notification_id, True)

        for user_id in user_ids:
            self.update_user_info_object(user_id, notification_request)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.info("(delete notification) Publishing ReloadUserInfoEvent for notification_id: %s", notification_id)

        self.event_publisher.publish_event( event_type= "ReloadUserInfoEvent",
            origin="UserNotificationService",
            description= "A notification has been deleted.",
            notification_id = notification_id)

#    def delete_notification_from_user_info(self, notification_id):
#        """
#        Helper method to delete the notification from the user_info dictionary
#
#        @param notification_id str
#        """
#
#        user_ids, assocs = self.clients.resource_registry.find_subjects(object=notification_id, predicate=PRED.hasNotification, id_only=True)
#
#        for assoc in assocs:
#            self.clients.resource_registry.delete_association(assoc)
#
#        for user_id in user_ids:
#
#            value = self.user_info[user_id]
#
#            for notif in value['notifications']:
#                if notification_id == notif._id:
#                    # remove the notification
#                    value['notifications'].remove(notif)
#
#        self.reverse_user_info = calculate_reverse_user_info(self.user_info)

    def find_events(self, origin='', type='', min_datetime=0, max_datetime=0, limit= -1, descending=False):
        """
        This method leverages couchdb view and simple filters. It does not use elastic search.

        Returns a list of events that match the specified search criteria. Will throw a NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param type           str
        @param min_datetime   int  seconds
        @param max_datetime   int  seconds
        @param limit          int         (integer limiting the number of results (0 means unlimited))
        @param descending     boolean     (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        """
        event_tuples = []

        try:
            event_tuples = self.container.event_repository.find_events(event_type=type, origin=origin, start_ts=min_datetime, end_ts=max_datetime, limit=limit, descending=descending)
        except Exception as exc:
            log.warning("The UNS find_events operation for event origin = %s and type = %s failed. Error message = %s", origin, type, exc.message)

        events = [item[2] for item in event_tuples]
        log.debug("(find_events) UNS found the following relevant events: %s", events)

        return events


    #todo Uses Elastic Search. Later extend this to a larger search criteria
    def find_events_extended(self, origin='', type='', min_time=0, max_time=0, limit=-1, descending=False):
        """Uses Elastic Search. Returns a list of events that match the specified search criteria. Will throw a NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param type           str
        @param min_time   int seconds
        @param max_time   int seconds
        @param limit          int         (integer limiting the number of results (0 means unlimited))
        @param descending     boolean     (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        """

        query = []

        if min_time and max_time:
            query.append( "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (min_time, max_time))

        if origin:
            query.append( 'search "origin" is "%s" from "events_index"' % origin)

        if type:
            query.append( 'search "type_" is "%s" from "events_index"' % type)

        search_string = ' and '.join(query)
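        # For illustration, with all three filters set the composed query reads
        # (placeholder values):
        #   SEARCH 'ts_created' VALUES FROM <min_time> TO <max_time> FROM 'events_index'
        #   and search "origin" is "<origin>" from "events_index"
        #   and search "type_" is "<type>" from "events_index"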


        # get the list of ids corresponding to the events
        ret_vals = self.discovery.parse(search_string)
        if len(query) > 1:
            events = self.datastore.read_mult(ret_vals)
        else:
            events = [i['_source'] for i in ret_vals]

        log.debug("(find_events_extended) Discovery search returned the following event ids: %s", ret_vals)


        log.debug("(find_events_extended) UNS found the following relevant events: %s", events)

        if limit > 0:
            return events[:limit]

        #todo implement time ordering: ascending or descending

        return events

    def publish_event_object(self, event=None):
        """
        This service operation publishes the given Event object.

        @param event    !Event
        @retval event   !Event
        """
        event = self.event_publisher.publish_event_object(event_object=event)
        log.info("The publish_event_object(event) method of UNS was used to publish the event: %s", event )

        return event

    def publish_event(self, event_type='', origin='', origin_type='', sub_type='', description='', event_attrs=None):
        """
        This service operation assembles a new Event object based on event_type
        (e.g. via the pyon Event publisher), with optional additional attributes
        from an event_attrs dict of arbitrary attributes.

        @param event_type   str
        @param origin       str
        @param origin_type  str
        @param sub_type     str
        @param description  str
        @param event_attrs  dict
        @retval event       !Event
        """
        event_attrs = event_attrs or {}

        event = self.event_publisher.publish_event(
            event_type = event_type,
            origin = origin,
            origin_type = origin_type,
            sub_type = sub_type,
            description = description,
            **event_attrs
            )
        log.info("The publish_event() method of UNS was used to publish an event: %s", event)

        return event

    def get_recent_events(self, resource_id='', limit = 100):
        """
        Get recent events for use in extended resource computed attribute
        @param resource_id str
        @param limit int
        @retval ComputedListValue with value list of 4-tuple with Event objects
        """

        now = get_ion_ts()
        events = self.find_events(origin=resource_id, limit=limit, max_datetime=now, descending=True)

        ret = IonObject(OT.ComputedEventListValue)
        if events:
            ret.value = events
            ret.computed_list = [self._get_event_computed_attributes(event) for event in events]
            ret.status = ComputedValueAvailability.PROVIDED
        else:
            ret.status = ComputedValueAvailability.NOTAVAILABLE

        return ret

    def _get_event_computed_attributes(self, event):
        """
        @param event any Event to compute attributes for
        @retval an EventComputedAttributes object for given event
        """
        evt_computed = IonObject(OT.EventComputedAttributes)
        evt_computed.event_id = event._id
        evt_computed.ts_computed = get_ion_ts()

        try:
            summary = self._get_event_summary(event)
            evt_computed.event_summary = summary

            spc_attrs = ["%s:%s" % (k, str(getattr(event, k))[:50]) for k in sorted(event.__dict__.keys()) if k not in ['_id', '_rev', 'type_', 'origin', 'origin_type', 'ts_created', 'base_types']]
            evt_computed.special_attributes = ", ".join(spc_attrs)

            evt_computed.event_attributes_formatted = pprint.pformat(event.__dict__)
        except Exception as ex:
            log.exception("Error computing EventComputedAttributes for event %s" % event)

        return evt_computed

    def _get_event_summary(self, event):
        event_types = [event.type_] + event.base_types
        summary = ""
        if "ResourceLifecycleEvent" in event_types:
            summary = "%s lifecycle state change: %s_%s" % (event.origin_type, event.lcstate, event.availability)
        elif "ResourceModifiedEvent" in event_types:
            summary = "%s modified: %s" % (event.origin_type, event.sub_type)
        elif "ResourceIssueReportedEvent" in event_types:
            summary = "Issue created: %s" % (event.description)

        elif "ResourceAgentStateEvent" in event_types:
            summary = "%s agent state change: %s" % (event.origin_type, event.state)
        elif "ResourceAgentResourceStateEvent" in event_types:
            summary = "%s agent resource state change: %s" % (event.origin_type, event.state)
        elif "ResourceAgentConfigEvent" in event_types:
            summary = "%s agent config set: %s" % (event.origin_type, event.config)
        elif "ResourceAgentResourceConfigEvent" in event_types:
            summary = "%s agent resource config set: %s" % (event.origin_type, event.config)
        elif "ResourceAgentCommandEvent" in event_types:
            summary = "%s agent command '%s(%s)' succeeded: %s" % (event.origin_type, event.command, event.execute_command, "" if event.result is None else event.result)
        elif "ResourceAgentErrorEvent" in event_types:
            summary = "%s agent command '%s(%s)' failed: %s:%s (%s)" % (event.origin_type, event.command, event.execute_command, event.error_type, event.error_msg, event.error_code)
        elif "ResourceAgentAsyncResultEvent" in event_types:
            summary = "%s agent async command '%s(%s)' succeeded: %s" % (event.origin_type, event.command, event.desc, "" if event.result is None else event.result)

        elif "ResourceAgentResourceCommandEvent" in event_types:
            summary = "%s agent resource command '%s(%s)' executed: %s" % (event.origin_type, event.command, event.execute_command, "OK" if event.result is None else event.result)
        elif "DeviceStatusEvent" in event_types:
            summary = "%s '%s' status change: %s" % (event.origin_type, event.sub_type, DeviceStatusType._str_map.get(event.status,"???"))
        elif "DeviceOperatorEvent" in event_types or "ResourceOperatorEvent" in event_types:
            summary = "Operator entered: %s" % event.description

        elif "OrgMembershipGrantedEvent" in event_types:
            summary = "Joined Org '%s' as member" % (event.org_name)
        elif "OrgMembershipCancelledEvent" in event_types:
            summary = "Cancelled Org '%s' membership" % (event.org_name)
        elif "UserRoleGrantedEvent" in event_types:
            summary = "Granted %s in Org '%s'" % (event.role_name, event.org_name)
        elif "UserRoleRevokedEvent" in event_types:
            summary = "Revoked %s in Org '%s'" % (event.role_name, event.org_name)
        elif "ResourceSharedEvent" in event_types:
            summary = "%s shared in Org: '%s'" % (event.sub_type, event.org_name)
        elif "ResourceUnsharedEvent" in event_types:
            summary = "%s unshared in Org: '%s'" % (event.sub_type, event.org_name)
        elif "ResourceCommitmentCreatedEvent" in event_types:
            summary = "%s commitment created in Org: '%s'" % (event.commitment_type, event.org_name)
        elif "ResourceCommitmentReleasedEvent" in event_types:
            summary = "%s commitment released in Org: '%s'" % (event.commitment_type, event.org_name)


#        if event.description and summary:
#            summary = summary + ". " + event.description
#        elif event.description:
#            summary = event.description
        return summary

    def get_user_notifications(self, user_info_id=''):
        """
        Get the notification request objects that are subscribed to by the user

        @param user_info_id str

        @retval notifications list of NotificationRequest objects
        """

        if user_info_id in self.user_info:
            notifications = self.user_info[user_info_id]['notifications']

            log.debug("Got %s notifications, for the user: %s", len(notifications), user_info_id)

            # remove notifications that have expired; iterate over a copy so that
            # removing items does not skip elements of the original list
            for notif in notifications[:]:
                if notif.temporal_bounds.end_datetime != '':
                    log.debug("removing notification: %s", notif)
                    notifications.remove(notif)

            return notifications

#            ret = IonObject(OT.ComputedListValue)
#
#            if notifications:
#                ret.value = notifications
#                ret.status = ComputedValueAvailability.PROVIDED
#            else:
#                ret.status = ComputedValueAvailability.NOTAVAILABLE
#            return ret
#        else:
#            return None

    def create_worker(self, number_of_workers=1):
        """
        Creates notification workers

        @param number_of_workers int
        @retval pids list

        """

        pids = []

        for n in xrange(number_of_workers):

            process_definition = ProcessDefinition( name='notification_worker_%s' % n)

            process_definition.executable = {
                'module': 'ion.processes.data.transforms.notification_worker',
                'class':'NotificationWorker'
            }
            process_definition_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

            # ------------------------------------------------------------------------------------
            # Process Spawning
            # ------------------------------------------------------------------------------------

            pid2 = self.process_dispatcher.create_process(process_definition_id)

            #@todo put in a configuration
            configuration = {}
            configuration['process'] = dict({
                'name': 'notification_worker_%s' % n,
                'type':'simple',
                'queue_name': 'notification_worker_queue'
            })

            pid  = self.process_dispatcher.schedule_process(
                process_definition_id,
                configuration = configuration,
                process_id=pid2
            )

            pids.append(pid)

        return pids

    def process_batch(self, start_time = '', end_time = ''):
        """
        This method is launched when a process_batch event is received. The user info dictionary maintained
        by the User Notification Service is used to query the event repository for all events for a particular
        user that have occurred in a provided time interval, and then an email is sent to the user containing
        the digest of all the events.

        @param start_time int milliseconds
        @param end_time int milliseconds
        """
        self.smtp_client = setting_up_smtp_client()

        if end_time <= start_time:
            return

        for user_id, value in self.user_info.iteritems():

            notifications = value['notifications']
            notifications_disabled = value['notifications_disabled']
            notifications_daily_digest = value['notifications_daily_digest']

            # Ignore users who do NOT want batch notifications or who have disabled the delivery switch.
            # If notification preferences have not been set for the user, no daily digest is sent either.
            if notifications_disabled or not notifications_daily_digest:
                continue

            events_for_message = []

            search_time = "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (start_time, end_time)

            for notification in notifications:
                # If the notification request has expired, then do not use it in the search
                if notification.temporal_bounds.end_datetime:
                    continue

                if notification.origin:
                    search_origin = 'search "origin" is "%s" from "events_index"' % notification.origin
                else:
                    search_origin = 'search "origin" is "*" from "events_index"'

                if notification.origin_type:
                    search_origin_type= 'search "origin_type" is "%s" from "events_index"' % notification.origin_type
                else:
                    search_origin_type= 'search "origin_type" is "*" from "events_index"'

                if notification.event_type:
                    search_event_type = 'search "type_" is "%s" from "events_index"' % notification.event_type
                else:
                    search_event_type = 'search "type_" is "*" from "events_index"'

                search_string = search_time + ' and ' + search_origin + ' and ' + search_origin_type + ' and ' + search_event_type
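                # For illustration, with only an origin filter set this composes to
                # (placeholder values):
                #   SEARCH 'ts_created' VALUES FROM <start_time> TO <end_time> FROM 'events_index'
                #   and search "origin" is "<origin>" from "events_index"
                #   and search "origin_type" is "*" from "events_index"
                #   and search "type_" is "*" from "events_index"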

                # get the list of ids corresponding to the events
                log.debug('process_batch  search_string: %s', search_string)
                ret_vals = self.discovery.parse(search_string)

                events_for_message.extend(self.datastore.read_mult(ret_vals))

            log.debug("Found following events of interest to user, %s: %s", user_id, events_for_message)

            # send a notification email to each user using a _send_email() method
            if events_for_message:
                self.format_and_send_email(events_for_message = events_for_message,
                                            user_id = user_id,
                                            smtp_client=self.smtp_client)

        self.smtp_client.quit()


    def format_and_send_email(self, events_for_message = None, user_id = None, smtp_client = None):
        """
        Format the message for a particular user containing information about the events they are to be notified about

        @param events_for_message list
        @param user_id str
        """

        message = str(events_for_message)
        log.debug("The user, %s, will get the following events in his batch notification email: %s", user_id, message)

        msg_body = ''
        count = 1

        for event in events_for_message:

            ts_created = _convert_to_human_readable(event.ts_created)

            msg_body += string.join(("\r\n",
                                     "Event %s: %s" %  (count, event),
                                     "",
                                     "Originator: %s" %  event.origin,
                                     "",
                                     "Description: %s" % (event.description or "Not provided"),
                                     "",
                                     "ts_created: %s" %  ts_created,
                                     "\r\n",
                                     "------------------------"
                                     "\r\n"))
            count += 1

        msg_body += "You received this notification from ION because you asked to be " +\
                    "notified about this event from this source. " +\
                    "To modify or remove notifications about this event, " +\
                    "please access My Notifications Settings in the ION Web UI. " +\
                    "Do not reply to this email.  This email address is not monitored " +\
                    "and the emails will not be read. \r\n "


        log.debug("The email has the following message body: %s", msg_body)

        msg_subject = "(SysName: " + get_sys_name() + ") ION event "

        self.send_batch_email(  msg_body = msg_body,
            msg_subject = msg_subject,
            msg_recipient=self.user_info[user_id]['user_contact'].email,
            smtp_client=smtp_client )


    def send_batch_email(self, msg_body = None, msg_subject = None, msg_recipient = None, smtp_client = None):
        """
        Send the email

        @param msg_body str
        @param msg_subject str
        @param msg_recipient str
        @param smtp_client object
        """

        msg = MIMEText(msg_body)
        msg['Subject'] = msg_subject
        msg['From'] = self.ION_NOTIFICATION_EMAIL_ADDRESS
        msg['To'] = msg_recipient
        log.debug("UNS sending batch (digest) email from %s to %s" , self.ION_NOTIFICATION_EMAIL_ADDRESS, msg_recipient)

        smtp_sender = CFG.get_safe('server.smtp.sender')

        smtp_client.sendmail(smtp_sender, [msg_recipient], msg.as_string())
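
    def _example_create_smtp_client(self):
        # Illustrative sketch only (not part of the original service): one way the
        # smtp_client handed to send_batch_email() could be built from configuration.
        # The 'server.smtp.host'/'server.smtp.port' keys are assumptions, not settings
        # shown above; only 'server.smtp.sender' appears in the code in this example.
        import smtplib
        smtp_host = CFG.get_safe('server.smtp.host', 'localhost')
        smtp_port = CFG.get_safe('server.smtp.port', 25)
        return smtplib.SMTP(smtp_host, smtp_port)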

    def update_user_info_object(self, user_id, new_notification):
        """
        Update the UserInfo object, replacing the stored copy of the passed-in notification
        (matched by _id) with the new notification.

        @param user_id str
        @param new_notification NotificationRequest
        """

        #------------------------------------------------------------------------------------
        # read the user
        #------------------------------------------------------------------------------------

        user = self.clients.resource_registry.read(user_id)

        if not user:
            raise BadRequest("No user with the provided user_id: %s" % user_id)

        for item in user.variables:
            if type(item) is dict and item.has_key('name') and item['name'] == 'notifications':
                for notif in item['value']:
                    if notif._id == new_notification._id:
                        log.debug("came here for updating notification")
                        notifications = item['value']
                        notifications.remove(notif)
                        notifications.append(new_notification)
                break
            else:
                log.warning('Invalid variables attribute on UserInfo instance. UserInfo: %s', user)


        #------------------------------------------------------------------------------------
        # update the resource registry
        #------------------------------------------------------------------------------------

        log.debug("user.variables::: %s", user.variables)

        self.clients.resource_registry.update(user)

        return user
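
    def _example_user_variables_layout(self):
        # Illustrative sketch (not part of the original service) of the UserInfo.variables
        # layout that update_user_info_object() above and load_user_info() below both assume.
        # The values here are made-up placeholders, not real resources.
        return [
            {'name': 'notifications', 'value': []},   # list of NotificationRequest objects
            {'name': 'notifications_daily_digest', 'value': False},
            {'name': 'notifications_disabled', 'value': False},
        ]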


    def _get_subscriptions(self, resource_id='', include_nonactive=False):
        """
        This method is used to get the subscriptions to a data product. It returns a list of NotificationRequest
        objects whose origin is set to this data product, so that every user who asked to be notified about events
        from this data product, and their subscriptions, can be found.

        @param resource_id
        @param include_nonactive
        @return notification_requests []

        """

        search_origin = 'search "origin" is "%s" from "resources_index"' % resource_id
        ret_vals = self.discovery.parse(search_origin)

        log.debug("Using discovery with search_string: %s", search_origin)
        log.debug("_get_subscriptions() got ret_vals: %s", ret_vals )

        notifications_all = set()
        notifications_active = set()

        object_ids = []
        for item in ret_vals:
            if item['_type'] == 'NotificationRequest':
                object_ids.append(item['_id'])

        notifs = self.clients.resource_registry.read_mult(object_ids)

        log.debug("Got %s notifications here. But they include both active and past notifications", len(notifs))

        if include_nonactive:
            # Add active or retired notification
            notifications_all.update(notifs)
        else:
            for notif in notifs:
                log.debug("Got the end_datetime here: notif.temporal_bounds.end_datetime = %s", notif.temporal_bounds.end_datetime)
                if notif.temporal_bounds.end_datetime == '':
                    log.debug("removing the notification: %s", notif._id)
                    # Add the active notification
                    notifications_active.add(notif)

        if include_nonactive:
            return list(notifications_all)
        else:
            return list(notifications_active)

    def get_subscriptions(self, resource_id='', user_id = '', include_nonactive=False):
        """
        This method first finds all notification requests for the given resource. Then, if a user_id is provided,
        it filters them down to the notifications that this user is associated with.
        """

        # Get the notifications whose origin field has the provided resource_id
        notifs = self._get_subscriptions(resource_id=resource_id, include_nonactive=include_nonactive)

        log.debug("For include_nonactive= %s, UNS fetched the following the notifications subscribed to the resource_id: %s --> %s. "
                      "They are %s in number", include_nonactive,resource_id, notifs, len(notifs))

        if not user_id:
            return notifs

        notifications = []

        # Now find the users who subscribed to the above notifications
        #todo Right now looking at assocs in a loop which is not efficient to find the users linked to these notifications
        # todo(contd) Need to use a more efficient way later
        for notif in notifs:
            notif_id = notif._id
            # Find if the user is associated with this notification request
            ids, _ = self.clients.resource_registry.find_subjects( subject_type = RT.UserInfo, object=notif_id, predicate=PRED.hasNotification, id_only=True)
            log.debug("Got the following users: %s, associated with the notification: %s", ids, notif_id)

            if ids and user_id in ids:
                notifications.append(notif)

        log.debug("For include_nonactive = %s, UNS fetched the following %s notifications subscribed to %s --> %s", include_nonactive,len(notifications),user_id, notifications)

        return notifications
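
    def _example_filter_notifications_by_user(self, notifs, user_id):
        # Illustrative sketch only, prompted by the todo above: instead of one
        # find_subjects() call per notification, the hasNotification associations could be
        # fetched once and filtered in memory. That find_associations() can be queried by
        # predicate alone, and that association objects expose .s (subject id) and .o
        # (object id), are assumptions about the resource registry, not behavior shown here.
        assocs = self.clients.resource_registry.find_associations(predicate=PRED.hasNotification, id_only=False)
        notif_ids_for_user = set([assoc.o for assoc in assocs if assoc.s == user_id])
        return [notif for notif in notifs if notif._id in notif_ids_for_user]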

    def get_subscriptions_attribute(self, resource_id='', user_id = '', include_nonactive=False):
        retval = self.get_subscriptions(resource_id=resource_id, user_id=user_id, include_nonactive=include_nonactive)
        container = ComputedListValue(value=retval)
        return container


#    def get_users_who_subscribed(self, resource_id='', include_nonactive=False):
#
#        # Get the notifications whose origin field has the provided resource_id
#        notifications = self.get_subscriptions(resource_id, include_nonactive)
#
#        # Now find the users who subscribed to the above notifications
#        #todo Right now looking at assocs in a loop which is not efficient to find the users linked to these notifications
#        # todo(contd) Need to use a more efficient way later
#
#        user_ids = set()
#        for notif in notifications:
#            notif_id = notif._id
#            # Find the users who are associated with this notification request
#            ids, _ = self.clients.resource_registry.find_subjects( subject_type = RT.UserInfo, object=notif_id, predicate=PRED.hasNotification, id_only=True)
#            user_ids.add(ids)
#
#        return user_ids

    def _notification_in_notifications(self, notification = None, notifications = None):

        for id, notif in notifications.iteritems():
            if notif.name == notification.name and \
            notif.origin == notification.origin and \
            notif.origin_type == notification.origin_type and \
            notif.event_type == notification.event_type:
                return id
        return None

    def _update_notification_in_notifications_dict(self, new_notification = None, notifications = None ):

        for id, notif in notifications.iteritems():
            if id == new_notification._id:
                notifications.pop(id)
                notifications[id] = new_notification
                break


    def load_user_info(self):
        '''
        Method to load the user info dictionary used by the notification workers and the UNS

        @retval user_info dict
        '''

        users, _ = self.clients.resource_registry.find_resources(restype= RT.UserInfo)

        user_info = {}

        if not users:
            return {}

        for user in users:
            notifications = []
            notifications_disabled = False
            notifications_daily_digest = False


            for variable in user.variables:
                if type(variable) is dict and variable.has_key('name'):
                    if variable['name'] == 'notifications':
                        notifications = variable['value']

                    if variable['name'] == 'notifications_daily_digest':
                        notifications_daily_digest = variable['value']

                    if variable['name'] == 'notifications_disabled':
                        notifications_disabled = variable['value']

                else:
                    log.warning('Invalid variables attribute on UserInfo instance. UserInfo: %s', user)

            user_info[user._id] = { 'user_contact' : user.contact, 'notifications' : notifications,
                                    'notifications_daily_digest' : notifications_daily_digest, 'notifications_disabled' : notifications_disabled}

        return user_info
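
# Illustrative sketch (the real helper is not shown in this example): ts_created on ION
# events is typically a millisecond epoch timestamp stored as a string, so the
# _convert_to_human_readable() used in format_and_send_email() could look roughly like
# this. The exact output format is an assumption.
def _convert_to_human_readable(ts_created):
    import datetime
    seconds = float(ts_created) / 1000.0
    return datetime.datetime.utcfromtimestamp(seconds).strftime('%Y-%m-%d %H:%M:%S (UTC)')
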
Example #15
    def run_even_odd_transform(self):
        '''
        This example script runs a chained three way transform:
            B
        A <
            C
        Where A is the even_odd transform (generates a stream of even and odd numbers from input)
        and B and C are the basic transforms that receive even and odd input
        '''
        pubsub_cli = PubsubManagementServiceClient(node=self.container.node)
        tms_cli = TransformManagementServiceClient(node=self.container.node)
        procd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        #-------------------------------
        # Process Definition
        #-------------------------------
        # Create the process definition for the basic transform
        process_definition = IonObject(RT.ProcessDefinition,
                                       name='basic_transform_definition')
        process_definition.executable = {
            'module': 'ion.processes.data.transforms.transform_example',
            'class': 'TransformExample'
        }
        basic_transform_definition_id = procd_cli.create_process_definition(
            process_definition=process_definition)

        # Create The process definition for the TransformEvenOdd
        process_definition = IonObject(RT.ProcessDefinition,
                                       name='basic_transform_definition')
        process_definition.executable = {
            'module': 'ion.processes.data.transforms.transform_example',
            'class': 'TransformEvenOdd'
        }
        evenodd_transform_definition_id = procd_cli.create_process_definition(
            process_definition=process_definition)

        #-------------------------------
        # Streams
        #-------------------------------
        input_stream_id = pubsub_cli.create_stream(name='input_stream',
                                                   original=True)

        even_stream_id = pubsub_cli.create_stream(name='even_stream',
                                                  original=True)

        odd_stream_id = pubsub_cli.create_stream(name='odd_stream',
                                                 original=True)

        #-------------------------------
        # Subscriptions
        #-------------------------------

        query = StreamQuery(stream_ids=[input_stream_id])
        input_subscription_id = pubsub_cli.create_subscription(
            query=query, exchange_name='input_queue')

        query = StreamQuery(stream_ids=[even_stream_id])
        even_subscription_id = pubsub_cli.create_subscription(
            query=query, exchange_name='even_queue')

        query = StreamQuery(stream_ids=[odd_stream_id])
        odd_subscription_id = pubsub_cli.create_subscription(
            query=query, exchange_name='odd_queue')

        #-------------------------------
        # Launch the EvenOdd Transform
        #-------------------------------

        evenodd_id = tms_cli.create_transform(
            name='even_odd',
            in_subscription_id=input_subscription_id,
            out_streams={
                'even': even_stream_id,
                'odd': odd_stream_id
            },
            process_definition_id=evenodd_transform_definition_id,
            configuration={})
        tms_cli.activate_transform(evenodd_id)

        #-------------------------------
        # Launch the Even Processing Transform
        #-------------------------------

        even_transform_id = tms_cli.create_transform(
            name='even_transform',
            in_subscription_id=even_subscription_id,
            process_definition_id=basic_transform_definition_id,
            configuration={})
        tms_cli.activate_transform(even_transform_id)

        #-------------------------------
        # Launch the Odd Processing Transform
        #-------------------------------

        odd_transform_id = tms_cli.create_transform(
            name='odd_transform',
            in_subscription_id=odd_subscription_id,
            process_definition_id=basic_transform_definition_id,
            configuration={})
        tms_cli.activate_transform(odd_transform_id)

        #-------------------------------
        # Spawn the Streaming Producer
        #-------------------------------

        id_p = self.container.spawn_process(
            'myproducer', 'ion.processes.data.transforms.transform_example',
            'TransformExampleProducer', {
                'process': {
                    'type': 'stream_process',
                    'publish_streams': {
                        'out_stream': input_stream_id
                    }
                },
                'stream_producer': {
                    'interval': 4000
                }
            })
        self.container.proc_manager.procs[id_p].start()
Example #16
    def run_basic_transform(self):
        ''' Runs a basic example of a transform. It chains two transforms together, each adding 1 to its input.

        Producer -> A -> B
        Producer generates a number every four seconds and publishes it on the 'ctd_output_stream'
          the producer is acting as a CTD or instrument in this example.
        A is a basic transform that increments its input and publishes it on the 'transform_output' stream.
        B is a basic transform that receives input.
        All transforms write logging data to 'FS.TEMP/transform_output' so you can visually see activity of the transforms
        '''

        pubsub_cli = PubsubManagementServiceClient(node=self.container.node)
        tms_cli = TransformManagementServiceClient(node=self.container.node)
        procd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        #-------------------------------
        # Process Definition
        #-------------------------------

        process_definition = IonObject(RT.ProcessDefinition,
                                       name='transform_process_definition')
        process_definition.executable = {
            'module': 'ion.processes.data.transforms.transform_example',
            'class': 'TransformExample'
        }
        process_definition_id = procd_cli.create_process_definition(
            process_definition)

        #-------------------------------
        # First Transform
        #-------------------------------

        # Create a dummy output stream from a 'ctd' instrument
        ctd_output_stream_id = pubsub_cli.create_stream(
            name='ctd_output_stream', original=True)

        # Create the subscription to the ctd_output_stream
        query = StreamQuery(stream_ids=[ctd_output_stream_id])
        ctd_subscription_id = pubsub_cli.create_subscription(
            query=query, exchange_name='ctd_output')

        # Create an output stream for the transform
        transform_output_stream_id = pubsub_cli.create_stream(
            name='transform_output', original=True)

        configuration = {}

        # Launch the first transform process
        transform_id = tms_cli.create_transform(
            name='basic_transform',
            in_subscription_id=ctd_subscription_id,
            out_streams={'output': transform_output_stream_id},
            process_definition_id=process_definition_id,
            configuration=configuration)
        tms_cli.activate_transform(transform_id)

        #-------------------------------
        # Second Transform
        #-------------------------------

        # Create a SUBSCRIPTION to this output stream for the second transform
        query = StreamQuery(stream_ids=[transform_output_stream_id])
        second_subscription_id = pubsub_cli.create_subscription(
            query=query, exchange_name='final_output')

        # Create a final output stream
        final_output_id = pubsub_cli.create_stream(name='final_output',
                                                   original=True)

        configuration = {}

        second_transform_id = tms_cli.create_transform(
            name='second_transform',
            in_subscription_id=second_subscription_id,
            out_streams={'output': final_output_id},
            process_definition_id=process_definition_id,
            configuration=configuration)
        tms_cli.activate_transform(second_transform_id)

        #-------------------------------
        # Producer (Sample Input)
        #-------------------------------

        # Create a producing example process
        id_p = self.container.spawn_process(
            'myproducer', 'ion.processes.data.transforms.transform_example',
            'TransformExampleProducer', {
                'process': {
                    'type': 'stream_process',
                    'publish_streams': {
                        'out_stream': ctd_output_stream_id
                    }
                },
                'stream_producer': {
                    'interval': 4000
                }
            })
        self.container.proc_manager.procs[id_p].start()
Example #17
    def test_dm_integration(self):
        '''
        test_salinity_transform
        Test full DM Services Integration
        '''
        cc = self.container
        assertions = self.assertTrue

        #-----------------------------
        # Copy below here to run as a script (don't forget the imports of course!)
        #-----------------------------

        # Create some service clients...
        pubsub_management_service = PubsubManagementServiceClient(node=cc.node)
        ingestion_management_service = IngestionManagementServiceClient(
            node=cc.node)
        dataset_management_service = DatasetManagementServiceClient(
            node=cc.node)
        data_retriever_service = DataRetrieverServiceClient(node=cc.node)
        transform_management_service = TransformManagementServiceClient(
            node=cc.node)
        process_dispatcher = ProcessDispatcherServiceClient(node=cc.node)

        # declare some handy variables

        datastore_name = 'test_dm_integration'

        ###
        ### In the beginning there were two stream definitions...
        ###
        # create a stream definition for the data from the ctd simulator
        ctd_stream_def = SBE37_CDM_stream_definition()
        ctd_stream_def_id = pubsub_management_service.create_stream_definition(
            container=ctd_stream_def, name='Simulated CTD data')

        # create a stream definition for the data from the salinity Transform
        sal_stream_def_id = pubsub_management_service.create_stream_definition(
            container=SalinityTransform.outgoing_stream_def,
            name='Scalar Salinity data stream')

        ###
        ### And two process definitions...
        ###
        # one for the ctd simulator...
        producer_definition = ProcessDefinition()
        producer_definition.executable = {
            'module': 'ion.processes.data.ctd_stream_publisher',
            'class': 'SimpleCtdPublisher'
        }

        ctd_sim_procdef_id = process_dispatcher.create_process_definition(
            process_definition=producer_definition)

        # one for the salinity transform
        producer_definition = ProcessDefinition()
        producer_definition.executable = {
            'module': 'ion.processes.data.transforms.ctd.ctd_L2_salinity',
            'class': 'SalinityTransform'
        }

        salinity_transform_procdef_id = process_dispatcher.create_process_definition(
            process_definition=producer_definition)

        #---------------------------
        # Set up ingestion - this is an operator concern - not done by SA in a deployed system
        #---------------------------
        # Configure ingestion using one worker, ingesting to the test_dm_integration datastore with the SCIDATA profile
        log.debug('Calling create_ingestion_configuration')
        ingestion_configuration_id = ingestion_management_service.create_ingestion_configuration(
            exchange_point_id='science_data',
            couch_storage=CouchStorage(datastore_name=datastore_name,
                                       datastore_profile='SCIDATA'),
            number_of_workers=1)
        #
        ingestion_management_service.activate_ingestion_configuration(
            ingestion_configuration_id=ingestion_configuration_id)

        #---------------------------
        # Set up the producer (CTD Simulator)
        #---------------------------

        # Create the stream
        ctd_stream_id = pubsub_management_service.create_stream(
            stream_definition_id=ctd_stream_def_id)

        # Set up the datasets
        ctd_dataset_id = dataset_management_service.create_dataset(
            stream_id=ctd_stream_id,
            datastore_name=datastore_name,
            view_name='datasets/stream_join_granule')

        # Configure ingestion of this dataset
        ctd_dataset_config_id = ingestion_management_service.create_dataset_configuration(
            dataset_id=ctd_dataset_id,
            archive_data=True,
            archive_metadata=True,
            ingestion_configuration_id=
            ingestion_configuration_id,  # you need to know the ingestion configuration id!
        )
        # Hold onto ctd_dataset_config_id if you want to stop/start ingestion of that dataset by the ingestion service

        #---------------------------
        # Set up the salinity transform
        #---------------------------

        # Create the stream
        sal_stream_id = pubsub_management_service.create_stream(
            stream_definition_id=sal_stream_def_id)

        # Set up the datasets
        sal_dataset_id = dataset_management_service.create_dataset(
            stream_id=sal_stream_id,
            datastore_name=datastore_name,
            view_name='datasets/stream_join_granule')

        # Configure ingestion of the salinity as a dataset
        sal_dataset_config_id = ingestion_management_service.create_dataset_configuration(
            dataset_id=sal_dataset_id,
            archive_data=True,
            archive_metadata=True,
            ingestion_configuration_id=
            ingestion_configuration_id,  # you need to know the ingestion configuration id!
        )
        # Hold onto sal_dataset_config_id if you want to stop/start ingestion of that dataset by the ingestion service

        # Create a subscription as input to the transform
        sal_transform_input_subscription_id = pubsub_management_service.create_subscription(
            query=StreamQuery(stream_ids=[
                ctd_stream_id,
            ]),
            exchange_name='salinity_transform_input'
        )  # how do we make these names??? i.e. Should they be anonymous?

        # create the salinity transform
        sal_transform_id = transform_management_service.create_transform(
            name='example salinity transform',
            in_subscription_id=sal_transform_input_subscription_id,
            out_streams={
                'output': sal_stream_id,
            },
            process_definition_id=salinity_transform_procdef_id,
            # no configuration needed at this time...
        )
        # start the transform - for a test case it makes sense to do it before starting the producer but it is not required
        transform_management_service.activate_transform(
            transform_id=sal_transform_id)

        # Start the ctd simulator to produce some data
        configuration = {
            'process': {
                'stream_id': ctd_stream_id,
            }
        }
        ctd_sim_pid = process_dispatcher.schedule_process(
            process_definition_id=ctd_sim_procdef_id,
            configuration=configuration)

        ###
        ### Make a subscriber in the test to listen for salinity data
        ###
        salinity_subscription_id = pubsub_management_service.create_subscription(
            query=StreamQuery([
                sal_stream_id,
            ]),
            exchange_name='salinity_test',
            name="test salinity subscription",
        )

        pid = cc.spawn_process(name='dummy_process_for_test',
                               module='pyon.ion.process',
                               cls='SimpleProcess',
                               config={})
        dummy_process = cc.proc_manager.procs[pid]

        subscriber_registrar = StreamSubscriberRegistrar(process=dummy_process,
                                                         node=cc.node)

        result = gevent.event.AsyncResult()
        results = []

        def message_received(message, headers):
            log.warn('Salinity data received!')
            results.append(message)
            if len(results) > 3:
                result.set(True)

        subscriber = subscriber_registrar.create_subscriber(
            exchange_name='salinity_test', callback=message_received)
        subscriber.start()

        # after the queue has been created it is safe to activate the subscription
        pubsub_management_service.activate_subscription(
            subscription_id=salinity_subscription_id)

        # Assert that we have received data
        assertions(result.get(timeout=10))

        # stop the flow parse the messages...
        process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

        for message in results:

            psd = PointSupplementStreamParser(
                stream_definition=SalinityTransform.outgoing_stream_def,
                stream_granule=message)

            # Test the handy info method for the names of fields in the stream def
            assertions('salinity' in psd.list_field_names())

            # you have to know the name of the coverage in stream def
            salinity = psd.get_values('salinity')

            import numpy

            assertions(isinstance(salinity, numpy.ndarray))

            assertions(numpy.nanmin(salinity) >
                       0.0)  # salinity should always be greater than 0
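
        # Illustrative cleanup sketch (not part of the original test): once the messages have
        # been checked, the test subscriber and subscription could be torn down. That
        # subscriber.stop() mirrors subscriber.start(), and that deactivate_subscription()
        # mirrors activate_subscription(), are assumptions about the pubsub API here.
        subscriber.stop()
        pubsub_management_service.deactivate_subscription(
            subscription_id=salinity_subscription_id)
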
class TestBulkIngest(IonIntegrationTestCase):

    EDA_MOD = 'ion.agents.data.external_dataset_agent'
    EDA_CLS = 'ExternalDatasetAgent'


    def setUp(self):
        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Now create client to DataAcquisitionManagementService
        self.client = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
        self.dams_client = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsub_client = PubsubManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
        self.data_retriever    = DataRetrieverServiceClient(node=self.container.node)

        self._container_client = ContainerAgentClient(node=self.container.node, name=self.container.name)

        # Data async and subscription  TODO: Replace with new subscriber
        self._finished_count = None
        #TODO: Switch to gevent.queue.Queue
        self._async_finished_result = AsyncResult()
        self._finished_events_received = []
        self._finished_event_subscriber = None
        self._start_finished_event_subscriber()
        self.addCleanup(self._stop_finished_event_subscriber)


        self.DVR_CONFIG = {}
        self.DVR_CONFIG = {
            'dvr_mod' : 'ion.agents.data.handlers.slocum_data_handler',
            'dvr_cls' : 'SlocumDataHandler',
            }

        self._setup_resources()

        self.agent_config = {
            'driver_config' : self.DVR_CONFIG,
            'stream_config' : {},
            'agent'         : {'resource_id': self.EDA_RESOURCE_ID},
            'test_mode' : True
        }

        datasetagent_instance_obj = IonObject(RT.ExternalDatasetAgentInstance,  name='ExternalDatasetAgentInstance1', description='external data agent instance',
                                              handler_module=self.EDA_MOD, handler_class=self.EDA_CLS,
                                              dataset_driver_config=self.DVR_CONFIG, dataset_agent_config=self.agent_config )
        self.dataset_agent_instance_id = self.dams_client.create_external_dataset_agent_instance(external_dataset_agent_instance=datasetagent_instance_obj,
                                                                                                 external_dataset_agent_id=self.datasetagent_id, external_dataset_id=self.EDA_RESOURCE_ID)


        #TG: Setup/configure the granule logger to log granules as they're published
        pid = self.dams_client.start_external_dataset_agent_instance(self.dataset_agent_instance_id)

        dataset_agent_instance_obj= self.dams_client.read_external_dataset_agent_instance(self.dataset_agent_instance_id)
        print 'TestBulkIngest: Dataset agent instance obj: = ', dataset_agent_instance_obj


        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient('datasetagentclient', name=pid,  process=FakeProcess())
        log.debug(" test_createTransformsThenActivateInstrument:: got ia client %s", str(self._ia_client))



    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module':'ion.processes.data.stream_granule_logger',
            'class':'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process':{
                'stream_id':stream_id,
                }
        }
        pid = self.processdispatchclient.schedule_process(process_definition_id= logger_procdef_id, configuration=configuration)

        return pid

    def _start_finished_event_subscriber(self):

        def consume_event(*args,**kwargs):
            log.debug('EventSubscriber event received: %s', str(args[0]) )
            if args[0].description == 'TestingFinished':
                log.debug('TestingFinished event received')
                self._finished_events_received.append(args[0])
                if self._finished_count and self._finished_count == len(self._finished_events_received):
                    log.debug('Finishing test...')
                    self._async_finished_result.set(len(self._finished_events_received))
                    log.debug('Called self._async_finished_result.set({0})'.format(len(self._finished_events_received)))

        self._finished_event_subscriber = EventSubscriber(event_type='DeviceEvent', callback=consume_event)
        self._finished_event_subscriber.start()

    def _stop_finished_event_subscriber(self):
        if self._finished_event_subscriber:
            self._finished_event_subscriber.stop()
            self._finished_event_subscriber = None


    def tearDown(self):
        pass


    @unittest.skip('Update to agent refactor.')
    def test_slocum_data_ingest(self):

        HIST_CONSTRAINTS_1 = {}
        # Test instrument driver execute interface to start and stop streaming mode.
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)

        cmd = AgentCommand(command='initialize')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.INACTIVE)

        cmd = AgentCommand(command='go_active')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.IDLE)

        cmd = AgentCommand(command='run')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.OBSERVATORY)




        # Make sure the polling interval is appropriate for a test
        params = {
            'POLLING_INTERVAL': 3
        }
        self._ia_client.set_param(params)

        self._finished_count = 1

        cmd = AgentCommand(command='acquire_data')
        self._ia_client.execute(cmd)

        # Assert that data was received
        self._async_finished_result.get(timeout=15)

        self.assertTrue(len(self._finished_events_received) >= 1)

        cmd = AgentCommand(command='reset')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)


        #todo enable after Luke's mods to retrieve; right now the time axis must be called 'time'
        #        replay_granule = self.data_retriever.retrieve_last_data_points(self.dataset_id, 10)

        #        rdt = RecordDictionaryTool.load_from_granule(replay_granule)
        #
        #        comp = rdt['date_pattern'] == numpy.arange(10) + 10
        #
        #        log.debug("TestBulkIngest: comp: %s", comp)
        #
        #        self.assertTrue(comp.all())

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)


    def _setup_resources(self):

        self.loggerpids = []

        # Create DataProvider
        dprov = ExternalDataProvider(institution=Institution(), contact=ContactInformation())
        dprov.contact.name = 'Christopher Mueller'
        dprov.contact.email = '*****@*****.**'

        # Create DataSetModel
        dataset_model = ExternalDatasetModel(name='slocum_model')
        dataset_model.datset_type = 'SLOCUM'
        dataset_model_id = self.dams_client.create_external_dataset_model(dataset_model)

        # Create ExternalDataset
        ds_name = 'slocum_test_dataset'
        dset = ExternalDataset(name=ds_name, dataset_description=DatasetDescription(), update_description=UpdateDescription(), contact=ContactInformation())


        dset.dataset_description.parameters['base_url'] = 'test_data/slocum/'
        dset.dataset_description.parameters['list_pattern'] = 'ru05-2012-021-0-0-sbd.dat'
        dset.dataset_description.parameters['date_pattern'] = '%Y %j'
        dset.dataset_description.parameters['date_extraction_pattern'] = 'ru05-([\d]{4})-([\d]{3})-\d-\d-sbd.dat'
        dset.dataset_description.parameters['temporal_dimension'] = None
        dset.dataset_description.parameters['zonal_dimension'] = None
        dset.dataset_description.parameters['meridional_dimension'] = None
        dset.dataset_description.parameters['vertical_dimension'] = None
        dset.dataset_description.parameters['variables'] = [
            'c_wpt_y_lmc',
            'sci_water_cond',
            'm_y_lmc',
            'u_hd_fin_ap_inflection_holdoff',
            'sci_m_present_time',
            'm_leakdetect_voltage_forward',
            'sci_bb3slo_b660_scaled',
            'c_science_send_all',
            'm_gps_status',
            'm_water_vx',
            'm_water_vy',
            'c_heading',
            'sci_fl3slo_chlor_units',
            'u_hd_fin_ap_gain',
            'm_vacuum',
            'u_min_water_depth',
            'm_gps_lat',
            'm_veh_temp',
            'f_fin_offset',
            'u_hd_fin_ap_hardover_holdoff',
            'c_alt_time',
            'm_present_time',
            'm_heading',
            'sci_bb3slo_b532_scaled',
            'sci_fl3slo_cdom_units',
            'm_fin',
            'x_cycle_overrun_in_ms',
            'sci_water_pressure',
            'u_hd_fin_ap_igain',
            'sci_fl3slo_phyco_units',
            'm_battpos',
            'sci_bb3slo_b470_scaled',
            'm_lat',
            'm_gps_lon',
            'sci_ctd41cp_timestamp',
            'm_pressure',
            'c_wpt_x_lmc',
            'c_ballast_pumped',
            'x_lmc_xy_source',
            'm_lon',
            'm_avg_speed',
            'sci_water_temp',
            'u_pitch_ap_gain',
            'm_roll',
            'm_tot_num_inflections',
            'm_x_lmc',
            'u_pitch_ap_deadband',
            'm_final_water_vy',
            'm_final_water_vx',
            'm_water_depth',
            'm_leakdetect_voltage',
            'u_pitch_max_delta_battpos',
            'm_coulomb_amphr',
            'm_pitch',
            ]



        ## Create the external dataset
        ds_id = self.dams_client.create_external_dataset(external_dataset=dset, external_dataset_model_id=dataset_model_id)
        ext_dprov_id = self.dams_client.create_external_data_provider(external_data_provider=dprov)

        # Register the ExternalDataset
        dproducer_id = self.dams_client.register_external_data_set(external_dataset_id=ds_id)

        ## Create the dataset agent
        datasetagent_obj = IonObject(RT.ExternalDatasetAgent,  name='ExternalDatasetAgent1', description='external data agent', handler_module=self.EDA_MOD, handler_class=self.EDA_CLS )
        self.datasetagent_id = self.dams_client.create_external_dataset_agent(external_dataset_agent=datasetagent_obj, external_dataset_model_id=dataset_model_id)

        # Generate the data product and associate it to the ExternalDataset
        pdict = DatasetManagementService.get_parameter_dictionary_by_name('ctd_parsed_param_dict')
        streamdef_id = self.pubsub_client.create_stream_definition(name="temp", parameter_dictionary_id=pdict.identifier)

        tdom, sdom = time_series_domain()
        tdom = tdom.dump()
        sdom = sdom.dump()


        dprod = IonObject(RT.DataProduct,
                          name='slocum_parsed_product',
                          description='parsed slocum product',
                          temporal_domain = tdom,
                          spatial_domain = sdom)

        self.dproduct_id = self.dataproductclient.create_data_product(data_product=dprod,
                                                                      stream_definition_id=streamdef_id)

        self.dams_client.assign_data_product(input_resource_id=ds_id, data_product_id=self.dproduct_id)

        #save the incoming slocum data
        self.dataproductclient.activate_data_product_persistence(self.dproduct_id)

        stream_ids, assn = self.rrclient.find_objects(subject=self.dproduct_id, predicate=PRED.hasStream, object_type=RT.Stream, id_only=True)
        stream_id = stream_ids[0]

        dataset_id, assn = self.rrclient.find_objects(subject=self.dproduct_id, predicate=PRED.hasDataset, object_type=RT.Dataset, id_only=True)
        self.dataset_id = dataset_id[0]

        pid = self.create_logger('slocum_parsed_product', stream_id )
        self.loggerpids.append(pid)

        self.DVR_CONFIG['dh_cfg'] = {
            'TESTING':True,
            'stream_id':stream_id,
            'param_dictionary':pdict.dump(),
            'data_producer_id':dproducer_id, #CBM: Should this be put in the main body of the config - with mod & cls?
            'max_records':20,
            }

        # Create the logger for receiving publications
        #self.create_stream_and_logger(name='slocum',stream_id=stream_id)
        # Create agent config.
        self.EDA_RESOURCE_ID = ds_id
        self.EDA_NAME = ds_name
Example #19
    def test_usgs_integration(self):
        '''
        test_usgs_integration
        Test full DM Services Integration using usgs
        '''
        cc = self.container
        assertions = self.assertTrue

        #-----------------------------
        # Copy below here
        #-----------------------------
        pubsub_management_service = PubsubManagementServiceClient(node=cc.node)
        ingestion_management_service = IngestionManagementServiceClient(node=cc.node)
        dataset_management_service = DatasetManagementServiceClient(node=cc.node)
        data_retriever_service = DataRetrieverServiceClient(node=cc.node)
        transform_management_service = TransformManagementServiceClient(node=cc.node)
        process_dispatcher = ProcessDispatcherServiceClient(node=cc.node)

        process_list = []
        datasets = []

        datastore_name = 'test_usgs_integration'


        #---------------------------
        # Set up ingestion
        #---------------------------
        # Configure ingestion using eight workers, ingesting to the test_usgs_integration datastore with the SCIDATA profile
        log.debug('Calling create_ingestion_configuration')
        ingestion_configuration_id = ingestion_management_service.create_ingestion_configuration(
            exchange_point_id='science_data',
            couch_storage=CouchStorage(datastore_name=datastore_name,datastore_profile='SCIDATA'),
            number_of_workers=8
        )
        #
        ingestion_management_service.activate_ingestion_configuration(
            ingestion_configuration_id=ingestion_configuration_id)

        usgs_stream_def = USGS_stream_definition()

        stream_def_id = pubsub_management_service.create_stream_definition(container=usgs_stream_def, name='Junk definition')


        #---------------------------
        # Set up the producers (CTD Simulators)
        #---------------------------
        # Launch two simulated CTD producers
        for iteration in xrange(2):
            # Make a stream to output on

            stream_id = pubsub_management_service.create_stream(stream_definition_id=stream_def_id)

            #---------------------------
            # Set up the datasets
            #---------------------------
            dataset_id = dataset_management_service.create_dataset(
                stream_id=stream_id,
                datastore_name=datastore_name,
                view_name='datasets/stream_join_granule'
            )
            # Keep track of the datasets
            datasets.append(dataset_id)

            stream_policy_id = ingestion_management_service.create_dataset_configuration(
                dataset_id = dataset_id,
                archive_data = True,
                archive_metadata = True,
                ingestion_configuration_id = ingestion_configuration_id
            )


            producer_definition = ProcessDefinition()
            producer_definition.executable = {
                'module':'ion.agents.eoi.handler.usgs_stream_publisher',
                'class':'UsgsPublisher'
            }
            configuration = {
                'process':{
                    'stream_id':stream_id,
                    }
            }
            procdef_id = process_dispatcher.create_process_definition(process_definition=producer_definition)
            log.debug('LUKE_DEBUG: procdef_id: %s', procdef_id)
            pid = process_dispatcher.schedule_process(process_definition_id=procdef_id, configuration=configuration)


            # Keep track, we'll kill 'em later.
            process_list.append(pid)
        # Get about 4 seconds of data
        time.sleep(4)

        #---------------------------
        # Stop producing data
        #---------------------------

        for process in process_list:
            process_dispatcher.cancel_process(process)

        #----------------------------------------------
        # The replay and the transform, a love story.
        #----------------------------------------------
        # Happy Valentines to the clever coder who catches the above!

        transform_definition = ProcessDefinition()
        transform_definition.executable = {
            'module':'ion.processes.data.transforms.transform_example',
            'class':'TransformCapture'
        }
        transform_definition_id = process_dispatcher.create_process_definition(process_definition=transform_definition)

        dataset_id = datasets.pop() # Just need one for now
        replay_id, stream_id = data_retriever_service.define_replay(dataset_id=dataset_id)

        #--------------------------------------------
        # I'm Selling magazine subscriptions here!
        #--------------------------------------------

        subscription = pubsub_management_service.create_subscription(query=StreamQuery(stream_ids=[stream_id]),
            exchange_name='transform_capture_point')

        #--------------------------------------------
        # Start the transform (capture)
        #--------------------------------------------
        transform_id = transform_management_service.create_transform(
            name='capture_transform',
            in_subscription_id=subscription,
            process_definition_id=transform_definition_id
        )

        transform_management_service.activate_transform(transform_id=transform_id)

        #--------------------------------------------
        # BEGIN REPLAY!
        #--------------------------------------------

        data_retriever_service.start_replay(replay_id=replay_id)

        #--------------------------------------------
        # Lets get some boundaries
        #--------------------------------------------

        bounds = dataset_management_service.get_dataset_bounds(dataset_id=dataset_id)
def upload_data(dataproduct_id):
    upload_folder = FileSystem.get_url(FS.TEMP,'uploads')
    try:

        rr_client = ResourceRegistryServiceProcessClient(process=service_gateway_instance)
        object_store = Container.instance.object_store

        try:
            rr_client.read(str(dataproduct_id))
        except BadRequest:
            raise BadRequest('Unknown DataProduct ID %s' % dataproduct_id)

        # required fields
        upload = request.files['file']  # <input type=file name="file">

        # determine filetype
        filetype = _check_magic(upload)
        upload.seek(0)  # return to beginning for save

        if upload and filetype is not None:

            # upload file - run filename through werkzeug.secure_filename
            filename = secure_filename(upload.filename)
            path = os.path.join(upload_folder, filename)
            upload_time = time.time()
            upload.save(path)

            # register upload
            file_upload_context = {
                # TODO add dataproduct_id
                'name':'User uploaded file %s' % filename,
                'filename':filename,
                'filetype':filetype,
                'path':path,
                'upload_time':upload_time,
                'status':'File uploaded to server'
            }
            fuc_id, _ = object_store.create_doc(file_upload_context)

            # client to process dispatch
            pd_client = ProcessDispatcherServiceClient()

            # create process definition
            process_definition = ProcessDefinition(
                name='upload_data_processor',
                executable={
                    'module':'ion.processes.data.upload.upload_data_processing',
                    'class':'UploadDataProcessing'
                }
            )
            process_definition_id = pd_client.create_process_definition(process_definition)
            # create process
            process_id = pd_client.create_process(process_definition_id)
            #schedule process
            config = DotDict()
            config.process.fuc_id = fuc_id
            config.process.dp_id = dataproduct_id
            pid = pd_client.schedule_process(process_definition_id, process_id=process_id, configuration=config)
            log.info('UploadDataProcessing process created %s' % pid)
            # response - only FileUploadContext ID and determined filetype for UX display
            resp = {'fuc_id': fuc_id}
            return gateway_json_response(resp)

        raise BadRequest('Invalid Upload')

    except Exception as e:
        return build_error_response(e)
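
# Illustrative sketch (the real _check_magic() helper is not shown here): one way the
# filetype used by upload_data()/upload_qc() could be detected, by peeking at the file's
# magic bytes. The ZIP-or-CSV assumption mirrors the comments in those functions; the
# exact return values are assumptions.
def _check_magic(upload):
    header = upload.read(4)
    upload.seek(0)  # rewind so the caller can still save the file
    if header.startswith('PK\x03\x04'):  # standard ZIP local file header
        return 'ZIP'
    return 'CSV' if header else None
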
class ProcessStateGateIntTest(IonIntegrationTestCase):
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')

        self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        self.process_definition = IonObject(OT.ProcessDefinition,
                                            name='test_process')
        self.process_definition.executable = {
            'module': 'ion.services.cei.test.test_process_state_gate',
            'class': 'TestProcess'
        }
        self.process_definition_id = self.pd_cli.create_process_definition(
            self.process_definition)
        self.event_queue = queue.Queue()

        self.process_schedule = IonObject(OT.ProcessSchedule)
        self.process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        self.pid = self.pd_cli.create_process(self.process_definition_id)


        self.event_sub = EventSubscriber(event_type="ProcessLifecycleEvent",
                                         callback=self._event_callback,
                                         origin=self.pid,
                                         origin_type="DispatchedProcess")

    def tearDown(self):
        #stop subscriber if its running
        if self.event_sub and self.event_sub._cbthread:
            self.event_sub.stop()
        self._stop_container()

    def _event_callback(self, event, *args, **kwargs):
        self.event_queue.put(event)

    def latest_event(self, timeout=10):
        # get latest event from our local event subscriber
        try:
            event = self.event_queue.get(timeout=timeout)
        except Empty:
            event = None
        return event

    def await_state(self, state, timeout=10):

        print "Emptying event queue"
        while True:
            event = self.latest_event(0)
            if event:
                print "State %s from event %s" % (event.state, event)
            else:
                break
        self.event_sub.start()

        #wait for process state
        print "Setting up %s gate" % ProcessStateEnum._str_map[state]
        gate = ProcessStateGate(self.pd_cli.read_process, self.pid, state)
        print "Waiting"
        ret = gate.await(timeout)

        print "Await got %s" % ret
        event = self.latest_event(timeout=1)

        # check false positives/negatives
        if ret and gate._get_first_chance() is None and event is None:
            self.fail(
                "ProcessStateGate got an event that EventSubscriber didnt....")

        self.event_sub.stop()

        if (not ret) or gate._get_last_chance():
            if event and event.state == state:
                self.fail(
                    "EventSubscriber got state event %s for process %s, ProcessStateGate missed it"
                    % (ProcessStateEnum._str_map[event.state], self.pid))

        return ret

    def process_start(self):
        print "Scheduling process...",
        self.pd_cli.schedule_process(self.process_definition_id,
                                     self.process_schedule,
                                     configuration={},
                                     process_id=self.pid)
        print "Done scheduling process."

    def process_stop(self):
        print "STOPPING process...",
        self.pd_cli.cancel_process(self.pid)
        print "Done stopping process"

    def test_process_state_gate(self):

        self.assertFalse(
            self.await_state(ProcessStateEnum.RUNNING, 1),
            "The process was reported as spawned, but we didn't yet")

        print "GOING TO ACTUALLY START PROCESS NOW"
        spawn_later(1, self.process_start)

        self.assertTrue(self.await_state(ProcessStateEnum.RUNNING),
                        "The process did not spawn")

        self.assertFalse(
            self.await_state(ProcessStateEnum.TERMINATED, 1),
            "The process claims to have terminated, but we didn't kill it")

        print "communicating with the process to make sure it is really running"
        test_client = TestClient()
        for i in range(5):
            self.assertEqual(i + 1, test_client.count(timeout=10))

        spawn_later(1, self.process_stop)

        self.assertTrue(
            self.await_state(ProcessStateEnum.TERMINATED),
            "The process failed to be reported as terminated when it was terminated"
        )

        self.assertFalse(
            self.await_state(ProcessStateEnum.RUNNING, 1),
            "The process was reported as spawned, but we killed it")
Example #22
class ProcessDispatcherServiceIntTest(IonIntegrationTestCase):
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')

        self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        self.process_definition = ProcessDefinition(name='test_process')
        self.process_definition.executable = {
            'module': 'ion.services.cei.test.test_process_dispatcher',
            'class': 'TestProcess'
        }
        self.process_definition_id = self.pd_cli.create_process_definition(
            self.process_definition)
        self.event_queue = queue.Queue()

        self.event_sub = None

    def tearDown(self):
        if self.event_sub:
            self.event_sub.deactivate()

    def _event_callback(self, event, *args, **kwargs):
        self.event_queue.put(event)

    def subscribe_events(self, origin):
        self.event_sub = EventSubscriber(event_type="ProcessLifecycleEvent",
                                         callback=self._event_callback,
                                         origin=origin,
                                         origin_type="DispatchedProcess")
        self.event_sub.activate()

    def await_state_event(self, pid, state):
        event = self.event_queue.get(timeout=5)
        log.debug("Got event: %s", event)
        self.assertEqual(event.origin, pid)
        self.assertEqual(event.state, state)
        return event

    def test_create_schedule_cancel(self):
        process_schedule = ProcessSchedule()

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.subscribe_events(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
                                            process_schedule,
                                            configuration={},
                                            process_id=pid)
        self.assertEqual(pid, pid2)

        self.await_state_event(pid, ProcessStateEnum.SPAWN)

        # now try communicating with the process to make sure it is really running
        test_client = TestClient()
        for i in range(5):
            # this timeout may be too low
            self.assertEqual(i + 1, test_client.count(timeout=1))

        # kill the process and start it again
        self.pd_cli.cancel_process(pid)

        self.await_state_event(pid, ProcessStateEnum.TERMINATE)

        oldpid = pid

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.subscribe_events(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
                                            process_schedule,
                                            configuration={},
                                            process_id=pid)
        self.assertEqual(pid, pid2)
        self.assertNotEqual(oldpid, pid)

        self.await_state_event(pid, ProcessStateEnum.SPAWN)

        for i in range(5):
            # this timeout may be too low
            self.assertEqual(i + 1, test_client.count(timeout=1))

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.await_state_event(pid, ProcessStateEnum.TERMINATE)

    def test_schedule_bad_config(self):

        process_schedule = ProcessSchedule()

        # a non-JSON-serializable IonObject
        o = ProcessTarget()

        with self.assertRaises(BadRequest) as ar:
            self.pd_cli.schedule_process(self.process_definition_id,
                                         process_schedule,
                                         configuration={"bad": o})
        self.assertTrue(ar.exception.message.startswith("bad configuration"))
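# Condensed sketch of the ProcessDispatcher lifecycle exercised by the test above, using
# only calls that appear in this example (imports omitted; the client and object classes
# are the same ones used by the test, and `node` is assumed to come from a running
# container; outside a test you would watch ProcessLifecycleEvents rather than assume
# the process is up immediately):
pd_cli = ProcessDispatcherServiceClient(node=node)

definition = ProcessDefinition(name='example_process')
definition.executable = {
    'module': 'ion.services.cei.test.test_process_dispatcher',
    'class': 'TestProcess'
}
definition_id = pd_cli.create_process_definition(definition)

pid = pd_cli.create_process(definition_id)
pd_cli.schedule_process(definition_id, ProcessSchedule(),
                        configuration={}, process_id=pid)

# ... interact with the running process ...

pd_cli.cancel_process(pid)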
Example #23
class TestBulkIngest(IonIntegrationTestCase):

    EDA_MOD = 'ion.agents.data.external_dataset_agent'
    EDA_CLS = 'ExternalDatasetAgent'


    def setUp(self):
        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Now create client to DataAcquisitionManagementService
        self.client = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
        self.dams_client = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsub_client = PubsubManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
        self.data_retriever    = DataRetrieverServiceClient(node=self.container.node)

        self._container_client = ContainerAgentClient(node=self.container.node, name=self.container.name)

        # Data async and subscription  TODO: Replace with new subscriber
        self._finished_count = None
        #TODO: Switch to gevent.queue.Queue
        self._async_finished_result = AsyncResult()
        self._finished_events_received = []
        self._finished_event_subscriber = None
        self._start_finished_event_subscriber()
        self.addCleanup(self._stop_finished_event_subscriber)


        self.DVR_CONFIG = {
            'dvr_mod': 'ion.agents.data.handlers.slocum_data_handler',
            'dvr_cls': 'SlocumDataHandler',
        }

        self._setup_resources()

        self.agent_config = {
            'driver_config' : self.DVR_CONFIG,
            'stream_config' : {},
            'agent'         : {'resource_id': self.EDA_RESOURCE_ID},
            'test_mode' : True
        }

        datasetagent_instance_obj = IonObject(RT.ExternalDatasetAgentInstance,
                                              name='ExternalDatasetAgentInstance1',
                                              description='external data agent instance',
                                              handler_module=self.EDA_MOD,
                                              handler_class=self.EDA_CLS,
                                              dataset_driver_config=self.DVR_CONFIG,
                                              dataset_agent_config=self.agent_config)
        self.dataset_agent_instance_id = self.dams_client.create_external_dataset_agent_instance(
            external_dataset_agent_instance=datasetagent_instance_obj,
            external_dataset_agent_id=self.datasetagent_id,
            external_dataset_id=self.EDA_RESOURCE_ID)


        #TG: Setup/configure the granule logger to log granules as they're published
        pid = self.dams_client.start_external_dataset_agent_instance(self.dataset_agent_instance_id)

        dataset_agent_instance_obj= self.dams_client.read_external_dataset_agent_instance(self.dataset_agent_instance_id)
        print 'TestBulkIngest: Dataset agent instance obj: = ', dataset_agent_instance_obj


        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient('datasetagentclient', name=pid,  process=FakeProcess())
        log.debug(" test_createTransformsThenActivateInstrument:: got ia client %s", str(self._ia_client))



    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module':'ion.processes.data.stream_granule_logger',
            'class':'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process':{
                'stream_id':stream_id,
                }
        }
        pid = self.processdispatchclient.schedule_process(process_definition_id= logger_procdef_id, configuration=configuration)

        return pid

    def _start_finished_event_subscriber(self):

        def consume_event(*args,**kwargs):
            log.debug('EventSubscriber event received: %s', str(args[0]) )
            if args[0].description == 'TestingFinished':
                log.debug('TestingFinished event received')
                self._finished_events_received.append(args[0])
                if self._finished_count and self._finished_count == len(self._finished_events_received):
                    log.debug('Finishing test...')
                    self._async_finished_result.set(len(self._finished_events_received))
                    log.debug('Called self._async_finished_result.set({0})'.format(len(self._finished_events_received)))

        self._finished_event_subscriber = EventSubscriber(event_type='DeviceEvent', callback=consume_event)
        self._finished_event_subscriber.start()

    def _stop_finished_event_subscriber(self):
        if self._finished_event_subscriber:
            self._finished_event_subscriber.stop()
            self._finished_event_subscriber = None


    def tearDown(self):
        pass


    @unittest.skip('Update to agent refactor.')
    def test_slocum_data_ingest(self):

        HIST_CONSTRAINTS_1 = {}
        # Test instrument driver execute interface to start and stop streaming mode.
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)

        cmd = AgentCommand(command='initialize')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.INACTIVE)

        cmd = AgentCommand(command='go_active')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.IDLE)

        cmd = AgentCommand(command='run')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.OBSERVATORY)




        # Make sure the polling interval is appropriate for a test
        params = {
            'POLLING_INTERVAL': 3
        }
        self._ia_client.set_param(params)

        self._finished_count = 1

        cmd = AgentCommand(command='acquire_data')
        self._ia_client.execute(cmd)

        # Assert that data was received
        self._async_finished_result.get(timeout=15)

        self.assertTrue(len(self._finished_events_received) >= 1)

        cmd = AgentCommand(command='reset')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)


        #todo enable after Luke's mods to retrieve; right now the time axis must be called 'time'
        #        replay_granule = self.data_retriever.retrieve_last_data_points(self.dataset_id, 10)

        #        rdt = RecordDictionaryTool.load_from_granule(replay_granule)
        #
        #        comp = rdt['date_pattern'] == numpy.arange(10) + 10
        #
        #        log.debug("TestBulkIngest: comp: %s", comp)
        #
        #        self.assertTrue(comp.all())

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)


    def _setup_resources(self):

        self.loggerpids = []

        # Create DataProvider
        dprov = ExternalDataProvider(institution=Institution(), contact=ContactInformation())
        dprov.contact.name = 'Christopher Mueller'
        dprov.contact.email = '*****@*****.**'

        # Create DataSetModel
        dataset_model = ExternalDatasetModel(name='slocum_model')
        dataset_model.datset_type = 'SLOCUM'
        dataset_model_id = self.dams_client.create_external_dataset_model(dataset_model)

        # Create ExternalDataset
        ds_name = 'slocum_test_dataset'
        dset = ExternalDataset(name=ds_name, dataset_description=DatasetDescription(), update_description=UpdateDescription(), contact=ContactInformation())


        dset.dataset_description.parameters['base_url'] = 'test_data/slocum/'
        dset.dataset_description.parameters['list_pattern'] = 'ru05-2012-021-0-0-sbd.dat'
        dset.dataset_description.parameters['date_pattern'] = '%Y %j'
        dset.dataset_description.parameters['date_extraction_pattern'] = 'ru05-([\d]{4})-([\d]{3})-\d-\d-sbd.dat'
        dset.dataset_description.parameters['temporal_dimension'] = None
        dset.dataset_description.parameters['zonal_dimension'] = None
        dset.dataset_description.parameters['meridional_dimension'] = None
        dset.dataset_description.parameters['vertical_dimension'] = None
        dset.dataset_description.parameters['variables'] = [
            'c_wpt_y_lmc',
            'sci_water_cond',
            'm_y_lmc',
            'u_hd_fin_ap_inflection_holdoff',
            'sci_m_present_time',
            'm_leakdetect_voltage_forward',
            'sci_bb3slo_b660_scaled',
            'c_science_send_all',
            'm_gps_status',
            'm_water_vx',
            'm_water_vy',
            'c_heading',
            'sci_fl3slo_chlor_units',
            'u_hd_fin_ap_gain',
            'm_vacuum',
            'u_min_water_depth',
            'm_gps_lat',
            'm_veh_temp',
            'f_fin_offset',
            'u_hd_fin_ap_hardover_holdoff',
            'c_alt_time',
            'm_present_time',
            'm_heading',
            'sci_bb3slo_b532_scaled',
            'sci_fl3slo_cdom_units',
            'm_fin',
            'x_cycle_overrun_in_ms',
            'sci_water_pressure',
            'u_hd_fin_ap_igain',
            'sci_fl3slo_phyco_units',
            'm_battpos',
            'sci_bb3slo_b470_scaled',
            'm_lat',
            'm_gps_lon',
            'sci_ctd41cp_timestamp',
            'm_pressure',
            'c_wpt_x_lmc',
            'c_ballast_pumped',
            'x_lmc_xy_source',
            'm_lon',
            'm_avg_speed',
            'sci_water_temp',
            'u_pitch_ap_gain',
            'm_roll',
            'm_tot_num_inflections',
            'm_x_lmc',
            'u_pitch_ap_deadband',
            'm_final_water_vy',
            'm_final_water_vx',
            'm_water_depth',
            'm_leakdetect_voltage',
            'u_pitch_max_delta_battpos',
            'm_coulomb_amphr',
            'm_pitch',
            ]



        ## Create the external dataset
        ds_id = self.dams_client.create_external_dataset(external_dataset=dset, external_dataset_model_id=dataset_model_id)
        ext_dprov_id = self.dams_client.create_external_data_provider(external_data_provider=dprov)

        # Register the ExternalDataset
        dproducer_id = self.dams_client.register_external_data_set(external_dataset_id=ds_id)

        ## Create the dataset agent
        datasetagent_obj = IonObject(RT.ExternalDatasetAgent,  name='ExternalDatasetAgent1', description='external data agent', handler_module=self.EDA_MOD, handler_class=self.EDA_CLS )
        self.datasetagent_id = self.dams_client.create_external_dataset_agent(external_dataset_agent=datasetagent_obj, external_dataset_model_id=dataset_model_id)

        # Generate the data product and associate it to the ExternalDataset
        pdict = DatasetManagementService.get_parameter_dictionary_by_name('ctd_parsed_param_dict')
        streamdef_id = self.pubsub_client.create_stream_definition(name="temp", parameter_dictionary_id=pdict.identifier)

        tdom, sdom = time_series_domain()
        tdom = tdom.dump()
        sdom = sdom.dump()


        dprod = IonObject(RT.DataProduct,
                          name='slocum_parsed_product',
                          description='parsed slocum product',
                          temporal_domain = tdom,
                          spatial_domain = sdom)

        self.dproduct_id = self.dataproductclient.create_data_product(data_product=dprod,
                                                                      stream_definition_id=streamdef_id)

        self.dams_client.assign_data_product(input_resource_id=ds_id, data_product_id=self.dproduct_id)

        #save the incoming slocum data
        self.dataproductclient.activate_data_product_persistence(self.dproduct_id)
        self.addCleanup(self.dataproductclient.suspend_data_product_persistence, self.dproduct_id)
        
        stream_ids, assn = self.rrclient.find_objects(subject=self.dproduct_id, predicate=PRED.hasStream, object_type=RT.Stream, id_only=True)
        stream_id = stream_ids[0]

        dataset_id, assn = self.rrclient.find_objects(subject=self.dproduct_id, predicate=PRED.hasDataset, object_type=RT.Dataset, id_only=True)
        self.dataset_id = dataset_id[0]

        pid = self.create_logger('slocum_parsed_product', stream_id )
        self.loggerpids.append(pid)

        self.DVR_CONFIG['dh_cfg'] = {
            'TESTING':True,
            'stream_id':stream_id,
            'param_dictionary':pdict.dump(),
            'data_producer_id':dproducer_id, #CBM: Should this be put in the main body of the config - with mod & cls?
            'max_records':20,
            }

        # Create the logger for receiving publications
        #self.create_stream_and_logger(name='slocum',stream_id=stream_id)
        # Create agent config.
        self.EDA_RESOURCE_ID = ds_id
        self.EDA_NAME = ds_name
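# Condensed, hedged sketch of the resource wiring done in _setup_resources above. Only
# calls that appear in this example are used; dams, pubsub and dpms stand for the
# DataAcquisitionManagement, PubsubManagement and DataProductManagement clients created
# in setUp, and dset/pdict/dprod are built exactly as in the listing:
model_id     = dams.create_external_dataset_model(ExternalDatasetModel(name='slocum_model'))
ds_id        = dams.create_external_dataset(external_dataset=dset,
                                            external_dataset_model_id=model_id)
dproducer_id = dams.register_external_data_set(external_dataset_id=ds_id)

streamdef_id = pubsub.create_stream_definition(name='temp',
                                               parameter_dictionary_id=pdict.identifier)
dproduct_id  = dpms.create_data_product(data_product=dprod,
                                        stream_definition_id=streamdef_id)
dams.assign_data_product(input_resource_id=ds_id, data_product_id=dproduct_id)
dpms.activate_data_product_persistence(dproduct_id)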
Example #24
class BulkIngestBase(object):
    """
    awkward, non-obvious test class!  subclasses will implement data-specific methods and
    this test class will parse sample file and assert data was read.

    test_data_ingest: create resources and call...
        start_agent: starts agent and then call...
            start_listener: starts listeners for data, including one that when granule is received calls...
                get_retrieve_client: asserts that callback had some data

    See replacement TestPreloadThenLoadDataset.  A little more declarative and straight-forward, but much slower (requires preload).
    """
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.pubsub_management    = PubsubManagementServiceClient()
        self.dataset_management   = DatasetManagementServiceClient()
        self.data_product_management = DataProductManagementServiceClient()
        self.data_acquisition_management = DataAcquisitionManagementServiceClient()
        self.data_retriever = DataRetrieverServiceClient()
        self.process_dispatch_client = ProcessDispatcherServiceClient(node=self.container.node)
        self.resource_registry       = self.container.resource_registry
        self.context_ids = self.build_param_contexts()
        self.setup_resources()

    def build_param_contexts(self):
        raise NotImplementedError('build_param_contexts must be implemented in child classes')

    def create_external_dataset(self):
        raise NotImplementedError('create_external_dataset must be implemented in child classes')

    def get_dvr_config(self):
        raise NotImplementedError('get_dvr_config must be implemented in child classes')

    def get_retrieve_client(self, dataset_id=''):
        raise NotImplementedError('get_retrieve_client must be implemented in child classes')

    def test_data_ingest(self):
        self.pdict_id = self.create_parameter_dict(self.name)
        self.stream_def_id = self.create_stream_def(self.name, self.pdict_id)
        self.data_product_id = self.create_data_product(self.name, self.description, self.stream_def_id)
        self.dataset_id = self.get_dataset_id(self.data_product_id)
        self.stream_id, self.route = self.get_stream_id_and_route(self.data_product_id)
        self.external_dataset_id = self.create_external_dataset()
        self.data_producer_id = self.register_external_dataset(self.external_dataset_id)
        self.start_agent()

    def create_parameter_dict(self, name=''):
        return self.dataset_management.create_parameter_dictionary(name=name, parameter_context_ids=self.context_ids, temporal_context='time')

    def create_stream_def(self, name='', pdict_id=''):
        return self.pubsub_management.create_stream_definition(name=name, parameter_dictionary_id=pdict_id)

    def create_data_product(self, name='', description='', stream_def_id=''):
        tdom, sdom = time_series_domain()
        tdom = tdom.dump()
        sdom = sdom.dump()
        dp_obj = DataProduct(
            name=name,
            description=description,
            processing_level_code='Parsed_Canonical',
            temporal_domain=tdom,
            spatial_domain=sdom)

        data_product_id = self.data_product_management.create_data_product(data_product=dp_obj, stream_definition_id=stream_def_id)
        self.data_product_management.activate_data_product_persistence(data_product_id)
        self.addCleanup(self.data_product_management.suspend_data_product_persistence, data_product_id)
        return data_product_id

    def register_external_dataset(self, external_dataset_id=''):
        return self.data_acquisition_management.register_external_data_set(external_dataset_id=external_dataset_id)

    def get_dataset_id(self, data_product_id=''):
        dataset_ids, assocs = self.resource_registry.find_objects(subject=data_product_id, predicate='hasDataset', id_only=True)
        return dataset_ids[0]

    def get_stream_id_and_route(self, data_product_id):
        stream_ids, _ = self.resource_registry.find_objects(data_product_id, PRED.hasStream, RT.Stream, id_only=True)
        stream_id = stream_ids[0]
        route = self.pubsub_management.read_stream_route(stream_id)
        #self.create_logger(self.name, stream_id)
        return stream_id, route

    def start_agent(self):
        agent_config = {
            'driver_config': self.get_dvr_config(),
            'stream_config': {},
            'agent': {'resource_id': self.external_dataset_id},
            'test_mode': True
        }

        self._ia_pid = self.container.spawn_process(
            name=self.EDA_NAME,
            module=self.EDA_MOD,
            cls=self.EDA_CLS,
            config=agent_config)

        self._ia_client = ResourceAgentClient(self.external_dataset_id, process=FakeProcess())

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command=DriverEvent.START_AUTOSAMPLE)
        self._ia_client.execute_resource(command=cmd)

        self.start_listener(self.dataset_id)

    def stop_agent(self):
        cmd = AgentCommand(command=DriverEvent.STOP_AUTOSAMPLE)
        self._ia_client.execute_resource(cmd)
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        self._ia_client.execute_agent(cmd)
        self.container.terminate_process(self._ia_pid)

    def start_listener(self, dataset_id=''):
        dataset_modified = Event()
        #callback to use retrieve to get data from the coverage
        def cb(*args, **kwargs):
            self.get_retrieve_client(dataset_id=dataset_id)

        #callback to keep execution going once dataset has been fully ingested
        def cb2(*args, **kwargs):
            dataset_modified.set()

        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id)
        es.start()

        es2 = EventSubscriber(event_type=OT.DeviceCommonLifecycleEvent, callback=cb2, origin='BaseDataHandler._acquire_sample')
        es2.start()

        self.addCleanup(es.stop)
        self.addCleanup(es2.stop)

        #let it go for up to 120 seconds, then stop the agent and reset it
        dataset_modified.wait(120)
        self.stop_agent()

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module':'ion.processes.data.stream_granule_logger',
            'class':'StreamGranuleLogger'
        }

        logger_procdef_id = self.process_dispatch_client.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process':{
                'stream_id':stream_id,
                }
        }
        pid = self.process_dispatch_client.schedule_process(process_definition_id=logger_procdef_id, configuration=configuration)

        return pid
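# The base class above defers its data-specific hooks to subclasses. A hypothetical,
# minimal subclass sketch (class name, config keys and values are illustrative
# assumptions, not taken from the source; resource and client names are the ones
# imported and created earlier in this document):
class HypotheticalSlocumIngestTest(BulkIngestBase, IonIntegrationTestCase):
    EDA_NAME = 'ExampleDatasetAgent'
    EDA_MOD  = 'ion.agents.data.external_dataset_agent'
    EDA_CLS  = 'ExternalDatasetAgent'

    def setup_resources(self):
        self.name = 'example_ingest'
        self.description = 'hypothetical bulk ingest test'

    def build_param_contexts(self):
        # return a list of ParameterContext resource ids for the dataset's variables
        return []

    def create_external_dataset(self):
        model = ExternalDatasetModel(name='example_model')
        model_id = self.data_acquisition_management.create_external_dataset_model(model)
        dset = ExternalDataset(name='example_dataset',
                               dataset_description=DatasetDescription(),
                               update_description=UpdateDescription(),
                               contact=ContactInformation())
        dset.dataset_description.parameters['base_url'] = 'test_data/slocum/'
        return self.data_acquisition_management.create_external_dataset(
            external_dataset=dset, external_dataset_model_id=model_id)

    def get_dvr_config(self):
        return {'dvr_mod': 'ion.agents.data.handlers.slocum_data_handler',
                'dvr_cls': 'SlocumDataHandler',
                'dh_cfg': {'TESTING': True, 'stream_id': self.stream_id, 'max_records': 20}}

    def get_retrieve_client(self, dataset_id=''):
        granule = self.data_retriever.retrieve_last_data_points(dataset_id, 10)
        self.assertIsNotNone(granule)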
Example #25
class UserNotificationService(BaseUserNotificationService):
    """
    A service that provides users with an API for CRUD operations on notifications.
    """
    def __init__(self, *args, **kwargs):
        self._schedule_ids = []
        BaseUserNotificationService.__init__(self, *args, **kwargs)

    def on_start(self):
        self.ION_NOTIFICATION_EMAIL_ADDRESS = CFG.get_safe(
            'server.smtp.sender')

        # Create an event processor
        self.event_processor = EmailEventProcessor()

        # Dictionaries that maintain information about users and their subscribed notifications
        self.user_info = {}

        # The reverse_user_info is calculated from the user_info dictionary
        self.reverse_user_info = {}

        # Get the clients
        # @TODO: Why are these not dependencies in the service YML???
        self.discovery = DiscoveryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()

        self.event_publisher = EventPublisher()
        self.datastore = self.container.datastore_manager.get_datastore(
            'events')

        self.start_time = get_ion_ts()

        #------------------------------------------------------------------------------------
        # Create an event subscriber for Reload User Info events
        #------------------------------------------------------------------------------------

        def reload_user_info(event_msg, headers):
            """
            Callback method for the subscriber to ReloadUserInfoEvent
            """

            notification_id = event_msg.notification_id
            log.debug(
                "(UNS instance) Received a ReloadNotificationEvent. The relevant notification_id is %s"
                % notification_id)

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info = calculate_reverse_user_info(
                self.user_info)

            log.debug("(UNS instance) After a reload, the user_info: %s" %
                      self.user_info)
            log.debug("(UNS instance) The recalculated reverse_user_info: %s" %
                      self.reverse_user_info)

        # the subscriber for the ReloadUserInfoEvent
        self.reload_user_info_subscriber = EventSubscriber(
            event_type=OT.ReloadUserInfoEvent,
            origin='UserNotificationService',
            callback=reload_user_info)
        self.add_endpoint(self.reload_user_info_subscriber)

    def on_quit(self):
        """
        Handles stop/terminate.

        Cleans up subscribers spawned here, terminates any scheduled tasks to the scheduler.
        """
        for sid in self._schedule_ids:
            try:
                self.clients.scheduler.cancel_timer(sid)
            except IonException as ex:
                log.info(
                    "Ignoring exception while cancelling schedule id (%s): %s: %s",
                    sid, ex.__class__.__name__, ex)

        super(UserNotificationService, self).on_quit()

    def set_process_batch_key(self, process_batch_key=''):
        """
        This method allows an operator to set the process_batch_key, a string.
        Once this method is used by the operator, the UNS will start listening for timer events
        published by the scheduler with origin = process_batch_key.

        @param process_batch_key str
        """
        def process(event_msg, headers):
            self.end_time = get_ion_ts()

            # run the process_batch() method
            self.process_batch(start_time=self.start_time,
                               end_time=self.end_time)
            self.start_time = self.end_time

        # the subscriber for the batch processing
        # To trigger the batch notification, have the scheduler create a timer with event_origin = process_batch_key
        self.batch_processing_subscriber = EventSubscriber(
            event_type=OT.TimerEvent,
            origin=process_batch_key,
            callback=process)
        self.add_endpoint(self.batch_processing_subscriber)

    def create_notification(self, notification=None, user_id=''):
        """
        Persists the provided NotificationRequest object for the specified Origin id.
        Associate the Notification resource with the user_id string.
        returned id is the internal id by which NotificationRequest will be identified
        in the data store.

        @param notification        NotificationRequest
        @param user_id             str
        @retval notification_id    str
        @throws BadRequest    if object passed has _id or _rev attribute
        """
        if not user_id:
            raise BadRequest("User id not provided.")

        log.debug(
            "Create notification called for user_id: %s, and notification: %s",
            user_id, notification)

        #---------------------------------------------------------------------------------------------------
        # Persist Notification object as a resource if it has already not been persisted
        #---------------------------------------------------------------------------------------------------

        notification_id = None
        # if the notification has already been registered, simply use the old id
        existing_user_notifications = self.get_user_notifications(
            user_info_id=user_id)
        if existing_user_notifications:
            notification_id = self._notification_in_notifications(
                notification, existing_user_notifications)

        # if the notification has not been registered yet, register it below and get the id

        temporal_bounds = TemporalBounds()
        temporal_bounds.start_datetime = get_ion_ts()
        temporal_bounds.end_datetime = ''

        if not notification_id:
            notification.temporal_bounds = temporal_bounds
            notification_id, rev = self.clients.resource_registry.create(
                notification)
        else:
            log.debug(
                "Notification object has already been created in resource registry before. No new id to be generated. notification_id: %s",
                notification_id)
            # Read the old notification already in the resource registry
            notification = self.clients.resource_registry.read(notification_id)

            # Update the temporal bounds of the old notification resource
            notification.temporal_bounds = temporal_bounds

            # Update the notification in the resource registry
            self.clients.resource_registry.update(notification)

            log.debug(
                "The temporal bounds for this resubscribed notification object with id: %s, is: %s",
                notification._id, notification.temporal_bounds)

        # Link the user and the notification with a hasNotification association
        assocs = self.clients.resource_registry.find_associations(
            subject=user_id,
            predicate=PRED.hasNotification,
            object=notification_id,
            id_only=True)

        if assocs:
            log.debug(
                "Got an already existing association: %s, between user_id: %s, and notification_id: %s",
                assocs, user_id, notification_id)
            return notification_id
        else:
            log.debug(
                "Creating association between user_id: %s, and notification_id: %s",
                user_id, notification_id)
            self.clients.resource_registry.create_association(
                user_id, PRED.hasNotification, notification_id)

        # read the registered notification request object because this has an _id and is more useful
        notification = self.clients.resource_registry.read(notification_id)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.debug(
            "(create notification) Publishing ReloadUserInfoEvent for notification_id: %s",
            notification_id)

        self.event_publisher.publish_event(
            event_type=OT.ReloadUserInfoEvent,
            origin="UserNotificationService",
            description="A notification has been created.",
            notification_id=notification_id)

        return notification_id

    def update_notification(self, notification=None, user_id=''):
        """Updates the provided NotificationRequest object.  Throws NotFound exception if
        an existing version of NotificationRequest is not found.  Throws Conflict if
        the provided NotificationRequest object is not based on the latest persisted
        version of the object.

        @param notification     NotificationRequest
        @throws BadRequest      if object does not have _id or _rev attribute
        @throws NotFound        object with specified id does not exist
        @throws Conflict        object not based on latest persisted object version
        """

        raise NotImplementedError(
            "This method needs to be worked out in terms of implementation")

#        #-------------------------------------------------------------------------------------------------------------------
#        # Get the old notification
#        #-------------------------------------------------------------------------------------------------------------------
#
#        old_notification = self.clients.resource_registry.read(notification._id)
#
#        #-------------------------------------------------------------------------------------------------------------------
#        # Update the notification in the notifications dict
#        #-------------------------------------------------------------------------------------------------------------------
#
#
#        self._update_notification_in_notifications_dict(new_notification=notification,
#                                                        notifications=self.notifications)
#        #-------------------------------------------------------------------------------------------------------------------
#        # Update the notification in the registry
#        #-------------------------------------------------------------------------------------------------------------------
#        '''
#        Since one user should not be able to update the notification request resource without the knowledge of other users
#        who have subscribed to the same notification request, we do not update the resource in the resource registry
#        '''
#
##        self.clients.resource_registry.update(notification)
#
#        #-------------------------------------------------------------------------------------------------------------------
#        # reading up the notification object to make sure we have the newly registered notification request object
#        #-------------------------------------------------------------------------------------------------------------------
#
#        notification_id = notification._id
#        notification = self.clients.resource_registry.read(notification_id)
#
#        #------------------------------------------------------------------------------------
#        # Update the UserInfo object
#        #------------------------------------------------------------------------------------
#
#        user = self.update_user_info_object(user_id, notification)
#
#        #-------------------------------------------------------------------------------------------------------------------
#        # Generate an event that can be picked by notification workers so that they can update their user_info dictionary
#        #-------------------------------------------------------------------------------------------------------------------
#        log.info("(update notification) Publishing ReloadUserInfoEvent for updated notification")
#
#        self.event_publisher.publish_event( event_type= "ReloadUserInfoEvent",
#            origin="UserNotificationService",
#            description= "A notification has been updated.",
#            notification_id = notification_id
#        )

    def read_notification(self, notification_id=''):
        """Returns the NotificationRequest object for the specified notification id.
        Throws exception if id does not match any persisted NotificationRequest
        objects.

        @param notification_id    str
        @retval notification    NotificationRequest
        @throws NotFound    object with specified id does not exist
        """
        notification = self.clients.resource_registry.read(notification_id)

        return notification

    def delete_notification(self, notification_id=''):
        """For now, permanently deletes NotificationRequest object with the specified
        id. Throws exception if id does not match any persisted NotificationRequest.

        @param notification_id    str
        @throws NotFound    object with specified id does not exist
        """

        #-------------------------------------------------------------------------------------------------------------------
        # Stop the event subscriber for the notification
        #-------------------------------------------------------------------------------------------------------------------
        notification_request = self.clients.resource_registry.read(
            notification_id)

        #-------------------------------------------------------------------------------------------------------------------
        # Update the resource registry
        #-------------------------------------------------------------------------------------------------------------------

        notification_request.temporal_bounds.end_datetime = get_ion_ts()

        self.clients.resource_registry.update(notification_request)

        #-------------------------------------------------------------------------------------------------------------------
        # Find users who are interested in the notification and update the notification in the list maintained by the UserInfo object
        #-------------------------------------------------------------------------------------------------------------------
        #        user_ids, _ = self.clients.resource_registry.find_subjects(RT.UserInfo, PRED.hasNotification, notification_id, True)
        #
        #        for user_id in user_ids:
        #            self.update_user_info_object(user_id, notification_request)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.info(
            "(delete notification) Publishing ReloadUserInfoEvent for notification_id: %s",
            notification_id)

        self.event_publisher.publish_event(
            event_type=OT.ReloadUserInfoEvent,
            origin="UserNotificationService",
            description="A notification has been deleted.",
            notification_id=notification_id)

#    def delete_notification_from_user_info(self, notification_id):
#        """
#        Helper method to delete the notification from the user_info dictionary
#
#        @param notification_id str
#        """
#
#        user_ids, assocs = self.clients.resource_registry.find_subjects(object=notification_id, predicate=PRED.hasNotification, id_only=True)
#
#        for assoc in assocs:
#            self.clients.resource_registry.delete_association(assoc)
#
#        for user_id in user_ids:
#
#            value = self.user_info[user_id]
#
#            for notif in value['notifications']:
#                if notification_id == notif._id:
#                    # remove the notification
#                    value['notifications'].remove(notif)
#
#        self.reverse_user_info = calculate_reverse_user_info(self.user_info)

    def find_events(self,
                    origin='',
                    type='',
                    min_datetime=0,
                    max_datetime=0,
                    limit=-1,
                    descending=False):
        """
        This method leverages couchdb view and simple filters. It does not use elastic search.

        Returns a list of events that match the specified search criteria. Will throw a not NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param min_datetime   int  seconds
        @param max_datetime   int  seconds
        @param limit          int         (integer limiting the number of results (0 means unlimited))
        @param descending     boolean     (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        @throws NotFound    object with specified parameters does not exist
        """
        event_tuples = []

        try:
            event_tuples = self.container.event_repository.find_events(
                event_type=type,
                origin=origin,
                start_ts=min_datetime,
                end_ts=max_datetime,
                limit=limit,
                descending=descending)
        except Exception as exc:
            log.warning(
                "The UNS find_events operation for event origin = %s and type = %s failed. Error message = %s",
                origin, type, exc.message)

        events = [item[2] for item in event_tuples]
        log.debug("(find_events) UNS found the following relevant events: %s",
                  events)

        return events

    #todo Uses ElasticSearch. Later extend this to larger search criteria
    def find_events_extended(self,
                             origin='',
                             type='',
                             min_time=0,
                             max_time=0,
                             limit=-1,
                             descending=False):
        """Uses Elastic Search. Returns a list of events that match the specified search criteria. Will throw a not NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param type           str
        @param min_time   int seconds
        @param max_time   int seconds
        @param limit          int         (integer limiting the number of results (0 means unlimited))
        @param descending     boolean     (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        @throws NotFound    object with specified parameters does not exist
        """

        query = []

        if min_time and max_time:
            query.append(
                "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'"
                % (min_time, max_time))

        if origin:
            query.append('search "origin" is "%s" from "events_index"' %
                         origin)

        if type:
            query.append('search "type_" is "%s" from "events_index"' % type)

        search_string = ' and '.join(query)

        # get the list of ids corresponding to the events
        ret_vals = self.discovery.parse(search_string)
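        # inferred from the branches below: with more than one search clause, parse() returns
        # event ids that must be read from the events datastore; a single clause returns raw
        # hits whose '_source' holds the event body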
        if len(query) > 1:
            events = self.datastore.read_mult(ret_vals)
        else:
            events = [i['_source'] for i in ret_vals]

        log.debug(
            "(find_events_extended) Discovery search returned the following event ids: %s",
            ret_vals)

        log.debug(
            "(find_events_extended) UNS found the following relevant events: %s",
            events)

        if limit > 0:
            return events[:limit]

        #todo implement time ordering: ascending or descending

        return events

    def publish_event_object(self, event=None):
        """
        This service operation would publish the given event from an event object.

        @param event    !Event
        @retval event   !Event
        """
        event = self.event_publisher.publish_event_object(event_object=event)
        log.info(
            "The publish_event_object(event) method of UNS was used to publish the event: %s",
            event)

        return event

    def publish_event(self,
                      event_type='',
                      origin='',
                      origin_type='',
                      sub_type='',
                      description='',
                      event_attrs=None):
        """
        This service operation assembles a new Event object based on event_type 
        (e.g. via the pyon Event publisher) with optional additional attributes from a event_attrs
        dict of arbitrary attributes.
        
        
        @param event_type   str
        @param origin       str
        @param origin_type  str
        @param sub_type     str
        @param description  str
        @param event_attrs  dict
        @retval event       !Event
        """
        event_attrs = event_attrs or {}

        event = self.event_publisher.publish_event(event_type=event_type,
                                                   origin=origin,
                                                   origin_type=origin_type,
                                                   sub_type=sub_type,
                                                   description=description,
                                                   **event_attrs)
        log.info(
            "The publish_event() method of UNS was used to publish an event: %s",
            event)

        return event

    def get_recent_events(self, resource_id='', limit=100):
        """
        Get recent events for use in extended resource computed attribute
        @param resource_id str
        @param limit int
        @retval ComputedListValue with value list of 4-tuple with Event objects
        """

        now = get_ion_ts()
        events = self.find_events(origin=resource_id,
                                  limit=limit,
                                  max_datetime=now,
                                  descending=True)

        ret = IonObject(OT.ComputedEventListValue)
        if events:
            ret.value = events
            ret.computed_list = [
                get_event_computed_attributes(event) for event in events
            ]
            ret.status = ComputedValueAvailability.PROVIDED
        else:
            ret.status = ComputedValueAvailability.NOTAVAILABLE

        return ret

    def get_user_notifications(self, user_info_id=''):
        """
        Get the notification request objects that are subscribed to by the user

        @param user_info_id str

        @retval notifications list of NotificationRequest objects
        """
        notifications = []
        user_notif_req_objs, _ = self.clients.resource_registry.find_objects(
            subject=user_info_id,
            predicate=PRED.hasNotification,
            object_type=RT.NotificationRequest,
            id_only=False)

        log.debug("Got %s notifications, for the user: %s",
                  len(user_notif_req_objs), user_info_id)

        for notif in user_notif_req_objs:
            # do not include notifications that have expired
            if notif.temporal_bounds.end_datetime == '':
                notifications.append(notif)

        return notifications

    def create_worker(self, number_of_workers=1):
        """
        Creates notification workers

        @param number_of_workers int
        @retval pids list

        """

        pids = []

        for n in xrange(number_of_workers):

            process_definition = ProcessDefinition(
                name='notification_worker_%s' % n)

            process_definition.executable = {
                'module': 'ion.processes.data.transforms.notification_worker',
                'class': 'NotificationWorker'
            }
            process_definition_id = self.process_dispatcher.create_process_definition(
                process_definition=process_definition)

            # ------------------------------------------------------------------------------------
            # Process Spawning
            # ------------------------------------------------------------------------------------

            pid2 = self.process_dispatcher.create_process(
                process_definition_id)

            #@todo put in a configuration
            configuration = {}
            configuration['process'] = {
                'name': 'notification_worker_%s' % n,
                'type': 'simple',
                'queue_name': 'notification_worker_queue'
            }

            pid = self.process_dispatcher.schedule_process(
                process_definition_id,
                configuration=configuration,
                process_id=pid2)

            pids.append(pid)

        return pids

    def process_batch(self, start_time='', end_time=''):
        """
        This method is launched when a process_batch event is received. The user info dictionary maintained
        by the User Notification Service is used to query the event repository for all events for a particular
        user that have occurred in a provided time interval, and then an email is sent to the user containing
        the digest of all the events.

        @param start_time int milliseconds
        @param end_time int milliseconds
        """
        self.smtp_client = setting_up_smtp_client()

        if end_time <= start_time:
            return

        for user_id, value in self.user_info.iteritems():

            notifications = self.get_user_notifications(user_info_id=user_id)
            notifications_disabled = value['notifications_disabled']
            notifications_daily_digest = value['notifications_daily_digest']

            # Ignore users who do NOT want batch notifications or who have disabled the delivery switch
            # However, if notification preferences have not been set for the user, use the default mechanism and do not bother
            if notifications_disabled or not notifications_daily_digest:
                continue

            events_for_message = []

            search_time = "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (
                start_time, end_time)

            for notification in notifications:
                # If the notification request has expired, then do not use it in the search
                if notification.temporal_bounds.end_datetime:
                    continue

                if CFG_ELASTIC_SEARCH:
                    if notification.origin:
                        search_origin = 'search "origin" is "%s" from "events_index"' % notification.origin
                    else:
                        search_origin = 'search "origin" is "*" from "events_index"'

                    if notification.origin_type:
                        search_origin_type = 'search "origin_type" is "%s" from "events_index"' % notification.origin_type
                    else:
                        search_origin_type = 'search "origin_type" is "*" from "events_index"'

                    if notification.event_type:
                        search_event_type = 'search "type_" is "%s" from "events_index"' % notification.event_type
                    else:
                        search_event_type = 'search "type_" is "*" from "events_index"'

                    search_string = search_time + ' and ' + search_origin + ' and ' + search_origin_type + ' and ' + search_event_type

                    # get the list of ids corresponding to the events
                    log.debug('process_batch  search_string: %s',
                              search_string)
                    ret_vals = self.discovery.parse(search_string)

                    events_for_message.extend(
                        self.datastore.read_mult(ret_vals))

                else:
                    # Fall back to the container's event repository when ElasticSearch is not enabled
                    event_tuples = self.container.event_repository.find_events(
                        origin=notification.origin,
                        event_type=notification.event_type,
                        start_ts=start_time,
                        end_ts=end_time)
                    events = [item[2] for item in event_tuples]

                    events_for_message.extend(events)

            log.debug("Found following events of interest to user, %s: %s",
                      user_id, events_for_message)

            # send a notification email to each user using a _send_email() method
            if events_for_message:
                self.format_and_send_email(
                    events_for_message=events_for_message,
                    user_id=user_id,
                    smtp_client=self.smtp_client)

        self.smtp_client.quit()

    def format_and_send_email(self,
                              events_for_message=None,
                              user_id=None,
                              smtp_client=None):
        """
        Format the message for a particular user containing information about the events he is to be notified about

        @param events_for_message list
        @param user_id str
        """
        message = str(events_for_message)
        log.debug(
            "The user, %s, will get the following events in his batch notification email: %s",
            user_id, message)

        msg = convert_events_to_email_message(events_for_message,
                                              self.clients.resource_registry)
        msg["Subject"] = "(SysName: " + get_sys_name() + ") ION event "
        msg["To"] = self.user_info[user_id]['user_contact'].email
        self.send_batch_email(msg, smtp_client)

    def send_batch_email(self, msg=None, smtp_client=None):
        """
        Send the email

        @param msg MIMEText object of email message
        @param smtp_client object
        """

        if msg is None:
            msg = {}
        for f in ["Subject", "To"]:
            if f not in msg:
                raise BadRequest("'%s' not in msg %s" % (f, msg))

        msg_subject = msg["Subject"]
        msg_recipient = msg["To"]

        msg['From'] = self.ION_NOTIFICATION_EMAIL_ADDRESS
        log.debug("UNS sending batch (digest) email from %s to %s",
                  self.ION_NOTIFICATION_EMAIL_ADDRESS, msg_recipient)

        smtp_sender = CFG.get_safe('server.smtp.sender')

        smtp_client.sendmail(smtp_sender, [msg_recipient], msg.as_string())

    def update_user_info_object(self, user_id, new_notification):
        """
        Update the UserInfo object. If the passed-in parameter, old_notification, is None, it does not need to remove the old notification

        @param user_id str
        @param new_notification NotificationRequest
        """

        #this is not necessary if notifications are not stored in the userinfo object
        raise NotImplementedError(
            "This method is not necessary because Notifications are not stored in the userinfo object"
        )

        #------------------------------------------------------------------------------------
        # read the user
        #------------------------------------------------------------------------------------

#        user = self.clients.resource_registry.read(user_id)
#
#        if not user:
#            raise BadRequest("No user with the provided user_id: %s" % user_id)
#
#        for item in user.variables:
#            if type(item) is dict and item.has_key('name') and item['name'] == 'notifications':
#                for notif in item['value']:
#                    if notif._id == new_notification._id:
#                        log.debug("came here for updating notification")
#                        notifications = item['value']
#                        notifications.remove(notif)
#                        notifications.append(new_notification)
#                break
#            else:
#                log.warning('Invalid variables attribute on UserInfo instance. UserInfo: %s', user)
#
#
#        #------------------------------------------------------------------------------------
#        # update the resource registry
#        #------------------------------------------------------------------------------------
#
#        log.debug("user.variables::: %s", user.variables)
#
#        self.clients.resource_registry.update(user)
#
#        return user

    def get_subscriptions(self,
                          resource_id='',
                          user_id='',
                          include_nonactive=False):
        """
        @param resource_id  a resource id (or other origin) that is the origin of events for notifications
        @param user_id  a UserInfo ID that owns the NotificationRequest
        @param include_nonactive  if False, filter to active NotificationRequest only
        Return all NotificationRequest resources where origin is given resource_id.
        """
        notif_reqs, _ = self.clients.resource_registry.find_resources_ext(
            restype=RT.NotificationRequest,
            attr_name="origin",
            attr_value=resource_id,
            id_only=False)

        log.debug(
            "Got %s active and past NotificationRequests for resource/origin %s",
            len(notif_reqs), resource_id)

        if not include_nonactive:
            notif_reqs = [
                nr for nr in notif_reqs
                if nr.temporal_bounds.end_datetime == ''
            ]
            log.debug("Filtered to %s active NotificationRequests",
                      len(notif_reqs))

        if user_id:
            # Get all NotificationRequests (ID) that are associated to given UserInfo_id
            user_notif_req_ids, _ = self.clients.resource_registry.find_objects(
                subject=user_id,
                predicate=PRED.hasNotification,
                object_type=RT.NotificationRequest,
                id_only=True)

            notif_reqs = [
                nr for nr in notif_reqs if nr._id in user_notif_req_ids
            ]
            log.debug(
                "Filtered to %s NotificationRequests associated to user %s",
                len(notif_reqs), user_id)

        return notif_reqs

    def get_subscriptions_attribute(self,
                                    resource_id='',
                                    user_id='',
                                    include_nonactive=False):
        retval = self.get_subscriptions(resource_id=resource_id,
                                        user_id=user_id,
                                        include_nonactive=include_nonactive)
        container = ComputedListValue(value=retval)
        return container

    def _notification_in_notifications(self,
                                       notification=None,
                                       notifications=None):

        for notif in notifications:
            if notif.name == notification.name and \
            notif.origin == notification.origin and \
            notif.origin_type == notification.origin_type and \
            notif.event_type == notification.event_type:
                return notif._id
        return None

    def load_user_info(self):
        """
        Method to load the user info dictionary used by the notification workers and the UNS

        @retval user_info dict
        """
        users, _ = self.clients.resource_registry.find_resources(
            restype=RT.UserInfo)

        user_info = {}

        if not users:
            return {}

        for user in users:
            notifications = []
            notifications_disabled = False
            notifications_daily_digest = False

            #retrieve all the active notifications assoc to this user
            notifications = self.get_user_notifications(user_info_id=user)
            log.debug('load_user_info notifications:   %s', notifications)

            for variable in user.variables:
                if type(variable) is dict and variable.has_key('name'):

                    if variable['name'] == 'notifications_daily_digest':
                        notifications_daily_digest = variable['value']

                    if variable['name'] == 'notifications_disabled':
                        notifications_disabled = variable['value']

                else:
                    log.warning(
                        'Invalid variables attribute on UserInfo instance. UserInfo: %s',
                        user)

            user_info[user._id] = {
                'user_contact': user.contact,
                'notifications': notifications,
                'notifications_daily_digest': notifications_daily_digest,
                'notifications_disabled': notifications_disabled
            }

        return user_info
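
    # The returned user_info dictionary is keyed by UserInfo id; each entry has the
    # shape built above:
    #   {'user_contact': <contact info from the UserInfo resource>,
    #    'notifications': <list of active NotificationRequest objects>,
    #    'notifications_daily_digest': <bool from user.variables>,
    #    'notifications_disabled': <bool from user.variables>}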

    ##
    ##
    ##  GOVERNANCE FUNCTIONS
    ##
    ##

    def check_subscription_policy(self, process, message, headers):

        try:
            gov_values = GovernanceHeaderValues(headers=headers,
                                                process=process,
                                                resource_id_required=False)

        except Inconsistent as ex:
            return False, ex.message

        if gov_values.op == 'delete_notification':
            return True, ''

        notification = message['notification']
        resource_id = notification.origin

        if notification.origin_type == RT.Org:
            org = self.clients.resource_registry.read(resource_id)
            if (has_org_role(gov_values.actor_roles, org.org_governance_name,
                             [ORG_MEMBER_ROLE])):
                return True, ''
        else:
            orgs, _ = self.clients.resource_registry.find_subjects(
                subject_type=RT.Org,
                predicate=PRED.hasResource,
                object=resource_id,
                id_only=False)
            for org in orgs:
                if (has_org_role(gov_values.actor_roles,
                                 org.org_governance_name, [ORG_MEMBER_ROLE])):
                    return True, ''

        return False, '%s(%s) has been denied since the user is not a member in any org to which the resource id %s belongs ' % (
            process.name, gov_values.op, resource_id)
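
The "active" test used by get_subscriptions above hinges on one convention: a NotificationRequest counts as active while its temporal_bounds.end_datetime is still the empty string. The short, self-contained sketch below illustrates just that predicate with hypothetical stand-in objects (Bounds and Notif are not service classes; they only mimic the attributes the filter reads):

from collections import namedtuple

# Hypothetical stand-ins mimicking only the attributes the filter reads.
Bounds = namedtuple('Bounds', 'end_datetime')
Notif = namedtuple('Notif', 'name temporal_bounds')

notif_reqs = [
    Notif(name='retired', temporal_bounds=Bounds(end_datetime='2012-07-01T00:00:00')),
    Notif(name='still_active', temporal_bounds=Bounds(end_datetime='')),
]

# Same predicate as the service code: an empty end_datetime means "never retired".
active = [nr for nr in notif_reqs if nr.temporal_bounds.end_datetime == '']
assert [nr.name for nr in active] == ['still_active']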
Example #26
0
class TestWorkflowManagementIntegration(IonIntegrationTestCase):
    def setUp(self):
        # Start container

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubclient = PubsubManagementServiceClient(
            node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.workflowclient = WorkflowManagementServiceClient(
            node=self.container.node)
        self.process_dispatcher = ProcessDispatcherServiceClient(
            node=self.container.node)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    def _create_ctd_input_stream_and_data_product(
            self, data_product_name='ctd_parsed'):

        cc = self.container
        assertions = self.assertTrue

        #-------------------------------
        # Create CTD Parsed as the initial data product
        #-------------------------------
        # create a stream definition for the data from the ctd simulator
        ctd_stream_def = SBE37_CDM_stream_definition()
        ctd_stream_def_id = self.pubsubclient.create_stream_definition(
            container=self.ctd_stream_def, name='Simulated CTD data')

        log.debug('Creating new CDM data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
                           name=data_product_name,
                           description='ctd stream test')
        try:
            ctd_parsed_data_product_id = self.dataproductclient.create_data_product(
                dp_obj, ctd_stream_def_id)
        except Exception as ex:
            self.fail("failed to create new data product: %s" % ex)

        log.debug('new ctd_parsed_data_product_id = %s',
                  ctd_parsed_data_product_id)

        #Only ever need one device for testing purposes.
        instDevice_obj, _ = self.rrclient.find_resources(
            restype=RT.InstrumentDevice, name='SBE37IMDevice')
        if instDevice_obj:
            instDevice_id = instDevice_obj[0]._id
        else:
            instDevice_obj = IonObject(RT.InstrumentDevice,
                                       name='SBE37IMDevice',
                                       description="SBE37IMDevice",
                                       serial_number="12345")
            instDevice_id = self.imsclient.create_instrument_device(
                instrument_device=instDevice_obj)

        self.damsclient.assign_data_product(
            input_resource_id=instDevice_id,
            data_product_id=ctd_parsed_data_product_id)

        self.dataproductclient.activate_data_product_persistence(
            data_product_id=ctd_parsed_data_product_id,
            persist_data=False,
            persist_metadata=False)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(ctd_parsed_data_product_id,
                                                   PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0)
        ctd_stream_id = stream_ids[0]

        return ctd_stream_id, ctd_parsed_data_product_id

    def _start_simple_input_stream_process(self, ctd_stream_id):
        return self._start_input_stream_process(ctd_stream_id)

    def _start_sinusoidal_input_stream_process(self, ctd_stream_id):
        return self._start_input_stream_process(
            ctd_stream_id, 'ion.processes.data.sinusoidal_stream_publisher',
            'SinusoidalCtdPublisher')

    def _start_input_stream_process(
            self,
            ctd_stream_id,
            module='ion.processes.data.ctd_stream_publisher',
            class_name='SimpleCtdPublisher'):

        ###
        ### Start the process for producing the CTD data
        ###
        # process definition for the ctd simulator...
        producer_definition = ProcessDefinition()
        producer_definition.executable = {
            'module': module,
            'class': class_name
        }

        ctd_sim_procdef_id = self.process_dispatcher.create_process_definition(
            process_definition=producer_definition)

        # Start the ctd simulator to produce some data
        configuration = {
            'process': {
                'stream_id': ctd_stream_id,
            }
        }
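        # The nested 'process' dict is handed to the scheduled process as its
        # configuration; this is how the publisher learns which stream_id to
        # publish the simulated CTD granules on.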
        ctd_sim_pid = self.process_dispatcher.schedule_process(
            process_definition_id=ctd_sim_procdef_id,
            configuration=configuration)

        return ctd_sim_pid

    def _start_output_stream_listener(self,
                                      data_product_stream_ids,
                                      message_count_per_stream=10):

        cc = self.container
        assertions = self.assertTrue

        ###
        ### Make a subscriber in the test to listen for transformed data
        ###
        salinity_subscription_id = self.pubsubclient.create_subscription(
            query=StreamQuery(data_product_stream_ids),
            exchange_name='workflow_test',
            name="test workflow transformations",
        )

        pid = cc.spawn_process(name='dummy_process_for_test',
                               module='pyon.ion.process',
                               cls='SimpleProcess',
                               config={})
        dummy_process = cc.proc_manager.procs[pid]

        subscriber_registrar = StreamSubscriberRegistrar(process=dummy_process,
                                                         node=cc.node)

        result = gevent.event.AsyncResult()
        results = []

        def message_received(message, headers):
            # accumulate received granules until the expected count arrives
            log.warn('data received!')
            results.append(message)
            if len(results) >= len(
                    data_product_stream_ids
            ) * message_count_per_stream:  #Only wait for so many messages - per stream
                result.set(True)

        subscriber = subscriber_registrar.create_subscriber(
            exchange_name='workflow_test', callback=message_received)
        subscriber.start()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(
            subscription_id=salinity_subscription_id)

        # Assert that we have received data
        assertions(result.get(timeout=30))

        self.pubsubclient.deactivate_subscription(
            subscription_id=salinity_subscription_id)

        subscriber.stop()

        return results
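
    # The listener above gates on a gevent AsyncResult: message_received appends
    # every granule and sets the result once the total count reaches
    # len(data_product_stream_ids) * message_count_per_stream, so result.get(timeout=30)
    # both bounds the test duration and guarantees a minimum amount of data before
    # the subscription is deactivated.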

    def _validate_messages(self, results):

        cc = self.container
        assertions = self.assertTrue

        first_salinity_values = None

        for message in results:

            try:
                psd = PointSupplementStreamParser(
                    stream_definition=self.ctd_stream_def,
                    stream_granule=message)
                temp = psd.get_values('temperature')
                log.info(psd.list_field_names())
            except KeyError as ke:
                temp = None

            if temp is not None:
                assertions(isinstance(temp, numpy.ndarray))

                log.info('temperature=' + str(numpy.nanmin(temp)))

                first_salinity_values = None

            else:
                psd = PointSupplementStreamParser(
                    stream_definition=SalinityTransform.outgoing_stream_def,
                    stream_granule=message)
                log.info(psd.list_field_names())

                # Test the handy info method for the names of fields in the stream def
                assertions('salinity' in psd.list_field_names())

                # you have to know the name of the coverage in stream def
                salinity = psd.get_values('salinity')
                log.info('salinity=' + str(numpy.nanmin(salinity)))

                assertions(isinstance(salinity, numpy.ndarray))

                assertions(numpy.nanmin(salinity) >
                           0.0)  # salinity should always be greater than 0

                if first_salinity_values is None:
                    first_salinity_values = salinity.tolist()
                else:
                    second_salinity_values = salinity.tolist()
                    assertions(
                        len(first_salinity_values) == len(
                            second_salinity_values))
                    for idx in range(0, len(first_salinity_values)):
                        assertions(first_salinity_values[idx] *
                                   2.0 == second_salinity_values[idx])

    def _create_salinity_data_process_definition(self):

        # Salinity: Data Process Definition

        #First look to see if it exists and if not, then create it
        dpd, _ = self.rrclient.find_resources(restype=RT.DataProcessDefinition,
                                              name='ctd_salinity')
        if len(dpd) > 0:
            return dpd[0]

        log.debug("Create data process definition SalinityTransform")
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='ctd_salinity',
            description='create a salinity data product',
            module='ion.processes.data.transforms.ctd.ctd_L2_salinity',
            class_name='SalinityTransform',
            process_source='SalinityTransform source code here...')
        try:
            ctd_L2_salinity_dprocdef_id = self.dataprocessclient.create_data_process_definition(
                dpd_obj)
        except Exception as ex:
            self.fail(
                "failed to create new SalinityTransform data process definition: %s"
                % ex)

        # create a stream definition for the data from the salinity Transform
        sal_stream_def_id = self.pubsubclient.create_stream_definition(
            container=SalinityTransform.outgoing_stream_def, name='Salinity')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            sal_stream_def_id, ctd_L2_salinity_dprocdef_id)

        return ctd_L2_salinity_dprocdef_id

    def _create_salinity_doubler_data_process_definition(self):

        #First look to see if it exists and if not, then create it
        dpd, _ = self.rrclient.find_resources(restype=RT.DataProcessDefinition,
                                              name='salinity_doubler')
        if len(dpd) > 0:
            return dpd[0]

        # Salinity Doubler: Data Process Definition
        log.debug("Create data process definition SalinityDoublerTransform")
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='salinity_doubler',
            description='create a salinity doubler data product',
            module='ion.processes.data.transforms.example_double_salinity',
            class_name='SalinityDoubler',
            process_source='SalinityDoubler source code here...')
        try:
            salinity_doubler_dprocdef_id = self.dataprocessclient.create_data_process_definition(
                dpd_obj)
        except Exception as ex:
            self.fail(
                "failed to create new SalinityDoubler data process definition: %s"
                % ex)

        # create a stream definition for the data from the salinity Transform
        salinity_double_stream_def_id = self.pubsubclient.create_stream_definition(
            container=SalinityDoubler.outgoing_stream_def,
            name='SalinityDoubler')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            salinity_double_stream_def_id, salinity_doubler_dprocdef_id)

        return salinity_doubler_dprocdef_id

    def create_transform_process(self, data_process_definition_id,
                                 data_process_input_dp_id):

        data_process_definition = self.rrclient.read(
            data_process_definition_id)

        # Find the link between the output Stream Definition resource and the Data Process Definition resource
        stream_ids, _ = self.rrclient.find_objects(data_process_definition._id,
                                                   PRED.hasStreamDefinition,
                                                   RT.StreamDefinition,
                                                   id_only=True)
        if not stream_ids:
            raise Inconsistent(
                "The data process definition %s is missing an association to an output stream definition"
                % data_process_definition._id)
        process_output_stream_def_id = stream_ids[0]

        #Use the data process definition name as the name of the output data product
        data_process_name = data_process_definition.name

        # Create the output data product of the transform
        transform_dp_obj = IonObject(
            RT.DataProduct,
            name=data_process_name,
            description=data_process_definition.description)
        transform_dp_id = self.dataproductclient.create_data_product(
            transform_dp_obj, process_output_stream_def_id)
        self.dataproductclient.activate_data_product_persistence(
            data_product_id=transform_dp_id,
            persist_data=True,
            persist_metadata=True)

        #This transform's data product id is the output product id
        output_data_product_id = transform_dp_id

        # Create the  transform data process
        log.debug("create data_process and start it")
        data_process_id = self.dataprocessclient.create_data_process(
            data_process_definition._id, [data_process_input_dp_id],
            {'output': transform_dp_id})
        self.dataprocessclient.activate_data_process(data_process_id)

        #Find the id of the output data stream
        stream_ids, _ = self.rrclient.find_objects(transform_dp_id,
                                                   PRED.hasStream, None, True)
        if not stream_ids:
            raise Inconsistent(
                "The data process %s is missing an association to an output stream"
                % data_process_id)

        return data_process_id, output_data_product_id

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    #@unittest.skip("Skipping for debugging ")
    def test_SA_transform_components(self):

        assertions = self.assertTrue

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self._create_ctd_input_stream_and_data_product(
        )
        data_product_stream_ids.append(ctd_stream_id)

        ###
        ###  Setup the first transformation
        ###

        # Salinity: Data Process Definition
        ctd_L2_salinity_dprocdef_id = self._create_salinity_data_process_definition(
        )

        l2_salinity_all_data_process_id, ctd_l2_salinity_output_dp_id = self.create_transform_process(
            ctd_L2_salinity_dprocdef_id, ctd_parsed_data_product_id)

        ## get the stream id for the transform outputs
        stream_ids, _ = self.rrclient.find_objects(
            ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0)
        sal_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_stream_id)

        ###
        ###  Setup the second transformation
        ###

        # Salinity Doubler: Data Process Definition
        salinity_doubler_dprocdef_id = self._create_salinity_doubler_data_process_definition(
        )

        salinity_double_data_process_id, salinity_doubler_output_dp_id = self.create_transform_process(
            salinity_doubler_dprocdef_id, ctd_l2_salinity_output_dp_id)

        stream_ids, _ = self.rrclient.find_objects(
            salinity_doubler_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0)
        sal_dbl_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_dbl_stream_id)

        #Start the input stream process
        ctd_sim_pid = self._start_simple_input_stream_process(ctd_stream_id)

        #Start the output stream listener to monitor and verify messages
        results = self._start_output_stream_listener(data_product_stream_ids)

        #Stop the transform processes
        self.dataprocessclient.deactivate_data_process(
            salinity_double_data_process_id)
        self.dataprocessclient.deactivate_data_process(
            l2_salinity_all_data_process_id)

        # stop the flow and parse the messages...
        self.process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

        #Validate the data from each of the messages along the way
        self._validate_messages(results)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    #@unittest.skip("Skipping for debugging ")
    def test_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Salinity_Test_Workflow',
            description='tests a workflow of multiple transform data processes'
        )

        workflow_data_product_name = 'TEST-Workflow_Output_Product'  #Set a specific output product name

        #Add a transformation process definition
        ctd_L2_salinity_dprocdef_id = self._create_salinity_data_process_definition(
        )
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=ctd_L2_salinity_dprocdef_id,
            persist_process_output_data=False
        )  #Don't persist the intermediate data product
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Add a transformation process definition
        salinity_doubler_dprocdef_id = self._create_salinity_doubler_data_process_definition(
        )
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=salinity_doubler_dprocdef_id,
            output_data_product_name=workflow_data_product_name)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id,
                                               PRED.hasDataProcessDefinition)
        assertions(len(aids) == 2)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self._create_ctd_input_stream_and_data_product(
        )
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id, timeout=30)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Verify the output data product name matches what was specified in the workflow definition
        workflow_product = self.rrclient.read(workflow_product_id)
        assertions(workflow_product.name == workflow_data_product_name)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 2)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        #Start the input stream process
        ctd_sim_pid = self._start_simple_input_stream_process(ctd_stream_id)

        #Start the output stream listener to monitor and verify messages
        results = self._start_output_stream_listener(data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id, False, timeout=15)  # Should test true at some point

        #Make sure the Workflow object was removed
        objs, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(objs) == 0)

        # stop the flow and parse the messages...
        self.process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

        #Validate the data from each of the messages along the way
        self._validate_messages(results)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids, _ = self.rrclient.find_resources(
            restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0)

    def _create_google_dt_data_process_definition(self):

        #First look to see if it exists and if not, then create it
        dpd, _ = self.rrclient.find_resources(restype=RT.DataProcessDefinition,
                                              name='google_dt_transform')
        if len(dpd) > 0:
            return dpd[0]

        # Data Process Definition
        log.debug("Create data process definition GoogleDtTransform")
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='google_dt_transform',
            description='Convert data streams to Google DataTables',
            module='ion.processes.data.transforms.viz.google_dt',
            class_name='VizTransformGoogleDT',
            process_source='VizTransformGoogleDT source code here...')
        try:
            procdef_id = self.dataprocessclient.create_data_process_definition(
                dpd_obj)
        except Exception as ex:
            self.fail(
                "failed to create new VizTransformGoogleDT data process definition: %s"
                % ex)

        # create a stream definition for the data from the
        stream_def_id = self.pubsubclient.create_stream_definition(
            container=VizTransformGoogleDT.outgoing_stream_def,
            name='VizTransformGoogleDT')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            stream_def_id, procdef_id)

        return procdef_id

    def _validate_google_dt_results(self, results_stream_def, results):

        cc = self.container
        assertions = self.assertTrue

        for g in results:

            if isinstance(g, Granule):

                tx = TaxyTool.load_from_granule(g)
                rdt = RecordDictionaryTool.load_from_granule(g)
                #log.warn(tx.pretty_print())
                #log.warn(rdt.pretty_print())

                gdt_component = rdt['google_dt_components'][0]

                assertions(
                    gdt_component['viz_product_type'] == 'google_realtime_dt')
                gdt_description = gdt_component['data_table_description']
                gdt_content = gdt_component['data_table_content']

                assertions(gdt_description[0][0] == 'time')
                assertions(len(gdt_description) > 1)
                assertions(len(gdt_content) >= 0)

        return

    def _create_mpl_graphs_data_process_definition(self):

        #First look to see if it exists and if not, then create it
        dpd, _ = self.rrclient.find_resources(restype=RT.DataProcessDefinition,
                                              name='mpl_graphs_transform')
        if len(dpd) > 0:
            return dpd[0]

        #Data Process Definition
        log.debug("Create data process definition MatplotlibGraphsTransform")
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='mpl_graphs_transform',
            description='Convert data streams to Matplotlib graphs',
            module='ion.processes.data.transforms.viz.matplotlib_graphs',
            class_name='VizTransformMatplotlibGraphs',
            process_source='VizTransformMatplotlibGraphs source code here...')
        try:
            procdef_id = self.dataprocessclient.create_data_process_definition(
                dpd_obj)
        except Exception as ex:
            self.fail(
                "failed to create new VizTransformMatplotlibGraphs data process definition: %s"
                % ex)

        # create a stream definition for the data
        stream_def_id = self.pubsubclient.create_stream_definition(
            container=VizTransformMatplotlibGraphs.outgoing_stream_def,
            name='VizTransformMatplotlibGraphs')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            stream_def_id, procdef_id)

        return procdef_id

    def _validate_mpl_graphs_results(self, results_stream_def, results):

        cc = self.container
        assertions = self.assertTrue

        for g in results:
            if isinstance(g, Granule):

                tx = TaxyTool.load_from_granule(g)
                rdt = RecordDictionaryTool.load_from_granule(g)
                #log.warn(tx.pretty_print())
                #log.warn(rdt.pretty_print())

                graphs = rdt['matplotlib_graphs']

                for graph in graphs:
                    assertions(
                        graph['viz_product_type'] == 'matplotlib_graphs')
                    # check to see if the list (numpy array) contains actual images
                    assertions(
                        imghdr.what(graph['image_name'], graph['image_obj']) ==
                        'png')

        return

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    #@unittest.skip("Skipping for debugging ")
    def test_google_dt_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='GoogleDT_Test_Workflow',
            description=
            'Tests the workflow of converting stream data to Google DT')

        #Add a transformation process definition
        google_dt_procdef_id = self._create_google_dt_data_process_definition()
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=google_dt_procdef_id)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self._create_ctd_input_stream_and_data_product(
        )
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        #Start the input stream process
        ctd_sim_pid = self._start_simple_input_stream_process(ctd_stream_id)

        #Start the output stream listener to monitor and verify messages
        results = self._start_output_stream_listener(data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id, False)  # Should test true at some point

        # stop the flow and parse the messages...
        self.process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

        #Validate the data from each of the messages along the way
        self._validate_google_dt_results(
            VizTransformGoogleDT.outgoing_stream_def, results)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids, _ = self.rrclient.find_resources(
            restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    #@unittest.skip("Skipping for debugging ")
    def test_mpl_graphs_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Mpl_Graphs_Test_Workflow',
            description=
            'Tests the workflow of converting stream data to Matplotlib graphs'
        )

        #Add a transformation process definition
        mpl_graphs_procdef_id = self._create_mpl_graphs_data_process_definition(
        )
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=mpl_graphs_procdef_id)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self._create_ctd_input_stream_and_data_product(
        )
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        #Start the input stream process
        ctd_sim_pid = self._start_sinusoidal_input_stream_process(
            ctd_stream_id)

        #Start the output stream listener to monitor and verify messages
        results = self._start_output_stream_listener(data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id, False)  # Should test true at some point

        # stop the flow and parse the messages...
        self.process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

        #Validate the data from each of the messages along the way
        self._validate_mpl_graphs_results(
            VizTransformMatplotlibGraphs.outgoing_stream_def, results)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids, _ = self.rrclient.find_resources(
            restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    #@unittest.skip("Skipping for debugging ")
    def test_multiple_workflow_instances(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Multiple_Test_Workflow',
            description='Tests the workflow of converting stream data')

        #Add a transformation process definition
        google_dt_procdef_id = self._create_google_dt_data_process_definition()
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=google_dt_procdef_id)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the first input data product
        ctd_stream_id1, ctd_parsed_data_product_id1 = self._create_ctd_input_stream_and_data_product(
            'ctd_parsed1')
        data_product_stream_ids.append(ctd_stream_id1)

        #Create and start the first workflow
        workflow_id1, workflow_product_id1 = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id1)

        #Create the second input data product
        ctd_stream_id2, ctd_parsed_data_product_id2 = self._create_ctd_input_stream_and_data_product(
            'ctd_parsed2')
        data_product_stream_ids.append(ctd_stream_id2)

        #Create and start the second workflow
        workflow_id2, workflow_product_id2 = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id2)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_ids, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 2)

        #Start the first input stream process
        ctd_sim_pid1 = self._start_sinusoidal_input_stream_process(
            ctd_stream_id1)

        #Start the second input stream process
        ctd_sim_pid2 = self._start_simple_input_stream_process(ctd_stream_id2)

        #Start the output stream listener to monitor a set number of messages being sent through the workflows
        results = self._start_output_stream_listener(
            data_product_stream_ids, message_count_per_stream=5)

        # stop the flow of messages...
        self.process_dispatcher.cancel_process(
            ctd_sim_pid1
        )  # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid2)

        #Stop the first workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id1, False)  # Should test true at some point

        #Stop the second workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id2, False)  # Should test true at some point

        workflow_ids, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 0)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids, _ = self.rrclient.find_resources(
            restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0)

        aid_list = self.rrclient.find_associations(
            workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aid_list) == 0)
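
Every workflow test above leans on the same ProcessDispatcher lifecycle: register a ProcessDefinition once, schedule a process from it with a per-process configuration, and cancel the process once enough data has flowed. The sketch below isolates that pattern; it assumes a running pyon container, the import paths are assumed from the coi-services interface package, and the module/class names simply reuse the CTD publisher from the tests above.

from interface.objects import ProcessDefinition
from interface.services.cei.iprocess_dispatcher_service import ProcessDispatcherServiceClient


def run_ctd_simulator(stream_id, node=None):
    """Sketch of the definition -> schedule -> cancel lifecycle (assumes a running container)."""
    pd_client = ProcessDispatcherServiceClient(node=node)

    # 1. Describe the executable once...
    producer_definition = ProcessDefinition(name='ctd_sim_definition')
    producer_definition.executable = {
        'module': 'ion.processes.data.ctd_stream_publisher',
        'class': 'SimpleCtdPublisher'
    }
    procdef_id = pd_client.create_process_definition(
        process_definition=producer_definition)

    # 2. ...schedule as many instances as needed, each with its own configuration...
    configuration = {'process': {'stream_id': stream_id}}
    pid = pd_client.schedule_process(process_definition_id=procdef_id,
                                     configuration=configuration)

    # 3. ...and cancel the process when enough data has been produced.
    pd_client.cancel_process(pid)
    return procdef_id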
Example #27
0
class HighAvailabilityAgentSensorPolicyTest(IonIntegrationTestCase):

    def _start_webserver(self, port=None):
        """ Start a webserver for testing code download
        Note: tries really hard to get a port, and if it can't use
        the suggested port, randomly picks another, and returns it
        """
        def log_message(self, format, *args):
            #swallow log messages
            pass

        class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
            server_version = 'test_server'
            extensions_map = ''

            def do_GET(self):
                self.send_response(200)
                self.send_header("Content-type", "text/plain")
                self.send_header("Content-Length", len(self.server.response))
                self.end_headers()
                self.wfile.write(self.server.response)

        class Server(HTTPServer):

            response = ''

            def serve_forever(self):
                self._serving = 1
                while self._serving:
                    self.handle_request()

            def stop(self):
                self._serving = 0

        if port is None:
            port = 8008
        Handler = TestRequestHandler
        Handler.log_message = log_message

        for i in range(0, 100):
            try:
                self._webserver = Server(("localhost", port), Handler)
            except socket.error:
                print "port %s is in use, picking another" % port
                port = randint(8000, 10000)
                continue
            else:
                break

        self._web_glet = gevent.spawn(self._webserver.serve_forever)
        return port

    def _stop_webserver(self):
        if self._webserver is not None:
            self._webserver.stop()
            gevent.sleep(2)
            self._web_glet.kill()

    def await_ha_state(self, want_state, timeout=20):

        for i in range(0, timeout):
            try:
                status = self.haa_client.status().result
                if status == want_state:
                    return
                else:
                    procs = self.get_running_procs()
                    num_procs = len(procs)
                    log.debug("assert wants state %s, got state %s, with %s procs" % (want_state, status, num_procs))
            except Exception:
                log.exception("Problem getting HA status, trying again...")
            gevent.sleep(1)

        raise Exception("Took more than %s seconds to get to ha state %s" % (timeout, want_state))

    @needs_epu
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')
        self.pd_cli = ProcessDispatcherServiceClient(to_name="process_dispatcher")

        self.process_definition_id = uuid4().hex
        self.process_definition = ProcessDefinition(name='test', executable={
                'module': 'ion.agents.cei.test.test_haagent',
                'class': 'TestProcess'
        })

        self.pd_cli.create_process_definition(self.process_definition,
                self.process_definition_id)

        http_port = 8919
        http_port = self._start_webserver(port=http_port)

        self.resource_id = "haagent_4567"
        self._haa_name = "high_availability_agent"
        self._haa_config = {
            'server': {
                'trafficsentinel': {
                    'host': 'localhost',
                    'port': http_port,
                    'protocol': 'http',
                    'username': '******',
                    'password': '******'
                }
            },
            'highavailability': {
                'policy': {
                    'interval': 1,
                    'name': 'sensor',
                    'parameters': {
                        'metric': 'app_attributes:ml',
                        'sample_period': 600,
                        'sample_function': 'Average',
                        'cooldown_period': 5,
                        'scale_up_threshold': 2.0,
                        'scale_up_n_processes': 1,
                        'scale_down_threshold': 1.0,
                        'scale_down_n_processes': 1,
                        'maximum_processes': 5,
                        'minimum_processes': 1,
                    }
                },
                'process_definition_id': self.process_definition_id,
                "process_dispatchers": [
                    'process_dispatcher'
                ]
            },
            'agent': {'resource_id': self.resource_id},
        }

        self._base_procs = self.pd_cli.list_processes()

        self.waiter = ProcessStateWaiter()
        self.waiter.start()

        self.container_client = ContainerAgentClient(node=self.container.node,
            name=self.container.name)
        self._haa_pid = self.container_client.spawn_process(name=self._haa_name,
            module="ion.agents.cei.high_availability_agent",
            cls="HighAvailabilityAgent", config=self._haa_config)

        # Start a resource agent client to talk with the instrument agent.
        self._haa_pyon_client = SimpleResourceAgentClient(self.resource_id, process=FakeProcess())
        log.info('Got haa client %s.', str(self._haa_pyon_client))

        self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client)

    def tearDown(self):
        new_policy = { 'metric': 'app_attributes:ml',
                        'sample_period': 600,
                        'sample_function': 'Average',
                        'cooldown_period': 0,
                        'scale_up_threshold': 2.0,
                        'scale_up_n_processes': 1,
                        'scale_down_threshold': 1.0,
                        'scale_down_n_processes': 1,
                        'maximum_processes': 0,
                        'minimum_processes': 0,
                    }
        self.haa_client.reconfigure_policy(new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)
        self.assertEqual(len(self.get_running_procs()), 0)

        self.waiter.stop()
        self.container.terminate_process(self._haa_pid)
        self._stop_webserver()
        self._stop_container()

    def get_running_procs(self):
        """returns a normalized set of running procs (removes the ones that
        were there at setup time)
        """

        base = self._base_procs
        base_pids = [proc.process_id for proc in base]
        current = self.pd_cli.list_processes()
        current_pids = [proc.process_id for proc in current]
        print "filtering base procs %s from %s" % (base_pids, current_pids)
        normal = [cproc for cproc in current if cproc.process_id not in base_pids and cproc.process_state == ProcessStateEnum.RUNNING]
        return normal

    def _get_managed_upids(self):
        result = self.haa_client.dump().result
        upids = result['managed_upids']
        return upids

    def _set_response(self, response):
        self._webserver.response = response

    def test_sensor_policy(self):
        status = self.haa_client.status().result
        # Ensure HA hasn't already failed
        assert status in ('PENDING', 'READY', 'STEADY')

        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)

        self.assertEqual(len(self.get_running_procs()), 1)

        self.await_ha_state('STEADY')

        # Set ml for each proc such that we scale up
        upids = self._get_managed_upids()
        response = ""
        for upid in upids:
            response += "pid=%s&ml=5\n" % upid
        self._set_response(response)

        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)

        self.assertEqual(len(self.get_running_procs()), 2)

        # Set ml so we stay steady
        upids = self._get_managed_upids()
        response = ""
        for upid in upids:
            response += "pid=%s&ml=1.5\n" % upid
        self._set_response(response)

        self.assertEqual(len(self.get_running_procs()), 2)

        self.await_ha_state('STEADY')

        # Set ml so we scale down
        upids = self._get_managed_upids()
        response = ""
        for upid in upids:
            response += "pid=%s&ml=0.5\n" % upid
        self._set_response(response)

        self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)

        self.assertEqual(len(self.get_running_procs()), 1)

        self.await_ha_state('STEADY')
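
The sensor policy exercised above scales the pool of managed processes from the aggregated app_attributes:ml metric: a reading above scale_up_threshold adds scale_up_n_processes, a reading below scale_down_threshold removes scale_down_n_processes, and the result is clamped to [minimum_processes, maximum_processes]. The helper below is only an approximation of that decision logic, useful for reasoning about the test's expected 1 -> 2 -> 1 process counts; it is not the HA agent's actual implementation.

def desired_process_count(current, metric_value, policy):
    """Approximate sensor-policy decision (assumption: simple threshold check plus clamp)."""
    desired = current
    if metric_value > policy['scale_up_threshold']:
        desired = current + policy['scale_up_n_processes']
    elif metric_value < policy['scale_down_threshold']:
        desired = current - policy['scale_down_n_processes']
    # Clamp to the configured bounds.
    return max(policy['minimum_processes'],
               min(policy['maximum_processes'], desired))


policy = {'scale_up_threshold': 2.0, 'scale_up_n_processes': 1,
          'scale_down_threshold': 1.0, 'scale_down_n_processes': 1,
          'minimum_processes': 1, 'maximum_processes': 5}

assert desired_process_count(1, 5.0, policy) == 2   # ml=5   -> scale up
assert desired_process_count(2, 1.5, policy) == 2   # ml=1.5 -> steady
assert desired_process_count(2, 0.5, policy) == 1   # ml=0.5 -> scale down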
class TestCTDTransformsIntegration(IonIntegrationTestCase):
    pdict_id = None

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()
        #container = Container()
        #print 'starting container'
        #container.start()
        #print 'started container'

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        print 'started services'

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubclient = PubsubManagementServiceClient(
            node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataset_management = self.datasetclient

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name + '_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(
            process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id,
            configuration=configuration)

        return pid

    def _create_instrument_model(self):

        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)

        return instModel_id

    def _create_instrument_agent(self, instModel_id):

        raw_config = StreamConfiguration(
            stream_name='raw', parameter_dictionary_name='ctd_raw_param_dict')
        parsed_config = StreamConfiguration(
            stream_name='parsed',
            parameter_dictionary_name='ctd_parsed_param_dict')

        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name='agent007',
            description="SBE37IMAgent",
            driver_uri=DRV_URI_GOOD,
            stream_configurations=[raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)

        self.imsclient.assign_instrument_model_to_instrument_agent(
            instModel_id, instAgent_id)

        return instAgent_id

    def _create_instrument_device(self, instModel_id):

        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345")

        instDevice_id = self.imsclient.create_instrument_device(
            instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(
            instModel_id, instDevice_id)

        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s    (SA Req: L4-CI-SA-RQ-241) ",
            instDevice_id)

        return instDevice_id

    def _create_instrument_agent_instance(self, instAgent_id, instDevice_id):

        port_agent_config = {
            'device_addr': CFG.device.sbe37.host,
            'device_port': CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance,
                                          name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config)

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id)

        return instAgentInstance_id

    def _create_param_dicts(self):
        tdom, sdom = time_series_domain()

        self.sdom = sdom.dump()
        self.tdom = tdom.dump()

        self.pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)

    def _create_input_data_products(
        self,
        ctd_stream_def_id,
        instDevice_id,
    ):

        dp_obj = IonObject(RT.DataProduct,
                           name='the parsed data',
                           description='ctd stream test',
                           temporal_domain=self.tdom,
                           spatial_domain=self.sdom)

        ctd_parsed_data_product = self.dataproductclient.create_data_product(
            dp_obj, ctd_stream_def_id)

        self.damsclient.assign_data_product(
            input_resource_id=instDevice_id,
            data_product_id=ctd_parsed_data_product)

        self.dataproductclient.activate_data_product_persistence(
            data_product_id=ctd_parsed_data_product)

        #---------------------------------------------------------------------------
        # Retrieve the id of the OUTPUT stream from the out Data Product
        #---------------------------------------------------------------------------

        stream_ids, _ = self.rrclient.find_objects(ctd_parsed_data_product,
                                                   PRED.hasStream, None, True)

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        #---------------------------------------------------------------------------
        # Create CTD Raw as the second data product
        #---------------------------------------------------------------------------
        if not self.pdict_id:
            self._create_param_dicts()
        raw_stream_def_id = self.pubsubclient.create_stream_definition(
            name='SBE37_RAW', parameter_dictionary_id=self.pdict_id)

        dp_obj = IonObject(RT.DataProduct,
                           name='the raw data',
                           description='raw stream test',
                           temporal_domain=self.tdom,
                           spatial_domain=self.sdom)

        ctd_raw_data_product = self.dataproductclient.create_data_product(
            dp_obj, raw_stream_def_id)

        self.damsclient.assign_data_product(
            input_resource_id=instDevice_id,
            data_product_id=ctd_raw_data_product)

        self.dataproductclient.activate_data_product_persistence(
            data_product_id=ctd_raw_data_product)

        #---------------------------------------------------------------------------
        # Retrieve the id of the OUTPUT stream from the out Data Product
        #---------------------------------------------------------------------------

        stream_ids, _ = self.rrclient.find_objects(ctd_raw_data_product,
                                                   PRED.hasStream, None, True)

        print 'Data product streams2 = ', stream_ids

        return ctd_parsed_data_product

    def _create_data_process_definitions(self):

        #-------------------------------------------------------------------------------------
        # L0 Conductivity - Temperature - Pressure: Data Process Definition
        #-------------------------------------------------------------------------------------

        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='ctd_L0_all',
            description='transform ctd package into three separate L0 streams',
            module='ion.processes.data.transforms.ctd.ctd_L0_all',
            class_name='ctd_L0_all')
        self.ctd_L0_all_dprocdef_id = self.dataprocessclient.create_data_process_definition(
            dpd_obj)

        #-------------------------------------------------------------------------------------
        # L1 Conductivity: Data Process Definition
        #-------------------------------------------------------------------------------------
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='ctd_L1_conductivity',
            description='create the L1 conductivity data product',
            module='ion.processes.data.transforms.ctd.ctd_L1_conductivity',
            class_name='CTDL1ConductivityTransform')
        self.ctd_L1_conductivity_dprocdef_id = self.dataprocessclient.create_data_process_definition(
            dpd_obj)

        #-------------------------------------------------------------------------------------
        # L1 Pressure: Data Process Definition
        #-------------------------------------------------------------------------------------
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='ctd_L1_pressure',
            description='create the L1 pressure data product',
            module='ion.processes.data.transforms.ctd.ctd_L1_pressure',
            class_name='CTDL1PressureTransform')
        self.ctd_L1_pressure_dprocdef_id = self.dataprocessclient.create_data_process_definition(
            dpd_obj)

        #-------------------------------------------------------------------------------------
        # L1 Temperature: Data Process Definition
        #-------------------------------------------------------------------------------------
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='ctd_L1_temperature',
            description='create the L1 temperature data product',
            module='ion.processes.data.transforms.ctd.ctd_L1_temperature',
            class_name='CTDL1TemperatureTransform')
        self.ctd_L1_temperature_dprocdef_id = self.dataprocessclient.create_data_process_definition(
            dpd_obj)

        #-------------------------------------------------------------------------------------
        # L2 Salinity: Data Process Definition
        #-------------------------------------------------------------------------------------
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='ctd_L2_salinity',
            description='create the L2 salinity data product',
            module='ion.processes.data.transforms.ctd.ctd_L2_salinity',
            class_name='SalinityTransform')
        self.ctd_L2_salinity_dprocdef_id = self.dataprocessclient.create_data_process_definition(
            dpd_obj)

        #-------------------------------------------------------------------------------------
        # L2 Density: Data Process Definition
        #-------------------------------------------------------------------------------------
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name='ctd_L2_density',
            description='create the L2 density data product',
            module='ion.processes.data.transforms.ctd.ctd_L2_density',
            class_name='DensityTransform')
        self.ctd_L2_density_dprocdef_id = self.dataprocessclient.create_data_process_definition(
            dpd_obj)

        return (self.ctd_L0_all_dprocdef_id, self.ctd_L1_conductivity_dprocdef_id,
                self.ctd_L1_pressure_dprocdef_id, self.ctd_L1_temperature_dprocdef_id,
                self.ctd_L2_salinity_dprocdef_id, self.ctd_L2_density_dprocdef_id)

    def _create_stream_definitions(self):
        if not self.pdict_id:
            self._create_param_dicts()

        outgoing_stream_l0_conductivity_id = self.pubsubclient.create_stream_definition(
            name='L0_Conductivity', parameter_dictionary_id=self.pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l0_conductivity_id,
            self.ctd_L0_all_dprocdef_id,
            binding='conductivity')

        outgoing_stream_l0_pressure_id = self.pubsubclient.create_stream_definition(
            name='L0_Pressure', parameter_dictionary_id=self.pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l0_pressure_id,
            self.ctd_L0_all_dprocdef_id,
            binding='pressure')

        outgoing_stream_l0_temperature_id = self.pubsubclient.create_stream_definition(
            name='L0_Temperature', parameter_dictionary_id=self.pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l0_temperature_id,
            self.ctd_L0_all_dprocdef_id,
            binding='temperature')

        return outgoing_stream_l0_conductivity_id, outgoing_stream_l0_pressure_id, outgoing_stream_l0_temperature_id

    def _create_l0_output_data_products(self,
                                        outgoing_stream_l0_conductivity_id,
                                        outgoing_stream_l0_pressure_id,
                                        outgoing_stream_l0_temperature_id):

        out_data_prods = []

        ctd_l0_conductivity_output_dp_obj = IonObject(
            RT.DataProduct,
            name='L0_Conductivity',
            description='transform output conductivity',
            temporal_domain=self.tdom,
            spatial_domain=self.sdom)

        self.ctd_l0_conductivity_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l0_conductivity_output_dp_obj,
            outgoing_stream_l0_conductivity_id)
        out_data_prods.append(self.ctd_l0_conductivity_output_dp_id)
        self.dataproductclient.activate_data_product_persistence(
            data_product_id=self.ctd_l0_conductivity_output_dp_id)

        ctd_l0_pressure_output_dp_obj = IonObject(
            RT.DataProduct,
            name='L0_Pressure',
            description='transform output pressure',
            temporal_domain=self.tdom,
            spatial_domain=self.sdom)

        self.ctd_l0_pressure_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l0_pressure_output_dp_obj, outgoing_stream_l0_pressure_id)
        out_data_prods.append(self.ctd_l0_pressure_output_dp_id)
        self.dataproductclient.activate_data_product_persistence(
            data_product_id=self.ctd_l0_pressure_output_dp_id)

        ctd_l0_temperature_output_dp_obj = IonObject(
            RT.DataProduct,
            name='L0_Temperature',
            description='transform output temperature',
            temporal_domain=self.tdom,
            spatial_domain=self.sdom)

        self.ctd_l0_temperature_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l0_temperature_output_dp_obj,
            outgoing_stream_l0_temperature_id)
        out_data_prods.append(self.ctd_l0_temperature_output_dp_id)
        self.dataproductclient.activate_data_product_persistence(
            data_product_id=self.ctd_l0_temperature_output_dp_id)

        return out_data_prods

    def _create_l1_out_data_products(self):

        ctd_l1_conductivity_output_dp_obj = IonObject(
            RT.DataProduct,
            name='L1_Conductivity',
            description='transform output L1 conductivity',
            temporal_domain=self.tdom,
            spatial_domain=self.sdom)

        self.ctd_l1_conductivity_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l1_conductivity_output_dp_obj,
            self.outgoing_stream_l1_conductivity_id)
        self.dataproductclient.activate_data_product_persistence(
            data_product_id=self.ctd_l1_conductivity_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(
            self.ctd_l1_conductivity_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l1_conductivity', stream_ids[0])
        self.loggerpids.append(pid)

        ctd_l1_pressure_output_dp_obj = IonObject(
            RT.DataProduct,
            name='L1_Pressure',
            description='transform output L1 pressure',
            temporal_domain=self.tdom,
            spatial_domain=self.sdom)

        self.ctd_l1_pressure_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l1_pressure_output_dp_obj, self.outgoing_stream_l1_pressure_id)
        self.dataproductclient.activate_data_product_persistence(
            data_product_id=self.ctd_l1_pressure_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(
            self.ctd_l1_pressure_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l1_pressure', stream_ids[0])
        self.loggerpids.append(pid)

        ctd_l1_temperature_output_dp_obj = IonObject(
            RT.DataProduct,
            name='L1_Temperature',
            description='transform output L1 temperature',
            temporal_domain=self.tdom,
            spatial_domain=self.sdom)

        self.ctd_l1_temperature_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l1_temperature_output_dp_obj,
            self.outgoing_stream_l1_temperature_id)
        self.dataproductclient.activate_data_product_persistence(
            data_product_id=self.ctd_l1_temperature_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(
            self.ctd_l1_temperature_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l1_temperature', stream_ids[0])
        self.loggerpids.append(pid)

    def _create_l2_out_data_products(self):

        #-------------------------------
        # L2 Salinity - Density: Output Data Products
        #-------------------------------

        if not self.pdict_id: self._create_param_dicts()
        outgoing_stream_l2_salinity_id = self.pubsubclient.create_stream_definition(
            name='L2_salinity', parameter_dictionary_id=self.pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l2_salinity_id,
            self.ctd_L2_salinity_dprocdef_id,
            binding='salinity')

        outgoing_stream_l2_density_id = self.pubsubclient.create_stream_definition(
            name='L2_Density', parameter_dictionary_id=self.pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l2_density_id,
            self.ctd_L2_density_dprocdef_id,
            binding='density')

        ctd_l2_salinity_output_dp_obj = IonObject(
            RT.DataProduct,
            name='L2_Salinity',
            description='transform output L2 salinity',
            temporal_domain=self.tdom,
            spatial_domain=self.sdom)

        self.ctd_l2_salinity_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l2_salinity_output_dp_obj, outgoing_stream_l2_salinity_id)

        self.dataproductclient.activate_data_product_persistence(
            data_product_id=self.ctd_l2_salinity_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(
            self.ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l2_salinity', stream_ids[0])
        self.loggerpids.append(pid)

        ctd_l2_density_output_dp_obj = IonObject(
            RT.DataProduct,
            name='L2_Density',
            description='transform output L2 density',
            temporal_domain=self.tdom,
            spatial_domain=self.sdom)

        self.ctd_l2_density_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l2_density_output_dp_obj, outgoing_stream_l2_density_id)

        self.dataproductclient.activate_data_product_persistence(
            data_product_id=self.ctd_l2_density_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(
            self.ctd_l2_density_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l2_density', stream_ids[0])
        self.loggerpids.append(pid)

    @unittest.skip('This test errors on coi-nightly, may be OBE.')
    def test_createTransformsThenActivateInstrument(self):
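        """Build the full SBE37 example pipeline: instrument resources, the
        L0/L1/L2 CTD transform chain and its output data products, then run the
        instrument agent in autosample mode for ~90 seconds before resetting the
        agent and cleaning up the logger processes and data products.
        """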

        self.loggerpids = []

        #-------------------------------------------------------------------------------------
        # Create InstrumentModel
        #-------------------------------------------------------------------------------------

        instModel_id = self._create_instrument_model()

        #-------------------------------------------------------------------------------------
        # Create InstrumentAgent
        #-------------------------------------------------------------------------------------

        instAgent_id = self._create_instrument_agent(instModel_id)

        #-------------------------------------------------------------------------------------
        # Create InstrumentDevice
        #-------------------------------------------------------------------------------------

        instDevice_id = self._create_instrument_device(instModel_id)

        #-------------------------------------------------------------------------------------
        # Create Instrument Agent Instance
        #-------------------------------------------------------------------------------------

        instAgentInstance_id = self._create_instrument_agent_instance(
            instAgent_id, instDevice_id)

        #-------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #-------------------------------------------------------------------------------------

        self._create_param_dicts()
        ctd_stream_def_id = self.pubsubclient.create_stream_definition(
            name='SBE37_CDM', parameter_dictionary_id=self.pdict_id)

        #-------------------------------------------------------------------------------------
        # Create two data products
        #-------------------------------------------------------------------------------------

        ctd_parsed_data_product = self._create_input_data_products(
            ctd_stream_def_id, instDevice_id)

        #-------------------------------------------------------------------------------------
        # Create data process definitions
        #-------------------------------------------------------------------------------------
        self._create_data_process_definitions()

        #-------------------------------------------------------------------------------------
        # L0 Conductivity - Temperature - Pressure: Output Data Products
        #-------------------------------------------------------------------------------------

        outgoing_stream_l0_conductivity_id, \
        outgoing_stream_l0_pressure_id, \
        outgoing_stream_l0_temperature_id = self._create_stream_definitions()

        self.out_prod_ids = self._create_l0_output_data_products(
            outgoing_stream_l0_conductivity_id, outgoing_stream_l0_pressure_id,
            outgoing_stream_l0_temperature_id)

        self.outgoing_stream_l1_conductivity_id = self.pubsubclient.create_stream_definition(
            name='L1_conductivity', parameter_dictionary_id=self.pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            self.outgoing_stream_l1_conductivity_id,
            self.ctd_L1_conductivity_dprocdef_id,
            binding='conductivity')

        self.outgoing_stream_l1_pressure_id = self.pubsubclient.create_stream_definition(
            name='L1_Pressure', parameter_dictionary_id=self.pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            self.outgoing_stream_l1_pressure_id,
            self.ctd_L1_pressure_dprocdef_id,
            binding='pressure')

        self.outgoing_stream_l1_temperature_id = self.pubsubclient.create_stream_definition(
            name='L1_Temperature', parameter_dictionary_id=self.pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            self.outgoing_stream_l1_temperature_id,
            self.ctd_L1_temperature_dprocdef_id,
            binding='temperature')

        self._create_l1_out_data_products()

        self._create_l2_out_data_products()

        #-------------------------------------------------------------------------------------
        # L0 Conductivity - Temperature - Pressure: Create the data process
        #-------------------------------------------------------------------------------------
        ctd_l0_all_data_process_id = self.dataprocessclient.create_data_process(
            data_process_definition_id=self.ctd_L0_all_dprocdef_id,
            in_data_product_ids=[ctd_parsed_data_product],
            out_data_product_ids=self.out_prod_ids)
        self.dataprocessclient.activate_data_process(
            ctd_l0_all_data_process_id)

        data_process = self.rrclient.read(ctd_l0_all_data_process_id)

        process_ids, _ = self.rrclient.find_objects(
            subject=ctd_l0_all_data_process_id,
            predicate=PRED.hasProcess,
            id_only=True)
        self.addCleanup(self.processdispatchclient.cancel_process,
                        process_ids[0])

        extended_process = self.dataprocessclient.get_data_process_extension(
            ctd_l0_all_data_process_id)
        self.assertEquals(extended_process.computed.operational_state.status,
                          ComputedValueAvailability.NOTAVAILABLE)
        self.assertEquals(data_process.message_controllable, True)

        #-------------------------------------------------------------------------------------
        # L1 Conductivity: Create the data process
        #-------------------------------------------------------------------------------------
        l1_conductivity_data_process_id = self.dataprocessclient.create_data_process(
            self.ctd_L1_conductivity_dprocdef_id,
            [self.ctd_l0_conductivity_output_dp_id],
            [self.ctd_l1_conductivity_output_dp_id])
        self.dataprocessclient.activate_data_process(
            l1_conductivity_data_process_id)

        data_process = self.rrclient.read(l1_conductivity_data_process_id)
        process_ids, _ = self.rrclient.find_objects(
            subject=l1_conductivity_data_process_id,
            predicate=PRED.hasProcess,
            id_only=True)
        self.addCleanup(self.processdispatchclient.cancel_process,
                        process_ids[0])

        #-------------------------------------------------------------------------------------
        # L1 Pressure: Create the data process
        #-------------------------------------------------------------------------------------
        l1_pressure_data_process_id = self.dataprocessclient.create_data_process(
            self.ctd_L1_pressure_dprocdef_id,
            [self.ctd_l0_pressure_output_dp_id],
            [self.ctd_l1_pressure_output_dp_id])
        self.dataprocessclient.activate_data_process(
            l1_pressure_data_process_id)

        data_process = self.rrclient.read(l1_pressure_data_process_id)
        process_ids, _ = self.rrclient.find_objects(
            subject=l1_pressure_data_process_id,
            predicate=PRED.hasProcess,
            id_only=True)
        self.addCleanup(self.processdispatchclient.cancel_process,
                        process_ids[0])

        #-------------------------------------------------------------------------------------
        # L1 Temperature: Create the data process
        #-------------------------------------------------------------------------------------
        l1_temperature_all_data_process_id = self.dataprocessclient.create_data_process(
            self.ctd_L1_temperature_dprocdef_id,
            [self.ctd_l0_temperature_output_dp_id],
            [self.ctd_l1_temperature_output_dp_id])
        self.dataprocessclient.activate_data_process(
            l1_temperature_all_data_process_id)

        data_process = self.rrclient.read(l1_temperature_all_data_process_id)
        process_ids, _ = self.rrclient.find_objects(
            subject=l1_temperature_all_data_process_id,
            predicate=PRED.hasProcess,
            id_only=True)
        self.addCleanup(self.processdispatchclient.cancel_process,
                        process_ids[0])

        #-------------------------------------------------------------------------------------
        # L2 Salinity: Create the data process
        #-------------------------------------------------------------------------------------
        l2_salinity_all_data_process_id = self.dataprocessclient.create_data_process(
            self.ctd_L2_salinity_dprocdef_id, [ctd_parsed_data_product],
            [self.ctd_l2_salinity_output_dp_id])
        self.dataprocessclient.activate_data_process(
            l2_salinity_all_data_process_id)

        data_process = self.rrclient.read(l2_salinity_all_data_process_id)
        process_ids, _ = self.rrclient.find_objects(
            subject=l2_salinity_all_data_process_id,
            predicate=PRED.hasProcess,
            id_only=True)
        self.addCleanup(self.processdispatchclient.cancel_process,
                        process_ids[0])

        #-------------------------------------------------------------------------------------
        # L2 Density: Create the data process
        #-------------------------------------------------------------------------------------
        l2_density_all_data_process_id = self.dataprocessclient.create_data_process(
            self.ctd_L2_density_dprocdef_id, [ctd_parsed_data_product],
            [self.ctd_l2_density_output_dp_id])
        self.dataprocessclient.activate_data_process(
            l2_density_all_data_process_id)

        data_process = self.rrclient.read(l2_density_all_data_process_id)
        process_ids, _ = self.rrclient.find_objects(
            subject=l2_density_all_data_process_id,
            predicate=PRED.hasProcess,
            id_only=True)
        self.addCleanup(self.processdispatchclient.cancel_process,
                        process_ids[0])

        #-------------------------------------------------------------------------------------
        # Launch InstrumentAgentInstance, connect to the resource agent client
        #-------------------------------------------------------------------------------------
        self.imsclient.start_instrument_agent_instance(
            instrument_agent_instance_id=instAgentInstance_id)
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(
            instAgentInstance_id)

        # Wait for instrument agent to spawn
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     instDevice_id, ProcessStateEnum.RUNNING)
        self.assertTrue(
            gate.await(15),
            "The instrument agent instance did not spawn in 15 seconds")

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=gate.process_id,
                                              process=FakeProcess())

        #-------------------------------------------------------------------------------------
        # Streaming
        #-------------------------------------------------------------------------------------

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        reply = self._ia_client.execute_agent(cmd)
        self.assertTrue(reply.status == 0)

        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        self.assertTrue(reply.status == 0)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "(L4-CI-SA-RQ-334): current state after sending go_active command %s",
            str(state))
        self.assertEquals(state, 'DRIVER_STATE_COMMAND')

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        self.assertTrue(reply.status == 0)

        #todo ResourceAgentClient no longer has method set_param
        #        # Make sure the sampling rate and transmission are sane.
        #        params = {
        #            SBE37Parameter.NAVG : 1,
        #            SBE37Parameter.INTERVAL : 5,
        #            SBE37Parameter.TXREALTIME : True
        #        }
        #        self._ia_client.set_param(params)

        #todo There is no ResourceAgentEvent attribute for go_streaming... so what should be the command for it?
        cmd = AgentCommand(command=SBE37ProtocolEvent.START_AUTOSAMPLE)
        retval = self._ia_client.execute_resource(cmd)

        # This gevent sleep is here to test the autosample time, which will only
        # differ from the default if the instrument runs for over a minute
        gevent.sleep(90)

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id)

        self.assertIsInstance(extended_instrument.computed.uptime,
                              ComputedStringValue)

        autosample_string = extended_instrument.computed.uptime.value
        autosampling_time = int(autosample_string.split()[4])

        self.assertTrue(autosampling_time > 0)

        cmd = AgentCommand(command=SBE37ProtocolEvent.STOP_AUTOSAMPLE)
        retval = self._ia_client.execute_resource(cmd)

        #todo There is no ResourceAgentEvent attribute for go_observatory... so what should be the command for it?
        #        log.debug("test_activateInstrumentStream: calling go_observatory")
        #        cmd = AgentCommand(command='go_observatory')
        #        reply = self._ia_client.execute_agent(cmd)
        #        cmd = AgentCommand(command='get_current_state')
        #        retval = self._ia_client.execute_agent(cmd)
        #        state = retval.result
        #        log.debug("test_activateInstrumentStream: return from go_observatory state  %s", str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        self.assertTrue(reply.status == 0)

        #-------------------------------------------------------------------------------------------------
        # Cleanup processes
        #-------------------------------------------------------------------------------------------------
        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        #--------------------------------------------------------------------------------
        # Cleanup data products
        #--------------------------------------------------------------------------------
        dp_ids, _ = self.rrclient.find_resources(restype=RT.DataProduct,
                                                 id_only=True)

        for dp_id in dp_ids:
            self.dataproductclient.delete_data_product(dp_id)
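
    # The test above repeats the same create/activate/cleanup block for each of
    # the six data processes. A minimal sketch of a helper that could factor out
    # that pattern (hypothetical; not part of the original test) is shown here:
    def _start_data_process(self, dprocdef_id, in_dp_ids, out_dp_ids):
        # Create and activate a data process, and make sure the backing
        # process gets cancelled when the test finishes.
        dproc_id = self.dataprocessclient.create_data_process(
            data_process_definition_id=dprocdef_id,
            in_data_product_ids=in_dp_ids,
            out_data_product_ids=out_dp_ids)
        self.dataprocessclient.activate_data_process(dproc_id)
        process_ids, _ = self.rrclient.find_objects(subject=dproc_id,
                                                    predicate=PRED.hasProcess,
                                                    id_only=True)
        self.addCleanup(self.processdispatchclient.cancel_process,
                        process_ids[0])
        return dproc_id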
Example #29
0
class HighAvailabilityAgentSensorPolicyTest(IonIntegrationTestCase):
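    """Integration test of the HA agent's 'sensor' scaling policy: a local HTTP
    stub stands in for the Traffic Sentinel aggregator, and the test raises and
    lowers the reported 'ml' metric to make the agent scale the managed
    processes up and down.
    """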

    def _start_webserver(self, port=None):
        """ Start a webserver for testing code download
        Note: tries really hard to get a port, and if it can't use
        the suggested port, randomly picks another, and returns it
        """
        def log_message(self, format, *args):
            # swallow log messages
            pass

        class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
            server_version = 'test_server'
            extensions_map = ''
            def do_GET(self):
                self.send_response(200)
                self.send_header("Content-type", "text/plain")
                self.send_header("Content-Length", len(self.server.response))
                self.end_headers()
                self.wfile.write(self.server.response)


        class Server(HTTPServer):

            response = ''

            def serve_forever(self):
                self._serving = 1
                while self._serving:
                    self.handle_request()

            def stop(self):
                self._serving = 0

        if port is None:
            port = 8008
        Handler = TestRequestHandler
        Handler.log_message = log_message

        for i in range(0, 100):
            try:
                self._webserver = Server(("localhost", port), Handler)
            except socket.error:
                print "port %s is in use, picking another" % port
                port = randint(8000, 10000)
                continue
            else:
                break

        self._web_glet = gevent.spawn(self._webserver.serve_forever)
        return port

    def _stop_webserver(self):
        if self._webserver is not None:
            self._webserver.stop()
            gevent.sleep(2)
            self._web_glet.kill()

    @needs_epu
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')
        self.pd_cli = ProcessDispatcherServiceClient(to_name="process_dispatcher")

        self.process_definition_id = uuid4().hex
        self.process_definition =  ProcessDefinition(name='test', executable={
                'module': 'ion.agents.cei.test.test_haagent',
                'class': 'TestProcess'
        })

        self.pd_cli.create_process_definition(self.process_definition,
                self.process_definition_id)


        http_port = 8919
        http_port = self._start_webserver(port=http_port)

        self.resource_id = "haagent_4567"
        self._haa_name = "high_availability_agent"
        self._haa_config = {
            'highavailability': {
                'policy': {
                    'interval': 1,
                    'name': 'sensor',
                    'parameters': {
                        'metric': 'app_attributes:ml',
                        'sample_period': 600,
                        'sample_function': 'Average',
                        'cooldown_period': 20,
                        'scale_up_threshold': 2.0,
                        'scale_up_n_processes': 1,
                        'scale_down_threshold': 1.0,
                        'scale_down_n_processes': 1,
                        'maximum_processes': 5,
                        'minimum_processes': 1,
                    }
                },
                'aggregator': {
                    'type': 'trafficsentinel',
                    'host': 'localhost',
                    'port': http_port,
                    'protocol': 'http',
                    'username': '******',
                    'password': '******'
                },
                'process_definition_id': self.process_definition_id,
                "process_dispatchers": [
                    'process_dispatcher'
                ]
            },
            'agent': {'resource_id': self.resource_id},
        }


        self._base_procs = self.pd_cli.list_processes()

        self.waiter = ProcessStateWaiter()
        self.waiter.start()

        self.container_client = ContainerAgentClient(node=self.container.node,
            name=self.container.name)
        self._haa_pid = self.container_client.spawn_process(name=self._haa_name,
            module="ion.agents.cei.high_availability_agent",
            cls="HighAvailabilityAgent", config=self._haa_config)

        # Start a resource agent client to talk with the instrument agent.
        self._haa_pyon_client = SimpleResourceAgentClient(self.resource_id, process=FakeProcess())
        log.info('Got haa client %s.', str(self._haa_pyon_client))

        self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client)


    def tearDown(self):
        self.waiter.stop()
        self.container.terminate_process(self._haa_pid)
        self._stop_webserver()
        self._stop_container()

    def get_running_procs(self):
        """returns a normalized set of running procs (removes the ones that
        were there at setup time)
        """

        base = self._base_procs
        base_pids = [proc.process_id for proc in base]
        current = self.pd_cli.list_processes()
        current_pids = [proc.process_id for proc in current]
        print "filtering base procs %s from %s" % (base_pids, current_pids)
        normal = [cproc for cproc in current if cproc.process_id not in base_pids and cproc.process_state == ProcessStateEnum.RUNNING]
        return normal

    def _get_managed_upids(self):
        result = self.haa_client.dump().result
        upids = result['managed_upids']
        return upids

    def _set_response(self, response):
        self._webserver.response = response

    def test_sensor_policy(self):
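        """Drive the sensor policy through scale-up (ml=5), steady (ml=1.5) and
        scale-down (ml=0.5) by changing the metric values served to the agent.
        """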
        status = self.haa_client.status().result
        # Ensure HA hasn't already failed
        assert status in ('PENDING', 'READY', 'STEADY')

        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)

        self.assertEqual(len(self.get_running_procs()), 1)

        for i in range(0, 5):
            status = self.haa_client.status().result
            try:
                self.assertEqual(status, 'STEADY')
                break
            except:
                gevent.sleep(1)
        else:
            assert False, "HA Service took too long to get to state STEADY"

        # Set ml for each proc such that we scale up
        upids = self._get_managed_upids()
        response = ""
        for upid in upids:
            response += "%s,ml=5\n"
        self._set_response(response)

        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)

        self.assertEqual(len(self.get_running_procs()), 2)

        # Set ml so we stay steady
        upids = self._get_managed_upids()
        response = ""
        for upid in upids:
            response += "%s,ml=1.5\n"
        self._set_response(response)

        self.assertEqual(len(self.get_running_procs()), 2)

        for i in range(0, 5):
            status = self.haa_client.status().result
            try:
                self.assertEqual(status, 'STEADY')
                break
            except:
                gevent.sleep(1)
        else:
            assert False, "HA Service took too long to get to state STEADY"

        # Set ml so we scale down
        upids = self._get_managed_upids()
        response = ""
        for upid in upids:
            response += "%s,ml=0.5\n"
        self._set_response(response)

        self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)

        self.assertEqual(len(self.get_running_procs()), 1)

        for i in range(0, 5):
            status = self.haa_client.status().result
            try:
                self.assertEqual(status, 'STEADY')
                break
            except:
                gevent.sleep(1)
        else:
            assert False, "HA Service took too long to get to state STEADY"
class TestDataProductManagementServiceIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli           = DataProductManagementServiceClient()
        self.rrclient           = ResourceRegistryServiceClient()
        self.damsclient         = DataAcquisitionManagementServiceClient()
        self.pubsubcli          = PubsubManagementServiceClient()
        self.ingestclient       = IngestionManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc               = UserNotificationServiceClient()
        self.data_retriever     = DataRetrieverServiceClient()
        self.identcli           = IdentityManagementServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

        self.process_definitions  = {}
        ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class' :'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space       = 'science_granule_ingestion'
        self.exchange_point       = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'],configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)

    def cleaning_up(self):
        for pid in self.pids:
            log.debug("number of pids to be terminated: %s", len(self.pids))
            try:
                self.process_dispatcher.cancel_process(pid)
                log.debug("Terminated the process: %s", pid)
            except:
                log.debug("could not terminate the process id: %s" % pid)
        IngestionManagementIntTest.clean_subscriptions()

        for xn in self.exchange_names:
            xni = self.container.ex_manager.create_xn_queue(xn)
            xni.delete()
        for xp in self.exchange_points:
            xpi = self.container.ex_manager.create_xp(xp)
            xpi.delete()

    def get_datastore(self, dataset_id):
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore


    @attr('EXT')
    @attr('PREP')
    def test_create_data_product(self):
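        """Create data products against a simulated CTD stream definition, attach
        user notifications, exercise read/update/extension and prepare-support
        calls, then delete a product and verify its streams are removed.
        """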

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict')
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=parameter_dictionary._id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------




        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp')

        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 10.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = -10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = -10.0
        dp_obj.ooi_product_name = "PRODNAME"

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product( data_product= dp_obj,
                                            stream_definition_id=ctd_stream_def_id)
        # Assert that the data product has an associated stream at this stage
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, RT.Stream, True)
        self.assertNotEquals(len(stream_ids), 0)

        # Assert that the data product has an associated stream def at this stage
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStreamDefinition, RT.StreamDefinition, True)
        self.assertNotEquals(len(stream_ids), 0)

        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)
        self.assertEquals(dp_obj.geospatial_point_center.lat, 0.0)
        log.debug('Created data product %s', dp_obj)
        #------------------------------------------------------------------------------------------------
        # test creating a new data product with  a stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('Creating new data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
            name='DP2',
            description='some new dp')

        dp_id2 = self.dpsc_cli.create_data_product(dp_obj, ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id2)
        log.debug('new dp_id = %s' % dp_id2)

        #------------------------------------------------------------------------------------------------
        #make sure data product is associated with stream def
        #------------------------------------------------------------------------------------------------
        streamdefs = []
        streams, _ = self.rrclient.find_objects(dp_id2, PRED.hasStream, RT.Stream, True)
        for s in streams:
            log.debug("Checking stream %s" % s)
            sdefs, _ = self.rrclient.find_objects(s, PRED.hasStreamDefinition, RT.StreamDefinition, True)
            for sd in sdefs:
                log.debug("Checking streamdef %s" % sd)
                streamdefs.append(sd)
        self.assertIn(ctd_stream_def_id, streamdefs)

        group_names = self.dpsc_cli.get_data_product_group_list()
        self.assertIn("PRODNAME", group_names)


        #----------------------------------------------------------------------------------------
        # Create users then notifications to this data product for each user
        #----------------------------------------------------------------------------------------

        # user_1
        user_1 = UserInfo()
        user_1.name = 'user_1'
        user_1.contact.email = '*****@*****.**'

        # user_2
        user_2 = UserInfo()
        user_2.name = 'user_2'
        user_2.contact.email = '*****@*****.**'
        #user1 is a complete user
        self.subject = "/DC=org/DC=cilogon/C=US/O=ProtectNetwork/CN=Roger Unwin A254"
        actor_identity_obj = IonObject("ActorIdentity", {"name": self.subject})
        actor_id = self.identcli.create_actor_identity(actor_identity_obj)

        user_credentials_obj = IonObject("UserCredentials", {"name": self.subject})
        self.identcli.register_user_credentials(actor_id, user_credentials_obj)
        user_id_1 = self.identcli.create_user_info(actor_id, user_1)
        user_id_2, _ = self.rrclient.create(user_2)

        delivery_config1a = IonObject(OT.DeliveryConfiguration, email='*****@*****.**', mode=DeliveryModeEnum.EMAIL, frequency=NotificationFrequencyEnum.BATCH)
        delivery_config1b = IonObject(OT.DeliveryConfiguration, email='*****@*****.**', mode=DeliveryModeEnum.EMAIL, frequency=NotificationFrequencyEnum.BATCH)
        notification_request_1 = NotificationRequest(   name = "notification_1",
            origin=dp_id,
            origin_type="type_1",
            event_type=OT.ResourceLifecycleEvent,
            disabled_by_system = False,
            delivery_configurations=[delivery_config1a, delivery_config1b])

        delivery_config2a = IonObject(OT.DeliveryConfiguration, email='*****@*****.**', mode=DeliveryModeEnum.EMAIL, frequency=NotificationFrequencyEnum.BATCH)
        delivery_config2b = IonObject(OT.DeliveryConfiguration, email='*****@*****.**', mode=DeliveryModeEnum.EMAIL, frequency=NotificationFrequencyEnum.BATCH)
        notification_request_2 = NotificationRequest(   name = "notification_2",
            origin=dp_id,
            origin_type="type_2",
            disabled_by_system = False,
            event_type=OT.DetectionEvent,
            delivery_configurations=[delivery_config2a, delivery_config2b])

        notification_request_1_id = self.unsc.create_notification(notification=notification_request_1, user_id=user_id_1)
        notification_request_2_id = self.unsc.create_notification(notification=notification_request_2, user_id=user_id_2)
        self.unsc.delete_notification(notification_request_1_id)



        # test reading a non-existent data product
        log.debug('reading non-existent data product')

        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product('some_fake_id')

        # update a data product (tests read also)
        log.debug('Updating data product')
        # first get the existing dp object
        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now tweak the object
        dp_obj.description = 'the very first dp'
        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 20.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = -20.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 20.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = -20.0
        # now write the dp back to the registry
        update_result = self.dpsc_cli.update_data_product(dp_obj)


        # now get the dp back to see if it was updated
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertEquals(dp_obj.description,'the very first dp')
        self.assertEquals(dp_obj.geospatial_point_center.lat, 0.0)
        log.debug('Updated data product %s', dp_obj)

        #test extension
        extended_product = self.dpsc_cli.get_data_product_extension(dp_id)
        #validate that there is one active and one retired user notification for this data product
        self.assertEqual(1, len(extended_product.computed.active_user_subscriptions.value))
        self.assertEqual(1, len(extended_product.computed.past_user_subscriptions.value))

        self.assertEqual(dp_id, extended_product._id)
        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.product_download_size_estimated.status)
        self.assertEqual(0, extended_product.computed.product_download_size_estimated.value)

        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.parameters.status)
        #log.debug("test_create_data_product: parameters %s" % extended_product.computed.parameters.value)


        def ion_object_encoder(obj):
            return obj.__dict__


        #test prepare for create
        data_product_data = self.dpsc_cli.prepare_data_product_support()

        #print simplejson.dumps(data_product_data, default=ion_object_encoder, indent= 2)

        self.assertEqual(data_product_data._id, "")
        self.assertEqual(data_product_data.type_, OT.DataProductPrepareSupport)
        self.assertEqual(len(data_product_data.associations['StreamDefinition'].resources), 2)
        self.assertEqual(len(data_product_data.associations['Dataset'].resources), 0)
        self.assertEqual(len(data_product_data.associations['StreamDefinition'].associated_resources), 0)
        self.assertEqual(len(data_product_data.associations['Dataset'].associated_resources), 0)

        #test prepare for update
        data_product_data = self.dpsc_cli.prepare_data_product_support(dp_id)

        #print simplejson.dumps(data_product_data, default=ion_object_encoder, indent= 2)

        self.assertEqual(data_product_data._id, dp_id)
        self.assertEqual(data_product_data.type_, OT.DataProductPrepareSupport)
        self.assertEqual(len(data_product_data.associations['StreamDefinition'].resources), 2)

        self.assertEqual(len(data_product_data.associations['Dataset'].resources), 1)

        self.assertEqual(len(data_product_data.associations['StreamDefinition'].associated_resources), 1)
        self.assertEqual(data_product_data.associations['StreamDefinition'].associated_resources[0].s, dp_id)

        self.assertEqual(len(data_product_data.associations['Dataset'].associated_resources), 1)
        self.assertEqual(data_product_data.associations['Dataset'].associated_resources[0].s, dp_id)

        # now 'delete' the data product
        log.debug("deleting data product: %s" % dp_id)
        self.dpsc_cli.delete_data_product(dp_id)

        # Assert that there are no associated streams leftover after deleting the data product
        stream_ids, assoc_ids = self.rrclient.find_objects(dp_id, PRED.hasStream, RT.Stream, True)
        self.assertEquals(len(stream_ids), 0)
        self.assertEquals(len(assoc_ids), 0)

        self.dpsc_cli.force_delete_data_product(dp_id)

        # now try to get the deleted dp object
        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # Get the events corresponding to the data product
        ret = self.unsc.get_recent_events(resource_id=dp_id)
        events = ret.value

        for event in events:
            log.debug("event time: %s" % event.ts_created)

        self.assertTrue(len(events) > 0)

    def test_data_product_stream_def(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)


        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp')
        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        stream_def_id = self.dpsc_cli.get_data_product_stream_definition(dp_id)
        self.assertEquals(ctd_stream_def_id, stream_def_id)


    def test_derived_data_product(self):
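        """Create a parent CTD data product plus a DERIVED 'TEMPWAT' product whose
        stream definition exposes only 'time' and 'temp', publish one granule to
        the parent, and verify the derived dataset returns just those fields.
        """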
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='ctd parsed', parameter_dictionary_id=pdict_id)
        self.addCleanup(self.pubsubcli.delete_stream_definition, ctd_stream_def_id)


        dp = DataProduct(name='Instrument DP')
        dp_id = self.dpsc_cli.create_data_product(dp, stream_definition_id=ctd_stream_def_id)
        self.addCleanup(self.dpsc_cli.force_delete_data_product, dp_id)

        self.dpsc_cli.activate_data_product_persistence(dp_id)
        self.addCleanup(self.dpsc_cli.suspend_data_product_persistence, dp_id)


        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        dataset_id = dataset_ids[0]
        
        # Make the derived data product
        simple_stream_def_id = self.pubsubcli.create_stream_definition(name='TEMPWAT stream def', parameter_dictionary_id=pdict_id, available_fields=['time','temp'])
        tempwat_dp = DataProduct(name='TEMPWAT', category=DataProductTypeEnum.DERIVED)
        tempwat_dp_id = self.dpsc_cli.create_data_product(tempwat_dp, stream_definition_id=simple_stream_def_id, parent_data_product_id=dp_id)
        self.addCleanup(self.dpsc_cli.delete_data_product, tempwat_dp_id)
        # Check that the streams associated with the data product are persisted with
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)
        rdt['pressure'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id,route)
        
        dataset_modified = Event()
        def cb(*args, **kwargs):
            dataset_modified.set()
        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id, auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        tempwat_dataset_ids, _ = self.rrclient.find_objects(tempwat_dp_id, PRED.hasDataset, id_only=True)
        tempwat_dataset_id = tempwat_dataset_ids[0]
        granule = self.data_retriever.retrieve(tempwat_dataset_id, delivery_format=simple_stream_def_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_equal(rdt['time'], np.arange(20))
        self.assertEquals(set(rdt.fields), set(['time','temp']))


    def test_activate_suspend_data_product(self):
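        """Activate persistence for a new data product, publish a granule, verify
        the dataset is updated, and retrieve the persisted data.
        """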

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product using the stream definition
        #------------------------------------------------------------------------------------------------
        # Construct temporal and spatial Coordinate Reference System objects

        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp')

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # Subscribe to persist events
        #------------------------------------------------------------------------------------------------
        queue = gevent.queue.Queue()

        def info_event_received(message, headers):
            queue.put(message)

        es = EventSubscriber(event_type=OT.InformationContentStatusEvent, callback=info_event_received, origin=dp_id, auto_delete=True)
        es.start()
        self.addCleanup(es.stop)


        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)
        
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        dataset_id = dataset_ids[0]


        # Check that the streams associated with the data product are persisted
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        stream_id = stream_ids[0]
        route = self.pubsubcli.read_stream_route(stream_id=stream_id)

        rdt = RecordDictionaryTool(stream_definition_id=ctd_stream_def_id)
        rdt['time'] = np.arange(20)
        rdt['temp'] = np.arange(20)

        publisher = StandaloneStreamPublisher(stream_id,route)
        
        dataset_modified = Event()
        def cb(*args, **kwargs):
            dataset_modified.set()
        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id, auto_delete=True)
        es.start()
        self.addCleanup(es.stop)

        publisher.publish(rdt.to_granule())

        self.assertTrue(dataset_modified.wait(30))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_ids[0])
        self.assertIsInstance(replay_data, Granule)

        log.debug("The data retriever was able to replay the dataset that was attached to the data product "
                  "we wanted to be persisted. Therefore the data product was indeed persisted with "
                  "otherwise we could not have retrieved its dataset using the data retriever. Therefore "
                  "this demonstration shows that L4-CI-SA-RQ-267 is satisfied: 'Data product management shall persist data products'")

        data_product_object = self.rrclient.read(dp_id)
        self.assertEquals(data_product_object.name,'DP1')
        self.assertEquals(data_product_object.description,'some new dp')

        log.debug("Towards L4-CI-SA-RQ-308: 'Data product management shall persist data product metadata'. "
                  " Attributes in create for the data product obj, name= '%s', description='%s', match those of object from the "
                  "resource registry, name='%s', desc='%s'" % (dp_obj.name, dp_obj.description,data_product_object.name,
                                                           data_product_object.description))

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)


        dataset_modified.clear()

        rdt['time'] = np.arange(20,40)

        publisher.publish(rdt.to_granule())
        self.assertFalse(dataset_modified.wait(2))

        self.dpsc_cli.activate_data_product_persistence(dp_id)
        dataset_modified.clear()

        publisher.publish(rdt.to_granule())
        self.assertTrue(dataset_modified.wait(30))

        granule = self.data_retriever.retrieve(dataset_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        np.testing.assert_array_almost_equal(rdt['time'], np.arange(40))


        dataset_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasDataset, id_only=True)
        self.assertEquals(len(dataset_ids), 1)

        self.dpsc_cli.suspend_data_product_persistence(dp_id)
        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object

        with self.assertRaises(NotFound):
            dp_obj = self.rrclient.read(dp_id)


        info_event_counter = 0
        runtime = 0
        starttime = time.time()
        caught_events = []

        # Check that the four InformationContentStatusEvents were received
        while info_event_counter < 4 and runtime < 60:
            a = queue.get(timeout=60)
            caught_events.append(a)
            info_event_counter += 1
            runtime = time.time() - starttime

        self.assertEquals(info_event_counter, 4)
Example #31
0
class HighAvailabilityAgentTest(IonIntegrationTestCase):

    @needs_epu
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')
        #self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)
        self.pd_cli = ProcessDispatcherServiceClient(to_name="process_dispatcher")

        self.process_definition_id = uuid4().hex
        self.process_definition_name = 'test'
        self.process_definition =  ProcessDefinition(name=self.process_definition_name, executable={
                'module': 'ion.agents.cei.test.test_haagent',
                'class': 'TestProcess'
        })
        self.pd_cli.create_process_definition(self.process_definition, self.process_definition_id)

        self.resource_id = "haagent_1234"
        self._haa_name = "high_availability_agent"
        self._haa_dashi_name = "dashi_haa_" + uuid4().hex
        self._haa_dashi_uri = get_dashi_uri_from_cfg()
        self._haa_dashi_exchange = "%s.hatests" % bootstrap.get_sys_name()
        self._haa_config = {
            'highavailability': {
                'policy': {
                    'interval': 1,
                    'name': 'npreserving',
                    'parameters': {
                        'preserve_n': 0
                    }
                },
                'process_definition_id': self.process_definition_id,
                'dashi_messaging' : True,
                'dashi_exchange' : self._haa_dashi_exchange,
                'dashi_name': self._haa_dashi_name
            },
            'agent': {'resource_id': self.resource_id},
        }

        self._base_services, _ = self.container.resource_registry.find_resources(
                restype="Service", name=self.process_definition_name)

        self._base_procs = self.pd_cli.list_processes()

        self.waiter = ProcessStateWaiter()
        self.waiter.start()

        self.container_client = ContainerAgentClient(node=self.container.node,
            name=self.container.name)
        self._haa_pid = self.container_client.spawn_process(name=self._haa_name,
            module="ion.agents.cei.high_availability_agent",
            cls="HighAvailabilityAgent", config=self._haa_config)

        # Start a resource agent client to talk with the HA agent.
        self._haa_pyon_client = SimpleResourceAgentClient(self.resource_id, process=FakeProcess())
        log.info('Got haa client %s.', str(self._haa_pyon_client))

        self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client)


    def tearDown(self):
        self.waiter.stop()
        try:
            self.container.terminate_process(self._haa_pid)
        except BadRequest:
            log.warning("Couldn't terminate HA Agent in teardown (May have been terminated by a test)")
        self._stop_container()

    def get_running_procs(self):
        """returns a normalized set of running procs (removes the ones that
        were there at setup time)
        """

        base = self._base_procs
        base_pids = [proc.process_id for proc in base]
        current = self.pd_cli.list_processes()
        current_pids = [proc.process_id for proc in current]
        print "filtering base procs %s from %s" % (base_pids, current_pids)
        normal = [cproc for cproc in current if cproc.process_id not in base_pids and cproc.process_state == ProcessStateEnum.RUNNING]
        return normal

    def get_new_services(self):

        base = self._base_services
        base_names = [i.name for i in base]
        services_registered, _ = self.container.resource_registry.find_resources(
                restype="Service", name=self.process_definition_name)
        current_names = [i.name for i in services_registered]
        normal = [cserv for cserv in services_registered if cserv.name not in base_names]
        return normal

    def await_ha_state(self, want_state, timeout=10):

        for i in range(0, timeout):
            status = self.haa_client.status().result
            if status == want_state:
                return
            gevent.sleep(1)

        raise Exception("Took more than %s to get to ha state %s" % (timeout, want_state))


    def test_features(self):
        status = self.haa_client.status().result
        # Ensure HA hasn't already failed
        assert status in ('PENDING', 'READY', 'STEADY')


        # verifies L4-CI-CEI-RQ44
        # Note: the HA agent is started in the setUp() method, with config
        # pointing to the test "service". The initial config is set to preserve
        # 0 service processes. With this reconfigure step below, we change that
        # to launch 1.

        new_policy = {'preserve_n': 1}
        self.haa_client.reconfigure_policy(new_policy)

        result = self.haa_client.dump().result
        self.assertEqual(result['policy'], new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)

        self.assertEqual(len(self.get_running_procs()), 1)

        for i in range(0, 5):
            status = self.haa_client.status().result
            try:
                self.assertEqual(status, 'STEADY')
                break
            except:
                gevent.sleep(1)
        else:
            assert False, "HA Service took too long to get to state STEADY"

        # verifies L4-CI-CEI-RQ122 and L4-CI-CEI-RQ124

        new_policy = {'preserve_n': 2}
        self.haa_client.reconfigure_policy(new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)

        self.assertEqual(len(self.get_running_procs()), 2)

        new_policy = {'preserve_n': 1}
        self.haa_client.reconfigure_policy(new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)

        self.assertEqual(len(self.get_running_procs()), 1)

        new_policy = {'preserve_n': 0}
        self.haa_client.reconfigure_policy(new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)
        self.assertEqual(len(self.get_running_procs()), 0)

    def test_associations(self):

        # Ensure that once the HA Agent starts, there is a Service object in
        # the registry
        result = self.haa_client.dump().result
        service_id = result.get('service_id')
        self.assertIsNotNone(service_id)
        service = self.container.resource_registry.read(service_id)
        self.assertIsNotNone(service)

        # Ensure that once a process is started, there is an association between
        # it and the service
        new_policy = {'preserve_n': 1}
        self.haa_client.reconfigure_policy(new_policy)
        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)
        self.assertEqual(len(self.get_running_procs()), 1)

        self.await_ha_state('STEADY')

        proc = self.get_running_procs()[0]

        processes_associated, _ = self.container.resource_registry.find_resources(
                restype="Process", name=proc.process_id)
        self.assertEqual(len(processes_associated), 1)

        has_processes = self.container.resource_registry.find_associations(
            service, "hasProcess")
        self.assertEqual(len(has_processes), 1)

        self.await_ha_state('STEADY')

        # Ensure that once we terminate that process, there are no associations
        new_policy = {'preserve_n': 0}
        self.haa_client.reconfigure_policy(new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)
        self.assertEqual(len(self.get_running_procs()), 0)

        processes_associated, _ = self.container.resource_registry.find_resources(
                restype="Process", name=proc.process_id)
        self.assertEqual(len(processes_associated), 0)

        has_processes = self.container.resource_registry.find_associations(
            service, "hasProcess")
        self.assertEqual(len(has_processes), 0)

        # Ensure that once we terminate that HA Agent, the Service object is
        # cleaned up
        self.container.terminate_process(self._haa_pid)

        with self.assertRaises(NotFound):
            service = self.container.resource_registry.read(service_id)

    def test_dashi(self):

        import dashi

        dashi_conn = dashi.DashiConnection("something", self._haa_dashi_uri,
            self._haa_dashi_exchange)

        status = dashi_conn.call(self._haa_dashi_name, "status")
        assert status in ('PENDING', 'READY', 'STEADY')

        new_policy = {'preserve_n': 0}
        dashi_conn.call(self._haa_dashi_name, "reconfigure_policy",
            new_policy=new_policy)
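
    def await_dashi_state(self, dashi_conn, want_state, timeout=10):
        # Sketch only, not part of the original example: polling the HA agent over dashi
        # until it reports the desired state, mirroring await_ha_state() above. Uses only
        # the dashi "status" operation and gevent, both shown in this example.
        for _ in range(timeout):
            if dashi_conn.call(self._haa_dashi_name, "status") == want_state:
                return
            gevent.sleep(1)
        raise Exception("HA agent did not reach state %s within %s seconds" % (want_state, timeout))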
class EventTriggeredTransformIntTest(IonIntegrationTestCase):
    def setUp(self):
        super(EventTriggeredTransformIntTest, self).setUp()

        self._start_container()
        self.container.start_rel_from_url("res/deploy/r2deploy.yml")

        self.queue_cleanup = []
        self.exchange_cleanup = []

        self.pubsub = PubsubManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()

        self.exchange_name = "test_queue"
        self.exchange_point = "test_exchange"

        self.dataset_management = DatasetManagementServiceClient()

    def tearDown(self):
        for queue in self.queue_cleanup:
            xn = self.container.ex_manager.create_xn_queue(queue)
            xn.delete()
        for exchange in self.exchange_cleanup:
            xp = self.container.ex_manager.create_xp(exchange)
            xp.delete()

    @attr("LOCOINT")
    @unittest.skipIf(os.getenv("CEI_LAUNCH_TEST", False), "Skip test while in CEI LAUNCH mode")
    def test_event_triggered_transform_A(self):
        """
        Test that packets are processed by the event triggered transform
        """

        # ---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        # ---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name="EventTriggeredTransform_A", description="For testing EventTriggeredTransform_A"
        )
        process_definition.executable["module"] = "ion.processes.data.transforms.event_triggered_transform"
        process_definition.executable["class"] = "EventTriggeredTransform_A"
        event_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition
        )

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name("ctd_parsed_param_dict", id_only=True)

        stream_def_id = self.pubsub.create_stream_definition("cond_stream_def", parameter_dictionary_id=pdict_id)
        cond_stream_id, _ = self.pubsub.create_stream(
            "test_conductivity", exchange_point="science_data", stream_definition_id=stream_def_id
        )

        config.process.publish_streams.conductivity = cond_stream_id
        config.process.event_type = "ResourceLifecycleEvent"

        # Schedule the process
        self.process_dispatcher.schedule_process(
            process_definition_id=event_transform_proc_def_id, configuration=config
        )

        # ---------------------------------------------------------------------------------------------
        # Publish an event to wake up the event triggered transform
        # ---------------------------------------------------------------------------------------------

        event_publisher = EventPublisher("ResourceLifecycleEvent")
        event_publisher.publish_event(origin="fake_origin")

        # ---------------------------------------------------------------------------------------------
        # Create subscribers that will receive the conductivity, temperature and pressure granules from
        # the ctd transform
        # ---------------------------------------------------------------------------------------------
        ar_cond = gevent.event.AsyncResult()

        def subscriber1(m, r, s):
            ar_cond.set(m)

        sub_event_transform = StandaloneStreamSubscriber("sub_event_transform", subscriber1)
        self.addCleanup(sub_event_transform.stop)

        sub_event_transform_id = self.pubsub.create_subscription(
            "subscription_cond", stream_ids=[cond_stream_id], exchange_name="sub_event_transform"
        )

        self.pubsub.activate_subscription(sub_event_transform_id)

        self.queue_cleanup.append(sub_event_transform.xn.queue)

        sub_event_transform.start()

        # ------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        # ------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = "stream_id.stream"
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind("stream_id.stream", xp)

        pub = StandaloneStreamPublisher("stream_id", stream_route)

        # Build a packet that can be published
        self.px_ctd = SimpleCtdPublisher()
        publish_granule = self._get_new_ctd_packet(stream_definition_id=stream_def_id, length=5)

        # Publish the packet
        pub.publish(publish_granule)

        # ------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        # ------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result_cond = ar_cond.get(timeout=10)
        self.assertTrue(isinstance(result_cond, Granule))

        rdt = RecordDictionaryTool.load_from_granule(result_cond)
        self.assertIn("conductivity", rdt)

        self.check_cond_algorithm_execution(publish_granule, result_cond)

    def check_cond_algorithm_execution(self, publish_granule, granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        output_data = output_rdt_transform["conductivity"]
        input_data = input_rdt_to_transform["conductivity"]

        # Elementwise check that the transform applied: output = input / 100000.0 - 0.5
        self.assertTrue(numpy.allclose(input_data / 100000.0 - 0.5, output_data))
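
    def _expected_conductivity(self, raw_value):
        # Illustrative sketch, not in the original example: the linear scaling asserted by
        # check_cond_algorithm_execution() above, output = raw / 100000.0 - 0.5, so a raw
        # conductivity of 100000.0 maps to an output of 0.5.
        return raw_value / 100000.0 - 0.5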

    def _get_new_ctd_packet(self, stream_definition_id, length):

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        for field in rdt:
            rdt[field] = numpy.array([random.uniform(0.0, 75.0) for i in xrange(length)])

        g = rdt.to_granule()

        return g

    def test_event_triggered_transform_B(self):
        """
        Test that packets are processed by the event triggered transform
        """

        # ---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        # ---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name="EventTriggeredTransform_B", description="For testing EventTriggeredTransform_B"
        )
        process_definition.executable["module"] = "ion.processes.data.transforms.event_triggered_transform"
        process_definition.executable["class"] = "EventTriggeredTransform_B"
        event_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition
        )

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name("ctd_parsed_param_dict", id_only=True)

        stream_def_id = self.pubsub.create_stream_definition("stream_def", parameter_dictionary_id=pdict_id)
        stream_id, _ = self.pubsub.create_stream(
            "test_stream", exchange_point="science_data", stream_definition_id=stream_def_id
        )

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point
        config.process.publish_streams.output = stream_id
        config.process.event_type = "ResourceLifecycleEvent"
        config.process.stream_id = stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(
            process_definition_id=event_transform_proc_def_id, configuration=config
        )

        # ---------------------------------------------------------------------------------------------
        # Publish an event to wake up the event triggered transform
        # ---------------------------------------------------------------------------------------------

        event_publisher = EventPublisher("ResourceLifecycleEvent")
        event_publisher.publish_event(origin="fake_origin")
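
    def _verify_transform_b_output(self, stream_id):
        # Sketch only, not part of the original example: test B stops after publishing the
        # wake-up event; a verification step analogous to test A's would subscribe to the
        # transform's output stream and assert that a Granule arrives. Subscriber and queue
        # names here are illustrative.
        ar = gevent.event.AsyncResult()
        sub = StandaloneStreamSubscriber("sub_event_transform_b", lambda m, r, s: ar.set(m))
        self.addCleanup(sub.stop)
        sub_id = self.pubsub.create_subscription("subscription_b", stream_ids=[stream_id],
                                                 exchange_name="sub_event_transform_b")
        self.pubsub.activate_subscription(sub_id)
        self.queue_cleanup.append(sub.xn.queue)
        sub.start()
        result = ar.get(timeout=10)
        self.assertTrue(isinstance(result, Granule))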
Example #33
0
class TestDataProductManagementServiceIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli = DataProductManagementServiceClient(node=self.container.node)
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
        self.process_dispatcher   = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(datastore_name)
        self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

        self.process_definitions  = {}
        ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class' :'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space       = 'science_granule_ingestion'
        self.exchange_point       = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'],configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)

    def cleaning_up(self):
        for pid in self.pids:
            log.debug("number of pids to be terminated: %s", len(self.pids))
            try:
                self.process_dispatcher.cancel_process(pid)
                log.debug("Terminated the process: %s", pid)
            except:
                log.debug("could not terminate the process id: %s" % pid)
        IngestionManagementIntTest.clean_subscriptions()

        for xn in self.exchange_names:
            xni = self.container.ex_manager.create_xn_queue(xn)
            xni.delete()
        for xp in self.exchange_points:
            xpi = self.container.ex_manager.create_xp(xp)
            xpi.delete()

    def get_datastore(self, dataset_id):
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore


    def test_create_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=parameter_dictionary_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product (DP1) using the stream definition
        #------------------------------------------------------------------------------------------------

        # Generic time-series data domain creation
        tdom, sdom = time_series_domain()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom.dump(), 
            spatial_domain = sdom.dump())

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product( data_product= dp_obj,
                                            stream_definition_id=ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        #------------------------------------------------------------------------------------------------
        # test creating a second data product (DP2) with the same stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('Creating new data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
            name='DP2',
            description='some new dp',
            temporal_domain = tdom.dump(),
            spatial_domain = sdom.dump())

        dp_id2 = self.dpsc_cli.create_data_product(dp_obj, ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id2)
        log.debug('new dp_id = %s' % dp_id2)

        #------------------------------------------------------------------------------------------------
        #make sure data product is associated with stream def
        #------------------------------------------------------------------------------------------------
        streamdefs = []
        streams, _ = self.rrclient.find_objects(dp_id2, PRED.hasStream, RT.Stream, True)
        for s in streams:
            log.debug("Checking stream %s" % s)
            sdefs, _ = self.rrclient.find_objects(s, PRED.hasStreamDefinition, RT.StreamDefinition, True)
            for sd in sdefs:
                log.debug("Checking streamdef %s" % sd)
                streamdefs.append(sd)
        self.assertIn(ctd_stream_def_id, streamdefs)


        # test reading a non-existent data product
        log.debug('reading non-existent data product')

        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product('some_fake_id')

        # update a data product (tests read also)
        log.debug('Updating data product')
        # first get the existing dp object
        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now tweak the object
        dp_obj.description = 'the very first dp'
        # now write the dp back to the registry
        update_result = self.dpsc_cli.update_data_product(dp_obj)

        # now get the dp back to see if it was updated
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertEquals(dp_obj.description,'the very first dp')

        #test extension
        extended_product = self.dpsc_cli.get_data_product_extension(dp_id)
        self.assertEqual(dp_id, extended_product._id)
        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.product_download_size_estimated.status)
        self.assertEqual(0, extended_product.computed.product_download_size_estimated.value)

        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.parameters.status)
        #log.debug("test_create_data_product: parameters %s" % extended_product.computed.parameters.value)

        # now 'delete' the data product
        log.debug("deleting data product: %s" % dp_id)
        self.dpsc_cli.delete_data_product(dp_id)
        self.dpsc_cli.force_delete_data_product(dp_id)

        # now try to get the deleted dp object
        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # Get the events corresponding to the data product
        ret = self.unsc.get_recent_events(resource_id=dp_id)
        events = ret.value

        for event in events:
            log.debug("event time: %s" % event.ts_created)

#        self.assertTrue(len(events) > 0)

    def test_data_product_stream_def(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)

        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)
        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        stream_def_id = self.dpsc_cli.get_data_product_stream_definition(dp_id)
        self.assertEquals(ctd_stream_def_id, stream_def_id)
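
    def _stream_def_via_registry(self, dp_id):
        # Sketch only, not part of the original example: a direct registry walk over the
        # DataProduct -> hasStream -> hasStreamDefinition associations used in
        # test_create_data_product above; for this test's data product it should yield the
        # same id that get_data_product_stream_definition() returned.
        stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, RT.Stream, True)
        stream_def_ids, _ = self.rrclient.find_objects(stream_ids[0], PRED.hasStreamDefinition,
                                                       RT.StreamDefinition, True)
        return stream_def_ids[0]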



    def test_activate_suspend_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product using the stream definition
        #------------------------------------------------------------------------------------------------
        # Construct temporal and spatial Coordinate Reference System objects
        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)
        
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        self.get_datastore(dataset_ids[0])


        # Check that the streams associated with the data product are persisted
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_ids[0])
        self.assertIsInstance(replay_data, Granule)

        log.debug("The data retriever was able to replay the dataset that was attached to the data product "
                  "we wanted to be persisted. Therefore the data product was indeed persisted with "
                  "otherwise we could not have retrieved its dataset using the data retriever. Therefore "
                  "this demonstration shows that L4-CI-SA-RQ-267 is satisfied: 'Data product management shall persist data products'")

        data_product_object = self.rrclient.read(dp_id)
        self.assertEquals(data_product_object.name,'DP1')
        self.assertEquals(data_product_object.description,'some new dp')

        log.debug("Towards L4-CI-SA-RQ-308: 'Data product management shall persist data product metadata'. "
                  " Attributes in create for the data product obj, name= '%s', description='%s', match those of object from the "
                  "resource registry, name='%s', desc='%s'" % (dp_obj.name, dp_obj.description,data_product_object.name,
                                                           data_product_object.description))

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)

        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object

        with self.assertRaises(NotFound):
            dp_obj = self.rrclient.read(dp_id)
class TestCTDTransformsIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()
        #container = Container()
        #print 'starting container'
        #container.start()
        #print 'started container'

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        print 'started services'

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubclient =  PubsubManagementServiceClient(node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(node=self.container.node)
        self.datasetclient =  DatasetManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module':'ion.processes.data.stream_granule_logger',
            'class':'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process':{
                'stream_id':stream_id,
                }
        }
        pid = self.processdispatchclient.schedule_process(process_definition_id= logger_procdef_id, configuration=configuration)

        return pid

    def test_createTransformsThenActivateInstrument(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel, name='SBE37IMModel', description="SBE37IMModel", model="SBE37IMModel" )
        try:
            instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentModel: %s" %ex)
        log.debug( 'new InstrumentModel id = %s ', instModel_id)

        # Create InstrumentAgent
        instAgent_obj = IonObject(RT.InstrumentAgent, name='agent007', description="SBE37IMAgent", driver_module="ion.agents.instrument.instrument_agent", driver_class="InstrumentAgent" )
        try:
            instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentAgent: %s" %ex)
        log.debug( 'new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug('test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) ')
        instDevice_obj = IonObject(RT.InstrumentDevice, name='SBE37IMDevice', description="SBE37IMDevice", serial_number="12345" )
        try:
            instDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)
            self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, instDevice_id)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentDevice: %s" %ex)

        log.debug("test_activateInstrumentSample: new InstrumentDevice id = %s    (SA Req: L4-CI-SA-RQ-241) ", instDevice_id)

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance, name='SBE37IMAgentInstance', description="SBE37IMAgentInstance",
                                          driver_module='mi.instrument.seabird.sbe37smb.ooicore.driver', driver_class='SBE37Driver',
                                          comms_device_address='sbe37-simulator.oceanobservatories.org',   comms_device_port=4001,  port_agent_work_dir='/tmp/', port_agent_delimeter=['<<','>>'] )
        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(instAgentInstance_obj, instAgent_id, instDevice_id)

        # create a stream definition for the data from the ctd simulator
        ctd_stream_def = SBE37_CDM_stream_definition()
        ctd_stream_def_id = self.pubsubclient.create_stream_definition(container=ctd_stream_def)

        log.debug( 'new Stream Definition id = %s', ctd_stream_def_id)

        log.debug( 'Creating new CDM data product with a stream definition')

        craft = CoverageCraft
        sdom, tdom = craft.create_domains()
        sdom = sdom.dump()
        tdom = tdom.dump()
        parameter_dictionary = craft.create_parameters()
        parameter_dictionary = parameter_dictionary.dump()

        dp_obj = IonObject(RT.DataProduct,
            name='the parsed data',
            description='ctd stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        ctd_parsed_data_product = self.dataproductclient.create_data_product(dp_obj, ctd_stream_def_id, parameter_dictionary)
        log.debug( 'new dp_id = %s', ctd_parsed_data_product)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=ctd_parsed_data_product)

        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_parsed_data_product)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(ctd_parsed_data_product, PRED.hasStream, None, True)
        log.debug( 'Data product streams1 = %s', stream_ids)

        pid = self.create_logger('ctd_parsed', stream_ids[0] )
        self.loggerpids.append(pid)
 
        print 'test_createTransformsThenActivateInstrument: Data product streams1 = ', stream_ids

        #-------------------------------
        # Create CTD Raw as the second data product
        #-------------------------------
        log.debug( 'Creating new RAW data product with a stream definition')
        raw_stream_def = SBE37_RAW_stream_definition()
        raw_stream_def_id = self.pubsubclient.create_stream_definition(container=raw_stream_def)

        dp_obj = IonObject(RT.DataProduct,
            name='the raw data',
            description='raw stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        ctd_raw_data_product = self.dataproductclient.create_data_product(dp_obj, raw_stream_def_id, parameter_dictionary)
        log.debug( 'new dp_id = %s', str(ctd_raw_data_product))

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=ctd_raw_data_product)

        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_raw_data_product)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(ctd_raw_data_product, PRED.hasStream, None, True)
        log.debug( 'Data product streams2 = %s', str(stream_ids))

        #todo: attaching the taxonomy to the stream is a TEMPORARY measure
        # Create taxonomies for both parsed and attach to the stream
        RawTax = TaxyTool()
        RawTax.add_taxonomy_set('raw_fixed','Fixed length bytes in an array of records')
        RawTax.add_taxonomy_set('raw_blob','Unlimited length bytes in an array')

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(ctd_raw_data_product, PRED.hasStream, None, True)
        print 'Data product streams2 = ', stream_ids



        #-------------------------------
        # L0 Conductivity - Temperature - Pressure: Data Process Definition
        #-------------------------------
        log.debug("TestIntDataProcessMgmtServiceMultiOut: create data process definition ctd_L0_all")
        dpd_obj = IonObject(RT.DataProcessDefinition,
                            name='ctd_L0_all',
                            description='transform ctd package into three separate L0 streams',
                            module='ion.processes.data.transforms.ctd.ctd_L0_all',
                            class_name='ctd_L0_all',
                            process_source='some_source_reference')
        try:
            ctd_L0_all_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except BadRequest as ex:
            self.fail("failed to create new ctd_L0_all data process definition: %s" %ex)


        #-------------------------------
        # L1 Conductivity: Data Process Definition
        #-------------------------------
        log.debug("TestIntDataProcessMgmtServiceMultiOut: create data process definition CTDL1ConductivityTransform")
        dpd_obj = IonObject(RT.DataProcessDefinition,
                            name='ctd_L1_conductivity',
                            description='create the L1 conductivity data product',
                            module='ion.processes.data.transforms.ctd.ctd_L1_conductivity',
                            class_name='CTDL1ConductivityTransform',
                            process_source='CTDL1ConductivityTransform source code here...')
        try:
            ctd_L1_conductivity_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except BadRequest as ex:
            self.fail("failed to create new CTDL1ConductivityTransform data process definition: %s" %ex)

        #-------------------------------
        # L1 Pressure: Data Process Definition
        #-------------------------------
        log.debug("TestIntDataProcessMgmtServiceMultiOut: create data process definition CTDL1PressureTransform")
        dpd_obj = IonObject(RT.DataProcessDefinition,
                            name='ctd_L1_pressure',
                            description='create the L1 pressure data product',
                            module='ion.processes.data.transforms.ctd.ctd_L1_pressure',
                            class_name='CTDL1PressureTransform',
                            process_source='CTDL1PressureTransform source code here...')
        try:
            ctd_L1_pressure_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except BadRequest as ex:
            self.fail("failed to create new CTDL1PressureTransform data process definition: %s" %ex)


        #-------------------------------
        # L1 Temperature: Data Process Definition
        #-------------------------------
        log.debug("TestIntDataProcessMgmtServiceMultiOut: create data process definition CTDL1TemperatureTransform")
        dpd_obj = IonObject(RT.DataProcessDefinition,
                            name='ctd_L1_temperature',
                            description='create the L1 temperature data product',
                            module='ion.processes.data.transforms.ctd.ctd_L1_temperature',
                            class_name='CTDL1TemperatureTransform',
                            process_source='CTDL1TemperatureTransform source code here...')
        try:
            ctd_L1_temperature_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except BadRequest as ex:
            self.fail("failed to create new CTDL1TemperatureTransform data process definition: %s" %ex)


        #-------------------------------
        # L2 Salinity: Data Process Definition
        #-------------------------------
        log.debug("TestIntDataProcessMgmtServiceMultiOut: create data process definition SalinityTransform")
        dpd_obj = IonObject(RT.DataProcessDefinition,
                            name='ctd_L2_salinity',
                            description='create the L2 salinity data product',
                            module='ion.processes.data.transforms.ctd.ctd_L2_salinity',
                            class_name='SalinityTransform',
                            process_source='SalinityTransform source code here...')
        try:
            ctd_L2_salinity_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except BadRequest as ex:
            self.fail("failed to create new SalinityTransform data process definition: %s" %ex)


        #-------------------------------
        # L2 Density: Data Process Definition
        #-------------------------------
        log.debug("TestIntDataProcessMgmtServiceMultiOut: create data process definition DensityTransform")
        dpd_obj = IonObject(RT.DataProcessDefinition,
                            name='ctd_L2_density',
                            description='create the L2 density data product',
                            module='ion.processes.data.transforms.ctd.ctd_L2_density',
                            class_name='DensityTransform',
                            process_source='DensityTransform source code here...')
        try:
            ctd_L2_density_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except BadRequest as ex:
            self.fail("failed to create new DensityTransform data process definition: %s" %ex)





        #-------------------------------
        # L0 Conductivity - Temperature - Pressure: Output Data Products
        #-------------------------------

        outgoing_stream_l0_conductivity = L0_conductivity_stream_definition()
        outgoing_stream_l0_conductivity_id = self.pubsubclient.create_stream_definition(container=outgoing_stream_l0_conductivity, name='L0_Conductivity')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(outgoing_stream_l0_conductivity_id, ctd_L0_all_dprocdef_id )

        outgoing_stream_l0_pressure = L0_pressure_stream_definition()
        outgoing_stream_l0_pressure_id = self.pubsubclient.create_stream_definition(container=outgoing_stream_l0_pressure, name='L0_Pressure')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(outgoing_stream_l0_pressure_id, ctd_L0_all_dprocdef_id )

        outgoing_stream_l0_temperature = L0_temperature_stream_definition()
        outgoing_stream_l0_temperature_id = self.pubsubclient.create_stream_definition(container=outgoing_stream_l0_temperature, name='L0_Temperature')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(outgoing_stream_l0_temperature_id, ctd_L0_all_dprocdef_id )


        self.output_products={}
        log.debug("test_createTransformsThenActivateInstrument: create output data product L0 conductivity")

        ctd_l0_conductivity_output_dp_obj = IonObject(  RT.DataProduct,
                                                        name='L0_Conductivity',
                                                        description='transform output conductivity',
                                                        temporal_domain = tdom,
                                                        spatial_domain = sdom)

        ctd_l0_conductivity_output_dp_id = self.dataproductclient.create_data_product(ctd_l0_conductivity_output_dp_obj,
                                                                                    outgoing_stream_l0_conductivity_id,
                                                                                    parameter_dictionary)
        self.output_products['conductivity'] = ctd_l0_conductivity_output_dp_id
        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l0_conductivity_output_dp_id)


        log.debug("test_createTransformsThenActivateInstrument: create output data product L0 pressure")

        ctd_l0_pressure_output_dp_obj = IonObject(  RT.DataProduct,
                                                    name='L0_Pressure',
                                                    description='transform output pressure',
                                                    temporal_domain = tdom,
                                                    spatial_domain = sdom)

        ctd_l0_pressure_output_dp_id = self.dataproductclient.create_data_product(ctd_l0_pressure_output_dp_obj,
                                                                                    outgoing_stream_l0_pressure_id,
                                                                                    parameter_dictionary)
        self.output_products['pressure'] = ctd_l0_pressure_output_dp_id
        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l0_pressure_output_dp_id)

        log.debug("test_createTransformsThenActivateInstrument: create output data product L0 temperature")

        ctd_l0_temperature_output_dp_obj = IonObject(   RT.DataProduct,
                                                        name='L0_Temperature',
                                                        description='transform output temperature',
                                                        temporal_domain = tdom,
                                                        spatial_domain = sdom)

        ctd_l0_temperature_output_dp_id = self.dataproductclient.create_data_product(ctd_l0_temperature_output_dp_obj,
                                                                                    outgoing_stream_l0_temperature_id,
                                                                                    parameter_dictionary)
        self.output_products['temperature'] = ctd_l0_temperature_output_dp_id
        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l0_temperature_output_dp_id)


        #-------------------------------
        # L1 Conductivity - Temperature - Pressure: Output Data Products
        #-------------------------------

        outgoing_stream_l1_conductivity = L1_conductivity_stream_definition()
        outgoing_stream_l1_conductivity_id = self.pubsubclient.create_stream_definition(container=outgoing_stream_l1_conductivity, name='L1_conductivity')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(outgoing_stream_l1_conductivity_id, ctd_L1_conductivity_dprocdef_id )

        outgoing_stream_l1_pressure = L1_pressure_stream_definition()
        outgoing_stream_l1_pressure_id = self.pubsubclient.create_stream_definition(container=outgoing_stream_l1_pressure, name='L1_Pressure')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(outgoing_stream_l1_pressure_id, ctd_L1_pressure_dprocdef_id )

        outgoing_stream_l1_temperature = L1_temperature_stream_definition()
        outgoing_stream_l1_temperature_id = self.pubsubclient.create_stream_definition(container=outgoing_stream_l1_temperature, name='L1_Temperature')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(outgoing_stream_l1_temperature_id, ctd_L1_temperature_dprocdef_id )

        log.debug("test_createTransformsThenActivateInstrument: create output data product L1 conductivity")

        ctd_l1_conductivity_output_dp_obj = IonObject(  RT.DataProduct,
                                                        name='L1_Conductivity',
                                                        description='transform output L1 conductivity',
                                                        temporal_domain = tdom,
                                                        spatial_domain = sdom)

        ctd_l1_conductivity_output_dp_id = self.dataproductclient.create_data_product(ctd_l1_conductivity_output_dp_obj,
                                                                                        outgoing_stream_l1_conductivity_id,
                                                                                        parameter_dictionary)
        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l1_conductivity_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(ctd_l1_conductivity_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l1_conductivity', stream_ids[0] )
        self.loggerpids.append(pid)
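        # create_logger() presumably spawns a stream granule logger process subscribed to
        # the product's output stream so published granules are visible in the container
        # log; the pids are collected in self.loggerpids so they can be cancelled at the
        # end of the test.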

        log.debug("test_createTransformsThenActivateInstrument: create output data product L1 pressure")

        ctd_l1_pressure_output_dp_obj = IonObject(  RT.DataProduct,
                                                    name='L1_Pressure',
                                                    description='transform output L1 pressure',
                                                    temporal_domain = tdom,
                                                    spatial_domain = sdom)

        ctd_l1_pressure_output_dp_id = self.dataproductclient.create_data_product(ctd_l1_pressure_output_dp_obj,
                                                                                outgoing_stream_l1_pressure_id,
                                                                                parameter_dictionary
        )
        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l1_pressure_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(ctd_l1_pressure_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l1_pressure', stream_ids[0] )
        self.loggerpids.append(pid)

        log.debug("test_createTransformsThenActivateInstrument: create output data product L1 temperature")

        ctd_l1_temperature_output_dp_obj = IonObject(   RT.DataProduct,
                                                        name='L1_Temperature',
                                                        description='transform output L1 temperature',
                                                        temporal_domain = tdom,
                                                        spatial_domain = sdom)

        ctd_l1_temperature_output_dp_id = self.dataproductclient.create_data_product(ctd_l1_temperature_output_dp_obj,
                                                                                    outgoing_stream_l1_temperature_id,
                                                                                    parameter_dictionary)
        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l1_temperature_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(ctd_l1_temperature_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l1_temperature', stream_ids[0] )
        self.loggerpids.append(pid)

        #-------------------------------
        # L2 Salinity - Density: Output Data Products
        #-------------------------------

        outgoing_stream_l2_salinity = L2_practical_salinity_stream_definition()
        outgoing_stream_l2_salinity_id = self.pubsubclient.create_stream_definition(container=outgoing_stream_l2_salinity, name='L2_salinity')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(outgoing_stream_l2_salinity_id, ctd_L2_salinity_dprocdef_id )

        outgoing_stream_l2_density = L2_density_stream_definition()
        outgoing_stream_l2_density_id = self.pubsubclient.create_stream_definition(container=outgoing_stream_l2_density, name='L2_Density')
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(outgoing_stream_l2_density_id, ctd_L2_density_dprocdef_id )

        log.debug("test_createTransformsThenActivateInstrument: create output data product L2 Salinity")

        ctd_l2_salinity_output_dp_obj = IonObject(RT.DataProduct,
            name='L2_Salinity',
            description='transform output L2 salinity',
            temporal_domain = tdom,
            spatial_domain = sdom)

        ctd_l2_salinity_output_dp_id = self.dataproductclient.create_data_product(ctd_l2_salinity_output_dp_obj,
                                                                                outgoing_stream_l2_salinity_id,
                                                                                parameter_dictionary)

        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l2_salinity_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l2_salinity', stream_ids[0] )
        self.loggerpids.append(pid)

        log.debug("test_createTransformsThenActivateInstrument: create output data product L2 Density")

        ctd_l2_density_output_dp_obj = IonObject(   RT.DataProduct,
                                                    name='L2_Density',
                                                    description='transform output density',
                                                    temporal_domain = tdom,
                                                    spatial_domain = sdom)

        ctd_l2_density_output_dp_id = self.dataproductclient.create_data_product(ctd_l2_density_output_dp_obj,
                                                                                outgoing_stream_l2_density_id,
                                                                                parameter_dictionary)

        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l2_density_output_dp_id)
        # Retrieve the id of the OUTPUT stream from the out Data Product and add to granule logger
        stream_ids, _ = self.rrclient.find_objects(ctd_l2_density_output_dp_id, PRED.hasStream, None, True)
        pid = self.create_logger('ctd_l2_density', stream_ids[0] )
        self.loggerpids.append(pid)


        #-------------------------------
        # L0 Conductivity - Temperature - Pressure: Create the data process
        #-------------------------------
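        # self.output_products (filled in above) maps the transform output names
        # 'conductivity', 'pressure' and 'temperature' to the three L0 data products,
        # so the single L0 data process below presumably publishes each of its named
        # outputs to the corresponding product stream.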
        log.debug("test_createTransformsThenActivateInstrument: create L0 all data_process start")
        try:
            ctd_l0_all_data_process_id = self.dataprocessclient.create_data_process(ctd_L0_all_dprocdef_id, [ctd_parsed_data_product], self.output_products)
            self.dataprocessclient.activate_data_process(ctd_l0_all_data_process_id)
        except BadRequest as ex:
            self.fail("failed to create new data process: %s" %ex)

        log.debug("test_createTransformsThenActivateInstrument: create L0 all data_process return")


        #-------------------------------
        # L1 Conductivity: Create the data process
        #-------------------------------
        log.debug("test_createTransformsThenActivateInstrument: create L1 Conductivity data_process start")
        try:
            l1_conductivity_data_process_id = self.dataprocessclient.create_data_process(ctd_L1_conductivity_dprocdef_id, [ctd_l0_conductivity_output_dp_id], {'output':ctd_l1_conductivity_output_dp_id})
            self.dataprocessclient.activate_data_process(l1_conductivity_data_process_id)
        except BadRequest as ex:
            self.fail("failed to create new data process: %s" %ex)

        log.debug("test_createTransformsThenActivateInstrument: create L1 Conductivity data_process return")


        #-------------------------------
        # L1 Pressure: Create the data process
        #-------------------------------
        log.debug("test_createTransformsThenActivateInstrument: create L1_Pressure data_process start")
        try:
            l1_pressure_data_process_id = self.dataprocessclient.create_data_process(ctd_L1_pressure_dprocdef_id, [ctd_l0_pressure_output_dp_id], {'output':ctd_l1_pressure_output_dp_id})
            self.dataprocessclient.activate_data_process(l1_pressure_data_process_id)
        except BadRequest as ex:
            self.fail("failed to create new data process: %s" %ex)

        log.debug("test_createTransformsThenActivateInstrument: create L1_Pressure data_process return")



        #-------------------------------
        # L1 Temperature: Create the data process
        #-------------------------------
        log.debug("test_createTransformsThenActivateInstrument: create L1_Pressure data_process start")
        try:
            l1_temperature_all_data_process_id = self.dataprocessclient.create_data_process(ctd_L1_temperature_dprocdef_id, [ctd_l0_temperature_output_dp_id], {'output':ctd_l1_temperature_output_dp_id})
            self.dataprocessclient.activate_data_process(l1_temperature_all_data_process_id)
        except BadRequest as ex:
            self.fail("failed to create new data process: %s" %ex)

        log.debug("test_createTransformsThenActivateInstrument: create L1_Pressure data_process return")



        #-------------------------------
        # L2 Salinity: Create the data process
        #-------------------------------
        log.debug("test_createTransformsThenActivateInstrument: create L2_salinity data_process start")
        try:
            l2_salinity_all_data_process_id = self.dataprocessclient.create_data_process(ctd_L2_salinity_dprocdef_id, [ctd_parsed_data_product], {'output':ctd_l2_salinity_output_dp_id})
            self.dataprocessclient.activate_data_process(l2_salinity_all_data_process_id)
        except BadRequest as ex:
            self.fail("failed to create new data process: %s" %ex)

        log.debug("test_createTransformsThenActivateInstrument: create L2_salinity data_process return")

        #-------------------------------
        # L2 Density: Create the data process
        #-------------------------------
        log.debug("test_createTransformsThenActivateInstrument: create L2_Density data_process start")
        try:
            l2_density_all_data_process_id = self.dataprocessclient.create_data_process(ctd_L2_density_dprocdef_id, [ctd_parsed_data_product], {'output':ctd_l2_density_output_dp_id})
            self.dataprocessclient.activate_data_process(l2_density_all_data_process_id)
        except BadRequest as ex:
            self.fail("failed to create new data process: %s" %ex)

        log.debug("test_createTransformsThenActivateInstrument: create L2_Density data_process return")


        #-------------------------------
        # Launch InstrumentAgentInstance, connect to the resource agent client
        #-------------------------------
        self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)

        inst_agent_instance_obj= self.imsclient.read_instrument_agent_instance(instAgentInstance_id)
        print 'test_createTransformsThenActivateInstrument: Instrument agent instance obj: = ', inst_agent_instance_obj

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient('iaclient', name=inst_agent_instance_obj.agent_process_id,  process=FakeProcess())
        print 'activate_instrument: got ia client %s' % self._ia_client
        log.debug("test_createTransformsThenActivateInstrument: got ia client %s", str(self._ia_client))


        #-------------------------------
        # Streaming
        #-------------------------------


        cmd = AgentCommand(command='initialize')
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentStream: initialize %s", str(retval))


        log.debug("test_activateInstrumentStream: Sending go_active command (L4-CI-SA-RQ-334)")
        cmd = AgentCommand(command='go_active')
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentStream: return value from go_active %s", str(reply))
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("test_activateInstrumentStream: current state after sending go_active command %s    (L4-CI-SA-RQ-334)", str(state))

        cmd = AgentCommand(command='run')
        reply = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("test_activateInstrumentStream: return from run state: %s", str(state))

        # Make sure the sampling rate and transmission are sane.
        params = {
            SBE37Parameter.NAVG : 1,
            SBE37Parameter.INTERVAL : 5,
            SBE37Parameter.TXREALTIME : True
        }
        self._ia_client.set_param(params)
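        # NAVG=1 with a 5 second INTERVAL and TXREALTIME enabled should make the
        # simulated SBE37 transmit a sample roughly every 5 seconds, so the 7 second
        # streaming window below is expected to capture at least one granule.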


        log.debug("test_activateInstrumentStream: calling go_streaming ")
        cmd = AgentCommand(command='go_streaming')
        reply = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("test_activateInstrumentStream: return from go_streaming state: %s", str(state))


        time.sleep(7)

        log.debug("test_activateInstrumentStream: calling go_observatory")
        cmd = AgentCommand(command='go_observatory')
        reply = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("test_activateInstrumentStream: return from go_observatory state  %s", str(state))



        log.debug("test_activateInstrumentStream: calling reset ")
        cmd = AgentCommand(command='reset')
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentStream: return from reset state:%s", str(reply.result))
        time.sleep(2)


        #-------------------------------
        # Deactivate InstrumentAgentInstance
        #-------------------------------
        self.imsclient.stop_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)
        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)
Example #35
0
class TestDataProductManagementServiceCoverage(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        log.debug("Start rel from url")
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.DPMS = DataProductManagementServiceClient()
        self.RR = ResourceRegistryServiceClient()
        self.RR2 = EnhancedResourceRegistryClient(self.RR)
        self.DAMS = DataAcquisitionManagementServiceClient()
        self.PSMS = PubsubManagementServiceClient()
        self.ingestclient = IngestionManagementServiceClient()
        self.PD = ProcessDispatcherServiceClient()
        self.DSMS = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------
        log.debug("get datastore")
        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(
            datastore_name)
        self.stream_def_id = self.PSMS.create_stream_definition(
            name='SBE37_CDM')

        self.process_definitions = {}
        ingestion_worker_definition = ProcessDefinition(
            name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':
            'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class': 'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.PD.create_process_definition(
            process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []
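        # self.pids, self.exchange_points and self.exchange_names track resources created
        # during a test; cleaning_up() (registered below) cancels the processes and
        # deletes the exchange queues/points, and clean_subscriptions() removes any
        # ingestion subscriptions left behind.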

        self.addCleanup(self.cleaning_up)

    @staticmethod
    def clean_subscriptions():
        ingestion_management = IngestionManagementServiceClient()
        pubsub = PubsubManagementServiceClient()
        rr = ResourceRegistryServiceClient()
        ingestion_config_ids = ingestion_management.list_ingestion_configurations(
            id_only=True)
        for ic in ingestion_config_ids:
            subscription_ids, assocs = rr.find_objects(
                subject=ic, predicate=PRED.hasSubscription, id_only=True)
            for subscription_id, assoc in zip(subscription_ids, assocs):
                rr.delete_association(assoc)
                try:
                    pubsub.deactivate_subscription(subscription_id)
                except:
                    log.exception("Unable to decativate subscription: %s",
                                  subscription_id)
                pubsub.delete_subscription(subscription_id)

    def cleaning_up(self):
        log.debug("number of pids to be terminated: %s", len(self.pids))
        for pid in self.pids:
            try:
                self.PD.cancel_process(pid)
                log.debug("Terminated the process: %s", pid)
            except:
                log.debug("Could not terminate the process: %s", pid)
        TestDataProductManagementServiceCoverage.clean_subscriptions()

        for xn in self.exchange_names:
            xni = self.container.ex_manager.create_xn_queue(xn)
            xni.delete()
        for xp in self.exchange_points:
            xpi = self.container.ex_manager.create_xp(xp)
            xpi.delete()

    def test_CRUD_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary = self.DSMS.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict')
        ctd_stream_def_id = self.PSMS.create_stream_definition(
            name='Simulated CTD data',
            parameter_dictionary_id=parameter_dictionary._id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------

        # Generic time-series data domain creation
        tdom, sdom = time_series_domain()

        dp_obj = IonObject(RT.DataProduct,
                           name='DP1',
                           description='some new dp',
                           temporal_domain=tdom.dump(),
                           spatial_domain=sdom.dump())

        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 10.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = -10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 10.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = -10.0
        dp_obj.ooi_product_name = "PRODNAME"

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        log.debug("create dataset")
        dataset_id = self.RR2.create(any_old(RT.Dataset))
        log.debug("dataset_id = %s", dataset_id)

        log.debug("create data product 1")
        dp_id = self.DPMS.create_data_product(
            data_product=dp_obj,
            stream_definition_id=ctd_stream_def_id,
            dataset_id=dataset_id)
        log.debug("dp_id = %s", dp_id)
        # Assert that the data product has an associated stream at this stage
        stream_ids, _ = self.RR.find_objects(dp_id, PRED.hasStream, RT.Stream,
                                             True)
        self.assertNotEquals(len(stream_ids), 0)

        log.debug("read data product")
        dp_obj = self.DPMS.read_data_product(dp_id)

        log.debug("find data products")
        self.assertIn(dp_id, [r._id for r in self.DPMS.find_data_products()])

        log.debug("update data product")
        dp_obj.name = "brand new"
        self.DPMS.update_data_product(dp_obj)
        self.assertEqual("brand new", self.DPMS.read_data_product(dp_id).name)

        log.debug("activate/suspend persistence")
        self.assertFalse(self.DPMS.is_persisted(dp_id))
        self.DPMS.activate_data_product_persistence(dp_id)
        self.addCleanup(self.DPMS.suspend_data_product_persistence,
                        dp_id)  # delete op will do this for us
        self.assertTrue(self.DPMS.is_persisted(dp_id))

        log.debug("check error on checking persistence of nonexistent stream")
        #with self.assertRaises(NotFound):
        if True:
            self.DPMS.is_persisted(self.RR2.create(any_old(RT.DataProduct)))

        #log.debug("get extension")
        #self.DPMS.get_data_product_extension(dp_id)

        log.debug("getting last update")
        lastupdate = self.DPMS.get_last_update(dp_id)
        self.assertEqual(
            {}, lastupdate)  # should be no updates to a new data product

        log.debug("prepare resource support")
        support = self.DPMS.prepare_data_product_support(dp_id)
        self.assertIsNotNone(support)

        log.debug("delete data product")
        self.DPMS.delete_data_product(dp_id)

        log.debug("try to suspend again")
        self.DPMS.suspend_data_product_persistence(dp_id)

        # try basic create
        log.debug("create without a dataset")
        dp_id2 = self.DPMS.create_data_product_(any_old(RT.DataProduct))
        self.assertIsNotNone(dp_id2)
        log.debug("activate product %s", dp_id2)
        self.assertRaises(BadRequest,
                          self.DPMS.activate_data_product_persistence, dp_id2)

        #self.assertNotEqual(0, len(self.RR2.find_dataset_ids_of_data_product_using_has_dataset(dp_id2)))

        log.debug("force delete data product")
        self.DPMS.force_delete_data_product(dp_id2)

        log.debug("test complete")
class TransformManagementServiceIntTest(IonIntegrationTestCase):

    def setUp(self):
        # set up the container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2dm.yml')

        self.pubsub_cli = PubsubManagementServiceClient(node=self.container.node)
        self.tms_cli = TransformManagementServiceClient(node=self.container.node)
        self.rr_cli = ResourceRegistryServiceClient(node=self.container.node)
        self.procd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        self.input_stream_id = self.pubsub_cli.create_stream(name='input_stream',original=True)

        self.input_subscription_id = self.pubsub_cli.create_subscription(query=StreamQuery(stream_ids=[self.input_stream_id]),exchange_name='transform_input',name='input_subscription')

        self.output_stream_id = self.pubsub_cli.create_stream(name='output_stream',original=True)

        self.process_definition = ProcessDefinition(name='basic_transform_definition')
        self.process_definition.executable = {'module': 'ion.processes.data.transforms.transform_example',
                                              'class':'TransformExample'}
        self.process_definition_id = self.procd_cli.create_process_definition(process_definition=self.process_definition)
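        # Fixture summary: one input stream with a subscription on the 'transform_input'
        # exchange, one output stream, and a ProcessDefinition for TransformExample that
        # the create_transform tests below reuse.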

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_create_transform(self):
        configuration = {'program_args':{'arg1':'value'}}

        transform_id = self.tms_cli.create_transform(
              name='test_transform',
              in_subscription_id=self.input_subscription_id,
              out_streams={'output':self.output_stream_id},
              process_definition_id=self.process_definition_id)

        # test transform creation in rr
        transform = self.rr_cli.read(transform_id)
        self.assertEquals(transform.name,'test_transform')


        # test associations
        predicates = [PRED.hasSubscription, PRED.hasOutStream, PRED.hasProcessDefinition]
        assocs = []
        for p in predicates:
            assocs += self.rr_cli.find_associations(transform_id,p,id_only=True)
        self.assertEquals(len(assocs),3)

        # test process creation
        transform = self.tms_cli.read_transform(transform_id)
        pid = transform.process_id
        proc = self.container.proc_manager.procs.get(pid)
        self.assertIsInstance(proc,TransformExample)

        # clean up
        self.tms_cli.delete_transform(transform_id)

    def test_create_transform_no_procdef(self):
        with self.assertRaises(NotFound):
            self.tms_cli.create_transform(name='test',in_subscription_id=self.input_subscription_id)

    def test_create_transform_bad_procdef(self):
        with self.assertRaises(NotFound):
            self.tms_cli.create_transform(name='test',
                in_subscription_id=self.input_subscription_id,
                process_definition_id='bad')
    
    def test_create_transform_no_config(self):
        transform_id = self.tms_cli.create_transform(
            name='test_transform',
            in_subscription_id=self.input_subscription_id,
            out_streams={'output':self.output_stream_id},
            process_definition_id=self.process_definition_id,
        )
        self.tms_cli.delete_transform(transform_id)

    def test_create_transform_name_failure(self):
        transform_id = self.tms_cli.create_transform(
            name='test_transform',
            in_subscription_id=self.input_subscription_id,
            out_streams={'output':self.output_stream_id},
            process_definition_id=self.process_definition_id,
        )
        with self.assertRaises(BadRequest):
            transform_id = self.tms_cli.create_transform(
                name='test_transform',
                in_subscription_id=self.input_subscription_id,
                out_streams={'output':self.output_stream_id},
                process_definition_id=self.process_definition_id,
            )
        self.tms_cli.delete_transform(transform_id)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_create_no_output(self):
        transform_id = self.tms_cli.create_transform(
            name='test_transform',
            in_subscription_id=self.input_subscription_id,
            process_definition_id=self.process_definition_id,
        )

        predicates = [PRED.hasSubscription, PRED.hasOutStream, PRED.hasProcessDefinition]
        assocs = []
        for p in predicates:
            assocs += self.rr_cli.find_associations(transform_id,p,id_only=True)
        self.assertEquals(len(assocs),2)

        # test process creation
        transform = self.tms_cli.read_transform(transform_id)
        pid = transform.process_id
        proc = self.container.proc_manager.procs.get(pid)
        self.assertIsInstance(proc,TransformExample)

        self.tms_cli.delete_transform(transform_id)
    def test_read_transform_exists(self):
        trans_obj = IonObject(RT.Transform,name='trans_obj')
        trans_id, _ = self.rr_cli.create(trans_obj)

        res = self.tms_cli.read_transform(trans_id)
        actual = self.rr_cli.read(trans_id)

        self.assertEquals(res._id,actual._id)


    def test_read_transform_nonexist(self):
        with self.assertRaises(NotFound) as e:
            self.tms_cli.read_transform('123')

    def test_activate_transform(self):

        transform_id = self.tms_cli.create_transform(
            name='test_transform',
            in_subscription_id=self.input_subscription_id,
            out_streams={'output':self.output_stream_id},
            process_definition_id=self.process_definition_id
        )

        self.tms_cli.activate_transform(transform_id)

        # pubsub check if activated?
        self.tms_cli.delete_transform(transform_id)

    def test_activate_transform_nonexist(self):
        with self.assertRaises(NotFound):
            self.tms_cli.activate_transform('1234')

    def test_delete_transform(self):

        transform_id = self.tms_cli.create_transform(
            name='test_transform',
            in_subscription_id=self.input_subscription_id,
            process_definition_id=self.process_definition_id
        )
        self.tms_cli.delete_transform(transform_id)

        # assertions
        with self.assertRaises(NotFound):
            self.rr_cli.read(transform_id)


    def test_delete_transform_nonexist(self):
        with self.assertRaises(NotFound):
            self.tms_cli.delete_transform('123')

    def test_execute_transform(self):
        # set up
        process_definition = ProcessDefinition(name='procdef_execute')
        process_definition.executable['module'] = 'ion.processes.data.transforms.transform_example'
        process_definition.executable['class'] = 'ReverseTransform'
        data = [1,2,3]

        process_definition_id, _ = self.rr_cli.create(process_definition)

        retval = self.tms_cli.execute_transform(process_definition_id,data)

        self.assertEquals(retval,[3,2,1])
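        # execute_transform appears to run the ReverseTransform synchronously on the
        # supplied list and return the transformed result in the RPC reply, hence the
        # reversed-list assertion above.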


    def test_integrated_transform(self):
        '''
        This example script runs a chained three way transform:
            B
        A <
            C
        Where A is the even_odd transform (generates a stream of even and odd numbers from input)
        and B and C are the basic transforms that receive even and odd input
        '''
        cc = self.container
        assertions = self.assertTrue

        pubsub_cli = PubsubManagementServiceClient(node=cc.node)
        rr_cli = ResourceRegistryServiceClient(node=cc.node)
        tms_cli = TransformManagementServiceClient(node=cc.node)
        #-------------------------------
        # Process Definition
        #-------------------------------
        # Create the process definition for the basic transform
        process_definition = IonObject(RT.ProcessDefinition, name='basic_transform_definition')
        process_definition.executable = {
            'module': 'ion.processes.data.transforms.transform_example',
            'class':'TransformExample'
        }
        basic_transform_definition_id, _ = rr_cli.create(process_definition)

        # Create The process definition for the TransformEvenOdd
        process_definition = IonObject(RT.ProcessDefinition, name='evenodd_transform_definition')
        process_definition.executable = {
            'module': 'ion.processes.data.transforms.transform_example',
            'class':'TransformEvenOdd'
        }
        evenodd_transform_definition_id, _ = rr_cli.create(process_definition)

        #-------------------------------
        # Streams
        #-------------------------------
        streams = [pubsub_cli.create_stream() for i in xrange(5)]

        #-------------------------------
        # Subscriptions
        #-------------------------------

        query = StreamQuery(stream_ids=[streams[0]])
        input_subscription_id = pubsub_cli.create_subscription(query=query, exchange_name='input_queue')

        query = StreamQuery(stream_ids = [streams[1]]) # even output
        even_subscription_id = pubsub_cli.create_subscription(query=query, exchange_name='even_queue')

        query = StreamQuery(stream_ids = [streams[2]]) # odd output
        odd_subscription_id = pubsub_cli.create_subscription(query=query, exchange_name='odd_queue')


        #-------------------------------
        # Launch the EvenOdd Transform
        #-------------------------------

        evenodd_id = tms_cli.create_transform(name='even_odd',
            in_subscription_id=input_subscription_id,
            out_streams={'even':streams[1], 'odd':streams[2]},
            process_definition_id=evenodd_transform_definition_id,
            configuration={})
        tms_cli.activate_transform(evenodd_id)


        #-------------------------------
        # Launch the Even Processing Transform
        #-------------------------------

        even_transform_id = tms_cli.create_transform(name='even_transform',
            in_subscription_id = even_subscription_id,
            out_streams={'even_plus1':streams[3]},
            process_definition_id=basic_transform_definition_id,
            configuration={})
        tms_cli.activate_transform(even_transform_id)

        #-------------------------------
        # Launch the Odd Processing Transform
        #-------------------------------

        odd_transform_id = tms_cli.create_transform(name='odd_transform',
            in_subscription_id = odd_subscription_id,
            out_streams={'odd_plus1':streams[4]},
            process_definition_id=basic_transform_definition_id,
            configuration={})
        tms_cli.activate_transform(odd_transform_id)

        #-------------------------------
        # Set up final subscribers
        #-------------------------------

        evenplus1_subscription_id = pubsub_cli.create_subscription(
            query=StreamQuery([streams[3]]),
            exchange_name='evenplus1_queue',
            name='EvenPlus1Subscription',
            description='EvenPlus1 SubscriptionDescription'
        )
        oddplus1_subscription_id = pubsub_cli.create_subscription(
            query=StreamQuery([streams[4]]),
            exchange_name='oddplus1_queue',
            name='OddPlus1Subscription',
            description='OddPlus1 SubscriptionDescription'
        )

        total_msg_count = 2

        msgs = gevent.queue.Queue()


        def even1_message_received(message, headers):
            input = int(message.get('num'))
            assertions( (input % 2) ) # Assert it is odd (transform adds 1)
            msgs.put(True)


        def odd1_message_received(message, headers):
            input = int(message.get('num'))
            assertions(not (input % 2)) # Assert it is even
            msgs.put(True)

        subscriber_registrar = StreamSubscriberRegistrar(process=cc, node=cc.node)
        even_subscriber = subscriber_registrar.create_subscriber(exchange_name='evenplus1_queue', callback=even1_message_received)
        odd_subscriber = subscriber_registrar.create_subscriber(exchange_name='oddplus1_queue', callback=odd1_message_received)

        # Start subscribers
        even_subscriber.start()
        odd_subscriber.start()

        # Activate subscriptions
        pubsub_cli.activate_subscription(evenplus1_subscription_id)
        pubsub_cli.activate_subscription(oddplus1_subscription_id)

        #-------------------------------
        # Set up fake stream producer
        #-------------------------------

        pid = cc.spawn_process(name='dummy_process_for_test',
            module='pyon.ion.process',
            cls='SimpleProcess',
            config={})
        dummy_process = cc.proc_manager.procs[pid]

        # Normally the user does not see or create the publisher, this is part of the containers business.
        # For the test we need to set it up explicitly
        publisher_registrar = StreamPublisherRegistrar(process=dummy_process, node=cc.node)
        stream_publisher = publisher_registrar.create_publisher(stream_id=streams[0])

        #-------------------------------
        # Start test
        #-------------------------------

        # Publish a stream
        for i in xrange(total_msg_count):
            stream_publisher.publish({'num':str(i)})

        time.sleep(0.5)
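        # The wait loop below expects total_msg_count * 2 results, i.e. each published
        # number is expected to produce a message on both the evenplus1 and oddplus1
        # queues after passing through the transforms.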

        for i in xrange(total_msg_count * 2):
            try:
                msgs.get(timeout=4)
            except Empty:
                assertions(False, "Failed to process all messages correctly.")

    """
Example #37
0
class TestActivateInstrumentIntegration(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()
        config.bootstrap.use_es = True

        self._start_container()
        self.addCleanup(TestActivateInstrumentIntegration.es_cleanup)

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        #setup listerner vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()

    @staticmethod
    def es_cleanup():
        es_host = CFG.get_safe('server.elasticsearch.host', 'localhost')
        es_port = CFG.get_safe('server.elasticsearch.port', '9200')
        es = ep.ElasticSearch(host=es_host, port=es_port, timeout=10)
        indexes = STD_INDEXES.keys()
        indexes.append('%s_resources_index' % get_sys_name().lower())
        indexes.append('%s_events_index' % get_sys_name().lower())

        for index in indexes:
            IndexManagementService._es_call(es.river_couchdb_delete, index)
            IndexManagementService._es_call(es.index_delete, index)

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name + '_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(
            process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id,
            configuration=configuration)

        return pid

    def _create_notification(self,
                             user_name='',
                             instrument_id='',
                             product_id=''):
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------

        notification_request_1 = NotificationRequest(
            name='notification_1',
            origin=instrument_id,
            origin_type="instrument",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest(
            name='notification_2',
            origin=product_id,
            origin_type="data product",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------

        user = UserInfo()
        user.name = user_name
        user.contact.email = '*****@*****.**' % user_name

        user_id, _ = self.rrclient.create(user)

        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------

        self.usernotificationclient.create_notification(
            notification=notification_request_1, user_id=user_id)
        self.usernotificationclient.create_notification(
            notification=notification_request_2, user_id=user_id)
        log.debug(
            "test_activateInstrumentSample: create_user_notifications user_id %s",
            str(user_id))

        return user_id

    def get_datastore(self, dataset_id):
        dataset = self.datasetclient.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(
            datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def _check_computed_attributes_of_extended_instrument(
            self, expected_instrument_device_id='', extended_instrument=None):

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(
            extended_instrument.computed.last_data_received_datetime,
            ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.uptime,
                              ComputedStringValue)

        self.assertIsInstance(
            extended_instrument.computed.power_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.communications_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.data_status_roll_up,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.location_status_roll_up,
            ComputedIntValue)

        # the following assert will not work without elasticsearch.
        #self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )

        # Verify the computed attribute for user notification requests
        self.assertEqual(
            1,
            len(extended_instrument.computed.user_notification_requests.value))
        notifications = extended_instrument.computed.user_notification_requests.value
        notification = notifications[0]
        self.assertEqual(expected_instrument_device_id, notification.origin)
        self.assertEqual("instrument", notification.origin_type)
        self.assertEqual('ResourceLifecycleEvent', notification.event_type)

    def _check_computed_attributes_of_extended_product(
            self, expected_data_product_id='', extended_data_product=None):

        self.assertEqual(expected_data_product_id, extended_data_product._id)
        log.debug("extended_data_product.computed: %s",
                  extended_data_product.computed)

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(
            extended_data_product.computed.product_download_size_estimated,
            ComputedFloatValue)
        self.assertIsInstance(
            extended_data_product.computed.number_active_subscriptions,
            ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_url,
                              ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.stored_data_size,
                              ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.recent_granules,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.parameters,
                              ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.recent_events,
                              ComputedEventListValue)

        self.assertIsInstance(extended_data_product.computed.provenance,
                              ComputedDictValue)
        self.assertIsInstance(
            extended_data_product.computed.user_notification_requests,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.active_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.past_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.last_granule,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.is_persisted,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_data_product.computed.data_contents_updated,
            ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.data_datetime,
                              ComputedListValue)

        # exact text here keeps changing to fit UI capabilities.  keep assertion general...
        self.assertIn(
            'ok',
            extended_data_product.computed.last_granule.value['quality_flag'])
        self.assertEqual(
            2, len(extended_data_product.computed.data_datetime.value))

        notifications = extended_data_product.computed.user_notification_requests.value

        notification = notifications[0]
        self.assertEqual(expected_data_product_id, notification.origin)
        self.assertEqual("data product", notification.origin_type)
        self.assertEqual('DetectionEvent', notification.event_type)

    @attr('LOCOINT')
    #@unittest.skip('refactoring')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    @patch.dict(CFG, {'endpoint': {'receive': {'timeout': 90}}})
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug('new InstrumentModel id = %s ', instModel_id)

        raw_config = StreamConfiguration(stream_name='raw',
                                         parameter_dictionary_name='raw')
        parsed_config = StreamConfiguration(
            stream_name='parsed',
            parameter_dictionary_name='ctd_parsed_param_dict')

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name='agent007',
            description="SBE37IMAgent",
            driver_uri=DRV_URI_GOOD,
            stream_configurations=[raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(
            instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            'test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) '
        )
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345")
        instDevice_id = self.imsclient.create_instrument_device(
            instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(
            instModel_id, instDevice_id)
        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) ",
            instDevice_id)

        port_agent_config = {
            'device_addr': CFG.device.sbe37.host,
            'device_port': CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance,
                                          name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config,
                                          alerts=[])

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()
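        # time_series_domain() returns domain objects; they are serialized with dump()
        # here, presumably because the DataProduct IonObject below carries the temporal
        # and spatial domains as plain dictionaries.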

        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(
            name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'raw', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(
            name='raw', parameter_dictionary_id=raw_pdict_id)

        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='the parsed data',
                           description='ctd stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id1 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug('new dp_id = %s', data_product_id1)
        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s', dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        dp_obj = IonObject(RT.DataProduct,
                           name='the raw data',
                           description='raw stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id2 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification(user_name='user_1',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification(user_name='user_2',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s', str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s', dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        #elastic search debug
        es_indexes, _ = self.container.resource_registry.find_resources(
            restype='ElasticSearchIndex')
        log.debug('ElasticSearch indexes: %s', [i.name for i in es_indexes])
        log.debug('Bootstrap %s', CFG.bootstrap.use_es)

        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(
                instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])

        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(
            instAgentInstance_id)
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     instDevice_id, ProcessStateEnum.RUNNING)
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" %
            gate.process_id)
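        # AgentProcessStateGate waits (via the process dispatcher) until the agent
        # process for instDevice_id reports RUNNING, so the ResourceAgentClient created
        # next can assume the agent endpoint is up.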

        #log.trace('Instrument agent instance obj: = %s' , str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=gate.process_id,
                                              process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s",
                  str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s", str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.INACTIVE, state)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s",
                  str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "(L4-CI-SA-RQ-334): current state after sending go_active command %s",
            str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s", str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.STOPPED, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
        for i in xrange(10):
            retval = self._ia_client.execute_resource(cmd)
            log.debug("test_activateInstrumentSample: return from sample %s",
                      str(retval))

        log.debug("test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s",
                  str(reply))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC Call to start_retrieve
        #--------------------------------------------------------------------------------

        replay_data_raw = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data_raw, Granule)
        rdt_raw = RecordDictionaryTool.load_from_granule(replay_data_raw)
        log.debug("RDT raw: %s", str(rdt_raw.pretty_print()))

        self.assertIn('raw', rdt_raw)
        raw_vals = rdt_raw['raw']

        all_raw = "".join(raw_vals)

        # look for 't' entered after a prompt -- ">t"
        t_commands = all_raw.count(">t")

        if 10 != t_commands:
            log.error("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals):
                log.error("raw val %s: %s", i, [r])
            self.fail("Expected 10 't' strings in raw_vals, got %s" %
                      t_commands)
        else:
            log.debug("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals):
                log.debug("raw val %s: %s", i, [r])

        replay_data_parsed = self.dataretrieverclient.retrieve(
            self.parsed_dataset)
        self.assertIsInstance(replay_data_parsed, Granule)
        rdt_parsed = RecordDictionaryTool.load_from_granule(replay_data_parsed)
        log.debug("test_activateInstrumentSample: RDT parsed: %s",
                  str(rdt_parsed.pretty_print()))
        self.assertIn('temp', rdt_parsed)
        temp_vals = rdt_parsed['temp']
        pressure_vals = rdt_parsed['pressure']
        if 10 != len(temp_vals):
            log.error("%s temp_vals: %s", len(temp_vals), temp_vals)
            self.fail("Expected 10 temp_vals, got %s" % len(temp_vals))

        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """

        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id1, user_id=user_id_1)

        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.
                       value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id1,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_1)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id2,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
Example #38
0
class TestGranulePublish(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.pubsubclient = PubsubManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name + '_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(
            process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id,
            configuration=configuration)

        return pid

    #overriding trigger function here to use new granule
    def test_granule_publish(self):
        log.debug("test_granule_publish ")
        self.loggerpids = []

        #retrieve the param dict from the repository
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        stream_definition_id = self.pubsubclient.create_stream_definition(
            'parsed stream', parameter_dictionary_id=pdict_id)

        tdom, sdom = time_series_domain()

        dp_obj = IonObject(RT.DataProduct,
                           name=str(uuid.uuid4()),
                           description='ctd stream test',
                           temporal_domain=tdom.dump(),
                           spatial_domain=sdom.dump())

        data_product_id1 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=stream_definition_id)

        # Retrieve the id of the output stream of the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                   PRED.hasStream, None, True)
        log.debug('test_granule_publish: Data product streams1 = %s',
                  stream_ids)

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        #create the publisher from the stream route
        stream_route = self.pubsubclient.read_stream_route(stream_ids[0])
        publisher = StandaloneStreamPublisher(stream_ids[0], stream_route)

        # this is one sample from the ctd driver
        tomato = {
            "driver_timestamp": 3555971105.1268806,
            "instrument_id": "ABC-123",
            "pkt_format_id": "JSON_Data",
            "pkt_version": 1,
            "preferred_timestamp": "driver_timestamp",
            "quality_flag": "ok",
            "stream_name": "parsed",
            "values": [
                {"value": 22.9304, "value_id": "temp"},
                {"value": 51.57381, "value_id": "conductivity"},
                {"value": 915.551, "value_id": "pressure"},
            ]
        }

        for value in tomato['values']:
            log.debug(
                "test_granule_publish: Looping tomato values  key: %s    val: %s ",
                str(value['value_id']), str(value['value']))

            if value['value_id'] in rdt:
                rdt[value['value_id']] = numpy.array([value['value']])
                log.debug(
                    "test_granule_publish: Added data item  key: %s  val: %s ",
                    str(value['value_id']), str(value['value']))

        g = rdt.to_granule()

        publisher.publish(g)

        gevent.sleep(3)

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        #--------------------------------------------------------------------------------
        # Cleanup data products
        #--------------------------------------------------------------------------------
        dp_ids, _ = self.rrclient.find_resources(restype=RT.DataProduct,
                                                 id_only=True)

        for dp_id in dp_ids:
            self.dataproductclient.delete_data_product(dp_id)
Example #39
0
class _Launcher(object):
    """
    Helper for launching platform agent processes. This helper was introduced
    to facilitate testing and diagnosing of launching issues in coi_pycc and
    other builds in buildbot.
    """
    def __init__(self, use_gate=True):
        """
        @param use_gate True (the default) to use ProcessStateGate pattern.
                        Otherwise, use the
                        "create_process/subscribe-to-event/schedule_process/_await_state_event"
                        pattern (as described in
                        https://confluence.oceanobservatories.org/display/CIDev/R2+Process+Dispatcher+Guide
                        as of Sept 14/12).
        """
        self._use_gate = use_gate
        self._pd_client = ProcessDispatcherServiceClient()

    def launch(self, platform_id, agent_config, timeout_spawn=30):
        """
        Launches a sub-platform agent.

        @param platform_id      Platform ID
        @param agent_config     Agent configuration
        @param timeout_spawn    Timeout in secs for the SPAWN event (by
                                default 30). If None or zero, no wait is performed.

        @retval process ID
        """
        log.debug("launch: platform_id=%r, timeout_spawn=%s", platform_id,
                  str(timeout_spawn))

        if self._use_gate:
            return self._do_launch_gate(platform_id, agent_config,
                                        timeout_spawn)

#        NOTE 2013-01-15: self._use_gate is always True. No need for the following.
#
#        try:
#            return self._do_launch(platform_id, agent_config, timeout_spawn)
#        finally:
#            self._event_queue = None
#            self._event_sub = None
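#
#        For reference, that non-gate pattern (hedged sketch; see the R2 Process
#        Dispatcher Guide linked in the class docstring -- names here are not
#        verified against the current client API) roughly amounted to:
#            pid = self._pd_client.create_process(pdef_id)
#            # subscribe to ProcessLifecycleEvent for pid, then...
#            self._pd_client.schedule_process(pdef_id, process_id=pid,
#                                             configuration=agent_config)
#            # ...block on the event queue until the RUNNING state event arrives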

    def cancel_process(self, pid):
        """
        Helper to terminate a process
        """
        self._pd_client.cancel_process(pid)

    def _do_launch_gate(self, platform_id, agent_config, timeout_spawn):
        """
        The method for when using the ProcessStateGate pattern, which is the
        one used by test_oms_launch2 to launch the root platform.
        """
        log.debug("_do_launch_gate: platform_id=%r, timeout_spawn=%s",
                  platform_id, timeout_spawn)

        pa_name = 'PlatformAgent_%s' % platform_id

        pdef = ProcessDefinition(name=pa_name)
        pdef.executable = {'module': PA_MOD, 'class': PA_CLS}
        pdef_id = self._pd_client.create_process_definition(
            process_definition=pdef)

        log.debug("using schedule_process directly %r", platform_id)

        pid = self._pd_client.schedule_process(process_definition_id=pdef_id,
                                               schedule=None,
                                               configuration=agent_config)

        if timeout_spawn:
            # ProcessStateGate used as indicated in its pydoc (9/21/12)
            gate = ProcessStateGate(self._pd_client.read_process, pid,
                                    ProcessStateEnum.RUNNING)
            err_msg = None
            try:
                if not gate.await(timeout_spawn):
                    err_msg = "The platform agent instance did not spawn in " \
                              "%s seconds.  gate.await returned false. " % \
                              timeout_spawn
                    log.error(err_msg)

            except Exception as e:
                log.error(
                    "Exception while waiting for platform agent instance "
                    "(platform_id=%r) "
                    "to spawn in %s seconds: %s", platform_id, timeout_spawn,
                    str(e))  #,exc_Info=True)

            if err_msg:
                raise PlatformException(err_msg)

        log.debug(
            "_do_launch_gate: platform_id=%r: agent spawned, pid=%r "
            "(ProcessStateGate pattern used)", platform_id, pid)

        return pid
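
# Hedged usage sketch for the _Launcher helper above (the platform id and empty
# agent_config are placeholders, and a running container with the Process
# Dispatcher service is assumed):
#
#     launcher = _Launcher(use_gate=True)
#     pid = launcher.launch('Node1A', agent_config={}, timeout_spawn=30)
#     try:
#         pass  # interact with the spawned platform agent process here
#     finally:
#         launcher.cancel_process(pid)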
Example #40
0
    def run_external_transform(self):
        '''
        This example script illustrates how a transform can interact with an outside process (very basic).
        It launches an external_transform example which uses the operating system command 'bc' to add 1 to the input.

        Producer -> A -> 'FS.TEMP/transform_output'
        A is an external transform that spawns an OS process to increment the input by 1
        '''
        pubsub_cli = PubsubManagementServiceClient(node=self.container.node)
        tms_cli = TransformManagementServiceClient(node=self.container.node)
        procd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        #-------------------------------
        # Process Definition
        #-------------------------------
        process_definition = ProcessDefinition(
            name='external_transform_definition')
        process_definition.executable[
            'module'] = 'ion.processes.data.transforms.transform_example'
        process_definition.executable['class'] = 'ExternalTransform'
        process_definition_id = procd_cli.create_process_definition(
            process_definition=process_definition)

        #-------------------------------
        # Streams
        #-------------------------------

        input_stream_id = pubsub_cli.create_stream(name='input_stream',
                                                   original=True)

        #-------------------------------
        # Subscription
        #-------------------------------

        query = StreamQuery(stream_ids=[input_stream_id])
        input_subscription_id = pubsub_cli.create_subscription(
            query=query, exchange_name='input_queue')

        #-------------------------------
        # Launch Transform
        #-------------------------------

        transform_id = tms_cli.create_transform(
            name='external_transform',
            in_subscription_id=input_subscription_id,
            process_definition_id=process_definition_id,
            configuration={})
        tms_cli.activate_transform(transform_id)

        #-------------------------------
        # Launch Producer
        #-------------------------------

        id_p = self.container.spawn_process(
            'myproducer', 'ion.processes.data.transforms.transform_example',
            'TransformExampleProducer', {
                'process': {
                    'type': 'stream_process',
                    'publish_streams': {
                        'out_stream': input_stream_id
                    }
                },
                'stream_producer': {
                    'interval': 4000
                }
            })
        self.container.proc_manager.procs[id_p].start()
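
# A minimal, hedged sketch (not the actual ExternalTransform class from
# coi-services) of how a transform worker might shell out to the OS command
# 'bc' to add 1 to each input value, as described in the docstring above;
# the class and method names are illustrative assumptions, and 'bc' must be
# available on the host.
import subprocess


class AddOneWithBC(object):
    def execute(self, value):
        # Pipe "<value>+1" into bc and read back the incremented result.
        proc = subprocess.Popen(['bc'],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        out, _ = proc.communicate('%s+1\n' % value)
        return out.strip()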
Example #41
0
class EventManagementIntTest(IonIntegrationTestCase):
    def setUp(self):
        super(EventManagementIntTest, self).setUp()

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.event_management = EventManagementServiceClient()
        self.rrc = ResourceRegistryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.pubsub = PubsubManagementServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.data_product_management = DataProductManagementServiceClient()

        self.queue_cleanup = []
        self.exchange_cleanup = []

    def tearDown(self):
        for queue in self.queue_cleanup:
            xn = self.container.ex_manager.create_xn_queue(queue)
            xn.delete()
        for exchange in self.exchange_cleanup:
            xp = self.container.ex_manager.create_xp(exchange)
            xp.delete()

    def test_create_read_update_delete_event_type(self):
        """
        Test that the CRUD methods for event types work correctly
        """

        event_type = EventType(name="an event type")
        event_type.origin = 'instrument_1'

        # create
        event_type_id = self.event_management.create_event_type(event_type)
        self.assertIsNotNone(event_type_id)

        # read
        read_event_type = self.event_management.read_event_type(event_type_id)
        self.assertEquals(read_event_type.name, event_type.name)
        self.assertEquals(read_event_type.origin, event_type.origin)

        #update
        read_event_type.origin = 'instrument_2'
        read_event_type.producer = 'producer'

        self.event_management.update_event_type(read_event_type)
        updated_event_type = self.event_management.read_event_type(event_type_id)

        self.assertEquals(updated_event_type.origin, 'instrument_2')
        self.assertEquals(updated_event_type.producer, 'producer')

        # delete
        self.event_management.delete_event_type(event_type_id)

        with self.assertRaises(NotFound):
            self.event_management.read_event_type(event_type_id)

    def test_create_read_update_delete_event_process_definition(self):
        """
        Test that the CRUD methods for the event process definitions work correctly
        """

        # Create
        module = 'ion.processes.data.transforms.event_alert_transform'
        class_name = 'EventAlertTransform'
        procdef_id = self.event_management.create_event_process_definition(version='ver_1',
                                                                            module=module,
                                                                            class_name=class_name,
                                                                            uri='http://hare.com',
                                                                            arguments=['arg1', 'arg2'],
                                                                            event_types=['ExampleDetectableEvent', 'type_2'],
                                                                            sub_types=['sub_type_1'])
        # Read
        read_process_def = self.event_management.read_event_process_definition(procdef_id)
        self.assertEquals(read_process_def.executable['module'], module)
        self.assertEquals(read_process_def.executable['class'], class_name)

        # Update
        self.event_management.update_event_process_definition(event_process_definition_id=procdef_id,
                                                                class_name='StreamAlertTransform',
                                                                arguments=['arg3', 'arg4'],
                                                                event_types=['event_type_new'])

        updated_event_process_def = self.event_management.read_event_process_definition(procdef_id)
        self.assertEquals(updated_event_process_def.executable['class'], 'StreamAlertTransform')
        self.assertEquals(updated_event_process_def.arguments, ['arg3', 'arg4'])
        definition = updated_event_process_def.definition
        self.assertEquals(definition.event_types, ['event_type_new'])

        # Delete
        self.event_management.delete_event_process_definition(procdef_id)

        with self.assertRaises(NotFound):
            self.event_management.read_event_process_definition(procdef_id)

    def test_event_in_stream_out_transform(self):
        """
        Test the event-in/stream-out transform
        """

        stream_id, _ = self.pubsub.create_stream('test_stream', exchange_point='science_data')
        self.exchange_cleanup.append('science_data')

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='EventToStreamTransform',
            description='For testing an event-in/stream-out transform')
        process_definition.executable['module']= 'ion.processes.data.transforms.event_in_stream_out_transform'
        process_definition.executable['class'] = 'EventToStreamTransform'
        proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = 'test_queue'
        config.process.exchange_point = 'science_data'
        config.process.publish_streams.output = stream_id
        config.process.event_type = 'ExampleDetectableEvent'
        config.process.variables = ['voltage', 'temperature' ]

        # Schedule the process
        pid = self.process_dispatcher.schedule_process(process_definition_id=proc_def_id, configuration=config)
        self.addCleanup(self.process_dispatcher.cancel_process,pid)

        #---------------------------------------------------------------------------------------------
        # Create a subscriber for testing
        #---------------------------------------------------------------------------------------------

        ar_cond = gevent.event.AsyncResult()
        def subscriber_callback(m, r, s):
            ar_cond.set(m)
        sub = StandaloneStreamSubscriber('sub', subscriber_callback)
        self.addCleanup(sub.stop)
        sub_id = self.pubsub.create_subscription('subscription_cond',
            stream_ids=[stream_id],
            exchange_name='sub')
        self.pubsub.activate_subscription(sub_id)
        self.queue_cleanup.append(sub.xn.queue)
        sub.start()

        gevent.sleep(4)

        #---------------------------------------------------------------------------------------------
        # Publish an event. The transform has been configured to receive this event
        #---------------------------------------------------------------------------------------------

        event_publisher = EventPublisher("ExampleDetectableEvent")
        event_publisher.publish_event(origin = 'fake_origin', voltage = '5', temperature = '273')

        # Assert that the transform processed the event and published data on the output stream
        result_cond = ar_cond.get(timeout=10)
        self.assertTrue(result_cond)

    def test_create_read_delete_event_process(self):
        """
        Test that the CRUD methods for the event processes work correctly
        """

        #---------------------------------------------------------------------------------------------
        # Create a process definition
        #---------------------------------------------------------------------------------------------

        # Create
        module = 'ion.processes.data.transforms.event_alert_transform'
        class_name = 'EventAlertTransform'
        procdef_id = self.event_management.create_event_process_definition(version='ver_1',
            module=module,
            class_name=class_name,
            uri='http://hare.com',
            arguments=['arg1', 'arg2'],
            event_types=['ExampleDetectableEvent', 'type_2'],
            sub_types=['sub_type_1'])

        # Read
        read_process_def = self.event_management.read_event_process_definition(procdef_id)
        self.assertEquals(read_process_def.arguments, ['arg1', 'arg2'])

        #---------------------------------------------------------------------------------------------
        # Use the process definition to create a process
        #---------------------------------------------------------------------------------------------

        # Create a stream
        param_dict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict',id_only=True)

        stream_def_id = self.pubsub.create_stream_definition('cond_stream_def', parameter_dictionary_id=param_dict_id)

        tdom, sdom = time_series_domain()
        tdom, sdom = tdom.dump(), sdom.dump()

        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        # Create a data product
        data_product_id = self.data_product_management.create_data_product(data_product=dp_obj, stream_definition_id=stream_def_id)

        output_products = {}
        output_products['conductivity'] = data_product_id

        # Create an event process
        event_process_id = self.event_management.create_event_process(  process_definition_id=procdef_id,
                                                                        event_types=['ExampleDetectableEvent','DetectionEvent'],
                                                                        sub_types=['s1', 's2'],
                                                                        origins=['or_1', 'or_2'],
                                                                        origin_types=['or_t1', 'or_t2'],
                                                                        out_data_products = output_products)
        self.addCleanup(self.process_dispatcher.cancel_process, event_process_id)

        #---------------------------------------------------------------------------------------------
        # Read the event process object and make assertions
        #---------------------------------------------------------------------------------------------
        event_process_obj = self.event_management.read_event_process(event_process_id=event_process_id)

        # Get the stream associated with the data product for the sake of making assertions
        stream_ids, _ = self.rrc.find_objects(data_product_id, PRED.hasStream, id_only=True)
        stream_id = stream_ids[0]

        # Assertions!
        self.assertEquals(event_process_obj.detail.output_streams['conductivity'], stream_id)
        self.assertEquals(event_process_obj.detail.event_types, ['ExampleDetectableEvent', 'DetectionEvent'])
        self.assertEquals(event_process_obj.detail.sub_types, ['s1', 's2'])
        self.assertEquals(event_process_obj.detail.origins, ['or_1', 'or_2'])
        self.assertEquals(event_process_obj.detail.origin_types, ['or_t1', 'or_t2'])
Example #42
0
class TestIMSDeployAsPrimaryDevice(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        self._start_container()

        # self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        self.container.start_rel_from_url("res/deploy/r2deploy.yml")

        print "started services"

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubclient = PubsubManagementServiceClient(node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(node=self.container.node)
        self.omsclient = ObservatoryManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()

    def create_logger(self, name, stream_id=""):

        # logger process
        producer_definition = ProcessDefinition(name=name + "_logger")
        producer_definition.executable = {
            "module": "ion.processes.data.stream_granule_logger",
            "class": "StreamGranuleLogger",
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(process_definition=producer_definition)
        configuration = {"process": {"stream_id": stream_id}}
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id, configuration=configuration
        )

        return pid

    def cleanupprocs(self):
        stm = os.popen("ps -e | grep ion.agents.port.logger_process")
        procs = stm.read()
        if len(procs) > 0:
            procs = procs.split()
            if procs[0].isdigit():
                pid = int(procs[0])
                os.kill(pid, signal.SIGKILL)
        stm = os.popen("ps -e | grep ion.agents.instrument.zmq_driver_process")
        procs = stm.read()
        if len(procs) > 0:
            procs = procs.split()
            if procs[0].isdigit():
                pid = int(procs[0])
                os.kill(pid, signal.SIGKILL)

    #       stm = os.popen('rm /tmp/*.pid.txt')

    @unittest.skip("Deprecated by IngestionManagement refactor, timeout on start inst agent?")
    def test_deploy_activate_full(self):

        # ensure no processes or pids are left around by agents or Sims
        # self.cleanupprocs()

        self.loggerpids = []

        # -------------------------------
        # Create InstrumentModel
        # -------------------------------
        instModel_obj = IonObject(RT.InstrumentModel, name="SBE37IMModel", description="SBE37IMModel")
        try:
            instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentModel: %s" % ex)

        # -------------------------------
        # Create InstrumentAgent
        # -------------------------------
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name="agent007",
            description="SBE37IMAgent",
            driver_uri="http://sddevrepo.oceanobservatories.org/releases/seabird_sbe37smb_ooicore-0.0.1-py2.7.egg",
        )

        try:
            instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentAgent: %s" % ex)
        log.debug("new InstrumentAgent id = %s", instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(instModel_id, instAgent_id)

        # -------------------------------
        # Create Instrument Site
        # -------------------------------
        instrumentSite_obj = IonObject(RT.InstrumentSite, name="instrumentSite1", description="SBE37IMInstrumentSite")
        try:
            instrumentSite_id = self.omsclient.create_instrument_site(instrument_site=instrumentSite_obj, parent_id="")
        except BadRequest as ex:
            self.fail("failed to create new InstrumentSite: %s" % ex)
        print "test_deployAsPrimaryDevice: new instrumentSite id = ", instrumentSite_id

        self.omsclient.assign_instrument_model_to_instrument_site(instModel_id, instrumentSite_id)

        # -------------------------------
        # Logical Transform: Output Data Products
        # -------------------------------

        # Construct temporal and spatial Coordinate Reference System objects
        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()

        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            "ctd_parsed_param_dict", id_only=True
        )
        parsed_stream_def_id = self.pubsubclient.create_stream_definition(
            name="parsed", parameter_dictionary_id=parsed_pdict_id
        )

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name("ctd_raw_param_dict", id_only=True)
        raw_stream_def_id = self.pubsubclient.create_stream_definition(name="raw", parameter_dictionary_id=raw_pdict_id)

        # -------------------------------
        # Create Old InstrumentDevice
        # -------------------------------
        instDevice_obj = IonObject(
            RT.InstrumentDevice,
            name="SBE37IMDeviceYear1",
            description="SBE37IMDevice for the FIRST year of deployment",
            serial_number="12345",
        )
        try:
            oldInstDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)
            self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, oldInstDevice_id)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentDevice: %s" % ex)

        print "test_deployAsPrimaryDevice: new Year 1 InstrumentDevice id = ", oldInstDevice_id

        self.rrclient.execute_lifecycle_transition(oldInstDevice_id, LCE.DEPLOY)
        self.rrclient.execute_lifecycle_transition(oldInstDevice_id, LCE.ENABLE)

        # -------------------------------
        # Create Raw and Parsed Data Products for the device
        # -------------------------------

        dp_obj = IonObject(
            RT.DataProduct,
            name="SiteDataProduct",
            description="SiteDataProduct",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        instrument_site_output_dp_id = self.dataproductclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id
        )

        self.damsclient.assign_data_product(
            input_resource_id=oldInstDevice_id, data_product_id=instrument_site_output_dp_id
        )

        # self.dataproductclient.activate_data_product_persistence(data_product_id=instrument_site_output_dp_id)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(instrument_site_output_dp_id, PRED.hasStream, None, True)
        log.debug("Data product streams1 = %s", stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(instrument_site_output_dp_id, PRED.hasDataset, RT.Dataset, True)
        log.debug("Data set for data_product_id1 = %s", dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger("ctd_parsed", stream_ids[0])
        self.loggerpids.append(pid)

        self.omsclient.create_site_data_product(instrumentSite_id, instrument_site_output_dp_id)

        # -------------------------------
        # Create Old Deployment
        # -------------------------------
        deployment_obj = IonObject(RT.Deployment, name="first deployment")

        oldDeployment_id = self.omsclient.create_deployment(deployment_obj)

        # deploy this device to the logical slot
        self.imsclient.deploy_instrument_device(oldInstDevice_id, oldDeployment_id)
        self.omsclient.deploy_instrument_site(instrumentSite_id, oldDeployment_id)

        # -------------------------------
        # Create InstrumentAgentInstance for OldInstrumentDevice to hold configuration information
        # cmd_port=5556, evt_port=5557, comms_method="ethernet", comms_device_address=CFG.device.sbe37.host, comms_device_port=CFG.device.sbe37.port,
        # -------------------------------

        port_agent_config = {
            "device_addr": CFG.device.sbe37.host,
            "device_port": CFG.device.sbe37.port,
            "process_type": PortAgentProcessType.UNIX,
            "binary_path": "port_agent",
            "port_agent_addr": "localhost",
            "command_port": CFG.device.sbe37.port_agent_cmd_port,
            "data_port": CFG.device.sbe37.port_agent_data_port,
            "log_level": 5,
            "type": PortAgentType.ETHERNET,
        }

        raw_config = StreamConfiguration(
            stream_name="raw",
            parameter_dictionary_name="ctd_raw_param_dict",
            records_per_granule=2,
            granule_publish_rate=5,
        )
        parsed_config = StreamConfiguration(
            stream_name="parsed",
            parameter_dictionary_name="ctd_parsed_param_dict",
            records_per_granule=2,
            granule_publish_rate=5,
        )

        instAgentInstance_obj = IonObject(
            RT.InstrumentAgentInstance,
            name="SBE37IMAgentInstanceYear1",
            description="SBE37IMAgentInstanceYear1",
            comms_device_address="sbe37-simulator.oceanobservatories.org",
            comms_device_port=4001,
            port_agent_config=port_agent_config,
            stream_configurations=[raw_config, parsed_config],
        )

        oldInstAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, oldInstDevice_id
        )

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()

        # -------------------------------
        # Create CTD Parsed as the Year 1 data product and attach to instrument
        # -------------------------------

        print "Creating new CDM data product with a stream definition"

        dp_obj = IonObject(
            RT.DataProduct,
            name="ctd_parsed_year1",
            description="ctd stream test year 1",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        ctd_parsed_data_product_year1 = self.dataproductclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id
        )

        print "new ctd_parsed_data_product_id = ", ctd_parsed_data_product_year1

        self.damsclient.assign_data_product(
            input_resource_id=oldInstDevice_id, data_product_id=ctd_parsed_data_product_year1
        )

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(ctd_parsed_data_product_year1, PRED.hasStream, None, True)
        print "test_deployAsPrimaryDevice: Data product streams1 = ", stream_ids

        # -------------------------------
        # Create New InstrumentDevice
        # -------------------------------
        instDevice_obj_2 = IonObject(
            RT.InstrumentDevice,
            name="SBE37IMDeviceYear2",
            description="SBE37IMDevice for the SECOND year of deployment",
            serial_number="67890",
        )
        try:
            newInstDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj_2)
            self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, newInstDevice_id)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentDevice: %s" % ex)

        print "test_deployAsPrimaryDevice: new  Year 2 InstrumentDevice id = ", newInstDevice_id

        # set the LCSTATE
        self.rrclient.execute_lifecycle_transition(newInstDevice_id, LCE.DEPLOY)
        self.rrclient.execute_lifecycle_transition(newInstDevice_id, LCE.ENABLE)

        instDevice_obj_2 = self.rrclient.read(newInstDevice_id)
        log.debug("test_deployAsPrimaryDevice: Create New InstrumentDevice LCSTATE: %s ", str(instDevice_obj_2.lcstate))

        # -------------------------------
        # Create New Deployment
        # -------------------------------
        deployment_obj = IonObject(RT.Deployment, name="second deployment")

        newDeployment_id = self.omsclient.create_deployment(deployment_obj)

        # deploy this device to the logical slot
        self.imsclient.deploy_instrument_device(newInstDevice_id, newDeployment_id)
        self.omsclient.deploy_instrument_site(instrumentSite_id, newDeployment_id)

        # -------------------------------
        # Create InstrumentAgentInstance for NewInstrumentDevice to hold configuration information
        # -------------------------------

        port_agent_config = {
            "device_addr": "sbe37-simulator.oceanobservatories.org",
            "device_port": 4004,
            "process_type": PortAgentProcessType.UNIX,
            "binary_path": "port_agent",
            "port_agent_addr": "localhost",
            "command_port": 4005,
            "data_port": 4006,
            "log_level": 5,
            "type": PortAgentType.ETHERNET,
        }

        instAgentInstance_obj = IonObject(
            RT.InstrumentAgentInstance,
            name="SBE37IMAgentInstanceYear2",
            description="SBE37IMAgentInstanceYear2",
            comms_device_address="sbe37-simulator.oceanobservatories.org",
            comms_device_port=4004,
            port_agent_config=port_agent_config,
        )

        newInstAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, newInstDevice_id
        )

        # -------------------------------
        # Create CTD Parsed as the Year 2 data product
        # -------------------------------

        dp_obj = IonObject(
            RT.DataProduct,
            name="ctd_parsed_year2",
            description="ctd stream test year 2",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        ctd_parsed_data_product_year2 = self.dataproductclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id
        )

        print "new ctd_parsed_data_product_id = ", ctd_parsed_data_product_year2

        self.damsclient.assign_data_product(
            input_resource_id=newInstDevice_id, data_product_id=ctd_parsed_data_product_year2
        )

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(ctd_parsed_data_product_year2, PRED.hasStream, None, True)
        print "test_deployAsPrimaryDevice: Data product streams2 = ", stream_ids

        # -------------------------------
        # L0 Conductivity - Temperature - Pressure: Data Process Definition
        # -------------------------------
        log.debug("test_deployAsPrimaryDevice: create data process definition ctd_L0_all")
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name="ctd_L0_all",
            description="transform ctd package into three separate L0 streams",
            module="ion.processes.data.transforms.ctd.ctd_L0_all",
            class_name="ctd_L0_all",
        )
        try:
            ctd_L0_all_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except BadRequest as ex:
            self.fail("failed to create new ctd_L0_all data process definition: %s" % ex)

        # -------------------------------
        # L0 Conductivity - Temperature - Pressure: Output Data Products
        # -------------------------------

        outgoing_stream_l0_conductivity_id = self.pubsubclient.create_stream_definition(
            name="L0_Conductivity", parameter_dictionary_id=parsed_pdict_id
        )
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l0_conductivity_id, ctd_L0_all_dprocdef_id, binding="conductivity"
        )

        outgoing_stream_l0_pressure_id = self.pubsubclient.create_stream_definition(
            name="L0_Pressure", parameter_dictionary_id=parsed_pdict_id
        )
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l0_pressure_id, ctd_L0_all_dprocdef_id, binding="pressure"
        )

        outgoing_stream_l0_temperature_id = self.pubsubclient.create_stream_definition(
            name="L0_Temperature", parameter_dictionary_id=parsed_pdict_id
        )
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l0_temperature_id, ctd_L0_all_dprocdef_id, binding="temperature"
        )

        self.output_products = {}
        log.debug("test_deployAsPrimaryDevice: create output data product L0 conductivity")

        ctd_l0_conductivity_output_dp_obj = IonObject(
            RT.DataProduct,
            name="L0_Conductivity",
            description="transform output conductivity",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        ctd_l0_conductivity_output_dp_id = self.dataproductclient.create_data_product(
            data_product=ctd_l0_conductivity_output_dp_obj, stream_definition_id=parsed_stream_def_id
        )
        self.output_products["conductivity"] = ctd_l0_conductivity_output_dp_id
        # self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l0_conductivity_output_dp_id)

        log.debug("test_deployAsPrimaryDevice: create output data product L0 pressure")

        ctd_l0_pressure_output_dp_obj = IonObject(
            RT.DataProduct,
            name="L0_Pressure",
            description="transform output pressure",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        ctd_l0_pressure_output_dp_id = self.dataproductclient.create_data_product(
            data_product=ctd_l0_pressure_output_dp_obj, stream_definition_id=parsed_stream_def_id
        )

        self.output_products["pressure"] = ctd_l0_pressure_output_dp_id
        # self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l0_pressure_output_dp_id)

        log.debug("test_deployAsPrimaryDevice: create output data product L0 temperature")

        ctd_l0_temperature_output_dp_obj = IonObject(
            RT.DataProduct,
            name="L0_Temperature",
            description="transform output temperature",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        ctd_l0_temperature_output_dp_id = self.dataproductclient.create_data_product(
            data_product=ctd_l0_temperature_output_dp_obj, stream_definition_id=parsed_stream_def_id
        )

        self.output_products["temperature"] = ctd_l0_temperature_output_dp_id
        # self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l0_temperature_output_dp_id)

        # -------------------------------
        # L0 Conductivity - Temperature - Pressure: Create the data process, listening to  Sim1   (later: logical instrument output product)
        # -------------------------------
        log.debug("test_deployAsPrimaryDevice: create L0 all data_process start")
        try:
            ctd_l0_all_data_process_id = self.dataprocessclient.create_data_process(
                ctd_L0_all_dprocdef_id, [ctd_parsed_data_product_year1], self.output_products
            )
            self.dataprocessclient.activate_data_process(ctd_l0_all_data_process_id)
        except BadRequest as ex:
            self.fail("failed to create new data process: %s" % ex)
        log.debug("test_deployAsPrimaryDevice: create L0 all data_process return")

        # --------------------------------
        # Activate the deployment
        # --------------------------------
        self.omsclient.activate_deployment(oldDeployment_id)

        # -------------------------------
        # Launch InstrumentAgentInstance Sim1, connect to the resource agent client
        # -------------------------------
        self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=oldInstAgentInstance_id)
        self.addCleanup(
            self.imsclient.stop_instrument_agent_instance, instrument_agent_instance_id=oldInstAgentInstance_id
        )

        # wait for start
        instance_obj = self.imsclient.read_instrument_agent_instance(oldInstAgentInstance_id)
        gate = ProcessStateGate(
            self.processdispatchclient.read_process, instance_obj.agent_process_id, ProcessStateEnum.RUNNING
        )
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" % instance_obj.agent_process_id,
        )

        inst_agent1_instance_obj = self.imsclient.read_instrument_agent_instance(oldInstAgentInstance_id)
        print "test_deployAsPrimaryDevice: Instrument agent instance obj: = ", inst_agent1_instance_obj

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client_sim1 = ResourceAgentClient(
            "iaclient Sim1", name=inst_agent1_instance_obj.agent_process_id, process=FakeProcess()
        )
        print "activate_instrument: got _ia_client_sim1 %s", self._ia_client_sim1
        log.debug(" test_deployAsPrimaryDevice:: got _ia_client_sim1 %s", str(self._ia_client_sim1))

        # -------------------------------
        # Launch InstrumentAgentInstance Sim2, connect to the resource agent client
        # -------------------------------
        self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=newInstAgentInstance_id)
        self.addCleanup(
            self.imsclient.stop_instrument_agent_instance, instrument_agent_instance_id=newInstAgentInstance_id
        )

        # wait for start
        instance_obj = self.imsclient.read_instrument_agent_instance(newInstAgentInstance_id)
        gate = ProcessStateGate(
            self.processdispatchclient.read_process, instance_obj.agent_process_id, ProcessStateEnum.RUNNING
        )
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" % instance_obj.agent_process_id,
        )

        inst_agent2_instance_obj = self.imsclient.read_instrument_agent_instance(newInstAgentInstance_id)
        print "test_deployAsPrimaryDevice: Instrument agent instance obj: = ", inst_agent2_instance_obj

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client_sim2 = ResourceAgentClient(
            "iaclient Sim2", name=inst_agent2_instance_obj.agent_process_id, process=FakeProcess()
        )
        print "activate_instrument: got _ia_client_sim2 %s", self._ia_client_sim2
        log.debug(" test_deployAsPrimaryDevice:: got _ia_client_sim2 %s", str(self._ia_client_sim2))

        # -------------------------------
        # Streaming Sim1 (old instrument)
        # -------------------------------

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client_sim1.execute_agent(cmd)
        log.debug("test_deployAsPrimaryDevice: initialize %s", str(retval))

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client_sim1.execute_agent(cmd)
        log.debug("test_deployAsPrimaryDevice: return value from go_active %s", str(reply))
        self.assertTrue(reply)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client_sim1.execute_agent(cmd)
        state = retval.result
        log.debug("(L4-CI-SA-RQ-334): current state after sending go_active command %s", str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client_sim1.execute_agent(cmd)
        log.debug("test_deployAsPrimaryDevice: run %s", str(reply))

        gevent.sleep(2)

        cmd = AgentCommand(command=SBE37ProtocolEvent.START_AUTOSAMPLE)
        retval = self._ia_client_sim1.execute_resource(cmd)
        log.debug("test_activateInstrumentSample: return from START_AUTOSAMPLE: %s", str(retval))

        # -------------------------------
        # Streaming Sim 2 (new instrument)
        # -------------------------------

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client_sim2.execute_agent(cmd)
        log.debug("test_deployAsPrimaryDevice: initialize_sim2 %s", str(retval))

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client_sim2.execute_agent(cmd)
        log.debug("test_deployAsPrimaryDevice: return value from go_active_sim2 %s", str(reply))

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client_sim2.execute_agent(cmd)
        state = retval.result
        log.debug("(L4-CI-SA-RQ-334): current state after sending go_active_sim2 command %s", str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client_sim2.execute_agent(cmd)
        log.debug("test_deployAsPrimaryDevice: run %s", str(reply))

        gevent.sleep(2)

        cmd = AgentCommand(command=SBE37ProtocolEvent.START_AUTOSAMPLE)
        retval = self._ia_client_sim2.execute_resource(cmd)
        log.debug("test_activateInstrumentSample: return from START_AUTOSAMPLE_sim2: %s", str(retval))

        gevent.sleep(10)

        # -------------------------------
        # Shutdown Sim1 (old instrument)
        # -------------------------------
        cmd = AgentCommand(command=SBE37ProtocolEvent.STOP_AUTOSAMPLE)
        retval = self._ia_client_sim1.execute_resource(cmd)
        log.debug("test_activateInstrumentSample: return from STOP_AUTOSAMPLE: %s", str(retval))

        log.debug("test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client_sim1.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s", str(reply))
        time.sleep(5)

        # -------------------------------
        # Shutdown Sim2 (new instrument)
        # -------------------------------
        cmd = AgentCommand(command=SBE37ProtocolEvent.STOP_AUTOSAMPLE)
        retval = self._ia_client_sim2.execute_resource(cmd)
        log.debug("test_activateInstrumentSample: return from STOP_AUTOSAMPLE_sim2: %s", str(retval))

        log.debug("test_activateInstrumentSample: calling reset_sim2 ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client_sim2.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset_sim2 %s", str(reply))
        time.sleep(5)
Example #43
class TestActivateInstrumentIntegration(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()
        config.bootstrap.use_es = True

        self._start_container()
        self.addCleanup(TestActivateInstrumentIntegration.es_cleanup)

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        #setup listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()

    @staticmethod
    def es_cleanup():
        es_host = CFG.get_safe('server.elasticsearch.host', 'localhost')
        es_port = CFG.get_safe('server.elasticsearch.port', '9200')
        es = ep.ElasticSearch(host=es_host, port=es_port, timeout=10)
        indexes = STD_INDEXES.keys()
        indexes.append('%s_resources_index' % get_sys_name().lower())
        indexes.append('%s_events_index' % get_sys_name().lower())

        for index in indexes:
            IndexManagementService._es_call(es.river_couchdb_delete, index)
            IndexManagementService._es_call(es.index_delete, index)

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name + '_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(
            process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id,
            configuration=configuration)

        return pid

    def _create_notification(self,
                             user_name='',
                             instrument_id='',
                             product_id=''):
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------

        notification_request_1 = NotificationRequest(
            name='notification_1',
            origin=instrument_id,
            origin_type="instrument",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest(
            name='notification_2',
            origin=product_id,
            origin_type="data product",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------

        user = UserInfo()
        user.name = user_name
        user.contact.email = '*****@*****.**' % user_name

        user_id, _ = self.rrclient.create(user)

        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------

        self.usernotificationclient.create_notification(
            notification=notification_request_1, user_id=user_id)
        self.usernotificationclient.create_notification(
            notification=notification_request_2, user_id=user_id)
        log.debug(
            "test_activateInstrumentSample: create_user_notifications user_id %s",
            str(user_id))

        return user_id

    def get_datastore(self, dataset_id):
        dataset = self.datasetclient.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(
            datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def _check_computed_attributes_of_extended_instrument(
            self, expected_instrument_device_id='', extended_instrument=None):

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_instrument.computed.firmware_version,
                              ComputedFloatValue)
        self.assertIsInstance(
            extended_instrument.computed.last_data_received_datetime,
            ComputedFloatValue)
        self.assertIsInstance(
            extended_instrument.computed.last_calibration_datetime,
            ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.uptime,
                              ComputedStringValue)

        self.assertIsInstance(
            extended_instrument.computed.power_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.communications_status_roll_up,
            ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.data_status_roll_up,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_instrument.computed.location_status_roll_up,
            ComputedIntValue)

        # the following assert will not work without elasticsearch.
        #self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )
        self.assertEqual(
            extended_instrument.computed.communications_status_roll_up.value,
            StatusType.STATUS_WARNING)
        self.assertEqual(
            extended_instrument.computed.data_status_roll_up.value,
            StatusType.STATUS_OK)
        self.assertEqual(
            extended_instrument.computed.power_status_roll_up.value,
            StatusType.STATUS_WARNING)

        # Verify the computed attribute for user notification requests
        self.assertEqual(
            1,
            len(extended_instrument.computed.user_notification_requests.value))
        notifications = extended_instrument.computed.user_notification_requests.value
        notification = notifications[0]
        self.assertEqual(notification.origin, expected_instrument_device_id)
        self.assertEqual(notification.origin_type, "instrument")
        self.assertEqual(notification.event_type, 'ResourceLifecycleEvent')

    def _check_computed_attributes_of_extended_product(
            self, expected_data_product_id='', extended_data_product=None):

        self.assertEqual(expected_data_product_id, extended_data_product._id)
        log.debug("extended_data_product.computed: %s",
                  extended_data_product.computed)

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(
            extended_data_product.computed.product_download_size_estimated,
            ComputedIntValue)
        self.assertIsInstance(
            extended_data_product.computed.number_active_subscriptions,
            ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_url,
                              ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.stored_data_size,
                              ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.recent_granules,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.parameters,
                              ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.recent_events,
                              ComputedEventListValue)

        self.assertIsInstance(extended_data_product.computed.provenance,
                              ComputedDictValue)
        self.assertIsInstance(
            extended_data_product.computed.user_notification_requests,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.active_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(
            extended_data_product.computed.past_user_subscriptions,
            ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.last_granule,
                              ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.is_persisted,
                              ComputedIntValue)
        self.assertIsInstance(
            extended_data_product.computed.data_contents_updated,
            ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.data_datetime,
                              ComputedListValue)

        # exact text here keeps changing to fit UI capabilities.  keep assertion general...
        self.assertTrue('ok' in extended_data_product.computed.last_granule.
                        value['quality_flag'])
        self.assertEqual(
            2, len(extended_data_product.computed.data_datetime.value))

        notifications = extended_data_product.computed.user_notification_requests.value

        notification = notifications[0]
        self.assertEqual(notification.origin, expected_data_product_id)
        self.assertEqual(notification.origin_type, "data product")
        self.assertEqual(notification.event_type, 'DetectionEvent')

    @attr('LOCOINT')
    @unittest.skipIf(not use_es, 'No ElasticSearch')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    @patch.dict(CFG, {'endpoint': {'receive': {'timeout': 60}}})
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug('new InstrumentModel id = %s ', instModel_id)

        #Create stream alarms
        """
        test_two_sided_interval
        Test interval alarm and alarm event publishing for a closed
        interval.
        """

        #        kwargs = {
        #            'name' : 'test_sim_warning',
        #            'stream_name' : 'parsed',
        #            'value_id' : 'temp',
        #            'message' : 'Temperature is above test range of 5.0.',
        #            'type' : StreamAlarmType.WARNING,
        #            'upper_bound' : 5.0,
        #            'upper_rel_op' : '<'
        #        }

        kwargs = {
            'name': 'temperature_warning_interval',
            'stream_name': 'parsed',
            'value_id': 'temp',
            'message':
            'Temperature is below the normal range of 50.0 and above.',
            'type': StreamAlarmType.WARNING,
            'lower_bound': 50.0,
            'lower_rel_op': '<'
        }

        # Create alarm object.
        alarm = {}
        alarm['type'] = 'IntervalAlarmDef'
        alarm['kwargs'] = kwargs

        raw_config = StreamConfiguration(
            stream_name='raw',
            parameter_dictionary_name='ctd_raw_param_dict',
            records_per_granule=2,
            granule_publish_rate=5)
        parsed_config = StreamConfiguration(
            stream_name='parsed',
            parameter_dictionary_name='ctd_parsed_param_dict',
            records_per_granule=2,
            granule_publish_rate=5,
            alarms=[alarm])

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name='agent007',
            description="SBE37IMAgent",
            driver_uri=
            "http://sddevrepo.oceanobservatories.org/releases/seabird_sbe37smb_ooicore-0.0.1a-py2.7.egg",
            stream_configurations=[raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(
            instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            'test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) '
        )
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345")
        instDevice_id = self.imsclient.create_instrument_device(
            instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(
            instModel_id, instDevice_id)

        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) ",
            instDevice_id)

        port_agent_config = {
            'device_addr': CFG.device.sbe37.host,
            'device_port': CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance,
                                          name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config)

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id)

        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()

        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(
            name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_raw_param_dict', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(
            name='raw', parameter_dictionary_id=raw_pdict_id)

        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='the parsed data',
                           description='ctd stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id1 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug('new dp_id = %s', data_product_id1)
        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s', dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger('ctd_parsed', stream_ids[0])
        self.loggerpids.append(pid)

        dp_obj = IonObject(RT.DataProduct,
                           name='the raw data',
                           description='raw stream test',
                           temporal_domain=tdom,
                           spatial_domain=sdom)

        data_product_id2 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification(user_name='user_1',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification(user_name='user_2',
                                              instrument_id=instDevice_id,
                                              product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                   PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s', str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s', dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        #elastic search debug
        es_indexes, _ = self.container.resource_registry.find_resources(
            restype='ElasticSearchIndex')
        log.debug('ElasticSearch indexes: %s', [i.name for i in es_indexes])
        log.debug('Bootstrap %s', CFG.bootstrap.use_es)

        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(
                instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])

        #setup a subscriber to alarm events from the device
        self._events_received = []
        self._event_count = 0
        self._samples_out_of_range = 0
        self._samples_complete = False
        self._async_sample_result = AsyncResult()

        def consume_event(*args, **kwargs):
            log.debug(
                'TestActivateInstrument received ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))
            self._events_received.append(args[0])
            self._event_count = len(self._events_received)
            self._async_sample_result.set()

        self._event_subscriber = EventSubscriber(
            event_type=
            'StreamWarningAlarmEvent',  #'StreamWarningAlarmEvent', #  StreamAlarmEvent
            callback=consume_event,
            origin=instDevice_id)
        self._event_subscriber.start()

        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        def stop_subscriber():
            self._event_subscriber.stop()
            self._event_subscriber = None

        self.addCleanup(stop_subscriber)

        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(
            instAgentInstance_id)
        gate = ProcessStateGate(self.processdispatchclient.read_process,
                                inst_agent_instance_obj.agent_process_id,
                                ProcessStateEnum.RUNNING)
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" %
            inst_agent_instance_obj.agent_process_id)

        log.debug('Instrument agent instance obj: = %s',
                  str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(
            instDevice_id,
            to_name=inst_agent_instance_obj.agent_process_id,
            process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s",
                  str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s", str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s",
                  str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "(L4-CI-SA-RQ-334): current state after sending go_active command %s",
            str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s", str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.STOPPED)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
        for i in xrange(10):
            retval = self._ia_client.execute_resource(cmd)
            log.debug("test_activateInstrumentSample: return from sample %s",
                      str(retval))

        log.debug("test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s",
                  str(reply))

        self._samples_complete = True

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.dataretrieverclient.retrieve(self.parsed_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("test_activateInstrumentSample: RDT parsed: %s",
                  str(rdt.pretty_print()))
        temp_vals = rdt['temp']
        self.assertEquals(len(temp_vals), 10)
        log.debug("test_activateInstrumentSample: all temp_vals: %s",
                  temp_vals)

        #out_of_range_temp_vals = [i for i in temp_vals if i > 5]
        out_of_range_temp_vals = [i for i in temp_vals if i < 50.0]
        log.debug("test_activateInstrumentSample: Out_of_range_temp_vals: %s",
                  out_of_range_temp_vals)
        self._samples_out_of_range = len(out_of_range_temp_vals)

        # if no bad values were produced, then do not wait for an event
        if self._samples_out_of_range == 0:
            self._async_sample_result.set()

        log.debug("test_activateInstrumentSample: _events_received: %s",
                  self._events_received)
        log.debug("test_activateInstrumentSample: _event_count: %s",
                  self._event_count)

        self._async_sample_result.get(timeout=CFG.endpoint.receive.timeout)

        replay_data = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        log.debug("RDT raw: %s", str(rdt.pretty_print()))

        raw_vals = rdt['raw']
        self.assertEquals(len(raw_vals), 10)

        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        note from maurice 2012-05-18: Talk to tim M to verify that this is policy.  If it is then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """

        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id1, user_id=user_id_1)

        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.
                       value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id1,
            extended_data_product=extended_product)

        #--------------------------------------------------------------------------------
        #put some events into the eventsdb to test - this should set the comms and data status to WARNING
        #--------------------------------------------------------------------------------

        t = get_ion_ts()
        self.event_publisher.publish_event(ts_created=t,
                                           event_type='DeviceStatusEvent',
                                           origin=instDevice_id,
                                           state=DeviceStatusType.OUT_OF_RANGE,
                                           values=[200])
        self.event_publisher.publish_event(
            ts_created=t,
            event_type='DeviceCommsEvent',
            origin=instDevice_id,
            state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
            lapse_interval_seconds=20)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_1)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(
            data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(
            expected_data_product_id=data_product_id2,
            extended_data_product=extended_product)

        #---------- Put some events into the eventsdb to test - this should set the comms and data status to WARNING  ---------

        t = get_ion_ts()
        self.event_publisher.publish_event(ts_created=t,
                                           event_type='DeviceStatusEvent',
                                           origin=instDevice_id,
                                           state=DeviceStatusType.OUT_OF_RANGE,
                                           values=[200])
        self.event_publisher.publish_event(
            ts_created=t,
            event_type='DeviceCommsEvent',
            origin=instDevice_id,
            state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
            lapse_interval_seconds=20)

        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(
            instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(
            expected_instrument_device_id=instDevice_id,
            extended_instrument=extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
Example #44
class BaseHighAvailabilityAgentTest(IonIntegrationTestCase):

    @needs_epu
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')
        #self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)
        self.pd_cli = ProcessDispatcherServiceClient(to_name="process_dispatcher")

        self.process_definition_id = uuid4().hex
        self.process_definition_name = 'test_haagent_%s' % self.process_definition_id
        self.process_definition = ProcessDefinition(name=self.process_definition_name, executable={
                'module': 'ion.agents.cei.test.test_haagent',
                'class': 'TestProcess'
        })
        self.pd_cli.create_process_definition(self.process_definition, self.process_definition_id)

        service_definition = SERVICE_DEFINITION_TMPL % self.process_definition_name
        sd = IonObject(RT.ServiceDefinition, {"name": self.process_definition_name,
            "definition": service_definition})
        self.service_def_id, _ = self.container.resource_registry.create(sd)

        self.resource_id = "haagent_1234"
        self._haa_name = "high_availability_agent"
        self._haa_dashi_name = "dashi_haa_" + uuid4().hex
        self._haa_dashi_uri = get_dashi_uri_from_cfg()
        self._haa_dashi_exchange = "hatests"
        self._haa_config = self._get_haagent_config()

        self._base_services, _ = self.container.resource_registry.find_resources(
                restype="Service", name=self.process_definition_name)

        self._base_procs = self.pd_cli.list_processes()

        self.waiter = ProcessStateWaiter()
        self.waiter.start()

        self.container_client = ContainerAgentClient(node=self.container.node,
            name=self.container.name)
        self._spawn_haagent()

        self._setup_haa_client()

    def _get_haagent_config(self):
        return {
            'highavailability': {
                'policy': {
                    'interval': 1,
                    'name': 'npreserving',
                    'parameters': {
                        'preserve_n': 0
                    }
                },
                'process_definition_id': self.process_definition_id,
                'dashi_messaging': True,
                'dashi_exchange': self._haa_dashi_exchange,
                'dashi_name': self._haa_dashi_name
            },
            'agent': {'resource_id': self.resource_id},
        }

    def _setup_haa_client(self):
        # Start a resource agent client to talk with the instrument agent.
        self._haa_pyon_client = SimpleResourceAgentClient(self.resource_id, process=FakeProcess())

        self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client)

    def _spawn_haagent(self, policy_parameters=None):

        config = deepcopy(self._haa_config)
        if policy_parameters is not None:
            config['highavailability']['policy']['parameters'] = policy_parameters
        self._haa_pid = self.container_client.spawn_process(name=self._haa_name,
            module="ion.agents.cei.high_availability_agent",
            cls="HighAvailabilityAgent", config=config)

    def _kill_haagent(self):
        self.container.terminate_process(self._haa_pid)

    def tearDown(self):

        new_policy = {'preserve_n': 0}
        self.haa_client.reconfigure_policy(new_policy)

        self.await_ha_state('STEADY')
        self.assertEqual(len(self.get_running_procs()), 0)

        self.waiter.stop()
        try:
            self._kill_haagent()
        except BadRequest:
            log.warning("Couldn't terminate HA Agent in teardown (May have been terminated by a test)")
        self.container.resource_registry.delete(self.service_def_id, del_associations=True)
        self._stop_container()

    def get_running_procs(self):
        """returns a normalized set of running procs (removes the ones that
        were there at setup time)
        """

        base = self._base_procs
        base_pids = [proc.process_id for proc in base]
        current = self.pd_cli.list_processes()
        current_pids = [proc.process_id for proc in current]
        print "filtering base procs %s from %s" % (base_pids, current_pids)
        normal = [cproc for cproc in current if cproc.process_id not in base_pids and cproc.process_state == ProcessStateEnum.RUNNING]
        return normal

    def get_new_services(self):

        base = self._base_services
        base_names = [i.name for i in base]
        services_registered, _ = self.container.resource_registry.find_resources(
                restype="Service", name=self.process_definition_name)
        normal = [cserv for cserv in services_registered if cserv.name not in base_names]
        return normal

    def await_ha_state(self, want_state, timeout=20):

        for i in range(0, timeout):
            try:
                status = self.haa_client.status().result
                if status == want_state:
                    return
                else:
                    procs = self.get_running_procs()
                    num_procs = len(procs)
                    log.debug("await_ha_state wants state %s, got state %s, with %s procs" % (want_state, status, num_procs))
            except Exception:
                log.exception("Problem getting HA status, trying again...")
            # sleep between polls, whether or not the status call succeeded
            gevent.sleep(1)

        raise Exception("Took more than %s seconds to get to ha state %s" % (timeout, want_state))

    def await_pyon_ha_state(self, want_state, timeout=20):
        for i in range(0, timeout):
            try:
                result = self.haa_client.dump().result
                service_id = result.get('service_id')
                service = self.container.resource_registry.read(service_id)

                if service.state == want_state:
                    return
                else:
                    log.debug("want state %s, got state %s", want_state, service.state)

            except Exception:
                log.exception("Problem getting HA status, trying again...")
            # sleep between polls, whether or not the lookup succeeded
            gevent.sleep(1)

        raise Exception("Took more than %s seconds to get to pyon ha state %s" % (timeout, want_state))
Example #45
    def test_raw_stream_integration(self):
        cc = self.container
        assertions = self.assertTrue

        #-----------------------------
        # Copy below here to run as a script (don't forget the imports of course!)
        #-----------------------------
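
        # If this block is lifted into a standalone script, the imports it relies on
        # are roughly the following (module paths are assumptions and may differ
        # between coi-services releases):
        #
        #   from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
        #   from interface.services.dm.iingestion_management_service import IngestionManagementServiceClient
        #   from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
        #   from interface.services.cei.iprocess_dispatcher_service import ProcessDispatcherServiceClient
        #   from interface.objects import ProcessDefinition, CouchStorage, StreamQuery
        #
        #   plus gevent, FileSystem/FS, StreamSubscriberRegistrar and the
        #   SBE37_RAW_stream_definition helper used further down.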

        # Create some service clients...
        pubsub_management_service = PubsubManagementServiceClient(node=cc.node)
        ingestion_management_service = IngestionManagementServiceClient(
            node=cc.node)
        dataset_management_service = DatasetManagementServiceClient(
            node=cc.node)
        process_dispatcher = ProcessDispatcherServiceClient(node=cc.node)

        # declare some handy variables

        datastore_name = 'test_dm_integration'

        ###
        ### In the beginning there was one stream definitions...
        ###
        # create a stream definition for the data from the ctd simulator
        raw_ctd_stream_def = SBE37_RAW_stream_definition()
        raw_ctd_stream_def_id = pubsub_management_service.create_stream_definition(
            container=raw_ctd_stream_def, name='Simulated RAW CTD data')

        ###
        ### And two process definitions...
        ###
        # one for the ctd simulator...
        producer_definition = ProcessDefinition()
        producer_definition.executable = {
            'module': 'ion.processes.data.raw_stream_publisher',
            'class': 'RawStreamPublisher'
        }

        raw_ctd_sim_procdef_id = process_dispatcher.create_process_definition(
            process_definition=producer_definition)

        #---------------------------
        # Set up ingestion - this is an operator concern - not done by SA in a deployed system
        #---------------------------
        # Configure ingestion using one worker, ingesting to the test_dm_integration datastore with the SCIDATA profile
        log.debug('Calling create_ingestion_configuration')
        ingestion_configuration_id = ingestion_management_service.create_ingestion_configuration(
            exchange_point_id='science_data',
            couch_storage=CouchStorage(datastore_name=datastore_name,
                                       datastore_profile='SCIDATA'),
            number_of_workers=1)
        #
        ingestion_management_service.activate_ingestion_configuration(
            ingestion_configuration_id=ingestion_configuration_id)

        #---------------------------
        # Set up the producer (CTD Simulator)
        #---------------------------

        # Create the stream
        raw_ctd_stream_id = pubsub_management_service.create_stream(
            stream_definition_id=raw_ctd_stream_def_id)

        # Set up the datasets
        raw_ctd_dataset_id = dataset_management_service.create_dataset(
            stream_id=raw_ctd_stream_id,
            datastore_name=datastore_name,
            view_name='datasets/stream_join_granule')

        # Configure ingestion of this dataset
        raw_ctd_dataset_config_id = ingestion_management_service.create_dataset_configuration(
            dataset_id=raw_ctd_dataset_id,
            archive_data=True,
            archive_metadata=True,
            ingestion_configuration_id=
            ingestion_configuration_id,  # you need to know the ingestion configuration id!
        )
        # Hold onto ctd_dataset_config_id if you want to stop/start ingestion of that dataset by the ingestion service

        # Start the ctd simulator to produce some data
        configuration = {
            'process': {
                'stream_id': raw_ctd_stream_id,
            }
        }
        raw_sim_pid = process_dispatcher.schedule_process(
            process_definition_id=raw_ctd_sim_procdef_id,
            configuration=configuration)

        ###
        ### Make a subscriber in the test to listen for salinity data
        ###
        raw_subscription_id = pubsub_management_service.create_subscription(
            query=StreamQuery([
                raw_ctd_stream_id,
            ]),
            exchange_name='raw_test',
            name="test raw subscription",
        )

        # this is okay - even in cei mode!
        pid = cc.spawn_process(name='dummy_process_for_test',
                               module='pyon.ion.process',
                               cls='SimpleProcess',
                               config={})
        dummy_process = cc.proc_manager.procs[pid]

        subscriber_registrar = StreamSubscriberRegistrar(process=dummy_process,
                                                         node=cc.node)

        result = gevent.event.AsyncResult()
        results = []

        def message_received(message, headers):
            # Heads
            log.warn('Raw data received!')
            results.append(message)
            if len(results) > 3:
                result.set(True)

        subscriber = subscriber_registrar.create_subscriber(
            exchange_name='raw_test', callback=message_received)
        subscriber.start()

        # after the queue has been created it is safe to activate the subscription
        pubsub_management_service.activate_subscription(
            subscription_id=raw_subscription_id)

        # Assert that we have received data
        assertions(result.get(timeout=10))

        # stop the flow parse the messages...
        process_dispatcher.cancel_process(
            raw_sim_pid
        )  # kill the ctd simulator process - that is enough data

        gevent.sleep(1)

        for message in results:

            sha1 = message.identifiables['stream_encoding'].sha1

            data = message.identifiables['data_stream'].values

            filename = FileSystem.get_hierarchical_url(FS.CACHE, sha1, ".raw")

            with open(filename, 'r') as f:

                assertions(data == f.read())
class TestActivateInstrumentIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        super(TestActivateInstrumentIntegration, self).setUp()
        config = DotDict()

        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(node=self.container.node)
        self.datasetclient =  DatasetManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()
        self.usernotificationclient = UserNotificationServiceClient()

        #setup listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

        self.event_publisher = EventPublisher()


    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module':'ion.processes.data.stream_granule_logger',
            'class':'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process':{
                'stream_id':stream_id,
                }
        }
        pid = self.processdispatchclient.schedule_process(process_definition_id=logger_procdef_id,
                                                            configuration=configuration)

        return pid

    def _create_notification(self, user_name = '', instrument_id='', product_id=''):
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------

        notification_request_1 = NotificationRequest(   name= 'notification_1',
            origin=instrument_id,
            origin_type="instrument",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest(   name='notification_2',
            origin=product_id,
            origin_type="data product",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------

        user = UserInfo()
        user.name = user_name
        user.contact.email = '*****@*****.**' % user_name

        user_id, _ = self.rrclient.create(user)

        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------

        self.usernotificationclient.create_notification(notification=notification_request_1, user_id=user_id)
        self.usernotificationclient.create_notification(notification=notification_request_2, user_id=user_id)
        log.debug( "test_activateInstrumentSample: create_user_notifications user_id %s", str(user_id) )

        return user_id

    def get_datastore(self, dataset_id):
        dataset = self.datasetclient.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def _check_computed_attributes_of_extended_instrument(self, expected_instrument_device_id = '',extended_instrument = None):

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_instrument.computed.last_data_received_datetime, ComputedFloatValue)
        self.assertIsInstance(extended_instrument.computed.uptime, ComputedStringValue)

        self.assertIsInstance(extended_instrument.computed.power_status_roll_up, ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.communications_status_roll_up, ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.data_status_roll_up, ComputedIntValue)
        self.assertIsInstance(extended_instrument.computed.location_status_roll_up, ComputedIntValue)

        # the following assert will not work without elasticsearch.
        #self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )

        # Verify the computed attribute for user notification requests
        self.assertEqual( 1, len(extended_instrument.computed.user_notification_requests.value) )
        notifications = extended_instrument.computed.user_notification_requests.value
        notification = notifications[0]
        self.assertEqual(expected_instrument_device_id, notification.origin)
        self.assertEqual("instrument", notification.origin_type)
        self.assertEqual('ResourceLifecycleEvent', notification.event_type)


    def _check_computed_attributes_of_extended_product(self, expected_data_product_id = '', extended_data_product = None):

        self.assertEqual(expected_data_product_id, extended_data_product._id)
        log.debug("extended_data_product.computed: %s", extended_data_product.computed)

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_data_product.computed.product_download_size_estimated, ComputedFloatValue)
        self.assertIsInstance(extended_data_product.computed.number_active_subscriptions, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_url, ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.stored_data_size, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.recent_granules, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.parameters, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.recent_events, ComputedEventListValue)

        self.assertIsInstance(extended_data_product.computed.provenance, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.user_notification_requests, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.active_user_subscriptions, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.past_user_subscriptions, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.last_granule, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.is_persisted, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_contents_updated, ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.data_datetime, ComputedListValue)

        # exact text here keeps changing to fit UI capabilities.  keep assertion general...
        self.assertEqual( 2, len(extended_data_product.computed.data_datetime.value) )

        notifications = extended_data_product.computed.user_notification_requests.value

        notification = notifications[0]
        self.assertEqual(expected_data_product_id, notification.origin)
        self.assertEqual("data product", notification.origin_type)
        self.assertEqual('DetectionEvent', notification.event_type)


    @attr('LOCOINT')
    #@unittest.skip('refactoring')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    @patch.dict(CFG, {'endpoint':{'receive':{'timeout': 90}}})
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='SBE37IMModel',
                                  description="SBE37IMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug( 'new InstrumentModel id = %s ', instModel_id)




        raw_config = StreamConfiguration(stream_name='raw', parameter_dictionary_name='raw')
        parsed_config = StreamConfiguration(stream_name='parsed', parameter_dictionary_name='ctd_parsed_param_dict')


        # Create InstrumentAgent
        instAgent_obj = IonObject(RT.InstrumentAgent,
                                  name='agent007',
                                  description="SBE37IMAgent",
                                  driver_uri=DRV_URI_GOOD,
                                  stream_configurations = [raw_config, parsed_config])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('new InstrumentAgent id = %s', instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug('test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) ')
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='SBE37IMDevice',
                                   description="SBE37IMDevice",
                                   serial_number="12345" )
        instDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, instDevice_id)
        log.debug("test_activateInstrumentSample: new InstrumentDevice id = %s (SA Req: L4-CI-SA-RQ-241) " , instDevice_id)


        port_agent_config = {
            'device_addr':  CFG.device.sbe37.host,
            'device_port':  CFG.device.sbe37.port,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': CFG.device.sbe37.port_agent_cmd_port,
            'data_port': CFG.device.sbe37.port_agent_data_port,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }
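        # Port agent configuration: when the agent instance is started, a UNIX
        # 'port_agent' process is launched to bridge the container to the SBE37
        # instrument (typically a simulator in these integration tests); the
        # addresses and ports above come from CFG.device.sbe37.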

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance, name='SBE37IMAgentInstance',
                                          description="SBE37IMAgentInstance",
                                          port_agent_config=port_agent_config,
                                          alerts=[])


        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(instAgentInstance_obj,
                                                                               instAgent_id,
                                                                               instDevice_id)


        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()
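        # time_series_domain() returns temporal and spatial domain objects; dump()
        # converts them to plain dicts so they can be attached to the DataProducts below.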


        parsed_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        parsed_stream_def_id = self.pubsubcli.create_stream_definition(name='parsed', parameter_dictionary_id=parsed_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name('raw', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(name='raw', parameter_dictionary_id=raw_pdict_id)


        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
            name='the parsed data',
            description='ctd stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id1 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=parsed_stream_def_id)
        log.debug( 'new dp_id = %s' , data_product_id1)
        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id1)
        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
        log.debug('Data product streams1 = %s', stream_ids)

        # Retrieve the id of the Dataset associated with the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id1 = %s' , dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]


        pid = self.create_logger('ctd_parsed', stream_ids[0] )
        self.loggerpids.append(pid)


        dp_obj = IonObject(RT.DataProduct,
            name='the raw data',
            description='raw stream test',
            temporal_domain = tdom,
            spatial_domain = sdom)

        data_product_id2 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id2)

        # setup notifications for the device and parsed data product
        user_id_1 = self._create_notification( user_name='user_1', instrument_id=instDevice_id, product_id=data_product_id1)
        #---------- Create notifications for another user and verify that we see different computed subscriptions for the two users ---------
        user_id_2 = self._create_notification( user_name='user_2', instrument_id=instDevice_id, product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasStream, None, True)
        log.debug('Data product streams2 = %s' , str(stream_ids))

        # Retrieve the id of the Dataset associated with the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasDataset, RT.Dataset, True)
        log.debug('Data set for data_product_id2 = %s' , dataset_ids[0])
        self.raw_dataset = dataset_ids[0]


        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])
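        # The agent instance start runs in a greenlet and is joined; the
        # AgentProcessStateGate below then blocks until the agent process
        # actually reports RUNNING before the test proceeds.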


        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)


        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(instAgentInstance_id)
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     instDevice_id,
                                     ProcessStateEnum.RUNNING)
        self.assertTrue(gate.await(30), "The instrument agent instance (%s) did not spawn in 30 seconds" %
                                        gate.process_id)

        #log.trace('Instrument agent instance obj: = %s' , str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=gate.process_id,
                                              process=FakeProcess())

        log.debug("test_activateInstrumentSample: got ia client %s" , str(self._ia_client))

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s" , str(retval))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.INACTIVE, state)

        log.debug("(L4-CI-SA-RQ-334): Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("(L4-CI-SA-RQ-334): current state after sending go_active command %s" , str(state))

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s" , str(reply))
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.STOPPED, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.IDLE, state)

        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(ResourceAgentState.COMMAND, state)

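        # Acquire 10 samples; each ACQUIRE_SAMPLE should add one granule to the
        # parsed dataset, which DatasetMonitor waits for before the next iteration.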
        for i in xrange(10):
            monitor = DatasetMonitor(dataset_id=self.parsed_dataset)
            self._ia_client.execute_resource(AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE))
            if not monitor.wait():
                raise AssertionError('Did not receive a granule for sample %i' % i)
            monitor.stop()


#        cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
#        for i in xrange(10):
#            retval = self._ia_client.execute_resource(cmd)
#            log.debug("test_activateInstrumentSample: return from sample %s" , str(retval))

        log.debug( "test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s" , str(reply))


        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to retrieve
        #--------------------------------------------------------------------------------

        replay_data_raw = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data_raw, Granule)
        rdt_raw = RecordDictionaryTool.load_from_granule(replay_data_raw)
        log.debug("RDT raw: %s", str(rdt_raw.pretty_print()) )

        self.assertIn('raw', rdt_raw)
        raw_vals = rdt_raw['raw']

        all_raw = "".join(raw_vals)

        # look for 't' entered after a prompt -- ">t"
        t_commands = all_raw.count(">t")

        if 10 != t_commands:
            log.error("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals): log.error("raw val %s: %s", i, [r])
            self.fail("Expected 10 't' strings in raw_vals, got %s" % t_commands)
        else:
            log.debug("%s raw_vals: ", len(raw_vals))
            for i, r in enumerate(raw_vals): log.debug("raw val %s: %s", i, [r])

        replay_data_parsed = self.dataretrieverclient.retrieve(self.parsed_dataset)
        self.assertIsInstance(replay_data_parsed, Granule)
        rdt_parsed = RecordDictionaryTool.load_from_granule(replay_data_parsed)
        log.debug("test_activateInstrumentSample: RDT parsed: %s", str(rdt_parsed.pretty_print()) )
        self.assertIn('temp', rdt_parsed)
        temp_vals = rdt_parsed['temp']
        pressure_vals  = rdt_parsed['pressure']
        if 10 != len(temp_vals):
            log.error("%s temp_vals: %s", len(temp_vals), temp_vals)
            self.fail("Expected 10 temp_vals, got %s" % len(temp_vals))


        log.debug("l4-ci-sa-rq-138")
        """
        Physical resource control shall be subject to policy

        Instrument management control capabilities shall be subject to policy

        The actor accessing the control capabilities must be authorized to send commands.

        Note from Maurice 2012-05-18: Talk to Tim M to verify that this is policy. If it is, then talk with Stephen to
                                      get an example of a policy test and use that to create a test stub that will be
                                      completed when we have instrument policies.

        Tim M: The "actor", aka observatory operator, will access the instrument through ION.

        """


        #--------------------------------------------------------------------------------
        # Get the extended data product to see if it contains the granules
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id1, user_id=user_id_1)
        def poller(extended_product):
            return len(extended_product.computed.user_notification_requests.value) == 1

        poll(poller, extended_product, timeout=30)

        self._check_computed_attributes_of_extended_product( expected_data_product_id = data_product_id1, extended_data_product = extended_product)


        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_1)

        #--------------------------------------------------------------------------------
        # For the second user, check the extended data product and the extended instrument
        #--------------------------------------------------------------------------------
        extended_product = self.dpclient.get_data_product_extension(data_product_id=data_product_id2, user_id=user_id_2)
        self._check_computed_attributes_of_extended_product(expected_data_product_id = data_product_id2, extended_data_product = extended_product)


        #--------------------------------------------------------------------------------
        # Get the extended instrument
        #--------------------------------------------------------------------------------

        extended_instrument = self.imsclient.get_instrument_device_extension(instrument_device_id=instDevice_id, user_id=user_id_2)
        self._check_computed_attributes_of_extended_instrument(expected_instrument_device_id = instDevice_id, extended_instrument = extended_instrument)

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(data_product_id1)
        self.dpclient.delete_data_product(data_product_id2)
class LastUpdateCacheTest(IonIntegrationTestCase):
    def setUp(self):
        self._start_container()
        self.datastore_name = CACHE_DATASTORE_NAME
        self.container.start_rel_from_url('res/deploy/r2dm.yml')
        self.db = self.container.datastore_manager.get_datastore(self.datastore_name,DataStore.DS_PROFILE.SCIDATA)
        self.tms_cli = TransformManagementServiceClient()
        self.pubsub_cli = PubsubManagementServiceClient()
        self.pd_cli = ProcessDispatcherServiceClient()
        self.rr_cli = ResourceRegistryServiceClient()
        xs_dot_xp = CFG.core_xps.science_data
        try:
            self.XS, xp_base = xs_dot_xp.split('.')
            self.XP = '.'.join([bootstrap.get_sys_name(), xp_base])
        except ValueError:
            raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)


    def make_points(self, definition, stream_id='I am very special', N=100):
        from prototype.sci_data.constructor_apis import PointSupplementConstructor
        import numpy as np
        import random


        definition.stream_resource_id = stream_id


        total = N
        n = 10 # at most n records per granule
        i = 0

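        # Emit granules of random size (1..n points each) until N points have
        # been produced in total.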
        while i < total:
            r = random.randint(1,n)

            psc = PointSupplementConstructor(point_definition=definition, stream_id=stream_id)
            for x in xrange(r):
                i+=1
                point_id = psc.add_point(time=i, location=(0,0,0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='temperature', value=np.random.normal(loc=48.0,scale=4.0, size=1)[0])
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='pressure', value=np.float32(1.0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=np.float32(2.0))
            granule = psc.close_stream_granule()
            hdf_string = granule.identifiables[definition.data_stream_id].values
            granule.identifiables[definition.data_stream_id].values = hdf_string
            yield granule
        return

    def start_worker(self):


        proc_def = ProcessDefinition()
        proc_def.executable['module'] = 'ion.processes.data.last_update_cache'
        proc_def.executable['class'] = 'LastUpdateCache'
        proc_def_id = self.pd_cli.create_process_definition(process_definition=proc_def)


        subscription_id = self.pubsub_cli.create_subscription(query=ExchangeQuery(), exchange_name='ingestion_aggregate')

        config = {
            'couch_storage': {
                'datastore_name': self.datastore_name,
                'datastore_profile': 'SCIDATA'
            }
        }

        transform_id = self.tms_cli.create_transform(
            name='last_update_cache',
            description='LastUpdate that compiles an aggregate of metadata',
            in_subscription_id=subscription_id,
            process_definition_id=proc_def_id,
            configuration=config
        )


        self.tms_cli.activate_transform(transform_id=transform_id)
        transform = self.rr_cli.read(transform_id)
        pid = transform.process_id
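        # Look up the live process object in this container so the test can
        # wrap its message callback (see test_last_update_cache below).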
        handle = self.container.proc_manager.procs[pid]
        return handle

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_last_update_cache(self):
        handle = self.start_worker()
        queue = Queue()
        o_process = handle.process
        def new_process(msg):
            o_process(msg)
            queue.put(True)
        handle.process = new_process
        definition = SBE37_CDM_stream_definition()
        publisher = Publisher()

        stream_def_id = self.pubsub_cli.create_stream_definition(container=definition)
        stream_id = self.pubsub_cli.create_stream(stream_definition_id=stream_def_id)

        time = float(0.0)

        for granule in self.make_points(definition=definition, stream_id=stream_id, N=10):

            publisher.publish(granule, to_name=(self.XP, stream_id+'.data'))
            # Determinism sucks
            try:
                queue.get(timeout=5)
            except Empty:
                self.fail('Process never received the message.')

            doc = self.db.read(stream_id)
            ntime = doc.variables['time'].value
            self.assertGreaterEqual(ntime, time, 'The documents were not updated in the correct sequence.')
            time = ntime
Example #48
0
class CtdTransformsIntTest(IonIntegrationTestCase):
    def setUp(self):
        super(CtdTransformsIntTest, self).setUp()

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.queue_cleanup = []
        self.exchange_cleanup = []

        self.pubsub = PubsubManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()

        self.exchange_name = 'ctd_L0_all_queue'
        self.exchange_point = 'test_exchange'
        self.i = 0

    def tearDown(self):
        for queue in self.queue_cleanup:
            xn = self.container.ex_manager.create_xn_queue(queue)
            xn.delete()
        for exchange in self.exchange_cleanup:
            xp = self.container.ex_manager.create_xp(exchange)
            xp.delete()

    def test_ctd_L0_all(self):
        '''
        Test that packets are processed by the ctd_L0_all transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='ctd_L0_all', description='For testing ctd_L0_all')
        process_definition.executable[
            'module'] = 'ion.processes.data.transforms.ctd.ctd_L0_all'
        process_definition.executable['class'] = 'ctd_L0_all'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        stream_def_id = self.pubsub.create_stream_definition(
            'ctd_all_stream_def', parameter_dictionary_id=pdict_id)

        cond_stream_id, _ = self.pubsub.create_stream(
            'test_cond',
            exchange_point='science_data',
            stream_definition_id=stream_def_id)

        pres_stream_id, _ = self.pubsub.create_stream(
            'test_pres',
            exchange_point='science_data',
            stream_definition_id=stream_def_id)

        temp_stream_id, _ = self.pubsub.create_stream(
            'test_temp',
            exchange_point='science_data',
            stream_definition_id=stream_def_id)

        config.process.publish_streams.conductivity = cond_stream_id
        config.process.publish_streams.pressure = pres_stream_id
        config.process.publish_streams.temperature = temp_stream_id
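        # The transform consumes granules from queue_name on exchange_point and
        # republishes the split conductivity, pressure and temperature data on
        # the stream ids assigned above.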

        # Schedule the process
        pid = self.process_dispatcher.schedule_process(
            process_definition_id=ctd_transform_proc_def_id,
            configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create subscribers that will receive the conductivity, temperature and pressure granules from
        # the ctd transform
        #---------------------------------------------------------------------------------------------
        ar_cond = gevent.event.AsyncResult()

        def subscriber1(m, r, s):
            ar_cond.set(m)

        sub_cond = StandaloneStreamSubscriber('sub_cond', subscriber1)
        self.addCleanup(sub_cond.stop)

        ar_temp = gevent.event.AsyncResult()

        def subscriber2(m, r, s):
            ar_temp.set(m)

        sub_temp = StandaloneStreamSubscriber('sub_temp', subscriber2)
        self.addCleanup(sub_temp.stop)

        ar_pres = gevent.event.AsyncResult()

        def subscriber3(m, r, s):
            ar_pres.set(m)

        sub_pres = StandaloneStreamSubscriber('sub_pres', subscriber3)
        self.addCleanup(sub_pres.stop)

        sub_cond_id = self.pubsub.create_subscription(
            'subscription_cond',
            stream_ids=[cond_stream_id],
            exchange_name='sub_cond')

        sub_temp_id = self.pubsub.create_subscription(
            'subscription_temp',
            stream_ids=[temp_stream_id],
            exchange_name='sub_temp')

        sub_pres_id = self.pubsub.create_subscription(
            'subscription_pres',
            stream_ids=[pres_stream_id],
            exchange_name='sub_pres')

        self.pubsub.activate_subscription(sub_cond_id)
        self.pubsub.activate_subscription(sub_temp_id)
        self.pubsub.activate_subscription(sub_pres_id)

        self.queue_cleanup.append(sub_cond.xn.queue)
        self.queue_cleanup.append(sub_temp.xn.queue)
        self.queue_cleanup.append(sub_pres.xn.queue)

        sub_cond.start()
        sub_temp.start()
        sub_pres.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)
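        # Bind the transform's input queue to the exchange point with the same
        # routing key the publisher uses, so the published granule is delivered
        # to the ctd_L0_all process.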

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        publish_granule = self._get_new_ctd_packet(
            stream_definition_id=stream_def_id, length=5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result_cond = ar_cond.get(timeout=10)
        result_temp = ar_temp.get(timeout=10)
        result_pres = ar_pres.get(timeout=10)

        out_dict = {}
        out_dict['c'] = RecordDictionaryTool.load_from_granule(
            result_cond)['conductivity']
        out_dict['t'] = RecordDictionaryTool.load_from_granule(
            result_temp)['temp']
        out_dict['p'] = RecordDictionaryTool.load_from_granule(
            result_pres)['pressure']

        # Check that the transform algorithm was successfully executed
        self.check_granule_splitting(publish_granule, out_dict)

    def test_ctd_L1_conductivity(self):
        '''
        Test that packets are processed by the ctd_L1_conductivity transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='CTDL1ConductivityTransform',
            description='For testing CTDL1ConductivityTransform')
        process_definition.executable[
            'module'] = 'ion.processes.data.transforms.ctd.ctd_L1_conductivity'
        process_definition.executable['class'] = 'CTDL1ConductivityTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)

        stream_def_id = self.pubsub.create_stream_definition(
            'cond_stream_def', parameter_dictionary_id=pdict_id)
        cond_stream_id, _ = self.pubsub.create_stream(
            'test_conductivity',
            exchange_point='science_data',
            stream_definition_id=stream_def_id)

        config.process.publish_streams.conductivity = cond_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(
            process_definition_id=ctd_transform_proc_def_id,
            configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create subscribers that will receive the conductivity, temperature and pressure granules from
        # the ctd transform
        #---------------------------------------------------------------------------------------------
        ar_cond = gevent.event.AsyncResult()

        def subscriber1(m, r, s):
            ar_cond.set(m)

        sub_cond = StandaloneStreamSubscriber('sub_cond', subscriber1)
        self.addCleanup(sub_cond.stop)

        sub_cond_id = self.pubsub.create_subscription(
            'subscription_cond',
            stream_ids=[cond_stream_id],
            exchange_name='sub_cond')

        self.pubsub.activate_subscription(sub_cond_id)

        self.queue_cleanup.append(sub_cond.xn.queue)

        sub_cond.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        publish_granule = self._get_new_ctd_packet(
            stream_definition_id=stream_def_id, length=5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result_cond = ar_cond.get(timeout=10)
        self.assertIsInstance(result_cond, Granule)

        rdt = RecordDictionaryTool.load_from_granule(result_cond)
        self.assertIn('conductivity', rdt)

        self.check_cond_algorithm_execution(publish_granule, result_cond)

    def check_cond_algorithm_execution(self, publish_granule,
                                       granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(
            granule_from_transform)

        output_data = output_rdt_transform['conductivity']
        input_data = input_rdt_to_transform['conductivity']

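        # Expected L1 scaling (as asserted below): conductivity = raw / 100000.0 - 0.5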
        self.assertTrue(
            numpy.array_equal(((input_data / 100000.0) - 0.5), output_data))

    def check_pres_algorithm_execution(self, publish_granule,
                                       granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(
            granule_from_transform)

        output_data = output_rdt_transform['pressure']
        input_data = input_rdt_to_transform['pressure']

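        # Expected L1 scaling (as asserted below): pressure = raw / 100.0 + 0.5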
        self.assertTrue(
            numpy.array_equal((input_data / 100.0) + 0.5, output_data))

    def check_temp_algorithm_execution(self, publish_granule,
                                       granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(
            granule_from_transform)

        output_data = output_rdt_transform['temp']
        input_data = input_rdt_to_transform['temp']

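        # Expected L1 scaling (as asserted below): temp = raw / 10000.0 - 10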
        self.assertTrue(
            numpy.array_equal(((input_data / 10000.0) - 10), output_data))

    def check_density_algorithm_execution(self, publish_granule,
                                          granule_from_transform):

        #------------------------------------------------------------------
        # Calculate the correct density from the input granule data
        #------------------------------------------------------------------
        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(
            granule_from_transform)

        conductivity = input_rdt_to_transform['conductivity']
        pressure = input_rdt_to_transform['pressure']
        temperature = input_rdt_to_transform['temp']

        longitude = input_rdt_to_transform['lon']
        latitude = input_rdt_to_transform['lat']

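        # Reference density: practical salinity from the conductivity ratio
        # (SP_from_cndr), absolute salinity (SA_from_SP), then in-situ density
        # rho(sa, temperature, pressure).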
        sp = SP_from_cndr(r=conductivity / cte.C3515,
                          t=temperature,
                          p=pressure)
        sa = SA_from_SP(sp, pressure, longitude, latitude)
        dens_value = rho(sa, temperature, pressure)

        out_density = output_rdt_transform['density']

        log.debug("density output from the transform: %s", out_density)
        log.debug("values of density expected from the transform: %s",
                  dens_value)

        numpy.testing.assert_array_almost_equal(out_density,
                                                dens_value,
                                                decimal=3)

#        #-----------------------------------------------------------------------------
#        # Check that the output data from the transform has the correct density values
#        #-----------------------------------------------------------------------------
#        for item in zip(out_density.tolist(), dens_value.tolist()):
#            if isinstance(item[0], int) or isinstance(item[0], float):
#                self.assertEquals(item[0], item[1])

    def check_salinity_algorithm_execution(self, publish_granule,
                                           granule_from_transform):

        #------------------------------------------------------------------
        # Calculate the correct salinity from the input granule data
        #------------------------------------------------------------------
        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(
            granule_from_transform)

        conductivity = input_rdt_to_transform['conductivity']
        pressure = input_rdt_to_transform['pressure']
        temperature = input_rdt_to_transform['temp']

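        # Reference salinity: practical salinity computed directly from the
        # conductivity ratio (SP_from_cndr).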
        sal_value = SP_from_cndr(r=conductivity / cte.C3515,
                                 t=temperature,
                                 p=pressure)

        out_salinity = output_rdt_transform['salinity']

        #-----------------------------------------------------------------------------
        # Check that the output data from the transform has the correct salinity values
        #-----------------------------------------------------------------------------
        self.assertTrue(numpy.array_equal(sal_value, out_salinity))

    def check_granule_splitting(self, publish_granule, out_dict):
        '''
        This checks that the ctd_L0_all transform splits the conductivity,
        pressure and temperature data out of the whole granule fed into the
        transform.
        '''

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)

        in_cond = input_rdt_to_transform['conductivity']
        in_pressure = input_rdt_to_transform['pressure']
        in_temp = input_rdt_to_transform['temp']

        out_cond = out_dict['c']
        out_pres = out_dict['p']
        out_temp = out_dict['t']

        self.assertTrue(numpy.array_equal(in_cond, out_cond))
        self.assertTrue(numpy.array_equal(in_pressure, out_pres))
        self.assertTrue(numpy.array_equal(in_temp, out_temp))

    def test_ctd_L1_pressure(self):
        '''
        Test that packets are processed by the ctd_L1_pressure transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='CTDL1PressureTransform',
            description='For testing CTDL1PressureTransform')
        process_definition.executable[
            'module'] = 'ion.processes.data.transforms.ctd.ctd_L1_pressure'
        process_definition.executable['class'] = 'CTDL1PressureTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)

        stream_def_id = self.pubsub.create_stream_definition(
            'pres_stream_def', parameter_dictionary_id=pdict_id)
        pres_stream_id, _ = self.pubsub.create_stream(
            'test_pressure',
            stream_definition_id=stream_def_id,
            exchange_point='science_data')

        config.process.publish_streams.pressure = pres_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(
            process_definition_id=ctd_transform_proc_def_id,
            configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create subscribers that will receive the pressure granules from
        # the ctd transform
        #---------------------------------------------------------------------------------------------

        ar_pres = gevent.event.AsyncResult()

        def subscriber3(m, r, s):
            ar_pres.set(m)

        sub_pres = StandaloneStreamSubscriber('sub_pres', subscriber3)
        self.addCleanup(sub_pres.stop)

        sub_pres_id = self.pubsub.create_subscription(
            'subscription_pres',
            stream_ids=[pres_stream_id],
            exchange_name='sub_pres')

        self.pubsub.activate_subscription(sub_pres_id)

        self.queue_cleanup.append(sub_pres.xn.queue)

        sub_pres.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        publish_granule = self._get_new_ctd_packet(
            stream_definition_id=stream_def_id, length=5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result = ar_pres.get(timeout=10)
        self.assertIsInstance(result, Granule)

        rdt = RecordDictionaryTool.load_from_granule(result)
        self.assertIn('pressure', rdt)

        self.check_pres_algorithm_execution(publish_granule, result)

    def test_ctd_L1_temperature(self):
        '''
        Test that packets are processed by the ctd_L1_temperature transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='CTDL1TemperatureTransform',
            description='For testing CTDL1TemperatureTransform')
        process_definition.executable[
            'module'] = 'ion.processes.data.transforms.ctd.ctd_L1_temperature'
        process_definition.executable['class'] = 'CTDL1TemperatureTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)

        stream_def_id = self.pubsub.create_stream_definition(
            'temp_stream_def', parameter_dictionary_id=pdict_id)
        temp_stream_id, _ = self.pubsub.create_stream(
            'test_temperature',
            stream_definition_id=stream_def_id,
            exchange_point='science_data')

        config.process.publish_streams.temperature = temp_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(
            process_definition_id=ctd_transform_proc_def_id,
            configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create subscriber that will receive the temperature granule from
        # the ctd transform
        #---------------------------------------------------------------------------------------------

        ar_temp = gevent.event.AsyncResult()

        def subscriber2(m, r, s):
            ar_temp.set(m)

        sub_temp = StandaloneStreamSubscriber('sub_temp', subscriber2)
        self.addCleanup(sub_temp.stop)

        sub_temp_id = self.pubsub.create_subscription(
            'subscription_temp',
            stream_ids=[temp_stream_id],
            exchange_name='sub_temp')

        self.pubsub.activate_subscription(sub_temp_id)

        self.queue_cleanup.append(sub_temp.xn.queue)

        sub_temp.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        publish_granule = self._get_new_ctd_packet(
            stream_definition_id=stream_def_id, length=5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result = ar_temp.get(timeout=10)
        self.assertIsInstance(result, Granule)

        rdt = RecordDictionaryTool.load_from_granule(result)
        self.assertIn('temp', rdt)

        self.check_temp_algorithm_execution(publish_granule, result)

    def test_ctd_L2_density(self):
        '''
        Test that packets are processed by the ctd_L2_density transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='DensityTransform',
            description='For testing DensityTransform')
        process_definition.executable[
            'module'] = 'ion.processes.data.transforms.ctd.ctd_L2_density'
        process_definition.executable['class'] = 'DensityTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        config.process.interval = 1.0

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)

        stream_def_id = self.pubsub.create_stream_definition(
            'dens_stream_def', parameter_dictionary_id=pdict_id)
        dens_stream_id, _ = self.pubsub.create_stream(
            'test_density',
            stream_definition_id=stream_def_id,
            exchange_point='science_data')
        config.process.publish_streams.density = dens_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(
            process_definition_id=ctd_transform_proc_def_id,
            configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create a subscriber that will receive the density granule from the ctd transform
        #---------------------------------------------------------------------------------------------

        ar_dens = gevent.event.AsyncResult()

        def subscriber3(m, r, s):
            ar_dens.set(m)

        sub_dens = StandaloneStreamSubscriber('sub_dens', subscriber3)
        self.addCleanup(sub_dens.stop)

        sub_dens_id = self.pubsub.create_subscription(
            'subscription_dens',
            stream_ids=[dens_stream_id],
            exchange_name='sub_dens')

        self.pubsub.activate_subscription(sub_dens_id)

        self.queue_cleanup.append(sub_dens.xn.queue)

        sub_dens.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        publish_granule = self._get_new_ctd_packet(
            stream_definition_id=stream_def_id, length=5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result = ar_dens.get(timeout=10)
        self.assertIsInstance(result, Granule)

        rdt = RecordDictionaryTool.load_from_granule(result)
        self.assertIn('density', rdt)

        self.check_density_algorithm_execution(publish_granule, result)

    def test_ctd_L2_salinity(self):
        '''
        Test that packets are processed by the ctd_L2_salinity transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='SalinityTransform',
            description='For testing SalinityTransform')
        process_definition.executable[
            'module'] = 'ion.processes.data.transforms.ctd.ctd_L2_salinity'
        process_definition.executable['class'] = 'SalinityTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        stream_def_id = self.pubsub.create_stream_definition(
            'sal_stream_def', parameter_dictionary_id=pdict_id)
        sal_stream_id, _ = self.pubsub.create_stream(
            'test_salinity',
            stream_definition_id=stream_def_id,
            exchange_point='science_data')

        config.process.publish_streams.salinity = sal_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(
            process_definition_id=ctd_transform_proc_def_id,
            configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create a subscriber that will receive the salinity granule from the ctd transform
        #---------------------------------------------------------------------------------------------

        ar_sal = gevent.event.AsyncResult()

        def subscriber3(m, r, s):
            ar_sal.set(m)

        sub_sal = StandaloneStreamSubscriber('sub_sal', subscriber3)
        self.addCleanup(sub_sal.stop)

        sub_sal_id = self.pubsub.create_subscription(
            'subscription_sal',
            stream_ids=[sal_stream_id],
            exchange_name='sub_sal')

        self.pubsub.activate_subscription(sub_sal_id)

        self.queue_cleanup.append(sub_sal.xn.queue)

        sub_sal.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        publish_granule = self._get_new_ctd_packet(
            stream_definition_id=stream_def_id, length=5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result = ar_sal.get(timeout=10)
        self.assertIsInstance(result, Granule)

        rdt = RecordDictionaryTool.load_from_granule(result)
        self.assertIn('salinity', rdt)

        self.check_salinity_algorithm_execution(publish_granule, result)

    def _get_new_ctd_packet(self, stream_definition_id, length):

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)
        rdt['time'] = numpy.arange(self.i, self.i + length)

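        # Fill every QuantityType parameter with random values; self.i is
        # advanced below so time increases monotonically across packets.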
        for field in rdt:
            if isinstance(
                    rdt._pdict.get_context(field).param_type, QuantityType):
                rdt[field] = numpy.array(
                    [random.uniform(0.0, 75.0) for i in xrange(length)])

        g = rdt.to_granule()
        self.i += length

        return g

    def test_presf_L0_splitter(self):
        '''
        Test that packets are processed by the presf_L0_splitter transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='Presf L0 Splitter',
            description='For testing Presf L0 Splitter')
        process_definition.executable[
            'module'] = 'ion.processes.data.transforms.ctd.presf_L0_splitter'
        process_definition.executable['class'] = 'PresfL0Splitter'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)

        stream_def_id = self.pubsub.create_stream_definition(
            'pres_stream_def', parameter_dictionary_id=pdict_id)
        pres_stream_id, _ = self.pubsub.create_stream(
            'test_pressure',
            stream_definition_id=stream_def_id,
            exchange_point='science_data')

        config.process.publish_streams.absolute_pressure = pres_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(
            process_definition_id=ctd_transform_proc_def_id,
            configuration=config)

#        #---------------------------------------------------------------------------------------------
#        # Create subscribers that will receive the pressure granules from
#        # the ctd transform
#        #---------------------------------------------------------------------------------------------
#
#        ar_pres = gevent.event.AsyncResult()
#        def subscriber3(m,r,s):
#            ar_pres.set(m)
#        sub_pres = StandaloneStreamSubscriber('sub_pres', subscriber3)
#        self.addCleanup(sub_pres.stop)
#
#        sub_pres_id = self.pubsub.create_subscription('subscription_pres',
#            stream_ids=[pres_stream_id],
#            exchange_name='sub_pres')
#
#        self.pubsub.activate_subscription(sub_pres_id)
#
#        self.queue_cleanup.append(sub_pres.xn.queue)
#
#        sub_pres.start()
#
#        #------------------------------------------------------------------------------------------------------
#        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
#        #------------------------------------------------------------------------------------------------------
#
#        # Do all the routing stuff for the publishing
#        routing_key = 'stream_id.stream'
#        stream_route = StreamRoute(self.exchange_point, routing_key)
#
#        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
#        xp = self.container.ex_manager.create_xp(self.exchange_point)
#        xn.bind('stream_id.stream', xp)
#
#        pub = StandaloneStreamPublisher('stream_id', stream_route)
#
#        # Build a packet that can be published
#        publish_granule = self._get_new_ctd_packet(stream_definition_id=stream_def_id, length = 5)
#
#        # Publish the packet
#        pub.publish(publish_granule)
#
#        #------------------------------------------------------------------------------------------------------
#        # Make assertions about whether the ctd transform executed its algorithm and published the correct
#        # granules
#        #------------------------------------------------------------------------------------------------------
#
#        # Get the granule that is published by the ctd transform post processing
#        result = ar_pres.get(timeout=10)
#        self.assertTrue(isinstance(result, Granule))
#
#        rdt = RecordDictionaryTool.load_from_granule(result)
#        self.assertTrue(rdt.__contains__('pressure'))
#
#        self.check_pres_algorithm_execution(publish_granule, result)
#

    def test_presf_L1(self):
        '''
        Test that packets are processed by the presf_L1 transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='PresfL1Transform',
            description='For testing PresfL1Transform')
        process_definition.executable[
            'module'] = 'ion.processes.data.transforms.ctd.presf_L1'
        process_definition.executable['class'] = 'PresfL1Transform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(
            process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)

        stream_def_id = self.pubsub.create_stream_definition(
            'pres_stream_def', parameter_dictionary_id=pdict_id)
        pres_stream_id, _ = self.pubsub.create_stream(
            'test_pressure',
            stream_definition_id=stream_def_id,
            exchange_point='science_data')

        config.process.publish_streams.seafloor_pressure = pres_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(
            process_definition_id=ctd_transform_proc_def_id,
            configuration=config)
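
test_presf_L1 above stops after scheduling the transform; the commented-out block in the previous test sketches how the transform's output could be verified. Below is a minimal version of that check written as a standalone helper so it can be dropped into the test class. It is a sketch only: it assumes the same fixtures used above (self.pubsub, self.exchange_name, self.exchange_point, the _get_new_ctd_packet helper), and the helper name verify_presf_l1_output is illustrative.

def verify_presf_l1_output(test, stream_def_id, pres_stream_id):
    # Sketch only: subscribe to the seafloor_pressure stream, publish one CTD packet
    # the way the commented-out block above does, and inspect the transformed granule.
    ar = gevent.event.AsyncResult()
    sub = StandaloneStreamSubscriber('sub_presf_l1', lambda m, r, s: ar.set(m))
    test.addCleanup(sub.stop)

    sub_id = test.pubsub.create_subscription('subscription_presf_l1',
        stream_ids=[pres_stream_id],
        exchange_name='sub_presf_l1')
    test.pubsub.activate_subscription(sub_id)
    sub.start()

    # route and publish a packet the ctd transform can pick up
    xn = test.container.ex_manager.create_xn_queue(test.exchange_name)
    xp = test.container.ex_manager.create_xp(test.exchange_point)
    xn.bind('stream_id.stream', xp)
    pub = StandaloneStreamPublisher('stream_id', StreamRoute(test.exchange_point, 'stream_id.stream'))
    pub.publish(test._get_new_ctd_packet(stream_definition_id=stream_def_id, length=5))

    # wait for the transform's output and inspect it
    result = ar.get(timeout=10)
    test.assertTrue(isinstance(result, Granule))
    rdt = RecordDictionaryTool.load_from_granule(result)
    # which parameters the L1 transform fills in depends on its parameter dictionary;
    # the commented-out L0 check above looks for 'pressure'
    log.debug("PresfL1Transform output: %s", rdt.pretty_print())
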
class ProcessDispatcherServiceIntTest(IonIntegrationTestCase):

    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')

        self.rr_cli  = ResourceRegistryServiceClient()
        self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        self.process_definition = ProcessDefinition(name='test_process')
        self.process_definition.executable = {'module': 'ion.services.cei.test.test_process_dispatcher',
                                              'class': 'TestProcess'}
        self.process_definition_id = self.pd_cli.create_process_definition(self.process_definition)

        self.waiter = ProcessStateWaiter()

    def tearDown(self):
        self.waiter.stop()

    def test_create_schedule_cancel(self):
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid)
        self.assertEqual(pid, pid2)

        # verifies L4-CI-CEI-RQ141 and L4-CI-CEI-RQ142
        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, {})
        self.assertEqual(proc.process_state, ProcessStateEnum.RUNNING)

        # make sure process is readable directly from RR (mirrored)
        # verifies L4-CI-CEI-RQ63
        # verifies L4-CI-CEI-RQ64
        proc = self.rr_cli.read(pid)
        self.assertEqual(proc.process_id, pid)

        # now try communicating with the process to make sure it is really running
        test_client = TestClient()
        for i in range(5):
            self.assertEqual(i + 1, test_client.count(timeout=10))

        # verifies L4-CI-CEI-RQ147

        # kill the process and start it again
        self.pd_cli.cancel_process(pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
        self.waiter.stop()

        oldpid = pid

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid)
        self.assertEqual(pid, pid2)
        self.assertNotEqual(oldpid, pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        for i in range(5):
            self.assertEqual(i + 1, test_client.count(timeout=10))

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)

    def test_schedule_with_config(self):

        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        # verifies L4-CI-CEI-RQ66

        # feed in a string that the process will return -- verifies that
        # configuration actually makes it to the instantiated process
        test_response = uuid.uuid4().hex
        configuration = {"test_response" : test_response}

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration=configuration, process_id=pid)
        self.assertEqual(pid, pid2)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        test_client = TestClient()

        # verifies L4-CI-CEI-RQ139
        # assure that configuration block (which can contain inputs, outputs,
        # and arbitrary config) 1) makes it to the process and 2) is returned
        # in process queries

        self.assertEqual(test_client.query(), test_response)

        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, configuration)

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)

    def test_schedule_bad_config(self):

        process_schedule = ProcessSchedule()

        # a non-JSON-serializable IonObject
        o = ProcessTarget()

        with self.assertRaises(BadRequest) as ar:
            self.pd_cli.schedule_process(self.process_definition_id,
                process_schedule, configuration={"bad": o})
        self.assertTrue(ar.exception.message.startswith("bad configuration"))

    def test_create_invalid_definition(self):
        # create process definition missing module and class
        # verifies L4-CI-CEI-RQ137
        executable = dict(url="http://somewhere.com/something.py")
        definition = ProcessDefinition(name="test_process", executable=executable)
        with self.assertRaises(BadRequest) as ar:
            self.pd_cli.create_process_definition(definition)
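
The tests in the class above all follow the same Process Dispatcher lifecycle: create a definition, create a process id, schedule it, wait for RUNNING, then cancel and wait for TERMINATED. The sketch below condenses that loop using only the client calls exercised above; the helper name run_and_terminate is illustrative.

def run_and_terminate(pd_cli, process_definition):
    # Illustrative helper: create -> schedule -> await RUNNING -> cancel -> await TERMINATED,
    # the sequence used throughout ProcessDispatcherServiceIntTest above.
    definition_id = pd_cli.create_process_definition(process_definition)

    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS

    waiter = ProcessStateWaiter()
    pid = pd_cli.create_process(definition_id)
    waiter.start(pid)

    pd_cli.schedule_process(definition_id, schedule, configuration={}, process_id=pid)
    waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

    pd_cli.cancel_process(pid)
    waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
    waiter.stop()
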
class TestDataProductManagementServiceIntegration(IonIntegrationTestCase):

    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli = DataProductManagementServiceClient(node=self.container.node)
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli =  PubsubManagementServiceClient(node=self.container.node)
        self.ingestclient = IngestionManagementServiceClient(node=self.container.node)
        self.process_dispatcher   = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(datastore_name)
        self.stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM')

        self.process_definitions  = {}
        ingestion_worker_definition = ProcessDefinition(name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class' :'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space       = 'science_granule_ingestion'
        self.exchange_point       = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(self.process_definitions['ingestion_worker'],configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

        self.addCleanup(self.cleaning_up)

    def cleaning_up(self):
        for pid in self.pids:
            log.debug("number of pids to be terminated: %s", len(self.pids))
            try:
                self.process_dispatcher.cancel_process(pid)
                log.debug("Terminated the process: %s", pid)
            except:
                log.debug("could not terminate the process id: %s" % pid)
        IngestionManagementIntTest.clean_subscriptions()

        for xn in self.exchange_names:
            xni = self.container.ex_manager.create_xn_queue(xn)
            xni.delete()
        for xp in self.exchange_points:
            xpi = self.container.ex_manager.create_xp(xp)
            xpi.delete()

    def get_datastore(self, dataset_id):
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore


    def test_create_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        parameter_dictionary_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict')
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=parameter_dictionary_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------

        # Generic time-series data domain creation
        tdom, sdom = time_series_domain()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom.dump(), 
            spatial_domain = sdom.dump())

        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 200.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = 100.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 50.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = 100.0

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(data_product=dp_obj,
                                                  stream_definition_id=ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id)

        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)
        self.assertEquals(dp_obj.geospatial_point_center.lat, 150.0)
        log.debug('Created data product %s', dp_obj)
        #------------------------------------------------------------------------------------------------
        # test creating a new data product with  a stream definition
        #------------------------------------------------------------------------------------------------
        log.debug('Creating new data product with a stream definition')
        dp_obj = IonObject(RT.DataProduct,
            name='DP2',
            description='some new dp',
            temporal_domain = tdom.dump(),
            spatial_domain = sdom.dump())

        dp_id2 = self.dpsc_cli.create_data_product(dp_obj, ctd_stream_def_id)
        self.dpsc_cli.activate_data_product_persistence(dp_id2)
        log.debug('new dp_id = %s' % dp_id2)

        #------------------------------------------------------------------------------------------------
        #make sure data product is associated with stream def
        #------------------------------------------------------------------------------------------------
        streamdefs = []
        streams, _ = self.rrclient.find_objects(dp_id2, PRED.hasStream, RT.Stream, True)
        for s in streams:
            log.debug("Checking stream %s" % s)
            sdefs, _ = self.rrclient.find_objects(s, PRED.hasStreamDefinition, RT.StreamDefinition, True)
            for sd in sdefs:
                log.debug("Checking streamdef %s" % sd)
                streamdefs.append(sd)
        self.assertIn(ctd_stream_def_id, streamdefs)


        # test reading a non-existent data product
        log.debug('reading non-existent data product')

        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product('some_fake_id')

        # update a data product (tests read also)
        log.debug('Updating data product')
        # first get the existing dp object
        dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # now tweak the object
        dp_obj.description = 'the very first dp'
        dp_obj.geospatial_bounds.geospatial_latitude_limit_north = 300.0
        dp_obj.geospatial_bounds.geospatial_latitude_limit_south = 200.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_east = 150.0
        dp_obj.geospatial_bounds.geospatial_longitude_limit_west = 200.0
        # now write the dp back to the registry
        update_result = self.dpsc_cli.update_data_product(dp_obj)


        # now get the dp back to see if it was updated
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertEquals(dp_obj.description,'the very first dp')
        self.assertEquals(dp_obj.geospatial_point_center.lat, 250.0)
        log.debug('Updated data product %s', dp_obj)

        #test extension
        extended_product = self.dpsc_cli.get_data_product_extension(dp_id)
        self.assertEqual(dp_id, extended_product._id)
        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.product_download_size_estimated.status)
        self.assertEqual(0, extended_product.computed.product_download_size_estimated.value)

        self.assertEqual(ComputedValueAvailability.PROVIDED,
                         extended_product.computed.parameters.status)
        #log.debug("test_create_data_product: parameters %s" % extended_product.computed.parameters.value)

        # now 'delete' the data product
        log.debug("deleting data product: %s" % dp_id)
        self.dpsc_cli.delete_data_product(dp_id)
        self.dpsc_cli.force_delete_data_product(dp_id)

        # now try to get the deleted dp object
        with self.assertRaises(NotFound):
            dp_obj = self.dpsc_cli.read_data_product(dp_id)

        # Get the events corresponding to the data product
        ret = self.unsc.get_recent_events(resource_id=dp_id)
        events = ret.value

        for event in events:
            log.debug("event time: %s" % event.ts_created)

        self.assertTrue(len(events) > 0)

    def test_data_product_stream_def(self):
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)

        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)
        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        stream_def_id = self.dpsc_cli.get_data_product_stream_definition(dp_id)
        self.assertEquals(ctd_stream_def_id, stream_def_id)



    def test_activate_suspend_data_product(self):

        #------------------------------------------------------------------------------------------------
        # create a stream definition for the data from the ctd simulator
        #------------------------------------------------------------------------------------------------
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='Simulated CTD data', parameter_dictionary_id=pdict_id)
        log.debug("Created stream def id %s" % ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test creating a new data product w/o a stream definition
        #------------------------------------------------------------------------------------------------
        # Construct temporal and spatial Coordinate Reference System objects
        tdom, sdom = time_series_domain()

        sdom = sdom.dump()
        tdom = tdom.dump()



        dp_obj = IonObject(RT.DataProduct,
            name='DP1',
            description='some new dp',
            temporal_domain = tdom,
            spatial_domain = sdom)

        log.debug("Created an IonObject for a data product: %s" % dp_obj)

        #------------------------------------------------------------------------------------------------
        # Create a set of ParameterContext objects to define the parameters in the coverage, add each to the ParameterDictionary
        #------------------------------------------------------------------------------------------------

        dp_id = self.dpsc_cli.create_data_product(data_product= dp_obj,
            stream_definition_id=ctd_stream_def_id)

        #------------------------------------------------------------------------------------------------
        # test activate and suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.activate_data_product_persistence(dp_id)
        
        dp_obj = self.dpsc_cli.read_data_product(dp_id)
        self.assertIsNotNone(dp_obj)

        dataset_ids, _ = self.rrclient.find_objects(subject=dp_id, predicate=PRED.hasDataset, id_only=True)
        if not dataset_ids:
            raise NotFound("Data Product %s dataset  does not exist" % str(dp_id))
        self.get_datastore(dataset_ids[0])


        # Check that the streams associated with the data product are being persisted
        stream_ids, _ =  self.rrclient.find_objects(dp_id,PRED.hasStream,RT.Stream,True)
        for stream_id in stream_ids:
            self.assertTrue(self.ingestclient.is_persisted(stream_id))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_ids[0])
        self.assertIsInstance(replay_data, Granule)

        log.debug("The data retriever was able to replay the dataset that was attached to the data product "
                  "we wanted to be persisted. Therefore the data product was indeed persisted with "
                  "otherwise we could not have retrieved its dataset using the data retriever. Therefore "
                  "this demonstration shows that L4-CI-SA-RQ-267 is satisfied: 'Data product management shall persist data products'")

        data_product_object = self.rrclient.read(dp_id)
        self.assertEquals(data_product_object.name,'DP1')
        self.assertEquals(data_product_object.description,'some new dp')

        log.debug("Towards L4-CI-SA-RQ-308: 'Data product management shall persist data product metadata'. "
                  " Attributes in create for the data product obj, name= '%s', description='%s', match those of object from the "
                  "resource registry, name='%s', desc='%s'" % (dp_obj.name, dp_obj.description,data_product_object.name,
                                                           data_product_object.description))

        #------------------------------------------------------------------------------------------------
        # test suspend data product persistence
        #------------------------------------------------------------------------------------------------
        self.dpsc_cli.suspend_data_product_persistence(dp_id)

        self.dpsc_cli.force_delete_data_product(dp_id)
        # now try to get the deleted dp object

        with self.assertRaises(NotFound):
            dp_obj = self.rrclient.read(dp_id)
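
The data product tests above repeat one flow: resolve a parameter dictionary, build a stream definition, create the data product, and activate persistence. The sketch below condenses that flow using only the calls shown in the tests; the helper name make_persisted_data_product is illustrative.

def make_persisted_data_product(dpsc_cli, pubsub_cli, dataset_management, name='DP_sketch'):
    # Illustrative helper: stream definition -> data product -> persistence, as in the tests above.
    pdict_id = dataset_management.read_parameter_dictionary_by_name(
        'ctd_parsed_param_dict', id_only=True)
    stream_def_id = pubsub_cli.create_stream_definition(
        name='Simulated CTD data', parameter_dictionary_id=pdict_id)

    # generic time-series domains, dumped into the resource as in the tests above
    tdom, sdom = time_series_domain()
    dp_obj = IonObject(RT.DataProduct,
        name=name,
        description='sketch data product',
        temporal_domain=tdom.dump(),
        spatial_domain=sdom.dump())

    dp_id = dpsc_cli.create_data_product(data_product=dp_obj,
        stream_definition_id=stream_def_id)
    dpsc_cli.activate_data_product_persistence(dp_id)
    return dp_id
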
class ProcessDispatcherEEAgentIntTest(ProcessDispatcherServiceIntTest):
    """Run the basic int tests again, with a different environment
    """

    def setUp(self):
        self.dashi = None
        self._start_container()
        self.container_client = ContainerAgentClient(node=self.container.node,
            name=self.container.name)
        self.container = self.container_client._get_container_instance()

        app = dict(name="process_dispatcher", processapp=("process_dispatcher",
                               "ion.services.cei.process_dispatcher_service",
                               "ProcessDispatcherService"))
        self.container.start_app(app, config=pd_config)

        self.rr_cli = self.container.resource_registry

        self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        self.process_definition = ProcessDefinition(name='test_process')
        self.process_definition.executable = {'module': 'ion.services.cei.test.test_process_dispatcher',
                                              'class': 'TestProcess'}
        self.process_definition_id = self.pd_cli.create_process_definition(self.process_definition)

        self._eea_pids = []
        self._tmpdirs = []

        self.dashi = get_dashi(uuid.uuid4().hex,
            pd_config['processdispatcher']['dashi_uri'],
            pd_config['processdispatcher']['dashi_exchange'])

        #send a fake node_state message to PD's dashi binding.
        self.node1_id = uuid.uuid4().hex
        self._send_node_state("engine1", self.node1_id)
        self._start_eeagent(self.node1_id)

        self.waiter = ProcessStateWaiter()

    def _send_node_state(self, engine_id, node_id=None):
        node_id = node_id or uuid.uuid4().hex
        node_state = dict(node_id=node_id, state=InstanceState.RUNNING,
            domain_id=domain_id_from_engine(engine_id))
        self.dashi.fire(get_pd_dashi_name(), "node_state", args=node_state)

    def _start_eeagent(self, node_id):
        persistence_dir = tempfile.mkdtemp()
        self._tmpdirs.append(persistence_dir)
        agent_config = _get_eeagent_config(node_id, persistence_dir)
        pid = self.container_client.spawn_process(name="eeagent",
            module="ion.agents.cei.execution_engine_agent",
            cls="ExecutionEngineAgent", config=agent_config)
        log.info('Agent pid=%s.', str(pid))
        self._eea_pids.append(pid)

    def tearDown(self):
        for pid in self._eea_pids:
            self.container.terminate_process(pid)
        for d in self._tmpdirs:
            shutil.rmtree(d)

        self.waiter.stop()
        if self.dashi:
            self.dashi.cancel()


    def test_requested_ee(self):

        # request non-default engine

        process_target = ProcessTarget(execution_engine_id="engine2")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start()

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.WAITING)


        # request unknown engine, with NEVER queuing mode. The request
        # should be rejected.
        # verifies L4-CI-CEI-RQ52

        process_target = ProcessTarget(execution_engine_id="not-a-real-ee")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        rejected_pid = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=rejected_pid)

        self.waiter.await_state_event(rejected_pid, ProcessStateEnum.REJECTED)

        # now add a node and eeagent for engine2. original process should leave
        # queue and start running
        node2_id = uuid.uuid4().hex
        self._send_node_state("engine2", node2_id)
        self._start_eeagent(node2_id)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        # spawn another process. it should start immediately.

        process_target = ProcessTarget(execution_engine_id="engine2")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        pid2 = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid2)

        self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

        # one more with node exclusive

        process_target = ProcessTarget(execution_engine_id="engine2",
            node_exclusive="hats")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        pid3 = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid3)

        self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

        # kill the processes for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.TERMINATED)

    def test_node_exclusive(self):

        # the node_exclusive constraint is used to ensure multiple processes
        # of the same "kind" each get a VM exclusive of each other. Other
        # processes may run on these VMs, just not processes with the same
        # node_exclusive tag. Since we cannot directly query the contents
        # of each node in this test, we prove the capability by scheduling
        # processes one by one and checking their state.

        # verifies L4-CI-CEI-RQ121
        # verifies L4-CI-CEI-RQ57

        # first off, setUp() created a single node and eeagent.
        # We schedule two processes with the same "abc" node_exclusive
        # tag. Since there is only one node, the first process should run
        # and the second should be queued.

        process_target = ProcessTarget(execution_engine_id="engine1")
        process_target.node_exclusive = "abc"
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        pid1 = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start()

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid1)

        self.waiter.await_state_event(pid1, ProcessStateEnum.RUNNING)

        pid2 = self.pd_cli.create_process(self.process_definition_id)
        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.WAITING)

        # now demonstrate that the node itself is not full by launching
        # a third process without a node_exclusive tag -- it should start
        # immediately

        process_target.node_exclusive = None
        pid3 = self.pd_cli.create_process(self.process_definition_id)
        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

        # finally, add a second node to the engine. pid2 should be started
        # since there is an exclusive "abc" node free.
        node2_id = uuid.uuid4().hex
        self._send_node_state("engine1", node2_id)
        self._start_eeagent(node2_id)
        self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

        # kill the processes for good
        self.pd_cli.cancel_process(pid1)
        self.waiter.await_state_event(pid1, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.TERMINATED)

    def test_code_download(self):
        # create a process definition that has no URL; only module and class.
        process_definition_no_url = ProcessDefinition(name='test_process_nodownload')
        process_definition_no_url.executable = {'module': 'ion.my.test.process',
                'class': 'TestProcess'}
        process_definition_id_no_url = self.pd_cli.create_process_definition(process_definition_no_url)

        # create another that has a URL of the python file (this very file)
        # verifies L4-CI-CEI-RQ114
        url = "file://%s" % os.path.join(os.path.dirname(__file__), 'test_process_dispatcher.py')
        process_definition = ProcessDefinition(name='test_process_download')
        process_definition.executable = {'module': 'ion.my.test.process',
                'class': 'TestProcess', 'url': url}
        process_definition_id = self.pd_cli.create_process_definition(process_definition)

        process_target = ProcessTarget()
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        self.waiter.start()

        # Test a module with no download fails
        pid_no_url = self.pd_cli.create_process(process_definition_id_no_url)

        self.pd_cli.schedule_process(process_definition_id_no_url,
            process_schedule, process_id=pid_no_url)

        self.waiter.await_state_event(pid_no_url, ProcessStateEnum.FAILED)

        # Test a module with a URL runs
        pid = self.pd_cli.create_process(process_definition_id)

        self.pd_cli.schedule_process(process_definition_id,
            process_schedule, process_id=pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)
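
For reference, the ProcessTarget/ProcessSchedule combinations exercised by test_requested_ee and test_node_exclusive above are collected into one summary sketch below. Everything in it comes from the code above; it is not part of the original example.

# 1) Target a specific engine and queue until it exists (process starts WAITING, then RUNNING
#    once an eeagent for "engine2" appears).
queued_target = ProcessTarget(execution_engine_id="engine2")
queued_schedule = ProcessSchedule()
queued_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
queued_schedule.target = queued_target

# 2) Target an unknown engine with NEVER queueing: the request is REJECTED (L4-CI-CEI-RQ52).
rejected_target = ProcessTarget(execution_engine_id="not-a-real-ee")
rejected_schedule = ProcessSchedule()
rejected_schedule.queueing_mode = ProcessQueueingMode.NEVER
rejected_schedule.target = rejected_target

# 3) node_exclusive: at most one process with the same tag per node (L4-CI-CEI-RQ121, RQ57).
exclusive_target = ProcessTarget(execution_engine_id="engine1")
exclusive_target.node_exclusive = "abc"
exclusive_schedule = ProcessSchedule()
exclusive_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
exclusive_schedule.target = exclusive_target
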
Example #52
0
class TestActivateRSNVel3DInstrument(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        super(TestActivateRSNVel3DInstrument, self).setUp()
        config = DotDict()

        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml', config)

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(
            node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(
            node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(
            node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(
            node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(
            node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(
            node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(
            node=self.container.node)
        self.dataretrieverclient = DataRetrieverServiceClient(
            node=self.container.node)
        self.dataset_management = DatasetManagementServiceClient()

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name + '_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(
            process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id,
            configuration=configuration)
        return pid

    @attr('LOCOINT')
    @unittest.skip('under construction')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    @patch.dict(CFG, {'endpoint': {'receive': {'timeout': 180}}})
    def test_activate_rsn_vel3d(self):

        log.info(
            "--------------------------------------------------------------------------------------------------------"
        )
        # load_parameter_scenarios
        self.container.spawn_process(
            "Loader",
            "ion.processes.bootstrap.ion_loader",
            "IONLoader",
            config=dict(
                op="load",
                scenario="BETA",
                path="master",
                categories=
                "ParameterFunctions,ParameterDefs,ParameterDictionary,StreamDefinition",
                clearcols="owner_id,org_ids",
                assets="res/preload/r2_ioc/ooi_assets",
                parseooi="True",
            ))

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(RT.InstrumentModel,
                                  name='Vel3DMModel',
                                  description="Vel3DMModel")
        instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        log.debug('test_activate_rsn_vel3d new InstrumentModel id = %s ',
                  instModel_id)

        raw_config = StreamConfiguration(stream_name='raw',
                                         parameter_dictionary_name='raw')
        vel3d_b_sample = StreamConfiguration(
            stream_name='vel3d_b_sample',
            parameter_dictionary_name='vel3d_b_sample')
        vel3d_b_engineering = StreamConfiguration(
            stream_name='vel3d_b_engineering',
            parameter_dictionary_name='vel3d_b_engineering')

        RSN_VEL3D_01 = {
            'DEV_ADDR': "10.180.80.6",
            'DEV_PORT': 2101,
            'DATA_PORT': 1026,
            'CMD_PORT': 1025,
            'PA_BINARY': "port_agent"
        }

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name='Vel3DAgent',
            description="Vel3DAgent",
            driver_uri=
            "http://sddevrepo.oceanobservatories.org/releases/nobska_mavs4_ooicore-0.0.7-py2.7.egg",
            stream_configurations=[
                raw_config, vel3d_b_sample, vel3d_b_engineering
            ])
        instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        log.debug('test_activate_rsn_vel3d new InstrumentAgent id = %s',
                  instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(
            instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            'test_activate_rsn_vel3d: Create instrument resource to represent the Vel3D '
        )
        instDevice_obj = IonObject(RT.InstrumentDevice,
                                   name='Vel3DDevice',
                                   description="Vel3DDevice",
                                   serial_number="12345")
        instDevice_id = self.imsclient.create_instrument_device(
            instrument_device=instDevice_obj)
        self.imsclient.assign_instrument_model_to_instrument_device(
            instModel_id, instDevice_id)
        log.debug("test_activate_rsn_vel3d: new InstrumentDevice id = %s  ",
                  instDevice_id)

        port_agent_config = {
            'device_addr': '10.180.80.6',
            'device_port': 2101,
            'process_type': PortAgentProcessType.UNIX,
            'binary_path': "port_agent",
            'port_agent_addr': 'localhost',
            'command_port': 1025,
            'data_port': 1026,
            'log_level': 5,
            'type': PortAgentType.ETHERNET
        }

        instAgentInstance_obj = IonObject(RT.InstrumentAgentInstance,
                                          name='Vel3DAgentInstance',
                                          description="Vel3DAgentInstance",
                                          port_agent_config=port_agent_config,
                                          alerts=[])

        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id)

        parsed_sample_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'vel3d_b_sample', id_only=True)
        parsed_sample_stream_def_id = self.pubsubcli.create_stream_definition(
            name='vel3d_b_sample',
            parameter_dictionary_id=parsed_sample_pdict_id)

        parsed_eng_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'vel3d_b_engineering', id_only=True)
        parsed_eng_stream_def_id = self.pubsubcli.create_stream_definition(
            name='vel3d_b_engineering',
            parameter_dictionary_id=parsed_eng_pdict_id)

        raw_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'raw', id_only=True)
        raw_stream_def_id = self.pubsubcli.create_stream_definition(
            name='raw', parameter_dictionary_id=raw_pdict_id)

        #-------------------------------
        # Create Raw and Parsed Data Products for the device
        #-------------------------------

        dp_obj = IonObject(RT.DataProduct,
                           name='vel3d_b_sample',
                           description='vel3d_b_sample')

        sample_data_product_id = self.dpclient.create_data_product(
            data_product=dp_obj,
            stream_definition_id=parsed_sample_stream_def_id)
        log.debug('new dp_id = %s', sample_data_product_id)
        self.dpclient.activate_data_product_persistence(
            data_product_id=sample_data_product_id)

        self.damsclient.assign_data_product(
            input_resource_id=instDevice_id,
            data_product_id=sample_data_product_id)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(sample_data_product_id,
                                                   PRED.hasStream, None, True)
        log.debug('sample_data_product streams1 = %s', stream_ids)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(sample_data_product_id,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('Data set for sample_data_product = %s', dataset_ids[0])
        self.parsed_dataset = dataset_ids[0]

        pid = self.create_logger('vel3d_b_sample', stream_ids[0])
        self.loggerpids.append(pid)

        dp_obj = IonObject(RT.DataProduct,
                           name='vel3d_b_engineering',
                           description='vel3d_b_engineering')

        eng_data_product_id = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=parsed_eng_stream_def_id)
        log.debug('new dp_id = %s', eng_data_product_id)
        self.dpclient.activate_data_product_persistence(
            data_product_id=eng_data_product_id)

        self.damsclient.assign_data_product(
            input_resource_id=instDevice_id,
            data_product_id=eng_data_product_id)

        dp_obj = IonObject(RT.DataProduct,
                           name='the raw data',
                           description='raw stream test')

        data_product_id2 = self.dpclient.create_data_product(
            data_product=dp_obj, stream_definition_id=raw_stream_def_id)
        log.debug('new dp_id = %s', data_product_id2)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id,
                                            data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(
            data_product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                   PRED.hasStream, None, True)
        log.debug('test_activate_rsn_vel3d Data product streams2 = %s',
                  str(stream_ids))

        # Retrieve the id of the OUTPUT stream from the out Data Product
        dataset_ids, _ = self.rrclient.find_objects(data_product_id2,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        log.debug('test_activate_rsn_vel3d Data set for data_product_id2 = %s',
                  dataset_ids[0])
        self.raw_dataset = dataset_ids[0]

        def start_instrument_agent():
            self.imsclient.start_instrument_agent_instance(
                instrument_agent_instance_id=instAgentInstance_id)

        gevent.joinall([gevent.spawn(start_instrument_agent)])

        #cleanup
        self.addCleanup(self.imsclient.stop_instrument_agent_instance,
                        instrument_agent_instance_id=instAgentInstance_id)

        #wait for start
        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(
            instAgentInstance_id)
        gate = AgentProcessStateGate(self.processdispatchclient.read_process,
                                     instDevice_id, ProcessStateEnum.RUNNING)
        self.assertTrue(
            gate.await(30),
            "The instrument agent instance (%s) did not spawn in 30 seconds" %
            gate.process_id)

        #log.trace('Instrument agent instance obj: = %s' , str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient(instDevice_id,
                                              to_name=gate.process_id,
                                              process=FakeProcess())

        def check_state(label, desired_state):
            actual_state = self._ia_client.get_agent_state()
            log.debug("%s instrument agent is in state '%s'", label,
                      actual_state)
            self.assertEqual(desired_state, actual_state)

        log.debug("test_activate_rsn_vel3d: got ia client %s",
                  str(self._ia_client))

        check_state("just-spawned", ResourceAgentState.UNINITIALIZED)

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activate_rsn_vel3d: initialize %s", str(retval))
        check_state("initialized", ResourceAgentState.INACTIVE)

        log.debug("test_activate_rsn_vel3d Sending go_active command ")
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activate_rsn_vel3d: return value from go_active %s",
                  str(reply))
        check_state("activated", ResourceAgentState.IDLE)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("current state after sending go_active command %s",
                  str(state))
        #
        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activate_rsn_vel3d: run %s", str(reply))
        check_state("commanded", ResourceAgentState.COMMAND)

        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("current state after sending run command %s", str(state))

        #        cmd = AgentCommand(command=ProtocolEvent.START_AUTOSAMPLE)
        #        reply = self._ia_client.execute_agent(cmd)
        #        log.debug("test_activate_rsn_vel3d: run %s" , str(reply))
        #        state = self._ia_client.get_agent_state()
        #        self.assertEqual(ResourceAgentState.COMMAND, state)
        #
        #        gevent.sleep(5)
        #
        #        cmd = AgentCommand(command=ProtocolEvent.STOP_AUTOSAMPLE)
        #        reply = self._ia_client.execute_agent(cmd)
        #        log.debug("test_activate_rsn_vel3d: run %s" , str(reply))
        #        state = self._ia_client.get_agent_state()
        #        self.assertEqual(ResourceAgentState.COMMAND, state)
        #
        #        cmd = AgentCommand(command=ResourceAgentEvent.GET_RESOURCE_STATE)
        #        retval = self._ia_client.execute_agent(cmd)
        #        state = retval.result
        #        log.debug("current state after sending STOP_AUTOSAMPLE command %s" , str(state))

        #
        #        cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
        #        retval = self._ia_client.execute_agent(cmd)
        #        state = self._ia_client.get_agent_state()
        #        self.assertEqual(ResourceAgentState.STOPPED, state)
        #
        #        cmd = AgentCommand(command=ResourceAgentEvent.RESUME)
        #        retval = self._ia_client.execute_agent(cmd)
        #        state = self._ia_client.get_agent_state()
        #        self.assertEqual(ResourceAgentState.COMMAND, state)
        #
        #        cmd = AgentCommand(command=ResourceAgentEvent.CLEAR)
        #        retval = self._ia_client.execute_agent(cmd)
        #        state = self._ia_client.get_agent_state()
        #        self.assertEqual(ResourceAgentState.IDLE, state)
        #
        #        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        #        retval = self._ia_client.execute_agent(cmd)
        #        state = self._ia_client.get_agent_state()
        #        self.assertEqual(ResourceAgentState.COMMAND, state)

        log.debug("test_activate_rsn_vel3d: calling reset ")
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activate_rsn_vel3d: return from reset %s", str(reply))

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to the data retriever
        #--------------------------------------------------------------------------------

        replay_data_raw = self.dataretrieverclient.retrieve(self.raw_dataset)
        self.assertIsInstance(replay_data_raw, Granule)
        rdt_raw = RecordDictionaryTool.load_from_granule(replay_data_raw)
        log.debug("RDT raw: %s", str(rdt_raw.pretty_print()))

        self.assertIn('raw', rdt_raw)
        raw_vals = rdt_raw['raw']

        #--------------------------------------------------------------------------------
        # Deactivate loggers
        #--------------------------------------------------------------------------------

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

        self.dpclient.delete_data_product(sample_data_product_id)
        self.dpclient.delete_data_product(eng_data_product_id)
        self.dpclient.delete_data_product(data_product_id2)
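
test_activate_rsn_vel3d walks the instrument agent through its standard state sequence with execute_agent. The sketch below condenses just that sequence, assuming the same ResourceAgentEvent/ResourceAgentState constants and an already constructed resource agent client; the helper name bring_agent_to_command is illustrative.

def bring_agent_to_command(ia_client):
    # Sketch only: UNINITIALIZED -> INACTIVE -> IDLE -> COMMAND, as exercised above.
    assert ia_client.get_agent_state() == ResourceAgentState.UNINITIALIZED

    ia_client.execute_agent(AgentCommand(command=ResourceAgentEvent.INITIALIZE))
    assert ia_client.get_agent_state() == ResourceAgentState.INACTIVE

    ia_client.execute_agent(AgentCommand(command=ResourceAgentEvent.GO_ACTIVE))
    assert ia_client.get_agent_state() == ResourceAgentState.IDLE

    ia_client.execute_agent(AgentCommand(command=ResourceAgentEvent.RUN))
    assert ia_client.get_agent_state() == ResourceAgentState.COMMAND

    # RESET is used above to shut the agent back down at the end of the test
    ia_client.execute_agent(AgentCommand(command=ResourceAgentEvent.RESET))
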
class UserNotificationService(BaseUserNotificationService):
    """
    A service that provides users with an API for CRUD methods for notifications.
    """
    def __init__(self, *args, **kwargs):
        self._schedule_ids = []
        BaseUserNotificationService.__init__(self, *args, **kwargs)

    def on_start(self):
        self.ION_NOTIFICATION_EMAIL_ADDRESS = CFG.get_safe('server.smtp.sender')

        # Create an event processor
        self.event_processor = EmailEventProcessor()

        # Dictionaries that maintain information about users and their subscribed notifications
        self.user_info = {}

        # The reverse_user_info is calculated from the user_info dictionary
        self.reverse_user_info = {}

        # Get the clients
        # @TODO: Why are these not dependencies in the service YML???
        self.discovery = DiscoveryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()

        self.event_publisher = EventPublisher()
        self.datastore = self.container.datastore_manager.get_datastore('events')

        self.start_time = get_ion_ts()

        #------------------------------------------------------------------------------------
        # Create an event subscriber for Reload User Info events
        #------------------------------------------------------------------------------------

        def reload_user_info(event_msg, headers):
            """
            Callback method for the subscriber to ReloadUserInfoEvent
            """

            notification_id =  event_msg.notification_id
            log.debug("(UNS instance received a ReloadNotificationEvent. The relevant notification_id is %s" % notification_id)

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info =  calculate_reverse_user_info(self.user_info)

            log.debug("(UNS instance) After a reload, the user_info: %s" % self.user_info)
            log.debug("(UNS instance) The recalculated reverse_user_info: %s" % self.reverse_user_info)

        # the subscriber for the ReloadUserInfoEvent
        self.reload_user_info_subscriber = EventSubscriber(
            event_type=OT.ReloadUserInfoEvent,
            origin='UserNotificationService',
            callback=reload_user_info
        )
        self.add_endpoint(self.reload_user_info_subscriber)

    def on_quit(self):
        """
        Handles stop/terminate.

        Cleans up subscribers spawned here, terminates any scheduled tasks to the scheduler.
        """
        for sid in self._schedule_ids:
            try:
                self.clients.scheduler.cancel_timer(sid)
            except IonException as ex:
                log.info("Ignoring exception while cancelling schedule id (%s): %s: %s", sid, ex.__class__.__name__, ex)

        super(UserNotificationService, self).on_quit()

    def set_process_batch_key(self, process_batch_key = ''):
        """
        This method allows an operator to set the process_batch_key, a string.
        Once this method is used by the operator, the UNS will start listening for timer events
        published by the scheduler with origin = process_batch_key.

        @param process_batch_key str
        """
        def process(event_msg, headers):
            self.end_time = get_ion_ts()

            # run the process_batch() method
            self.process_batch(start_time=self.start_time, end_time=self.end_time)
            self.start_time = self.end_time

        # the subscriber for the batch processing
        # To trigger the batch notification, have the scheduler create a timer with event_origin = process_batch_key
        self.batch_processing_subscriber = EventSubscriber(
            event_type=OT.TimerEvent,
            origin=process_batch_key,
            callback=process
        )
        self.add_endpoint(self.batch_processing_subscriber)
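
    # Illustrative sketch (not part of the original service): set_process_batch_key above wires a
    # TimerEvent subscriber whose origin is the operator-supplied key. A worker-side listener for
    # the ReloadUserInfoEvent published by create_notification below would follow the same
    # EventSubscriber pattern; the method and callback names here are illustrative only.
    def _sketch_reload_listener(self):
        def on_reload(event_msg, headers):
            # a notification worker would refresh its cached user_info here,
            # keyed by the notification that changed
            log.debug("Saw ReloadUserInfoEvent for notification_id: %s", event_msg.notification_id)

        subscriber = EventSubscriber(event_type=OT.ReloadUserInfoEvent,
                                     origin='UserNotificationService',
                                     callback=on_reload)
        self.add_endpoint(subscriber)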

    def create_notification(self, notification=None, user_id=''):
        """
        Persists the provided NotificationRequest object for the specified Origin id.
        Associate the Notification resource with the user_id string.
        returned id is the internal id by which NotificationRequest will be identified
        in the data store.

        @param notification        NotificationRequest
        @param user_id             str
        @retval notification_id    str
        @throws BadRequest    if object passed has _id or _rev attribute
        """
        if not user_id:
            raise BadRequest("User id not provided.")

        log.debug("Create notification called for user_id: %s, and notification: %s", user_id, notification)

        #---------------------------------------------------------------------------------------------------
        # Persist Notification object as a resource if it has already not been persisted
        #---------------------------------------------------------------------------------------------------

        notification_id = None
        # if the notification has already been registered, simply use the old id
        existing_user_notifications = self.get_user_notifications(user_info_id=user_id)
        if existing_user_notifications:
            notification_id = self._notification_in_notifications(notification, existing_user_notifications)

        # since the notification has not been registered yet, register it and get the id

        temporal_bounds = TemporalBounds()
        temporal_bounds.start_datetime = get_ion_ts()
        temporal_bounds.end_datetime = ''

        if not notification_id:
            notification.temporal_bounds = temporal_bounds
            notification_id, rev = self.clients.resource_registry.create(notification)
        else:
            log.debug("Notification object has already been created in resource registry before. No new id to be generated. notification_id: %s", notification_id)
            # Read the old notification already in the resource registry
            notification = self.clients.resource_registry.read(notification_id)

            # Update the temporal bounds of the old notification resource
            notification.temporal_bounds = temporal_bounds

            # Update the notification in the resource registry
            self.clients.resource_registry.update(notification)

            log.debug("The temporal bounds for this resubscribed notification object with id: %s, is: %s", notification._id,notification.temporal_bounds)


        # Link the user and the notification with a hasNotification association
        assocs= self.clients.resource_registry.find_associations(subject=user_id,
                                                                    predicate=PRED.hasNotification,
                                                                    object=notification_id,
                                                                    id_only=True)


        if assocs:
            log.debug("Got an already existing association: %s, between user_id: %s, and notification_id: %s", assocs,user_id,notification_id)
            return notification_id
        else:
            log.debug("Creating association between user_id: %s, and notification_id: %s", user_id, notification_id )
            self.clients.resource_registry.create_association(user_id, PRED.hasNotification, notification_id)

        # read the registered notification request object because this has an _id and is more useful
        notification = self.clients.resource_registry.read(notification_id)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.debug("(create notification) Publishing ReloadUserInfoEvent for notification_id: %s", notification_id)

        self.event_publisher.publish_event(event_type=OT.ReloadUserInfoEvent,
            origin="UserNotificationService",
            description="A notification has been created.",
            notification_id=notification_id)

        return notification_id
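
    # Illustrative sketch (not part of this service): a client would typically build a
    # NotificationRequest and pass it to create_notification(). The field names mirror
    # those compared in _notification_in_notifications(); the client class, resource id
    # and event type values are assumptions for the example.
    #
    #   from interface.services.coi.iuser_notification_service import UserNotificationServiceClient
    #   uns_cli = UserNotificationServiceClient()
    #   notification = NotificationRequest(name='ctd_events',
    #                                      origin=instrument_device_id,
    #                                      origin_type='InstrumentDevice',
    #                                      event_type='ResourceLifecycleEvent')
    #   notification_id = uns_cli.create_notification(notification=notification,
    #                                                 user_id=user_info_id)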

    def update_notification(self, notification=None, user_id = ''):
        """Updates the provided NotificationRequest object.  Throws NotFound exception if
        an existing version of NotificationRequest is not found.  Throws Conflict if
        the provided NotificationRequest object is not based on the latest persisted
        version of the object.

        @param notification     NotificationRequest
        @throws BadRequest      if object does not have _id or _rev attribute
        @throws NotFound        object with specified id does not exist
        @throws Conflict        object not based on latest persisted object version
        """

        raise NotImplementedError("update_notification is not implemented; the update semantics still need to be worked out")

#        #-------------------------------------------------------------------------------------------------------------------
#        # Get the old notification
#        #-------------------------------------------------------------------------------------------------------------------
#
#        old_notification = self.clients.resource_registry.read(notification._id)
#
#        #-------------------------------------------------------------------------------------------------------------------
#        # Update the notification in the notifications dict
#        #-------------------------------------------------------------------------------------------------------------------
#
#
#        self._update_notification_in_notifications_dict(new_notification=notification,
#                                                        notifications=self.notifications)
#        #-------------------------------------------------------------------------------------------------------------------
#        # Update the notification in the registry
#        #-------------------------------------------------------------------------------------------------------------------
#        '''
#        Since one user should not be able to update the notification request resource without the knowledge of other users
#        who have subscribed to the same notification request, we do not update the resource in the resource registry
#        '''
#
##        self.clients.resource_registry.update(notification)
#
#        #-------------------------------------------------------------------------------------------------------------------
#        # reading up the notification object to make sure we have the newly registered notification request object
#        #-------------------------------------------------------------------------------------------------------------------
#
#        notification_id = notification._id
#        notification = self.clients.resource_registry.read(notification_id)
#
#        #------------------------------------------------------------------------------------
#        # Update the UserInfo object
#        #------------------------------------------------------------------------------------
#
#        user = self.update_user_info_object(user_id, notification)
#
#        #-------------------------------------------------------------------------------------------------------------------
#        # Generate an event that can be picked by notification workers so that they can update their user_info dictionary
#        #-------------------------------------------------------------------------------------------------------------------
#        log.info("(update notification) Publishing ReloadUserInfoEvent for updated notification")
#
#        self.event_publisher.publish_event( event_type= "ReloadUserInfoEvent",
#            origin="UserNotificationService",
#            description= "A notification has been updated.",
#            notification_id = notification_id
#        )

    def read_notification(self, notification_id=''):
        """Returns the NotificationRequest object for the specified notification id.
        Throws exception if id does not match any persisted NotificationRequest
        objects.

        @param notification_id    str
        @retval notification    NotificationRequest
        @throws NotFound    object with specified id does not exist
        """
        notification = self.clients.resource_registry.read(notification_id)

        return notification

    def delete_notification(self, notification_id=''):
        """For now, permanently deletes NotificationRequest object with the specified
        id. Throws exception if id does not match any persisted NotificationRequest.

        @param notification_id    str
        @throws NotFound    object with specified id does not exist
        """

        #-------------------------------------------------------------------------------------------------------------------
        # Read the notification so its temporal bounds can be closed
        #-------------------------------------------------------------------------------------------------------------------
        notification_request = self.clients.resource_registry.read(notification_id)

        #-------------------------------------------------------------------------------------------------------------------
        # Update the resource registry
        #-------------------------------------------------------------------------------------------------------------------

        notification_request.temporal_bounds.end_datetime = get_ion_ts()

        self.clients.resource_registry.update(notification_request)

        #-------------------------------------------------------------------------------------------------------------------
        # Find users who are interested in the notification and update the notification in the list maintained by the UserInfo object
        #-------------------------------------------------------------------------------------------------------------------
#        user_ids, _ = self.clients.resource_registry.find_subjects(RT.UserInfo, PRED.hasNotification, notification_id, True)
#
#        for user_id in user_ids:
#            self.update_user_info_object(user_id, notification_request)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.info("(delete notification) Publishing ReloadUserInfoEvent for notification_id: %s", notification_id)

        self.event_publisher.publish_event(event_type=OT.ReloadUserInfoEvent,
            origin="UserNotificationService",
            description="A notification has been deleted.",
            notification_id=notification_id)

#    def delete_notification_from_user_info(self, notification_id):
#        """
#        Helper method to delete the notification from the user_info dictionary
#
#        @param notification_id str
#        """
#
#        user_ids, assocs = self.clients.resource_registry.find_subjects(object=notification_id, predicate=PRED.hasNotification, id_only=True)
#
#        for assoc in assocs:
#            self.clients.resource_registry.delete_association(assoc)
#
#        for user_id in user_ids:
#
#            value = self.user_info[user_id]
#
#            for notif in value['notifications']:
#                if notification_id == notif._id:
#                    # remove the notification
#                    value['notifications'].remove(notif)
#
#        self.reverse_user_info = calculate_reverse_user_info(self.user_info)

    def find_events(self, origin='', type='', min_datetime=0, max_datetime=0, limit=-1, descending=False):
        """
        This method leverages CouchDB views and simple filters. It does not use ElasticSearch.

        Returns a list of events that match the specified search criteria. Throws a NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param type           str
        @param min_datetime   int  milliseconds
        @param max_datetime   int  milliseconds
        @param limit          int         (limits the number of results; 0 means unlimited)
        @param descending     boolean     (if True, most recent events are returned first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        """
        event_tuples = []

        try:
            event_tuples = self.container.event_repository.find_events(event_type=type, origin=origin, start_ts=min_datetime, end_ts=max_datetime, limit=limit, descending=descending)
        except Exception as exc:
            log.warning("The UNS find_events operation for event origin = %s and type = %s failed. Error message = %s", origin, type, exc.message)

        events = [item[2] for item in event_tuples]
        log.debug("(find_events) UNS found the following relevant events: %s", events)

        return events
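
    # Illustrative sketch (not part of this service): querying recent events for a
    # resource through the UNS client. The client variable, origin value and event
    # type are assumptions for the example.
    #
    #   events = uns_cli.find_events(origin=instrument_device_id,
    #                                type='ResourceLifecycleEvent',
    #                                min_datetime=0,
    #                                max_datetime=get_ion_ts(),
    #                                limit=10,
    #                                descending=True)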


    # TODO: Uses ElasticSearch. Later extend this to broader search criteria.
    def find_events_extended(self, origin='', type='', min_time=0, max_time=0, limit=-1, descending=False):
        """Uses ElasticSearch. Returns a list of events that match the specified search criteria. Throws a NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param type           str
        @param min_time       int  milliseconds
        @param max_time       int  milliseconds
        @param limit          int         (limits the number of results; 0 means unlimited)
        @param descending     boolean     (if True, most recent events are returned first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        """

        query = []

        if min_time and max_time:
            query.append( "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (min_time, max_time))

        if origin:
            query.append( 'search "origin" is "%s" from "events_index"' % origin)

        if type:
            query.append( 'search "type_" is "%s" from "events_index"' % type)

        search_string = ' and '.join(query)


        # get the list of ids corresponding to the events
        ret_vals = self.discovery.parse(search_string)
        if len(query) > 1:
            events = self.datastore.read_mult(ret_vals)
        else:
            events = [i['_source'] for i in ret_vals]

        log.debug("(find_events_extended) Discovery search returned the following event ids: %s", ret_vals)


        log.debug("(find_events_extended) UNS found the following relevant events: %s", events)

        if limit > 0:
            return events[:limit]

        #todo implement time ordering: ascending or descending

        return events

    def publish_event_object(self, event=None):
        """
        This service operation publishes the given Event object.

        @param event    !Event
        @retval event   !Event
        """
        event = self.event_publisher.publish_event_object(event_object=event)
        log.info("The publish_event_object(event) method of UNS was used to publish the event: %s", event )

        return event

    def publish_event(self, event_type='', origin='', origin_type='', sub_type='', description='', event_attrs=None):
        """
        This service operation assembles and publishes a new Event object of the given event_type
        (via the pyon event publisher), with optional additional attributes taken from the
        event_attrs dict of arbitrary attributes.

        @param event_type   str
        @param origin       str
        @param origin_type  str
        @param sub_type     str
        @param description  str
        @param event_attrs  dict
        @retval event       !Event
        """
        event_attrs = event_attrs or {}

        event = self.event_publisher.publish_event(
            event_type=event_type,
            origin=origin,
            origin_type=origin_type,
            sub_type=sub_type,
            description=description,
            **event_attrs)
        log.info("The publish_event() method of UNS was used to publish an event: %s", event)

        return event
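
    # Illustrative sketch (not part of this service): publishing a custom event through
    # the UNS client. The event type, origin and attribute values are assumptions for
    # the example.
    #
    #   uns_cli.publish_event(event_type='DeviceStatusEvent',
    #                         origin=instrument_device_id,
    #                         origin_type='InstrumentDevice',
    #                         sub_type='',
    #                         description='Battery level low',
    #                         event_attrs={'state': 'WARNING'})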

    def get_recent_events(self, resource_id='', limit=100):
        """
        Get recent events for use in extended resource computed attribute
        @param resource_id str
        @param limit int
        @retval ComputedEventListValue whose value is a list of Event objects, with the corresponding computed attributes in computed_list
        """

        now = get_ion_ts()
        events = self.find_events(origin=resource_id, limit=limit, max_datetime=now, descending=True)

        ret = IonObject(OT.ComputedEventListValue)
        if events:
            ret.value = events
            ret.computed_list = [get_event_computed_attributes(event) for event in events]
            ret.status = ComputedValueAvailability.PROVIDED
        else:
            ret.status = ComputedValueAvailability.NOTAVAILABLE

        return ret


    def get_user_notifications(self, user_info_id=''):
        """
        Get the notification request objects that are subscribed to by the user

        @param user_info_id str

        @retval notifications list of NotificationRequest objects
        """
        notifications = []
        user_notif_req_objs, _ = self.clients.resource_registry.find_objects(
            subject=user_info_id, predicate=PRED.hasNotification, object_type=RT.NotificationRequest, id_only=False)

        log.debug("Got %s notifications, for the user: %s", len(user_notif_req_objs), user_info_id)

        for notif in user_notif_req_objs:
            # do not include notifications that have expired
            if notif.temporal_bounds.end_datetime == '':
                notifications.append(notif)

        return notifications


    def create_worker(self, number_of_workers=1):
        """
        Creates notification workers

        @param number_of_workers int
        @retval pids list

        """

        pids = []

        for n in xrange(number_of_workers):

            process_definition = ProcessDefinition( name='notification_worker_%s' % n)

            process_definition.executable = {
                'module': 'ion.processes.data.transforms.notification_worker',
                'class':'NotificationWorker'
            }
            process_definition_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

            # ------------------------------------------------------------------------------------
            # Process Spawning
            # ------------------------------------------------------------------------------------

            pid2 = self.process_dispatcher.create_process(process_definition_id)

            # @todo move these values into configuration
            configuration = {}
            configuration['process'] = {
                'name': 'notification_worker_%s' % n,
                'type': 'simple',
                'queue_name': 'notification_worker_queue'
            }

            pid = self.process_dispatcher.schedule_process(
                process_definition_id,
                configuration=configuration,
                process_id=pid2
            )

            pids.append(pid)

        return pids
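
    # Illustrative sketch (not part of this service): spawning several workers that all
    # consume from 'notification_worker_queue', so notification handling is load-balanced
    # across them. The client variable name is an assumption for the example.
    #
    #   worker_pids = uns_cli.create_worker(number_of_workers=3)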

    def process_batch(self, start_time = '', end_time = ''):
        """
        This method is launched when a process_batch timer event is received. The user info dictionary maintained
        by the User Notification Service is used to query the event repository for all events of interest to a
        particular user that occurred in the provided time interval, and an email containing a digest of those
        events is then sent to the user.

        @param start_time int milliseconds
        @param end_time int milliseconds
        """
        self.smtp_client = setting_up_smtp_client()

        if end_time <= start_time:
            return

        for user_id, value in self.user_info.iteritems():

            notifications = self.get_user_notifications(user_info_id=user_id)
            notifications_disabled = value['notifications_disabled']
            notifications_daily_digest = value['notifications_daily_digest']


            # Skip users who have disabled notification delivery or who have not opted in to
            # the daily digest (the default when no preference has been set)
            if notifications_disabled or not notifications_daily_digest:
                continue

            events_for_message = []

            search_time = "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (start_time, end_time)

            for notification in notifications:
                # If the notification request has expired, then do not use it in the search
                if notification.temporal_bounds.end_datetime:
                    continue

                if CFG_ELASTIC_SEARCH:
                    if notification.origin:
                        search_origin = 'search "origin" is "%s" from "events_index"' % notification.origin
                    else:
                        search_origin = 'search "origin" is "*" from "events_index"'

                    if notification.origin_type:
                        search_origin_type= 'search "origin_type" is "%s" from "events_index"' % notification.origin_type
                    else:
                        search_origin_type= 'search "origin_type" is "*" from "events_index"'

                    if notification.event_type:
                        search_event_type = 'search "type_" is "%s" from "events_index"' % notification.event_type
                    else:
                        search_event_type = 'search "type_" is "*" from "events_index"'

                    search_string = search_time + ' and ' + search_origin + ' and ' + search_origin_type + ' and ' + search_event_type

                    # get the list of ids corresponding to the events
                    log.debug('process_batch  search_string: %s', search_string)
                    ret_vals = self.discovery.parse(search_string)

                    events_for_message.extend(self.datastore.read_mult(ret_vals))

                else:
                    # ElasticSearch is not configured, so query the event repository directly
                    event_tuples = self.container.event_repository.find_events(
                        origin=notification.origin,
                        event_type=notification.event_type,
                        start_ts=start_time,
                        end_ts=end_time)
                    events = [item[2] for item in event_tuples]

                    events_for_message.extend(events)

            log.debug("Found following events of interest to user, %s: %s", user_id, events_for_message)

            # send a notification email to each user using a _send_email() method
            if events_for_message:
                self.format_and_send_email(events_for_message = events_for_message,
                                            user_id = user_id,
                                            smtp_client=self.smtp_client)

        self.smtp_client.quit()


    def format_and_send_email(self, events_for_message=None, user_id=None, smtp_client=None):
        """
        Format and send the email message for a particular user, containing information about the
        events they are to be notified about

        @param events_for_message list
        @param user_id str
        @param smtp_client object
        """
        message = str(events_for_message)
        log.debug("The user, %s, will get the following events in his batch notification email: %s", user_id, message)

        msg = convert_events_to_email_message(events_for_message, self.clients.resource_registry)
        msg["Subject"] = "(SysName: " + get_sys_name() + ") ION event "
        msg["To"] = self.user_info[user_id]['user_contact'].email
        self.send_batch_email(msg, smtp_client)


    def send_batch_email(self, msg=None, smtp_client=None):
        """
        Send the email

        @param msg MIMEText object of email message
        @param smtp_client object
        """

        if msg is None: msg = {}
        for f in ["Subject", "To"]:
            if f not in msg:
                raise BadRequest("'%s' not in msg %s" % (f, msg))

        msg_subject = msg["Subject"]
        msg_recipient = msg["To"]

        msg['From'] = self.ION_NOTIFICATION_EMAIL_ADDRESS
        log.debug("UNS sending batch (digest) email from %s to %s",
                  self.ION_NOTIFICATION_EMAIL_ADDRESS,
                  msg_recipient)

        smtp_sender = CFG.get_safe('server.smtp.sender')

        smtp_client.sendmail(smtp_sender, [msg_recipient], msg.as_string())

    def update_user_info_object(self, user_id, new_notification):
        """
        Update the UserInfo object with the new notification for the given user.

        @param user_id str
        @param new_notification NotificationRequest
        """

        # this is not necessary since notifications are not stored in the UserInfo object
        raise NotImplementedError("This method is not necessary because Notifications are not stored in the userinfo object")

        #------------------------------------------------------------------------------------
        # read the user
        #------------------------------------------------------------------------------------

#        user = self.clients.resource_registry.read(user_id)
#
#        if not user:
#            raise BadRequest("No user with the provided user_id: %s" % user_id)
#
#        for item in user.variables:
#            if type(item) is dict and item.has_key('name') and item['name'] == 'notifications':
#                for notif in item['value']:
#                    if notif._id == new_notification._id:
#                        log.debug("came here for updating notification")
#                        notifications = item['value']
#                        notifications.remove(notif)
#                        notifications.append(new_notification)
#                break
#            else:
#                log.warning('Invalid variables attribute on UserInfo instance. UserInfo: %s', user)
#
#
#        #------------------------------------------------------------------------------------
#        # update the resource registry
#        #------------------------------------------------------------------------------------
#
#        log.debug("user.variables::: %s", user.variables)
#
#        self.clients.resource_registry.update(user)
#
#        return user


    def get_subscriptions(self, resource_id='', user_id = '', include_nonactive=False):
        """
        @param resource_id  a resource id (or other origin) that is the origin of events for notifications
        @param user_id  a UserInfo ID that owns the NotificationRequest
        @param include_nonactive  if False, filter to active NotificationRequest only
        Return all NotificationRequest resources where origin is given resource_id.
        """
        notif_reqs, _ = self.clients.resource_registry.find_resources_ext(
            restype=RT.NotificationRequest, attr_name="origin", attr_value=resource_id, id_only=False)

        log.debug("Got %s active and past NotificationRequests for resource/origin %s", len(notif_reqs), resource_id)

        if not include_nonactive:
            notif_reqs = [nr for nr in notif_reqs if nr.temporal_bounds.end_datetime == '']
            log.debug("Filtered to %s active NotificationRequests", len(notif_reqs))

        if user_id:
            # Get all NotificationRequests (ID) that are associated to given UserInfo_id
            user_notif_req_ids, _ = self.clients.resource_registry.find_objects(
                subject=user_id, predicate=PRED.hasNotification, object_type=RT.NotificationRequest, id_only=True)

            notif_reqs = [nr for nr in notif_reqs if nr._id in user_notif_req_ids]
            log.debug("Filtered to %s NotificationRequests associated to user %s", len(notif_reqs), user_id)

        return notif_reqs

    def get_subscriptions_attribute(self, resource_id='', user_id = '', include_nonactive=False):
        retval = self.get_subscriptions(resource_id=resource_id, user_id=user_id, include_nonactive=include_nonactive)
        container = ComputedListValue(value=retval)
        return container


    def _notification_in_notifications(self, notification=None, notifications=None):
        """
        Return the id of an existing notification that matches the given one (same name,
        origin, origin_type and event_type), or None if there is no match.
        """
        for notif in notifications:
            if (notif.name == notification.name and
                    notif.origin == notification.origin and
                    notif.origin_type == notification.origin_type and
                    notif.event_type == notification.event_type):
                return notif._id
        return None



    def load_user_info(self):
        """
        Method to load the user info dictionary used by the notification workers and the UNS

        @retval user_info dict
        """
        users, _ = self.clients.resource_registry.find_resources(restype=RT.UserInfo)

        user_info = {}

        if not users:
            return {}

        for user in users:
            notifications = []
            notifications_disabled = False
            notifications_daily_digest = False

            # retrieve all the active notifications associated with this user
            notifications = self.get_user_notifications(user_info_id=user)
            log.debug('load_user_info notifications:   %s', notifications)

            for variable in user.variables:
                if type(variable) is dict and variable.has_key('name'):

                    if variable['name'] == 'notifications_daily_digest':
                        notifications_daily_digest = variable['value']

                    if variable['name'] == 'notifications_disabled':
                        notifications_disabled = variable['value']

                else:
                    log.warning('Invalid variables attribute on UserInfo instance. UserInfo: %s', user)

            user_info[user._id] = { 'user_contact' : user.contact, 'notifications' : notifications,
                                    'notifications_daily_digest' : notifications_daily_digest, 'notifications_disabled' : notifications_disabled}

        return user_info


    ##
    ##
    ##  GOVERNANCE FUNCTIONS
    ##
    ##


    def check_subscription_policy(self, process, message, headers):

        try:
            gov_values = GovernanceHeaderValues(headers=headers, process=process, resource_id_required=False)

        except Inconsistent, ex:
            return False, ex.message

        if gov_values.op == 'delete_notification':
            return True, ''

        notification = message['notification']
        resource_id = notification.origin

        if notification.origin_type == RT.Org:
            org = self.clients.resource_registry.read(resource_id)
            if has_org_role(gov_values.actor_roles, org.org_governance_name, [ORG_MEMBER_ROLE]):
                return True, ''
        else:
            orgs, _ = self.clients.resource_registry.find_subjects(subject_type=RT.Org, predicate=PRED.hasResource, object=resource_id, id_only=False)
            for org in orgs:
                if has_org_role(gov_values.actor_roles, org.org_governance_name, [ORG_MEMBER_ROLE]):
                    return True, ''

        return False, '%s(%s) has been denied because the user is not a member of any Org to which the resource id %s belongs' % (process.name, gov_values.op, resource_id)
class BulkIngestBase(object):

    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.pubsub_management    = PubsubManagementServiceClient()
        self.dataset_management   = DatasetManagementServiceClient()
        self.data_product_management = DataProductManagementServiceClient()
        self.data_acquisition_management = DataAcquisitionManagementServiceClient()
        self.data_retriever = DataRetrieverServiceClient()
        self.process_dispatch_client = ProcessDispatcherServiceClient(node=self.container.node)
        self.resource_registry       = self.container.resource_registry
        self.context_ids = self.build_param_contexts()
        self.setup_resources()

    def build_param_contexts(self):
        raise NotImplementedError('build_param_contexts must be implemented in child classes')

    def create_external_dataset(self):
        raise NotImplementedError('create_external_dataset must be implemented in child classes')

    def get_dvr_config(self):
        raise NotImplementedError('get_dvr_config must be implemented in child classes')

    def get_retrieve_client(self, dataset_id=''):
        raise NotImplementedError('get_retrieve_client must be implemented in child classes')
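
    # Illustrative sketch (not part of the base class): a concrete test would mix
    # BulkIngestBase into an integration test case and fill in the hooks above. The
    # class name, module paths and attribute values below are assumptions for the
    # example; setUp() and test_data_ingest() expect setup_resources() to provide
    # self.name, self.description and the EDA_* dataset agent settings.
    #
    #   class TestMyBulkIngest(BulkIngestBase, IonIntegrationTestCase):
    #       def setup_resources(self):
    #           self.name = 'example_dataset'
    #           self.description = 'bulk ingest example'
    #           self.EDA_NAME = 'ExampleDatasetAgent'
    #           self.EDA_MOD = 'ion.agents.data.external_dataset_agent'
    #           self.EDA_CLS = 'ExternalDatasetAgent'
    #
    #       def build_param_contexts(self):
    #           return []          # list of ParameterContext ids
    #
    #       def create_external_dataset(self):
    #           ...                # create and return an ExternalDataset resource id
    #
    #       def get_dvr_config(self):
    #           return {}          # driver config handed to the dataset agent
    #
    #       def get_retrieve_client(self, dataset_id=''):
    #           ...                # retrieve and check the ingested data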

    def test_data_ingest(self):
        self.pdict_id = self.create_parameter_dict(self.name)
        self.stream_def_id = self.create_stream_def(self.name, self.pdict_id)
        self.data_product_id = self.create_data_product(self.name, self.description, self.stream_def_id)
        self.dataset_id = self.get_dataset_id(self.data_product_id)
        self.stream_id, self.route = self.get_stream_id_and_route(self.data_product_id)
        self.external_dataset_id = self.create_external_dataset()
        self.data_producer_id = self.register_external_dataset(self.external_dataset_id)
        self.start_agent()

    def create_parameter_dict(self, name=''):
        return self.dataset_management.create_parameter_dictionary(name=name, parameter_context_ids=self.context_ids, temporal_context='time')

    def create_stream_def(self, name='', pdict_id=''):
        return self.pubsub_management.create_stream_definition(name=name, parameter_dictionary_id=pdict_id)

    def create_data_product(self, name='', description='', stream_def_id=''):
        tdom, sdom = time_series_domain()
        tdom = tdom.dump()
        sdom = sdom.dump()
        dp_obj = DataProduct(
            name=name,
            description=description,
            processing_level_code='Parsed_Canonical',
            temporal_domain=tdom,
            spatial_domain=sdom)

        data_product_id = self.data_product_management.create_data_product(data_product=dp_obj, stream_definition_id=stream_def_id)
        self.data_product_management.activate_data_product_persistence(data_product_id)
        return data_product_id

    def register_external_dataset(self, external_dataset_id=''):
        return self.data_acquisition_management.register_external_data_set(external_dataset_id=external_dataset_id)

    def get_dataset_id(self, data_product_id=''):
        dataset_ids, assocs = self.resource_registry.find_objects(subject=data_product_id, predicate='hasDataset', id_only=True)
        return dataset_ids[0]

    def get_stream_id_and_route(self, data_product_id):
        stream_ids, _ = self.resource_registry.find_objects(data_product_id, PRED.hasStream, RT.Stream, id_only=True)
        stream_id = stream_ids[0]
        route = self.pubsub_management.read_stream_route(stream_id)
        #self.create_logger(self.name, stream_id)
        return stream_id, route

    def start_agent(self):
        agent_config = {
            'driver_config': self.get_dvr_config(),
            'stream_config': {},
            'agent': {'resource_id': self.external_dataset_id},
            'test_mode': True
        }

        _ia_pid = self.container.spawn_process(
            name=self.EDA_NAME,
            module=self.EDA_MOD,
            cls=self.EDA_CLS,
            config=agent_config)

        self._ia_client = ResourceAgentClient(self.external_dataset_id, process=FakeProcess())

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command=DriverEvent.START_AUTOSAMPLE)
        self._ia_client.execute_resource(command=cmd)

        self.start_listener(self.dataset_id)

    def stop_agent(self):
        cmd = AgentCommand(command=DriverEvent.STOP_AUTOSAMPLE)
        self._ia_client.execute_resource(cmd)

        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        self._ia_client.execute_agent(cmd)

    def start_listener(self, dataset_id=''):
        dataset_modified = Event()
        #callback to use retrieve to get data from the coverage
        def cb(*args, **kwargs):
            self.get_retrieve_client(dataset_id=dataset_id)

        #callback to keep execution going once dataset has been fully ingested
        def cb2(*args, **kwargs):
            dataset_modified.set()

        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id)
        es.start()

        es2 = EventSubscriber(event_type=OT.DeviceCommonLifecycleEvent, callback=cb2, origin='BaseDataHandler._acquire_sample')
        es2.start()

        self.addCleanup(es.stop)
        self.addCleanup(es2.stop)

        #let it go for up to 120 seconds, then stop the agent and reset it
        dataset_modified.wait(120)
        self.stop_agent()

    def create_logger(self, name, stream_id=''):

        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module':'ion.processes.data.stream_granule_logger',
            'class':'StreamGranuleLogger'
        }

        logger_procdef_id = self.process_dispatch_client.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process':{
                'stream_id':stream_id,
                }
        }
        pid = self.process_dispatch_client.schedule_process(process_definition_id=logger_procdef_id, configuration=configuration)

        return pid
class ProcessDispatcherServiceIntTest(IonIntegrationTestCase):

    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')

        self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)

        self.process_definition = ProcessDefinition(name='test_process')
        self.process_definition.executable = {'module': 'ion.services.cei.test.test_process_dispatcher',
                                              'class':'TestProcess'}
        self.process_definition_id = self.pd_cli.create_process_definition(self.process_definition)
        self.event_queue = queue.Queue()

        self.event_sub = None

    def tearDown(self):
        if self.event_sub:
            self.event_sub.deactivate()

    def _event_callback(self, event, *args, **kwargs):
        self.event_queue.put(event)

    def subscribe_events(self, origin):
        self.event_sub =  EventSubscriber(event_type="ProcessLifecycleEvent",
            callback=self._event_callback, origin=origin, origin_type="DispatchedProcess")
        self.event_sub.activate()

    def await_state_event(self, pid, state):
        event = self.event_queue.get(timeout=5)
        log.debug("Got event: %s", event)
        self.assertEqual(event.origin, pid)
        self.assertEqual(event.state, state)
        return event

    def test_create_schedule_cancel(self):
        process_schedule = ProcessSchedule()

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.subscribe_events(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid)
        self.assertEqual(pid, pid2)

        self.await_state_event(pid, ProcessStateEnum.SPAWN)

        # now try communicating with the process to make sure it is really running
        test_client =  TestClient()
        for i in range(5):
            # this timeout may be too low
            self.assertEqual(i+1, test_client.count(timeout=1))

        # kill the process and start it again
        self.pd_cli.cancel_process(pid)

        self.await_state_event(pid, ProcessStateEnum.TERMINATE)

        oldpid = pid

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.subscribe_events(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid)
        self.assertEqual(pid, pid2)
        self.assertNotEqual(oldpid, pid)

        self.await_state_event(pid, ProcessStateEnum.SPAWN)

        for i in range(5):
            # this timeout may be too low
            self.assertEqual(i+1, test_client.count(timeout=1))

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.await_state_event(pid, ProcessStateEnum.TERMINATE)

    def test_schedule_bad_config(self):

        process_schedule = ProcessSchedule()

        # a non-JSON-serializable IonObject
        o = ProcessTarget()

        with self.assertRaises(BadRequest) as ar:
            self.pd_cli.schedule_process(self.process_definition_id,
                process_schedule, configuration={"bad" : o})
        self.assertTrue(ar.exception.message.startswith("bad configuration"))
class CtdTransformsIntTest(IonIntegrationTestCase):
    def setUp(self):
        super(CtdTransformsIntTest, self).setUp()

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.queue_cleanup = []
        self.exchange_cleanup = []


        self.pubsub             = PubsubManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()

        self.exchange_name = 'ctd_L0_all_queue'
        self.exchange_point = 'test_exchange'
        self.i = 0

    def tearDown(self):
        for queue in self.queue_cleanup:
            xn = self.container.ex_manager.create_xn_queue(queue)
            xn.delete()
        for exchange in self.exchange_cleanup:
            xp = self.container.ex_manager.create_xp(exchange)
            xp.delete()


    def test_ctd_L0_all(self):
        '''
        Test that packets are processed by the ctd_L0_all transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='ctd_L0_all',
            description='For testing ctd_L0_all')
        process_definition.executable['module']= 'ion.processes.data.transforms.ctd.ctd_L0_all'
        process_definition.executable['class'] = 'ctd_L0_all'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        stream_def_id =  self.pubsub.create_stream_definition('ctd_all_stream_def', parameter_dictionary_id=pdict_id)

        cond_stream_id, _ = self.pubsub.create_stream('test_cond',
            exchange_point='science_data',
            stream_definition_id=stream_def_id)

        pres_stream_id, _ = self.pubsub.create_stream('test_pres',
            exchange_point='science_data',
            stream_definition_id=stream_def_id)

        temp_stream_id, _ = self.pubsub.create_stream('test_temp',
            exchange_point='science_data',
            stream_definition_id=stream_def_id)

        config.process.publish_streams.conductivity = cond_stream_id
        config.process.publish_streams.pressure = pres_stream_id
        config.process.publish_streams.temperature = temp_stream_id

        # Schedule the process
        pid = self.process_dispatcher.schedule_process(process_definition_id=ctd_transform_proc_def_id, configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create subscribers that will receive the conductivity, temperature and pressure granules from
        # the ctd transform
        #---------------------------------------------------------------------------------------------
        ar_cond = gevent.event.AsyncResult()
        def subscriber1(m, r, s):
            ar_cond.set(m)
        sub_cond = StandaloneStreamSubscriber('sub_cond', subscriber1)
        self.addCleanup(sub_cond.stop)

        ar_temp = gevent.event.AsyncResult()
        def subscriber2(m,r,s):
            ar_temp.set(m)
        sub_temp = StandaloneStreamSubscriber('sub_temp', subscriber2)
        self.addCleanup(sub_temp.stop)

        ar_pres = gevent.event.AsyncResult()
        def subscriber3(m,r,s):
            ar_pres.set(m)
        sub_pres = StandaloneStreamSubscriber('sub_pres', subscriber3)
        self.addCleanup(sub_pres.stop)

        sub_cond_id= self.pubsub.create_subscription('subscription_cond',
                                stream_ids=[cond_stream_id],
                                exchange_name='sub_cond')

        sub_temp_id = self.pubsub.create_subscription('subscription_temp',
            stream_ids=[temp_stream_id],
            exchange_name='sub_temp')

        sub_pres_id = self.pubsub.create_subscription('subscription_pres',
            stream_ids=[pres_stream_id],
            exchange_name='sub_pres')

        self.pubsub.activate_subscription(sub_cond_id)
        self.pubsub.activate_subscription(sub_temp_id)
        self.pubsub.activate_subscription(sub_pres_id)

        self.queue_cleanup.append(sub_cond.xn.queue)
        self.queue_cleanup.append(sub_temp.xn.queue)
        self.queue_cleanup.append(sub_pres.xn.queue)

        sub_cond.start()
        sub_temp.start()
        sub_pres.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        self.px_ctd = SimpleCtdPublisher()
        publish_granule = self._get_new_ctd_packet(stream_definition_id=stream_def_id, length = 5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result_cond = ar_cond.get(timeout=10)
        result_temp = ar_temp.get(timeout=10)
        result_pres = ar_pres.get(timeout=10)

        out_dict = {}
        out_dict['c'] = RecordDictionaryTool.load_from_granule(result_cond)['conductivity']
        out_dict['t'] = RecordDictionaryTool.load_from_granule(result_temp)['temp']
        out_dict['p'] = RecordDictionaryTool.load_from_granule(result_pres)['pressure']

        # Check that the transform algorithm was successfully executed
        self.check_granule_splitting(publish_granule, out_dict)

    def test_ctd_L1_conductivity(self):
        '''
        Test that packets are processed by the ctd_L1_conductivity transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='CTDL1ConductivityTransform',
            description='For testing CTDL1ConductivityTransform')
        process_definition.executable['module']= 'ion.processes.data.transforms.ctd.ctd_L1_conductivity'
        process_definition.executable['class'] = 'CTDL1ConductivityTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict',id_only=True)

        stream_def_id =  self.pubsub.create_stream_definition('cond_stream_def', parameter_dictionary_id=pdict_id)
        cond_stream_id, _ = self.pubsub.create_stream('test_conductivity',
                                                        exchange_point='science_data',
                                                        stream_definition_id=stream_def_id)

        config.process.publish_streams.conductivity = cond_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(process_definition_id=ctd_transform_proc_def_id, configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create subscribers that will receive the conductivity, temperature and pressure granules from
        # the ctd transform
        #---------------------------------------------------------------------------------------------
        ar_cond = gevent.event.AsyncResult()
        def subscriber1(m, r, s):
            ar_cond.set(m)
        sub_cond = StandaloneStreamSubscriber('sub_cond', subscriber1)
        self.addCleanup(sub_cond.stop)

        sub_cond_id = self.pubsub.create_subscription('subscription_cond',
            stream_ids=[cond_stream_id],
            exchange_name='sub_cond')

        self.pubsub.activate_subscription(sub_cond_id)

        self.queue_cleanup.append(sub_cond.xn.queue)

        sub_cond.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        self.px_ctd = SimpleCtdPublisher()
        publish_granule = self._get_new_ctd_packet(stream_definition_id=stream_def_id, length = 5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result_cond = ar_cond.get(timeout=10)
        self.assertTrue(isinstance(result_cond, Granule))

        rdt = RecordDictionaryTool.load_from_granule(result_cond)
        self.assertTrue(rdt.__contains__('conductivity'))

        self.check_cond_algorithm_execution(publish_granule, result_cond)

    def check_cond_algorithm_execution(self, publish_granule, granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        output_data = output_rdt_transform['conductivity']
        input_data = input_rdt_to_transform['conductivity']

        self.assertTrue((((input_data / 100000.0) - 0.5) == output_data).all())

    def check_pres_algorithm_execution(self, publish_granule, granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        output_data = output_rdt_transform['pressure']
        input_data = input_rdt_to_transform['pressure']

        self.assertTrue((input_data == output_data).all())

    def check_temp_algorithm_execution(self, publish_granule, granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        output_data = output_rdt_transform['temp']
        input_data = input_rdt_to_transform['temp']

        self.assertTrue((((input_data / 10000.0) - 10) == output_data).all())

    def check_density_algorithm_execution(self, publish_granule, granule_from_transform):

        #------------------------------------------------------------------
        # Calculate the correct density from the input granule data
        #------------------------------------------------------------------
        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        conductivity = input_rdt_to_transform['conductivity']
        pressure = input_rdt_to_transform['pressure']
        temperature = input_rdt_to_transform['temp']

        longitude = input_rdt_to_transform['lon']
        latitude = input_rdt_to_transform['lat']

        sp = SP_from_cndr(r=conductivity/cte.C3515, t=temperature, p=pressure)
        sa = SA_from_SP(sp, pressure, longitude, latitude)
        dens_value = rho(sa, temperature, pressure)

        out_density = output_rdt_transform['density']

        #-----------------------------------------------------------------------------
        # Check that the output data from the transform has the correct density values
        #-----------------------------------------------------------------------------
        self.assertTrue((dens_value == out_density).all())

    def check_salinity_algorithm_execution(self, publish_granule, granule_from_transform):

        #------------------------------------------------------------------
        # Calculate the correct salinity from the input granule data
        #------------------------------------------------------------------
        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        conductivity = input_rdt_to_transform['conductivity']
        pressure = input_rdt_to_transform['pressure']
        temperature = input_rdt_to_transform['temp']

        sal_value = SP_from_cndr(r=conductivity/cte.C3515, t=temperature, p=pressure)

        out_salinity = output_rdt_transform['salinity']

        #-----------------------------------------------------------------------------
        # Check that the output data from the transform has the correct salinity values
        #-----------------------------------------------------------------------------
        self.assertTrue((sal_value == out_salinity).all())

    def check_granule_splitting(self, publish_granule, out_dict):
        '''
        This checks that the ctd_L0_all transform is able to split out one of the
        granules from the whole granule
        fed into the transform
        '''

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)

        in_cond = input_rdt_to_transform['conductivity']
        in_pressure = input_rdt_to_transform['pressure']
        in_temp = input_rdt_to_transform['temp']

        out_cond = out_dict['c']
        out_pres = out_dict['p']
        out_temp = out_dict['t']

        self.assertTrue((in_cond == out_cond).all())
        self.assertTrue((in_pressure == out_pres).all())
        self.assertTrue((in_temp == out_temp).all())

    def test_ctd_L1_pressure(self):
        '''
        Test that packets are processed by the ctd_L1_pressure transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='CTDL1PressureTransform',
            description='For testing CTDL1PressureTransform')
        process_definition.executable['module']= 'ion.processes.data.transforms.ctd.ctd_L1_pressure'
        process_definition.executable['class'] = 'CTDL1PressureTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)

        stream_def_id =  self.pubsub.create_stream_definition('pres_stream_def', parameter_dictionary_id=pdict_id)
        pres_stream_id, _ = self.pubsub.create_stream('test_pressure',
                                                        stream_definition_id=stream_def_id,
                                                        exchange_point='science_data')

        config.process.publish_streams.pressure = pres_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(process_definition_id=ctd_transform_proc_def_id, configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create subscribers that will receive the pressure granules from
        # the ctd transform
        #---------------------------------------------------------------------------------------------

        ar_pres = gevent.event.AsyncResult()
        def subscriber3(m,r,s):
            ar_pres.set(m)
        sub_pres = StandaloneStreamSubscriber('sub_pres', subscriber3)
        self.addCleanup(sub_pres.stop)

        sub_pres_id = self.pubsub.create_subscription('subscription_pres',
            stream_ids=[pres_stream_id],
            exchange_name='sub_pres')

        self.pubsub.activate_subscription(sub_pres_id)

        self.queue_cleanup.append(sub_pres.xn.queue)

        sub_pres.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)
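        # The queue / exchange point / bind calls above route any message published with routing
        # key 'stream_id.stream' on this exchange point into the queue the transform consumes from.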

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        self.px_ctd = SimpleCtdPublisher()
        publish_granule = self._get_new_ctd_packet(stream_definition_id=stream_def_id, length = 5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result = ar_pres.get(timeout=10)
        self.assertTrue(isinstance(result, Granule))

        rdt = RecordDictionaryTool.load_from_granule(result)
        self.assertIn('pressure', rdt)

        self.check_pres_algorithm_execution(publish_granule, result)

    def test_ctd_L1_temperature(self):
        '''
        Test that packets are processed by the ctd_L1_temperature transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='CTDL1TemperatureTransform',
            description='For testing CTDL1TemperatureTransform')
        process_definition.executable['module']= 'ion.processes.data.transforms.ctd.ctd_L1_temperature'
        process_definition.executable['class'] = 'CTDL1TemperatureTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)

        stream_def_id =  self.pubsub.create_stream_definition('temp_stream_def', parameter_dictionary_id=pdict_id)
        temp_stream_id, _ = self.pubsub.create_stream('test_temperature', stream_definition_id=stream_def_id,
                                                        exchange_point='science_data')

        config.process.publish_streams.temperature = temp_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(process_definition_id=ctd_transform_proc_def_id, configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create subscriber that will receive the temperature granule from
        # the ctd transform
        #---------------------------------------------------------------------------------------------

        ar_temp = gevent.event.AsyncResult()
        def subscriber2(m,r,s):
            ar_temp.set(m)
        sub_temp = StandaloneStreamSubscriber('sub_temp', subscriber2)
        self.addCleanup(sub_temp.stop)

        sub_temp_id = self.pubsub.create_subscription('subscription_temp',
            stream_ids=[temp_stream_id],
            exchange_name='sub_temp')

        self.pubsub.activate_subscription(sub_temp_id)

        self.queue_cleanup.append(sub_temp.xn.queue)

        sub_temp.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        self.px_ctd = SimpleCtdPublisher()
        publish_granule = self._get_new_ctd_packet(stream_definition_id=stream_def_id, length = 5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result = ar_temp.get(timeout=10)
        self.assertTrue(isinstance(result, Granule))

        rdt = RecordDictionaryTool.load_from_granule(result)
        self.assertIn('temp', rdt)

        self.check_temp_algorithm_execution(publish_granule, result)

    def test_ctd_L2_density(self):
        '''
        Test that packets are processed by the ctd_L2_density transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='DensityTransform',
            description='For testing DensityTransform')
        process_definition.executable['module']= 'ion.processes.data.transforms.ctd.ctd_L2_density'
        process_definition.executable['class'] = 'DensityTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        config.process.interval = 1.0

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)

        stream_def_id =  self.pubsub.create_stream_definition('dens_stream_def', parameter_dictionary_id=pdict_id)
        dens_stream_id, _ = self.pubsub.create_stream('test_density', stream_definition_id=stream_def_id,
            exchange_point='science_data')
        config.process.publish_streams.density = dens_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(process_definition_id=ctd_transform_proc_def_id, configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create a subscriber that will receive the density granule from the ctd transform
        #---------------------------------------------------------------------------------------------

        ar_dens = gevent.event.AsyncResult()
        def subscriber3(m,r,s):
            ar_dens.set(m)
        sub_dens = StandaloneStreamSubscriber('sub_dens', subscriber3)
        self.addCleanup(sub_dens.stop)

        sub_dens_id = self.pubsub.create_subscription('subscription_dens',
            stream_ids=[dens_stream_id],
            exchange_name='sub_dens')

        self.pubsub.activate_subscription(sub_dens_id)

        self.queue_cleanup.append(sub_dens.xn.queue)

        sub_dens.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        self.px_ctd = SimpleCtdPublisher()
        publish_granule = self._get_new_ctd_packet(stream_definition_id=stream_def_id, length = 5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result = ar_dens.get(timeout=10)
        self.assertTrue(isinstance(result, Granule))

        rdt = RecordDictionaryTool.load_from_granule(result)
        self.assertIn('density', rdt)

        self.check_density_algorithm_execution(publish_granule, result)

    def test_ctd_L2_salinity(self):
        '''
        Test that packets are processed by the ctd_L2_salinity transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='SalinityTransform',
            description='For testing SalinityTransform')
        process_definition.executable['module']= 'ion.processes.data.transforms.ctd.ctd_L2_salinity'
        process_definition.executable['class'] = 'SalinityTransform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        stream_def_id =  self.pubsub.create_stream_definition('sal_stream_def', parameter_dictionary_id=pdict_id)
        sal_stream_id, _ = self.pubsub.create_stream('test_salinity', stream_definition_id=stream_def_id,
            exchange_point='science_data')

        config.process.publish_streams.salinity = sal_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(process_definition_id=ctd_transform_proc_def_id, configuration=config)

        #---------------------------------------------------------------------------------------------
        # Create a subscriber that will receive the salinity granule from the ctd transform
        #---------------------------------------------------------------------------------------------

        ar_sal = gevent.event.AsyncResult()
        def subscriber3(m,r,s):
            ar_sal.set(m)
        sub_sal = StandaloneStreamSubscriber('sub_sal', subscriber3)
        self.addCleanup(sub_sal.stop)

        sub_sal_id = self.pubsub.create_subscription('subscription_sal',
            stream_ids=[sal_stream_id],
            exchange_name='sub_sal')

        self.pubsub.activate_subscription(sub_sal_id)

        self.queue_cleanup.append(sub_sal.xn.queue)

        sub_sal.start()

        #------------------------------------------------------------------------------------------------------
        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
        #------------------------------------------------------------------------------------------------------

        # Do all the routing stuff for the publishing
        routing_key = 'stream_id.stream'
        stream_route = StreamRoute(self.exchange_point, routing_key)

        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
        xp = self.container.ex_manager.create_xp(self.exchange_point)
        xn.bind('stream_id.stream', xp)

        pub = StandaloneStreamPublisher('stream_id', stream_route)

        # Build a packet that can be published
        self.px_ctd = SimpleCtdPublisher()
        publish_granule = self._get_new_ctd_packet(stream_definition_id=stream_def_id, length = 5)

        # Publish the packet
        pub.publish(publish_granule)

        #------------------------------------------------------------------------------------------------------
        # Make assertions about whether the ctd transform executed its algorithm and published the correct
        # granules
        #------------------------------------------------------------------------------------------------------

        # Get the granule that is published by the ctd transform post processing
        result = ar_sal.get(timeout=10)
        self.assertTrue(isinstance(result, Granule))

        rdt = RecordDictionaryTool.load_from_granule(result)
        self.assertIn('salinity', rdt)

        self.check_salinity_algorithm_execution(publish_granule, result)


    def _get_new_ctd_packet(self, stream_definition_id, length):

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)
        rdt['time'] = numpy.arange(self.i, self.i+length)

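        # Fill every QuantityType parameter in the parameter dictionary with random values drawn
        # from [0.0, 75.0) so the granule carries numeric sample data.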
        for field in rdt:
            if isinstance(rdt._pdict.get_context(field).param_type, QuantityType):
                rdt[field] = numpy.array([random.uniform(0.0,75.0)  for i in xrange(length)])

        g = rdt.to_granule()
        self.i+=length

        return g


    def test_presf_L0_splitter(self):
        '''
        Test that packets are processed by the presf_L0_splitter transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='Presf L0 Splitter',
            description='For testing Presf L0 Splitter')
        process_definition.executable['module']= 'ion.processes.data.transforms.ctd.presf_L0_splitter'
        process_definition.executable['class'] = 'PresfL0Splitter'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)

        stream_def_id =  self.pubsub.create_stream_definition('pres_stream_def', parameter_dictionary_id=pdict_id)
        pres_stream_id, _ = self.pubsub.create_stream('test_pressure',
            stream_definition_id=stream_def_id,
            exchange_point='science_data')

        config.process.publish_streams.absolute_pressure = pres_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(process_definition_id=ctd_transform_proc_def_id, configuration=config)

#        #---------------------------------------------------------------------------------------------
#        # Create subscribers that will receive the pressure granules from
#        # the ctd transform
#        #---------------------------------------------------------------------------------------------
#
#        ar_pres = gevent.event.AsyncResult()
#        def subscriber3(m,r,s):
#            ar_pres.set(m)
#        sub_pres = StandaloneStreamSubscriber('sub_pres', subscriber3)
#        self.addCleanup(sub_pres.stop)
#
#        sub_pres_id = self.pubsub.create_subscription('subscription_pres',
#            stream_ids=[pres_stream_id],
#            exchange_name='sub_pres')
#
#        self.pubsub.activate_subscription(sub_pres_id)
#
#        self.queue_cleanup.append(sub_pres.xn.queue)
#
#        sub_pres.start()
#
#        #------------------------------------------------------------------------------------------------------
#        # Use a StandaloneStreamPublisher to publish a packet that can be then picked up by a ctd transform
#        #------------------------------------------------------------------------------------------------------
#
#        # Do all the routing stuff for the publishing
#        routing_key = 'stream_id.stream'
#        stream_route = StreamRoute(self.exchange_point, routing_key)
#
#        xn = self.container.ex_manager.create_xn_queue(self.exchange_name)
#        xp = self.container.ex_manager.create_xp(self.exchange_point)
#        xn.bind('stream_id.stream', xp)
#
#        pub = StandaloneStreamPublisher('stream_id', stream_route)
#
#        # Build a packet that can be published
#        self.px_ctd = SimpleCtdPublisher()
#        publish_granule = self._get_new_ctd_packet(stream_definition_id=stream_def_id, length = 5)
#
#        # Publish the packet
#        pub.publish(publish_granule)
#
#        #------------------------------------------------------------------------------------------------------
#        # Make assertions about whether the ctd transform executed its algorithm and published the correct
#        # granules
#        #------------------------------------------------------------------------------------------------------
#
#        # Get the granule that is published by the ctd transform post processing
#        result = ar_pres.get(timeout=10)
#        self.assertTrue(isinstance(result, Granule))
#
#        rdt = RecordDictionaryTool.load_from_granule(result)
#        self.assertTrue(rdt.__contains__('pressure'))
#
#        self.check_pres_algorithm_execution(publish_granule, result)
#

    def test_presf_L1(self):
        '''
        Test that packets are processed by the presf_L1 transform
        '''

        #---------------------------------------------------------------------------------------------
        # Launch a ctd transform
        #---------------------------------------------------------------------------------------------
        # Create the process definition
        process_definition = ProcessDefinition(
            name='PresfL1Transform',
            description='For testing PresfL1Transform')
        process_definition.executable['module']= 'ion.processes.data.transforms.ctd.presf_L1'
        process_definition.executable['class'] = 'PresfL1Transform'
        ctd_transform_proc_def_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

        # Build the config
        config = DotDict()
        config.process.queue_name = self.exchange_name
        config.process.exchange_point = self.exchange_point

        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)

        stream_def_id =  self.pubsub.create_stream_definition('pres_stream_def', parameter_dictionary_id=pdict_id)
        pres_stream_id, _ = self.pubsub.create_stream('test_pressure',
            stream_definition_id=stream_def_id,
            exchange_point='science_data')

        config.process.publish_streams.seafloor_pressure = pres_stream_id

        # Schedule the process
        self.process_dispatcher.schedule_process(process_definition_id=ctd_transform_proc_def_id, configuration=config)
class TestActivateInstrumentIntegration(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        self._start_container()

        self.container.start_rel_from_url("res/deploy/r2deploy.yml")

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsubcli = PubsubManagementServiceClient(node=self.container.node)
        self.imsclient = InstrumentManagementServiceClient(node=self.container.node)
        self.dpclient = DataProductManagementServiceClient(node=self.container.node)
        self.datasetclient = DatasetManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
        self.dataprocessclient = DataProcessManagementServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)

        # set up listener vars
        self._data_greenlets = []
        self._no_samples = None
        self._samples_received = []

    def create_logger(self, name, stream_id=""):

        # logger process
        producer_definition = ProcessDefinition(name=name + "_logger")
        producer_definition.executable = {
            "module": "ion.processes.data.stream_granule_logger",
            "class": "StreamGranuleLogger",
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(process_definition=producer_definition)
        configuration = {"process": {"stream_id": stream_id}}
        pid = self.processdispatchclient.schedule_process(
            process_definition_id=logger_procdef_id, configuration=configuration
        )

        return pid

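    # Usage note (grounded in the tests below): callers pass a stream id and later cancel the
    # returned pid through the process dispatcher, e.g.
    #   pid = self.create_logger("ctd_parsed", stream_ids[0])
    #   ...
    #   self.processdispatchclient.cancel_process(pid)
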
    @unittest.skip("TBD")
    def test_activateInstrumentSample(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(
            RT.InstrumentModel, name="SBE37IMModel", description="SBE37IMModel", model="SBE37IMModel"
        )
        try:
            instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentModel: %s" % ex)
        log.debug("new InstrumentModel id = %s ", instModel_id)

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name="agent007",
            description="SBE37IMAgent",
            driver_module="ion.agents.instrument.instrument_agent",
            driver_class="InstrumentAgent",
        )
        try:
            instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentAgent: %s" % ex)
        log.debug("new InstrumentAgent id = %s", instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            "test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) "
        )
        instDevice_obj = IonObject(
            RT.InstrumentDevice, name="SBE37IMDevice", description="SBE37IMDevice", serial_number="12345"
        )
        try:
            instDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)
            self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, instDevice_id)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentDevice: %s" % ex)

        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s    (SA Req: L4-CI-SA-RQ-241) ", instDevice_id
        )

        instAgentInstance_obj = IonObject(
            RT.InstrumentAgentInstance,
            name="SBE37IMAgentInstance",
            description="SBE37IMAgentInstance",
            driver_module="mi.instrument.seabird.sbe37smb.ooicore.driver",
            driver_class="SBE37Driver",
            comms_device_address="sbe37-simulator.oceanobservatories.org",
            comms_device_port=4001,
            port_agent_work_dir="/tmp/",
            port_agent_delimeter=["<<", ">>"],
        )
        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id
        )

        # create a stream definition for the data from the ctd simulator
        ctd_stream_def = SBE37_CDM_stream_definition()
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(container=ctd_stream_def)

        log.debug("new Stream Definition id = %s", ctd_stream_def_id)

        log.debug("Creating new CDM data product with a stream definition")

        craft = CoverageCraft
        sdom, tdom = craft.create_domains()
        sdom = sdom.dump()
        tdom = tdom.dump()
        parameter_dictionary = craft.create_parameters()
        parameter_dictionary = parameter_dictionary.dump()

        dp_obj = IonObject(
            RT.DataProduct,
            name="the parsed data",
            description="ctd stream test",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        data_product_id1 = self.dpclient.create_data_product(dp_obj, ctd_stream_def_id, parameter_dictionary)

        log.debug("new dp_id = %s", data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id1)

        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
        log.debug("Data product streams1 = %s", stream_ids)

        pid = self.create_logger("ctd_parsed", stream_ids[0])
        self.loggerpids.append(pid)

        # -------------------------------
        # L0 Conductivity - Temperature - Pressure: Data Process Definition
        # -------------------------------
        log.debug("test_activateInstrumentSample: create data process definition ctd_L0_all")
        dpd_obj = IonObject(
            RT.DataProcessDefinition,
            name="ctd_L0_all",
            description="transform ctd package into three separate L0 streams",
            module="ion.processes.data.transforms.ctd.ctd_L0_all",
            class_name="ctd_L0_all",
            process_source="some_source_reference",
        )
        try:
            ctd_L0_all_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except BadRequest as ex:
            self.fail("failed to create new ctd_L0_all data process definition: %s" % ex)

        # -------------------------------
        # L0 Conductivity - Temperature - Pressure: Output Data Products
        # -------------------------------

        outgoing_stream_l0_conductivity = L0_conductivity_stream_definition()
        outgoing_stream_l0_conductivity_id = self.pubsubcli.create_stream_definition(
            container=outgoing_stream_l0_conductivity, name="L0_Conductivity"
        )
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l0_conductivity_id, ctd_L0_all_dprocdef_id
        )

        outgoing_stream_l0_pressure = L0_pressure_stream_definition()
        outgoing_stream_l0_pressure_id = self.pubsubcli.create_stream_definition(
            container=outgoing_stream_l0_pressure, name="L0_Pressure"
        )
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l0_pressure_id, ctd_L0_all_dprocdef_id
        )

        outgoing_stream_l0_temperature = L0_temperature_stream_definition()
        outgoing_stream_l0_temperature_id = self.pubsubcli.create_stream_definition(
            container=outgoing_stream_l0_temperature, name="L0_Temperature"
        )
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(
            outgoing_stream_l0_temperature_id, ctd_L0_all_dprocdef_id
        )

        self.output_products = {}
        log.debug("test_createTransformsThenActivateInstrument: create output data product L0 conductivity")

        ctd_l0_conductivity_output_dp_obj = IonObject(
            RT.DataProduct,
            name="L0_Conductivity",
            description="transform output conductivity",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        ctd_l0_conductivity_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l0_conductivity_output_dp_obj, outgoing_stream_l0_conductivity_id, parameter_dictionary
        )
        self.output_products["conductivity"] = ctd_l0_conductivity_output_dp_id
        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l0_conductivity_output_dp_id)

        stream_ids, _ = self.rrclient.find_objects(ctd_l0_conductivity_output_dp_id, PRED.hasStream, None, True)
        log.debug(" ctd_l0_conductivity stream id =  %s", str(stream_ids))
        pid = self.create_logger("ctd_l0_conductivity", stream_ids[0])
        self.loggerpids.append(pid)

        log.debug("test_createTransformsThenActivateInstrument: create output data product L0 pressure")

        ctd_l0_pressure_output_dp_obj = IonObject(
            RT.DataProduct,
            name="L0_Pressure",
            description="transform output pressure",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        ctd_l0_pressure_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l0_pressure_output_dp_obj, outgoing_stream_l0_pressure_id, parameter_dictionary
        )
        self.output_products["pressure"] = ctd_l0_pressure_output_dp_id
        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l0_pressure_output_dp_id)

        stream_ids, _ = self.rrclient.find_objects(ctd_l0_pressure_output_dp_id, PRED.hasStream, None, True)
        log.debug(" ctd_l0_pressure stream id =  %s", str(stream_ids))
        pid = self.create_logger("ctd_l0_pressure", stream_ids[0])
        self.loggerpids.append(pid)

        log.debug("test_createTransformsThenActivateInstrument: create output data product L0 temperature")

        ctd_l0_temperature_output_dp_obj = IonObject(
            RT.DataProduct,
            name="L0_Temperature",
            description="transform output temperature",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        ctd_l0_temperature_output_dp_id = self.dataproductclient.create_data_product(
            ctd_l0_temperature_output_dp_obj, outgoing_stream_l0_temperature_id, parameter_dictionary
        )
        self.output_products["temperature"] = ctd_l0_temperature_output_dp_id
        self.dataproductclient.activate_data_product_persistence(data_product_id=ctd_l0_temperature_output_dp_id)

        stream_ids, _ = self.rrclient.find_objects(ctd_l0_temperature_output_dp_id, PRED.hasStream, None, True)
        log.debug(" ctd_l0_temperature stream id =  %s", str(stream_ids))
        pid = self.create_logger("ctd_l0_temperature", stream_ids[0])
        self.loggerpids.append(pid)

        # -------------------------------
        # L0 Conductivity - Temperature - Pressure: Create the data process
        # -------------------------------
        log.debug("test_activateInstrumentSample: create L0 all data_process start")
        try:
            ctd_l0_all_data_process_id = self.dataprocessclient.create_data_process(
                ctd_L0_all_dprocdef_id, [data_product_id1], self.output_products
            )
            self.dataprocessclient.activate_data_process(ctd_l0_all_data_process_id)
        except BadRequest as ex:
            self.fail("failed to create new data process: %s" % ex)

        log.debug("test_createTransformsThenActivateInstrument: create L0 all data_process return")

        log.debug("Creating new RAW data product with a stream definition")
        raw_stream_def = SBE37_RAW_stream_definition()
        raw_stream_def_id = self.pubsubcli.create_stream_definition(container=raw_stream_def)

        dp_obj = IonObject(
            RT.DataProduct,
            name="the raw data",
            description="raw stream test",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        data_product_id2 = self.dpclient.create_data_product(dp_obj, raw_stream_def_id, parameter_dictionary)
        log.debug("new dp_id = %s", str(data_product_id2))

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasStream, None, True)
        log.debug("Data product streams2 = %s", str(stream_ids))

        self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)

        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(instAgentInstance_id)
        log.debug("Instrument agent instance obj: = %s", str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        # self._ia_client = ResourceAgentClient('123xyz', name=inst_agent_instance_obj.agent_process_id,  process=FakeProcess())
        self._ia_client = ResourceAgentClient(instDevice_id, process=FakeProcess())
        log.debug("test_activateInstrumentSample: got ia client %s", str(self._ia_client))

        cmd = AgentCommand(command="initialize")
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: initialize %s", str(retval))

        time.sleep(1)

        log.debug("test_activateInstrumentSample: Sending go_active command (L4-CI-SA-RQ-334)")
        cmd = AgentCommand(command="go_active")
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrument: return value from go_active %s", str(reply))
        time.sleep(1)
        cmd = AgentCommand(command="get_current_state")
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "test_activateInstrumentSample: current state after sending go_active command %s    (L4-CI-SA-RQ-334)",
            str(state),
        )

        cmd = AgentCommand(command="run")
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: run %s", str(reply))
        time.sleep(1)

        log.debug("test_activateInstrumentSample: calling acquire_sample ")
        cmd = AgentCommand(command="acquire_sample")
        reply = self._ia_client.execute(cmd)
        log.debug("test_activateInstrumentSample: return from acquire_sample %s", str(reply))
        time.sleep(1)

        log.debug("test_activateInstrumentSample: calling acquire_sample 2")
        cmd = AgentCommand(command="acquire_sample")
        reply = self._ia_client.execute(cmd)
        log.debug("test_activateInstrumentSample: return from acquire_sample 2   %s", str(reply))
        time.sleep(1)

        log.debug("test_activateInstrumentSample: calling acquire_sample 3")
        cmd = AgentCommand(command="acquire_sample")
        reply = self._ia_client.execute(cmd)
        log.debug("test_activateInstrumentSample: return from acquire_sample 3   %s", str(reply))
        time.sleep(2)

        log.debug("test_activateInstrumentSample: calling reset ")
        cmd = AgentCommand(command="reset")
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentSample: return from reset %s", str(reply))
        time.sleep(1)

        # -------------------------------
        # Deactivate InstrumentAgentInstance
        # -------------------------------
        self.imsclient.stop_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

    @unittest.skip("TBD")
    def test_activateInstrumentStream(self):

        self.loggerpids = []

        # Create InstrumentModel
        instModel_obj = IonObject(
            RT.InstrumentModel, name="SBE37IMModel", description="SBE37IMModel", model="SBE37IMModel"
        )
        try:
            instModel_id = self.imsclient.create_instrument_model(instModel_obj)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentModel: %s" % ex)
        log.debug("new InstrumentModel id = %s ", instModel_id)

        # Create InstrumentAgent
        instAgent_obj = IonObject(
            RT.InstrumentAgent,
            name="agent007",
            description="SBE37IMAgent",
            driver_module="ion.agents.instrument.instrument_agent",
            driver_class="InstrumentAgent",
        )
        try:
            instAgent_id = self.imsclient.create_instrument_agent(instAgent_obj)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentAgent: %s" % ex)
        log.debug("new InstrumentAgent id = %s", instAgent_id)

        self.imsclient.assign_instrument_model_to_instrument_agent(instModel_id, instAgent_id)

        # Create InstrumentDevice
        log.debug(
            "test_activateInstrumentSample: Create instrument resource to represent the SBE37 (SA Req: L4-CI-SA-RQ-241) "
        )
        instDevice_obj = IonObject(
            RT.InstrumentDevice, name="SBE37IMDevice", description="SBE37IMDevice", serial_number="12345"
        )
        try:
            instDevice_id = self.imsclient.create_instrument_device(instrument_device=instDevice_obj)
            self.imsclient.assign_instrument_model_to_instrument_device(instModel_id, instDevice_id)
        except BadRequest as ex:
            self.fail("failed to create new InstrumentDevice: %s" % ex)

        log.debug(
            "test_activateInstrumentSample: new InstrumentDevice id = %s    (SA Req: L4-CI-SA-RQ-241) ", instDevice_id
        )

        instAgentInstance_obj = IonObject(
            RT.InstrumentAgentInstance,
            name="SBE37IMAgentInstance",
            description="SBE37IMAgentInstance",
            driver_module="mi.instrument.seabird.sbe37smb.ooicore.driver",
            driver_class="SBE37Driver",
            comms_device_address="sbe37-simulator.oceanobservatories.org",
            comms_device_port=4001,
            port_agent_work_dir="/tmp/",
            port_agent_delimeter=["<<", ">>"],
        )
        instAgentInstance_id = self.imsclient.create_instrument_agent_instance(
            instAgentInstance_obj, instAgent_id, instDevice_id
        )

        # create a stream definition for the data from the ctd simulator
        ctd_stream_def = SBE37_CDM_stream_definition()
        ctd_stream_def_id = self.pubsubcli.create_stream_definition(container=ctd_stream_def)

        log.debug("new Stream Definition id = %s", ctd_stream_def_id)

        log.debug("Creating new CDM data product with a stream definition")

        craft = CoverageCraft
        sdom, tdom = craft.create_domains()
        sdom = sdom.dump()
        tdom = tdom.dump()
        parameter_dictionary = craft.create_parameters()
        parameter_dictionary = parameter_dictionary.dump()

        dp_obj = IonObject(
            RT.DataProduct,
            name="the parsed data",
            description="ctd stream test",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        data_product_id1 = self.dpclient.create_data_product(dp_obj, ctd_stream_def_id, parameter_dictionary)
        log.debug("new dp_id = %s", data_product_id1)

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id1)

        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id1)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
        log.debug("Data product streams1 = %s", stream_ids)

        pid = self.create_logger("ctd_parsed", stream_ids[0])
        self.loggerpids.append(pid)

        #        simdata_subscription_id = self.pubsubcli.create_subscription(
        #            query=StreamQuery([stream_ids[0]]),
        #            exchange_name='Sim_data_queue',
        #            name='SimDataSubscription',
        #            description='SimData SubscriptionDescription'
        #        )
        #
        #
        #        def simdata_message_received(message, headers):
        #            input = str(message)
        #            log.debug("test_activateInstrumentStream: granule received: %s", input)
        #
        #
        #        subscriber_registrar = StreamSubscriberRegistrar(process=self.container, container=self.container)
        #        simdata_subscriber = subscriber_registrar.create_subscriber(exchange_name='Sim_data_queue', callback=simdata_message_received)
        #
        #        # Start subscribers
        #        simdata_subscriber.start()
        #
        #        # Activate subscriptions
        #        self.pubsubcli.activate_subscription(simdata_subscription_id)

        log.debug("Creating new RAW data product with a stream definition")
        raw_stream_def = SBE37_RAW_stream_definition()
        raw_stream_def_id = self.pubsubcli.create_stream_definition(container=raw_stream_def)

        dp_obj = IonObject(
            RT.DataProduct,
            name="the raw data",
            description="raw stream test",
            temporal_domain=tdom,
            spatial_domain=sdom,
        )

        data_product_id2 = self.dpclient.create_data_product(dp_obj, raw_stream_def_id, parameter_dictionary)
        log.debug("new dp_id = %s", str(data_product_id2))

        self.damsclient.assign_data_product(input_resource_id=instDevice_id, data_product_id=data_product_id2)

        self.dpclient.activate_data_product_persistence(data_product_id=data_product_id2)

        # Retrieve the id of the OUTPUT stream from the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id2, PRED.hasStream, None, True)
        log.debug("Data product streams2 = %s", str(stream_ids))

        self.imsclient.start_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)

        inst_agent_instance_obj = self.imsclient.read_instrument_agent_instance(instAgentInstance_id)
        log.debug("test_activateInstrumentStream Instrument agent instance obj: = %s", str(inst_agent_instance_obj))

        # Start a resource agent client to talk with the instrument agent.
        # self._ia_client = ResourceAgentClient('123xyz', name=inst_agent_instance_obj.agent_process_id,  process=FakeProcess())
        self._ia_client = ResourceAgentClient(instDevice_id, process=FakeProcess())
        log.debug("test_activateInstrumentStream: got ia client %s", str(self._ia_client))

        cmd = AgentCommand(command="initialize")
        retval = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentStream: initialize %s", str(retval))

        time.sleep(2)

        log.debug("test_activateInstrumentStream: Sending go_active command (L4-CI-SA-RQ-334)")
        cmd = AgentCommand(command="go_active")
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentStream: return value from go_active %s", str(reply))
        time.sleep(2)
        cmd = AgentCommand(command="get_current_state")
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug(
            "test_activateInstrumentStream: current state after sending go_active command %s    (L4-CI-SA-RQ-334)",
            str(state),
        )

        cmd = AgentCommand(command="run")
        reply = self._ia_client.execute_agent(cmd)
        time.sleep(2)
        cmd = AgentCommand(command="get_current_state")
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("test_activateInstrumentStream: return from run state: %s", str(state))

        # Make sure the sampling rate and transmission are sane.
        params = {SBE37Parameter.NAVG: 1, SBE37Parameter.INTERVAL: 5, SBE37Parameter.TXREALTIME: True}
        self._ia_client.set_param(params)
        time.sleep(2)

        log.debug("test_activateInstrumentStream: calling go_streaming ")
        cmd = AgentCommand(command="go_streaming")
        reply = self._ia_client.execute_agent(cmd)
        time.sleep(2)
        cmd = AgentCommand(command="get_current_state")
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("test_activateInstrumentStream: return from go_streaming state: %s", str(state))

        time.sleep(5)

        log.debug("test_activateInstrumentStream: calling go_observatory")
        cmd = AgentCommand(command="go_observatory")
        reply = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command="get_current_state")
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        log.debug("test_activateInstrumentStream: return from go_observatory state  %s", str(state))

        log.debug("test_activateInstrumentStream: calling reset ")
        cmd = AgentCommand(command="reset")
        reply = self._ia_client.execute_agent(cmd)
        log.debug("test_activateInstrumentStream: return from reset state:%s", str(reply.result))
        time.sleep(2)

        # -------------------------------
        # Deactivate InstrumentAgentInstance
        # -------------------------------
        self.imsclient.stop_instrument_agent_instance(instrument_agent_instance_id=instAgentInstance_id)
        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)
class UserNotificationService(BaseUserNotificationService):
    """
    A service that provides users with an API for CRUD operations on notifications.
    """

    def __init__(self, *args, **kwargs):
        self._subscribers = []
        self._schedule_ids = []
        BaseUserNotificationService.__init__(self, *args, **kwargs)

    def on_start(self):

        #---------------------------------------------------------------------------------------------------
        # Get the event Repository
        #---------------------------------------------------------------------------------------------------

        self.event_repo = self.container.instance.event_repository

        self.smtp_client = setting_up_smtp_client()

        self.ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'

        #---------------------------------------------------------------------------------------------------
        # Create an event processor
        #---------------------------------------------------------------------------------------------------

        self.event_processor = EmailEventProcessor(self.smtp_client)

        #---------------------------------------------------------------------------------------------------
        # load event originators, types, and table
        #---------------------------------------------------------------------------------------------------

        self.notifications = {}

        #---------------------------------------------------------------------------------------------------
        # Get the clients
        #---------------------------------------------------------------------------------------------------

        self.discovery = DiscoveryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.event_publisher = EventPublisher()

        self.start_time = UserNotificationService.makeEpochTime(self.__now())

    def on_quit(self):
        """
        Handles stop/terminate.

        Cleans up subscribers spawned here and cancels any timers registered with the scheduler.
        """
        for sub in self._subscribers:
            sub.stop()

        for sid in self._schedule_ids:
            try:
                self.clients.scheduler.cancel_timer(sid)
            except IonException as ex:
                log.info("Ignoring exception while cancelling schedule id (%s): %s: %s", sid, ex.__class__.__name__, ex)

        # Clean up the notification subscriptions' subscribers created in EmailEventProcessor object
        self.event_processor.cleanup()

        super(UserNotificationService, self).on_quit()

    def __now(self):
        """
        This method defines what the UNS uses as its "current" time
        """
        return datetime.utcnow()

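    # Illustrative sketch only (an assumption, not this service's actual implementation):
    # makeEpochTime(), used throughout this class, is expected to convert a datetime such as the
    # one returned by __now() into seconds since the UNIX epoch, roughly:
    #
    #   import calendar
    #
    #   @staticmethod
    #   def makeEpochTime(date_time):
    #       # treat the datetime as UTC and convert to integer epoch seconds
    #       return calendar.timegm(date_time.timetuple())
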
    def set_process_batch_key(self, process_batch_key = ''):
        """
        This method allows an operator to set the process_batch_key, a string.
        Once this method is used by the operator, the UNS will start listening for timer events
        published by the scheduler with origin = process_batch_key.

        @param process_batch_key str
        """

        def process(event_msg, headers):
            assert event_msg.origin == process_batch_key

            self.end_time = UserNotificationService.makeEpochTime(self.__now())

            # run the process_batch() method
            self.process_batch(start_time=self.start_time, end_time=self.end_time)
            self.start_time = self.end_time

        # The subscriber for the batch processing.
        # To trigger the batch notification, have the scheduler create a timer with
        # event_origin = process_batch_key.
        self.batch_processing_subscriber = EventSubscriber(
            event_type="ResourceEvent",
            origin=process_batch_key,
            callback=process
        )
        self.batch_processing_subscriber.start()
        self._subscribers.append(self.batch_processing_subscriber)

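    # Hedged example (hypothetical caller code, not part of this service): after an operator
    # calls set_process_batch_key('batch_key'), publishing an event with that origin triggers a
    # batch run, e.g.
    #
    #   EventPublisher().publish_event(event_type="ResourceEvent", origin='batch_key')
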
    def create_notification(self, notification=None, user_id=''):
        """
        Persists the provided NotificationRequest object for the specified origin id and
        associates the Notification resource with the user_id string. The returned id is the
        internal id by which the NotificationRequest will be identified in the data store.

        @param notification        NotificationRequest
        @param user_id             str
        @retval notification_id    str
        @throws BadRequest    if object passed has _id or _rev attribute

        """

        if not user_id:
            raise BadRequest("User id not provided.")

        #---------------------------------------------------------------------------------------------------
        # Persist Notification object as a resource if it has already not been persisted
        #---------------------------------------------------------------------------------------------------

        # if the notification has already been registered, simply use the old id

        existing_id = self._notification_in_notifications(notification, self.notifications)

        if existing_id:
            log.debug("Notification object has already been created in the resource registry; reusing the existing id.")
            notification_id = existing_id
        else:

            # since the notification has not been registered yet, register it and get the id
            notification.temporal_bounds = TemporalBounds()
            notification.temporal_bounds.start_datetime = self.makeEpochTime(self.__now())
            notification.temporal_bounds.end_datetime = ''

            notification_id, _ = self.clients.resource_registry.create(notification)
            self.notifications[notification_id] = notification

            # Link the user and the notification with a hasNotification association
            self.clients.resource_registry.create_association(user_id, PRED.hasNotification, notification_id)

        #-------------------------------------------------------------------------------------------------------------------
        # read the registered notification request object because this has an _id and is more useful
        #-------------------------------------------------------------------------------------------------------------------

        notification = self.clients.resource_registry.read(notification_id)

        #-----------------------------------------------------------------------------------------------------------
        # Create an event processor for user. This sets up callbacks etc.
        # As a side effect this updates the UserInfo object and also the user info and reverse user info dictionaries.
        #-----------------------------------------------------------------------------------------------------------

        user = self.event_processor.add_notification_for_user(notification_request=notification, user_id=user_id)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.debug("(create notification) Publishing ReloadUserInfoEvent for notification_id: %s", notification_id)

        self.event_publisher.publish_event(event_type="ReloadUserInfoEvent",
            origin="UserNotificationService",
            description="A notification has been created.",
            notification_id=notification_id)

        return notification_id

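    # Hedged usage sketch (illustrative only; the exact NotificationRequest fields are an
    # assumption, not taken from this file): a caller would typically build the request object
    # and hand it to this method, e.g.
    #
    #   notification = IonObject(RT.NotificationRequest,
    #                            name='wave_height_alert',
    #                            origin=data_product_id,          # hypothetical origin resource
    #                            event_type='ResourceLifecycleEvent')
    #   notification_id = uns_client.create_notification(notification=notification, user_id=user_id)
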
    def update_notification(self, notification=None, user_id = ''):
        """Updates the provided NotificationRequest object.  Throws NotFound exception if
        an existing version of NotificationRequest is not found.  Throws Conflict if
        the provided NotificationRequest object is not based on the latest persisted
        version of the object.

        @param notification     NotificationRequest
        @throws BadRequest      if object does not have _id or _rev attribute
        @throws NotFound        object with specified id does not exist
        @throws Conflict        object not based on latest persisted object version
        """

        #-------------------------------------------------------------------------------------------------------------------
        # Get the old notification
        #-------------------------------------------------------------------------------------------------------------------

        old_notification = self.clients.resource_registry.read(notification._id)

        #-------------------------------------------------------------------------------------------------------------------
        # Update the notification in the notifications dict
        #-------------------------------------------------------------------------------------------------------------------


        self._update_notification_in_notifications_dict(new_notification=notification,
                                                        old_notification=old_notification,
                                                        notifications=self.notifications)
        #-------------------------------------------------------------------------------------------------------------------
        # Update the notification in the registry
        #-------------------------------------------------------------------------------------------------------------------

        self.clients.resource_registry.update(notification)

        #-------------------------------------------------------------------------------------------------------------------
        # Read the notification object back so that we have the newly persisted notification request object
        #-------------------------------------------------------------------------------------------------------------------

        notification_id = notification._id
        notification = self.clients.resource_registry.read(notification_id)

        #------------------------------------------------------------------------------------
        # Update the UserInfo object
        #------------------------------------------------------------------------------------

        user = self.update_user_info_object(user_id, notification, old_notification)

        #------------------------------------------------------------------------------------
        # Update the user_info dictionary maintained by UNS
        #------------------------------------------------------------------------------------

        self.update_user_info_dictionary(user_id, notification, old_notification)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked up by notification workers so that they can update their user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.info("(update notification) Publishing ReloadUserInfoEvent for updated notification")

        self.event_publisher.publish_event( event_type= "ReloadUserInfoEvent",
            origin="UserNotificationService",
            description= "A notification has been updated."
        )

    def read_notification(self, notification_id=''):
        """Returns the NotificationRequest object for the specified notification id.
        Throws exception if id does not match any persisted NotificationRequest
        objects.

        @param notification_id    str
        @retval notification    NotificationRequest
        @throws NotFound    object with specified id does not exist
        """
        notification = self.clients.resource_registry.read(notification_id)

        return notification

    def delete_notification(self, notification_id=''):
        """For now, permanently deletes NotificationRequest object with the specified
        id. Throws exception if id does not match any persisted NotificationRequest.

        @param notification_id    str
        @throws NotFound    object with specified id does not exist
        """

        #-------------------------------------------------------------------------------------------------------------------
        # Stop the event subscriber for the notification
        #-------------------------------------------------------------------------------------------------------------------
        notification_request = self.clients.resource_registry.read(notification_id)
        old_notification = notification_request

        self.event_processor.stop_notification_subscriber(notification_request=notification_request)

        #-------------------------------------------------------------------------------------------------------------------
        # Update the resource registry
        #-------------------------------------------------------------------------------------------------------------------

        notification_request.temporal_bounds.end_datetime = self.makeEpochTime(self.__now())

        self.clients.resource_registry.update(notification_request)

        #-------------------------------------------------------------------------------------------------------------------
        # Update the user info dictionaries
        #-------------------------------------------------------------------------------------------------------------------

        for user_id in self.event_processor.user_info.iterkeys():
            self.update_user_info_dictionary(user_id, notification_request, old_notification)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked up by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.info("(delete notification) Publishing ReloadUserInfoEvent for notification_id: %s" % notification_id)

        self.event_publisher.publish_event( event_type= "ReloadUserInfoEvent",
            origin="UserNotificationService",
            description= "A notification has been deleted.",
            notification_id = notification_id)

    def delete_notification_from_user_info(self, notification_id):
        """
        Helper method to delete the notification from the user_info dictionary

        @param notification_id str
        """

        user_ids, assocs = self.clients.resource_registry.find_subjects(object=notification_id, predicate=PRED.hasNotification, id_only=True)

        for assoc in assocs:
            self.clients.resource_registry.delete_association(assoc)

        for user_id in user_ids:

            value = self.event_processor.user_info[user_id]

            # iterate over a copy of the list since the matching notification is removed inside the loop
            for notif in value['notifications'][:]:
                if notification_id == notif._id:
                    # remove the notification
                    value['notifications'].remove(notif)
                    # remove the notification_subscription
                    self.event_processor.user_info[user_id]['notification_subscriptions'].pop(notification_id)

        self.event_processor.reverse_user_info = calculate_reverse_user_info(self.event_processor.user_info)

    def find_events(self, origin='', type='', min_datetime=0, max_datetime=0, limit= -1, descending=False):
        """
        This method leverages couchdb view and simple filters. It does not use elastic search.

        Returns a list of events that match the specified search criteria. Will throw a NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param type           str
        @param min_datetime   int  seconds
        @param max_datetime   int  seconds
        @param limit          int         (integer limiting the number of results (0 means unlimited))
        @param descending     boolean     (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        """
        datastore = self.container.datastore_manager.get_datastore('events')


        # The reason for the if-else below is that couchdb query_view does not support passing in None or -1 for limit.
        # If the operator does not want to limit the search results in find_events and therefore does not provide a
        # limit, the key has to be omitted from the opts dictionary that is passed into the query_view() method.
        # Passing None or a negative number for the limit through opts results in a ServerError, so we cannot do that.
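        # Aside on the composite keys below (based on CouchDB's documented collation order; a hedged note, not
        # something from the original code): the view is keyed on [origin, type, timestamp], and 0 / {} act as
        # low / high sentinels because numbers collate before strings and objects collate after them, so the
        # range [origin, 0, 0] .. [origin, {}, {}] covers every type and timestamp for that origin when those
        # filters are left unset.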
        if limit > -1:
            opts = dict(
                start_key = [origin, type or 0, min_datetime or 0],
                end_key   = [origin, type or {}, max_datetime or {}],
                descending = descending,
                limit = limit,
                include_docs = True
            )

        else:
            opts = dict(
                start_key = [origin, type or 0, min_datetime or 0],
                end_key   = [origin, type or {}, max_datetime or {}],
                descending = descending,
                include_docs = True
            )
        if descending:
            # a descending couchdb view is read from the high key to the low key, so swap the range bounds
            opts['start_key'], opts['end_key'] = opts['end_key'], opts['start_key']

        results = datastore.query_view('event/by_origintype',opts=opts)

        events = []
        for res in results:
            event_obj = res['doc']
            events.append(event_obj)

        log.debug("(find_events) UNS found the following relevant events: %s" % events)

        if -1 < limit < len(events):
            return events[:limit]

        return events


    #todo Uses Elastic Search. Later extend this to a larger search criteria
    def find_events_extended(self, origin='', type='', min_time= 0, max_time=0, limit=-1, descending=False):
        """Uses Elastic Search. Returns a list of events that match the specified search criteria. Will throw a not NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param type           str
        @param min_time   int seconds
        @param max_time   int seconds
        @param limit          int         (integer limiting the number of results (0 means unlimited))
        @param descending     boolean     (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        @throws NotFound    object with specified parameters does not exist
        """

        if min_time and max_time:
            search_time = "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (min_time, max_time)
        else:
            search_time = 'search "ts_created" is "*" from "events_index"'

        if origin:
            search_origin = 'search "origin" is "%s" from "events_index"' % origin
        else:
            search_origin = 'search "origin" is "*" from "events_index"'

        if type:
            search_type = 'search "type_" is "%s" from "events_index"' % type
        else:
            search_type = 'search "type_" is "*" from "events_index"'

        search_string = search_time + ' and ' + search_origin + ' and ' + search_type

        # get the list of ids corresponding to the events
        ret_vals = self.discovery.parse(search_string)
        log.debug("(find_events_extended) Discovery search returned the following event ids: %s" % ret_vals)

        events = []
        for event_id in ret_vals:
            datastore = self.container.datastore_manager.get_datastore('events')
            event_obj = datastore.read(event_id)
            events.append(event_obj)

        log.debug("(find_events_extended) UNS found the following relevant events: %s" % events)

        # guard against limit exceeding the number of hits (the original indexed loop would raise IndexError)
        if -1 < limit < len(events):
            return events[:limit]

        #todo implement time ordering: ascending or descending

        return events

    def publish_event(self, event=None):
        """
        Publish a general event through the UNS, using the event's own origin and type

        @param event Event
        """

        self.event_publisher._publish_event( event_msg = event,
            origin=event.origin,
            event_type = event.type_)
        log.info("The publish_event() method of UNS was used to publish an event.")

    def get_recent_events(self, resource_id='', limit = 100):
        """
        Get recent events

        @param resource_id str
        @param limit int

        @retval events list of Event objects
        """

        now = self.makeEpochTime(datetime.utcnow())
        events = self.find_events(origin=resource_id,limit=limit, max_datetime=now, descending=True)

        ret = IonObject(OT.ComputedListValue)
        if events:
            ret.value = events
            ret.status = ComputedValueAvailability.PROVIDED
        else:
            ret.status = ComputedValueAvailability.NOTAVAILABLE

        return ret

    def get_user_notifications(self, user_id=''):
        """
        Get the notification request objects that are subscribed to by the user

        @param user_id str

        @retval notifications list of NotificationRequest objects
        """

        if self.event_processor.user_info.has_key(user_id):
            notifications = self.event_processor.user_info[user_id]['notifications']
            ret = IonObject(OT.ComputedListValue)

            if notifications:
                ret.value = notifications
                ret.status = ComputedValueAvailability.PROVIDED
            else:
                ret.status = ComputedValueAvailability.NOTAVAILABLE
            return ret
        else:
            return None

    def create_worker(self, number_of_workers=1):
        """
        Creates notification workers

        @param number_of_workers int
        @retval pids list

        """

        pids = []

        for n in xrange(number_of_workers):

            process_definition = ProcessDefinition( name='notification_worker_%s' % n)

            process_definition.executable = {
                'module': 'ion.processes.data.transforms.notification_worker',
                'class':'NotificationWorker'
            }
            process_definition_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

            # ------------------------------------------------------------------------------------
            # Process Spawning
            # ------------------------------------------------------------------------------------

            pid2 = self.process_dispatcher.create_process(process_definition_id)
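            # create_process() (as used here) only obtains a process id up front; the worker is actually
            # launched by schedule_process() below, which is handed this same id.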

            #@todo put in a configuration
            configuration = {}
            configuration['process'] = dict({
                'name': 'notification_worker_%s' % n,
                'type':'simple'
            })

            pid  = self.process_dispatcher.schedule_process(
                process_definition_id,
                configuration = configuration,
                process_id=pid2
            )

            pids.append(pid)

        return pids

    @staticmethod
    def makeEpochTime(date_time):
        """
        Provides the seconds since epoch given a python datetime object.

        @param date_time Python datetime object
        @retval seconds_since_epoch int
        """
        date_time = date_time.isoformat().split('.')[0].replace('T',' ')
        #'2009-07-04 18:30:47'
        pattern = '%Y-%m-%d %H:%M:%S'
        seconds_since_epoch = int(time.mktime(time.strptime(date_time, pattern)))

        return seconds_since_epoch
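
    # A hedged aside: time.mktime() interprets the struct_time above as *local* time, while callers such as
    # get_recent_events() pass in datetime.utcnow(). If a strictly UTC-based conversion were wanted, a sketch
    # using only the standard library could look like:
    #
    #   import calendar
    #   seconds_since_epoch = calendar.timegm(date_time.utctimetuple())
    #
    # The string round-trip above also drops sub-second precision, which is fine for whole-second epoch times.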


    def process_batch(self, start_time = 0, end_time = 0):
        """
        This method is launched when a process_batch event is received. The user info dictionary maintained
        by the User Notification Service is used to query the event repository for all events for a particular
        user that have occurred in a provided time interval, and then an email is sent to the user containing
        the digest of all the events.

        @param start_time int
        @param end_time int
        """

        if end_time <= start_time:
            return

        for user_id, value in self.event_processor.user_info.iteritems():

            notifications = value['notifications']

            events_for_message = []

            search_time = "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (start_time, end_time)

            for notification in notifications:

                # If the notification request has expired, then do not use it in the search
                if notification.temporal_bounds.end_datetime:
                    continue

                if notification.origin:
                    search_origin = 'search "origin" is "%s" from "events_index"' % notification.origin
                else:
                    search_origin = 'search "origin" is "*" from "events_index"'

                if notification.origin_type:
                    search_origin_type= 'search "origin_type" is "%s" from "events_index"' % notification.origin_type
                else:
                    search_origin_type= 'search "origin_type" is "*" from "events_index"'

                if notification.event_type:
                    search_event_type = 'search "type_" is "%s" from "events_index"' % notification.event_type
                else:
                    search_event_type = 'search "type_" is "*" from "events_index"'

                search_string = search_time + ' and ' + search_origin + ' and ' + search_origin_type + ' and ' + search_event_type

                # get the list of ids corresponding to the events
                ret_vals = self.discovery.parse(search_string)

                for event_id in ret_vals:
                    datastore = self.container.datastore_manager.get_datastore('events')
                    event_obj = datastore.read(event_id)
                    events_for_message.append(event_obj)

            log.debug("Found following events of interest to user, %s: %s" % (user_id, events_for_message))

            # send a notification email to each user using a _send_email() method
            if events_for_message:
                self.format_and_send_email(events_for_message, user_id)

    def format_and_send_email(self, events_for_message, user_id):
        """
        Format the message for a particular user containing information about the events they are to be notified about

        @param events_for_message list
        @param user_id str
        """

        message = str(events_for_message)
        log.debug("The user, %s, will get the following events in his batch notification email: %s" % (user_id, message))

        msg_body = ''
        count = 1
        for event in events_for_message:
            # build the email from the event content
            msg_body += string.join(("\r\n",
                                     "Event %s: %s" %  (count, event),
                                     "",
                                     "Originator: %s" %  event.origin,
                                     "",
                                     "Description: %s" % event.description ,
                                     "",
                                     "Event time stamp: %s" %  event.ts_created,
                                     "\r\n",
                                     "------------------------"
                                     "\r\n"))
            count += 1

        msg_body += "You received this notification from ION because you asked to be " +\
                    "notified about this event from this source. " +\
                    "To modify or remove notifications about this event, " +\
                    "please access My Notifications Settings in the ION Web UI. " +\
                    "Do not reply to this email.  This email address is not monitored " +\
                    "and the emails will not be read. \r\n "


        log.debug("The email has the following message body: %s" % msg_body)

        msg_subject = "(SysName: " + get_sys_name() + ") ION event "

        self.send_batch_email(  msg_body = msg_body,
            msg_subject = msg_subject,
            msg_recipient=self.event_processor.user_info[user_id]['user_contact'].email,
            smtp_client=self.smtp_client )

    def send_batch_email(self, msg_body, msg_subject, msg_recipient, smtp_client):
        """
        Send the email

        @param msg_body str
        @param msg_subject str
        @param msg_recipient str
        @param smtp_client object
        """

        msg = MIMEText(msg_body)
        msg['Subject'] = msg_subject
        msg['From'] = self.ION_NOTIFICATION_EMAIL_ADDRESS
        msg['To'] = msg_recipient
        log.debug("EventProcessor.subscription_callback(): sending email to %s"\
                  %msg_recipient)

        smtp_sender = CFG.get_safe('server.smtp.sender')

        smtp_client.sendmail(smtp_sender, msg_recipient, msg.as_string())
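
    # A minimal sketch of what the smtp_client passed in above might be, assuming a plain smtplib connection is
    # acceptable (the real client is configured elsewhere in the service, e.g. from CFG or a fake client in tests;
    # the 'server.smtp.host' key and 'localhost' default are assumptions here):
    #
    #   import smtplib
    #   smtp_client = smtplib.SMTP(CFG.get_safe('server.smtp.host', 'localhost'))
    #   ...
    #   smtp_client.quit()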

    def update_user_info_object(self, user_id, new_notification, old_notification):
        """
        Update the UserInfo object. If the passed-in parameter, old_notification, is None, there is no old notification to remove

        @param user_id str
        @param new_notification NotificationRequest
        @param old_notification NotificationRequest
        """

        #------------------------------------------------------------------------------------
        # read the user
        #------------------------------------------------------------------------------------

        user = self.clients.resource_registry.read(user_id)

        if not user:
            raise BadRequest("No user with the provided user_id: %s" % user_id)

        notifications = []
        for item in user.variables:
            if item['name'] == 'notifications':
                if old_notification and old_notification in item['value']:

                    notifications = item['value']
                    # remove the old notification
                    notifications.remove(old_notification)

                # put in the new notification
                notifications.append(new_notification)

                item['value'] = notifications

                break

        #------------------------------------------------------------------------------------
        # update the resource registry
        #------------------------------------------------------------------------------------

        self.clients.resource_registry.update(user)

        return user

    def update_user_info_dictionary(self, user_id, new_notification, old_notification):
        """
        Update the user_info dictionary maintained by UNS, replacing the user's old notification with the new one

        @param user_id str
        @param new_notification NotificationRequest
        @param old_notification NotificationRequest
        """

        #------------------------------------------------------------------------------------
        # Remove the old notifications
        #------------------------------------------------------------------------------------

        if old_notification in self.event_processor.user_info[user_id]['notifications']:

            # remove from notifications list
            self.event_processor.user_info[user_id]['notifications'].remove(old_notification)

            #------------------------------------------------------------------------------------
            # update the notification subscription object
            #------------------------------------------------------------------------------------

            # get the old notification_subscription
            notification_subscription = self.event_processor.user_info[user_id]['notification_subscriptions'].pop(old_notification._id)

            # update that old notification subscription
            notification_subscription._res_obj = new_notification

            # feed the updated notification subscription back into the user info dictionary
            self.event_processor.user_info[user_id]['notification_subscriptions'][old_notification._id] = notification_subscription

        #------------------------------------------------------------------------------------
        # find the already existing notifications for the user
        #------------------------------------------------------------------------------------

        notifications = self.event_processor.user_info[user_id]['notifications']
        notifications.append(new_notification)

        #------------------------------------------------------------------------------------
        # update the user info - contact information, notifications
        #------------------------------------------------------------------------------------
        user = self.clients.resource_registry.read(user_id)

        self.event_processor.user_info[user_id]['user_contact'] = user.contact
        self.event_processor.user_info[user_id]['notifications'] = notifications

        self.event_processor.reverse_user_info = calculate_reverse_user_info(self.event_processor.user_info)

    def get_subscriptions(self, resource_id='', include_nonactive=False):
        """
        This method is used to get the subscriptions to a data product. It returns a list of NotificationRequest
        objects whose origin is set to this data product, so that every user who asked to listen to events with
        this data product as the origin, together with their subscriptions, can be found.

        @param resource_id
        @param include_nonactive
        @return notification_requests []

        """

        search_origin = 'search "origin" is "%s" from "resources_index"' % resource_id
        ret_vals = self.discovery.parse(search_origin)
        log.debug("Returned results: %s" % ret_vals)

        notifications_all = set()
        notifications_active = set()

        for item in ret_vals:

            if item['_type'] == 'NotificationRequest':
                notif = self.clients.resource_registry.read(item['_id'])

                if include_nonactive:
                    # Add active or retired notification
                    notifications_all.add(notif)

                elif notif.temporal_bounds.end_datetime == '':
                    # Add the active notification
                    notifications_active.add(notif)

        if include_nonactive:
            return list(notifications_all)
        else:
            return list(notifications_active)


    def _notification_in_notifications(self, notification = None, notifications = None):
        """
        Helper method that checks whether an equivalent notification (same name, origin, origin_type and
        event_type) is already present in the notifications dict. Returns the matching id or None.
        """
        for id, notif in notifications.iteritems():
            if notif.name == notification.name and \
            notif.origin == notification.origin and \
            notif.origin_type == notification.origin_type and \
            notif.event_type == notification.event_type:
                return id
        return None

    def _update_notification_in_notifications_dict(self, new_notification = None, old_notification = None, notifications = None ):
        """
        Helper method that replaces the entry matching old_notification in the notifications dict with new_notification.
        """
        for id, notif in notifications.iteritems():
            if notif.name == old_notification.name and\
               notif.origin == old_notification.origin and\
               notif.origin_type == old_notification.origin_type and\
               notif.event_type == old_notification.event_type:
                notifications.pop(id)
                notifications[id] = new_notification
                break
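

# A minimal usage sketch (not part of the service code above): how a caller might exercise the operations whose
# signatures appear in this service, assuming `uns` is an already-constructed UserNotificationServiceClient
# (instantiated the same way as in the test case below) and that the client mirrors the service operations.
def example_uns_usage(uns, notification_id, resource_id):
    # Read an existing notification request back from the resource registry.
    notification = uns.read_notification(notification_id=notification_id)

    # Fetch up to 10 of the most recent events whose origin is the given resource.
    recent_events = uns.get_recent_events(resource_id=resource_id, limit=10)

    # Spin up two notification worker processes via the process dispatcher.
    worker_pids = uns.create_worker(number_of_workers=2)

    # Retire the notification (the service marks its temporal bounds rather than hard-deleting it).
    uns.delete_notification(notification_id=notification_id)

    return notification, recent_events, worker_pids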
Example #59
0
class TestCoverageModelRecoveryInt(IonIntegrationTestCase):
    def setUp(self):
        # Start container
        #print 'instantiating container'
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.dpsc_cli = DataProductManagementServiceClient()
        self.rrclient = ResourceRegistryServiceClient()
        self.damsclient = DataAcquisitionManagementServiceClient()
        self.pubsubcli = PubsubManagementServiceClient()
        self.ingestclient = IngestionManagementServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.unsc = UserNotificationServiceClient()
        self.data_retriever = DataRetrieverServiceClient()

        #------------------------------------------
        # Create the environment
        #------------------------------------------

        datastore_name = CACHE_DATASTORE_NAME
        self.db = self.container.datastore_manager.get_datastore(
            datastore_name)
        self.stream_def_id = self.pubsubcli.create_stream_definition(
            name='SBE37_CDM')

        self.process_definitions = {}
        ingestion_worker_definition = ProcessDefinition(
            name='ingestion worker')
        ingestion_worker_definition.executable = {
            'module':
            'ion.processes.data.ingestion.science_granule_ingestion_worker',
            'class': 'ScienceGranuleIngestionWorker'
        }
        process_definition_id = self.process_dispatcher.create_process_definition(
            process_definition=ingestion_worker_definition)
        self.process_definitions['ingestion_worker'] = process_definition_id

        self.pids = []
        self.exchange_points = []
        self.exchange_names = []

        #------------------------------------------------------------------------------------------------
        # First launch the ingestors
        #------------------------------------------------------------------------------------------------
        self.exchange_space = 'science_granule_ingestion'
        self.exchange_point = 'science_data'
        config = DotDict()
        config.process.datastore_name = 'datasets'
        config.process.queue_name = self.exchange_space

        self.exchange_names.append(self.exchange_space)
        self.exchange_points.append(self.exchange_point)

        pid = self.process_dispatcher.schedule_process(
            self.process_definitions['ingestion_worker'], configuration=config)
        log.debug("the ingestion worker process id: %s", pid)
        self.pids.append(pid)

    def get_datastore(self, dataset_id):
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(
            datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def load_data_product(self):
        dset_i = 0
        dataset_management = DatasetManagementServiceClient()
        pubsub_management = PubsubManagementServiceClient()
        data_product_management = DataProductManagementServiceClient()
        resource_registry = self.container.instance.resource_registry

        tdom, sdom = time_series_domain()
        tdom = tdom.dump()
        sdom = sdom.dump()
        dp_obj = DataProduct(name='instrument_data_product_%i' % dset_i,
                             description='ctd stream test',
                             processing_level_code='Parsed_Canonical',
                             temporal_domain=tdom,
                             spatial_domain=sdom)
        pdict_id = dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        stream_def_id = pubsub_management.create_stream_definition(
            name='parsed', parameter_dictionary_id=pdict_id)
        self.addCleanup(pubsub_management.delete_stream_definition,
                        stream_def_id)
        data_product_id = data_product_management.create_data_product(
            data_product=dp_obj, stream_definition_id=stream_def_id)
        self.addCleanup(data_product_management.delete_data_product,
                        data_product_id)
        data_product_management.activate_data_product_persistence(
            data_product_id)
        self.addCleanup(
            data_product_management.suspend_data_product_persistence,
            data_product_id)

        stream_ids, assocs = resource_registry.find_objects(
            subject=data_product_id, predicate='hasStream', id_only=True)
        stream_id = stream_ids[0]
        route = pubsub_management.read_stream_route(stream_id)

        dataset_ids, assocs = resource_registry.find_objects(
            subject=data_product_id, predicate='hasDataset', id_only=True)
        dataset_id = dataset_ids[0]

        return data_product_id, stream_id, route, stream_def_id, dataset_id

    def populate_dataset(self, dataset_id, hours):
        import time
        cov = DatasetManagementService._get_simplex_coverage(
            dataset_id=dataset_id, mode='w')
        # rcov = vcov.reference_coverage
        # cov = AbstractCoverage.load(rcov.persistence_dir, mode='a')
        dt = hours * 3600

        cov.insert_timesteps(dt)
        now = time.time()
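        # 2208988800 is the number of seconds between the NTP epoch (1900-01-01) and the Unix epoch
        # (1970-01-01), so the time axis below is expressed as NTP-style seconds since 1900.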
        cov.set_parameter_values('time', np.arange(now - dt, now) + 2208988800)
        cov.set_parameter_values('temp',
                                 np.sin(np.arange(dt) * 2 * np.pi / 60))
        cov.set_parameter_values('lat', np.zeros(dt))
        cov.set_parameter_values('lon', np.zeros(dt))

        cov.close()
        gevent.sleep(1)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv(
        'CEI_LAUNCH_TEST', False
    ), 'Host requires file-system access to coverage files, CEI mode does not support.'
                     )
    @unittest.skipIf(not_have_h5stat,
                     'h5stat is not accessible in current PATH')
    @unittest.skipIf(not not_have_h5stat and not h5stat_correct_version,
                     'HDF is the incorrect version: %s' % version_str)
    def test_coverage_recovery(self):
        # Create the coverage
        dp_id, stream_id, route, stream_def_id, dataset_id = self.load_data_product(
        )
        self.populate_dataset(dataset_id, 36)
        dset = self.dataset_management.read_dataset(dataset_id)
        dprod = self.dpsc_cli.read_data_product(dp_id)
        cov = DatasetManagementService._get_simplex_coverage(dataset_id)
        cov_pth = cov.persistence_dir
        cov.close()

        num_params = len(cov.list_parameters())
        num_bricks = 8
        total = num_params + num_bricks + 1
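        # i.e. one file per parameter, one per brick, plus the single master file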

        # Analyze the valid coverage
        dr = CoverageDoctor(cov_pth, dprod, dset)

        dr_result = dr.analyze()

        # TODO: Turn these into meaningful Asserts
        self.assertEqual(len(dr_result.get_brick_corruptions()), 0)
        self.assertEqual(len(dr_result.get_brick_size_ratios()), num_bricks)
        self.assertEqual(len(dr_result.get_corruptions()), 0)
        self.assertEqual(len(dr_result.get_master_corruption()), 0)
        self.assertEqual(len(dr_result.get_param_corruptions()), 0)
        self.assertEqual(len(dr_result.get_param_size_ratios()), num_params)
        self.assertEqual(len(dr_result.get_master_size_ratio()), 1)
        self.assertEqual(len(dr_result.get_size_ratios()), total)
        self.assertEqual(dr_result.master_status[1], 'NORMAL')

        self.assertFalse(dr_result.is_corrupt)
        self.assertEqual(dr_result.param_file_count, num_params)
        self.assertEqual(dr_result.brick_file_count, num_bricks)
        self.assertEqual(dr_result.total_file_count, total)

        # Get original values (mock)
        orig_cov = AbstractCoverage.load(cov_pth)
        time_vals_orig = orig_cov.get_time_values()
        orig_cov.close()

        # Corrupt the Master File
        fo = open(cov._persistence_layer.master_manager.file_path, "wb")
        fo.write('Junk')
        fo.close()
        # Corrupt the lon Parameter file
        fo = open(cov._persistence_layer.parameter_metadata['lon'].file_path,
                  "wb")
        fo.write('Junk')
        fo.close()

        corrupt_res = dr.analyze(reanalyze=True)
        self.assertTrue(corrupt_res.is_corrupt)

        # Repair the metadata files
        dr.repair(reanalyze=True)

        fixed_res = dr.analyze(reanalyze=True)
        self.assertFalse(fixed_res.is_corrupt)

        fixed_cov = AbstractCoverage.load(cov_pth)
        self.assertIsInstance(fixed_cov, AbstractCoverage)

        time_vals_fixed = fixed_cov.get_time_values()
        fixed_cov.close()
        self.assertTrue(np.array_equiv(time_vals_orig, time_vals_fixed))
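

# A hedged sketch (not from the original test module) of how the module-level flags referenced by the skipIf
# decorators above -- not_have_h5stat, h5stat_correct_version and version_str -- might be derived. The real
# definitions live elsewhere in the module, and both the 'h5stat -V' flag and its output format are assumptions
# here, as is the required '1.8' version prefix.
import re
import subprocess


def _probe_h5stat(required_prefix='1.8'):
    """Return (not_have_h5stat, h5stat_correct_version, version_str) by probing the h5stat binary on PATH."""
    try:
        output = subprocess.check_output(['h5stat', '-V'], stderr=subprocess.STDOUT)
    except (OSError, subprocess.CalledProcessError):
        # h5stat is missing from PATH (or refused to run): report it as unavailable
        return True, False, 'unknown'
    match = re.search(r'(\d+\.\d+(?:\.\d+)?)', output)
    version = match.group(1) if match else 'unknown'
    return False, version.startswith(required_prefix), version

# e.g.  not_have_h5stat, h5stat_correct_version, version_str = _probe_h5stat()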