class BootstrapQCPostProcessor(BootstrapPlugin):
    '''
    Sets up a single QC post-processing worker and starts a Scheduler Service
    interval timer that fires every 24 hours.
    '''

    def on_initial_bootstrap(self, process, config, **kwargs):
        if self.process_exists(process, 'qc_post_processor'):
            # Short-circuit the bootstrap so that no more than one worker is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)

        interval_key = uuid4().hex # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(name='qc_post_processor',
            executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)


        timer_id = self.scheduler_service.create_interval_timer(start_time=time.time(),
                end_time=-1, #Run FOREVER
                interval=3600*24,
                event_origin=interval_key)

    def process_exists(self, process, name):
        proc_ids, meta = process.container.resource_registry.find_resources(restype=RT.Process, id_only=True)
        return any([name in p['name'] for p in meta if p['name']])
    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        preload_ion_params(self.container)
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process=VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()
    def on_initial_bootstrap(self, process, config, **kwargs):

        if os.environ.get('PYCC_MODE'):
            # This environment is an ion integration test
            log.info('PYCC_MODE: skipping qc_post_processor launch')
            return
        if self.process_exists(process, 'qc_post_processor'):
            # Short-circuit the bootstrap so that no more than one worker is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)
        self.run_interval = CFG.get_safe('service.qc_processing.run_interval', 24)

        interval_key = uuid4().hex # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(name='qc_post_processor',
            executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)


        timer_id = self.scheduler_service.create_interval_timer(start_time=str(time.time()),
                end_time='-1', #Run FOREVER
                interval=3600*self.run_interval,
                event_origin=interval_key)
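# Hedged illustration of the CFG lookup above: get_safe walks a dotted key path and falls
# back to the supplied default when the path is absent, so run_interval stays at 24 hours
# unless the deployment configuration overrides service.qc_processing.run_interval.
from pyon.util.containers import DotDict

cfg = DotDict()
cfg.service.qc_processing.run_interval = 6                              # override: every 6 hours
assert cfg.get_safe('service.qc_processing.run_interval', 24) == 6
assert cfg.get_safe('service.qc_processing.not_configured', 24) == 24   # falls back to the default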
Example #4
    def on_initial_bootstrap(self, process, config, **kwargs):
        self.pds_client = ProcessDispatcherServiceProcessClient(process=process)
        self.resource_registry = ResourceRegistryServiceProcessClient(process=process)
        self.ingestion_worker(process, config)
        self.replay_defs(process, config)
        self.notification_worker(process, config)
        self.registration_worker(process, config)
        self.pydap_server(process, config)
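# Hedged sketch of one of the helpers invoked above (the real registration_worker lives in
# the bootstrap plugin and may differ): it follows the same pattern as the ingestion helper
# shown later -- build a ProcessDefinition, register it, then schedule it. The module path,
# class name and config keys are assumptions.
def registration_worker_sketch(pds_client, config):
    process_definition = ProcessDefinition(
        name='registration_worker',
        description='Worker process for dataset registration')
    process_definition.executable['module'] = config.get_safe(
        'bootstrap.processes.registration.module',
        'ion.processes.data.registration.registration_process')   # assumed default
    process_definition.executable['class'] = config.get_safe(
        'bootstrap.processes.registration.class', 'RegistrationProcess')   # assumed default
    procdef_id = pds_client.create_process_definition(process_definition=process_definition)
    return pds_client.schedule_process(process_definition_id=procdef_id)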
Example #5
class BootstrapQCPostProcessor(BootstrapPlugin):
    '''
    Sets up a single QC post-processing worker and starts a Scheduler Service
    interval timer that fires every run_interval hours (24 by default).
    '''
    def on_initial_bootstrap(self, process, config, **kwargs):

        if os.environ.get('PYCC_MODE'):
            # This environment is an ion integration test
            log.info('PYCC_MODE: skipping qc_post_processor launch')
            return
        if self.process_exists(process, 'qc_post_processor'):
            # Short-circuit the bootstrap so that no more than one worker is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(
            process=process)
        self.run_interval = CFG.get_safe('service.qc_processing.run_interval',
                                         24)

        interval_key = uuid4().hex  # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(
            name='qc_post_processor',
            executable={
                'module': 'ion.processes.data.transforms.qc_post_processing',
                'class': 'QCPostProcessing'
            })
        process_definition_id = self.process_dispatcher.create_process_definition(
            process_definition)

        process_id = self.process_dispatcher.create_process(
            process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id,
                                                 process_id=process_id,
                                                 configuration=config)

        timer_id = self.scheduler_service.create_interval_timer(
            start_time=str(time.time()),
            end_time='-1',  #Run FOREVER
            interval=3600 * self.run_interval,
            event_origin=interval_key)

    def process_exists(self, process, name):
        proc_ids, meta = process.container.resource_registry.find_resources(
            restype=RT.Process, id_only=True)
        return any([name in p['name'] for p in meta if p['name']])
    def on_initial_bootstrap(self, process, config, **kwargs):
        if self.process_exists(process, 'qc_post_processor'):
            # Short-circuit the bootstrap so that no more than one worker is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)

        interval_key = uuid4().hex # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(name='qc_post_processor',
            executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)


        timer_id = self.scheduler_service.create_interval_timer(start_time=time.time(),
                end_time=-1, #Run FOREVER
                interval=3600*24,
                event_origin=interval_key)
    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        #preload_ion_params(self.container)
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process=VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()
    def on_restart(self, process, config, **kwargs):
        pds_client = ProcessDispatcherServiceProcessClient(process=process)

        def restart_transform(transform_id):
            transform = process.container.resource_registry.read(transform_id)
            configuration = transform.configuration
            proc_def_ids, other = process.container.resource_registry.find_objects(
                subject=transform_id,
                predicate=PRED.hasProcessDefinition,
                id_only=True)

            if len(proc_def_ids) < 1:
                log.warning(
                    'Transform did not have a correct process definition.')
                return

            pid = pds_client.schedule_process(
                process_definition_id=proc_def_ids[0],
                configuration=configuration)

            transform.process_id = pid
            process.container.resource_registry.update(transform)

        restart_flag = config.get_safe('service.transform_management.restart',
                                       False)
        if restart_flag:
            transform_ids, meta = process.container.resource_registry.find_resources(
                restype=RT.Transform, id_only=True)
            for transform_id in transform_ids:
                restart_transform(transform_id)
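# Hedged note: the reschedule above only happens when the restart flag is set in the config
# handed to on_restart. A minimal way to exercise it (everything except the flag path is an
# illustrative assumption):
restart_config = DotDict()
restart_config.service.transform_management.restart = True
# plugin.on_restart(process, restart_config)   # would reschedule every RT.Transform found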
    def on_initial_bootstrap(self, process, config, **kwargs):
        self.pds_client = ProcessDispatcherServiceProcessClient(process=process)
        self.resource_registry = ResourceRegistryServiceProcessClient(process=process)
        self.ingestion_worker(process, config)
        self.replay_defs(process, config)
        self.notification_worker(process, config)
        self.registration_worker(process, config)
class BootstrapQCPostProcessor(BootstrapPlugin):
    """
    Sets up one QC Post Processing worker and initiates
    the Scheduler Service's interval every 24 hours.
    """

    def on_initial_bootstrap(self, process, config, **kwargs):

        if os.environ.get("PYCC_MODE"):
            # This environment is an ion integration test
            log.info("PYCC_MODE: skipping qc_post_processor launch")
            return
        if self.process_exists(process, "qc_post_processor"):
            # Short-circuit the bootstrap so that no more than one worker is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)
        self.run_interval = CFG.get_safe("service.qc_processing.run_interval", 24)

        interval_key = uuid4().hex  # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(
            name="qc_post_processor",
            executable={"module": "ion.processes.data.transforms.qc_post_processing", "class": "QCPostProcessing"},
        )
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)

        timer_id = self.scheduler_service.create_interval_timer(
            start_time=str(time.time()),
            end_time="-1",  # Run FOREVER
            interval=3600 * self.run_interval,
            event_origin=interval_key,
        )

    def process_exists(self, process, name):
        proc_ids, meta = process.container.resource_registry.find_resources(restype=RT.Process, id_only=True)
        return any([name in p["name"] for p in meta if p["name"]])
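# Hedged sketch (an assumption, not the actual QCPostProcessing source): one way the worker
# scheduled by the plugin could consume the scheduler's ticks. The interval_key written into
# the process configuration matches the timer's event_origin, so the worker can subscribe to
# just its own TimerEvents. Class and attribute names below are illustrative.
from pyon.event.event import EventSubscriber

class QCTimerListenerSketch(object):
    def on_start(self):
        interval_key = self.CFG.get_safe('process.interval_key')   # set by the bootstrap plugin
        self.timer_sub = EventSubscriber(event_type='TimerEvent',
                                         origin=interval_key,       # only this timer's events
                                         callback=self._on_timer)
        self.timer_sub.start()

    def _on_timer(self, event, headers):
        # One QC post-processing pass per tick; the real work is omitted here.
        pass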
class BootstrapQCPostProcessor(BootstrapPlugin):
    '''
    Sets up a single QC post-processing worker and starts a Scheduler Service
    interval timer that fires every run_interval hours (24 by default).
    '''

    def on_initial_bootstrap(self, process, config, **kwargs):
        # TODO: Temporary skip while refactoring QC work for M088
        return

        if os.environ.get('PYCC_MODE'):
            # This environment is an ion integration test
            log.info('PYCC_MODE: skipping qc_post_processor launch')
            return
        if self.process_exists(process, 'qc_post_processor'):
            # Short-circuit the bootstrap so that no more than one worker is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)
        self.run_interval = CFG.get_safe('service.qc_processing.run_interval', 24)

        interval_key = uuid4().hex # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(name='qc_post_processor',
            executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)


        timer_id = self.scheduler_service.create_interval_timer(start_time=str(time.time()),
                end_time='-1', #Run FOREVER
                interval=3600*self.run_interval,
                event_origin=interval_key)

    def process_exists(self, process, name):
        proc_ids, meta = process.container.resource_registry.find_resources(restype=RT.Process, id_only=True)
        return any([name in p['name'] for p in meta if p['name']])
    def on_initial_bootstrap(self, process, config, **kwargs):
        pds_client = ProcessDispatcherServiceProcessClient(process=process)

        ingestion_module    = config.get_safe('bootstrap.processes.ingestion.module','ion.processes.data.ingestion.science_granule_ingestion_worker')
        ingestion_class     = config.get_safe('bootstrap.processes.ingestion.class' ,'ScienceGranuleIngestionWorker')
        ingestion_datastore = config.get_safe('bootstrap.processes.ingestion.datastore_name', 'datasets')
        ingestion_queue     = config.get_safe('bootstrap.processes.ingestion.queue' , 'science_granule_ingestion')
        ingestion_workers   = config.get_safe('bootstrap.processes.ingestion.workers', 2)

        replay_module       = config.get_safe('bootstrap.processes.replay.module', 'ion.processes.data.replay.replay_process')
        replay_class        = config.get_safe('bootstrap.processes.replay.class' , 'ReplayProcess')

        process_definition = ProcessDefinition(
            name='ingestion_worker_process',
            description='Worker transform process for ingestion of datasets')
        process_definition.executable['module']= ingestion_module
        process_definition.executable['class'] = ingestion_class
        ingestion_procdef_id = pds_client.create_process_definition(process_definition=process_definition)

        #--------------------------------------------------------------------------------
        # Simulate a HA ingestion worker by creating two of them
        #--------------------------------------------------------------------------------
        config = DotDict()
        config.process.datastore_name = ingestion_datastore
        config.process.queue_name     = ingestion_queue

        for i in xrange(ingestion_workers):
            pds_client.schedule_process(process_definition_id=ingestion_procdef_id, configuration=config)



        process_definition = ProcessDefinition(name='data_replay_process', description='Process for the replay of datasets')
        process_definition.executable['module']= replay_module
        process_definition.executable['class'] = replay_class
        pds_client.create_process_definition(process_definition=process_definition)
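# Hedged sketch: the replay ProcessDefinition above is only registered, never scheduled, by
# this bootstrap. A consumer could later look it up by name and launch a replay roughly like
# this; the registry query and the dataset_id config key are assumptions.
proc_def_ids, _ = process.container.resource_registry.find_resources(
    restype=RT.ProcessDefinition, name='data_replay_process', id_only=True)
if proc_def_ids:
    replay_config = DotDict()
    replay_config.process.dataset_id = 'some_dataset_id'   # hypothetical dataset to replay
    replay_pid = pds_client.schedule_process(process_definition_id=proc_def_ids[0],
                                             configuration=replay_config)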
Example #13
    def await_agent_ready(self, replay_timeout=5):
        '''
        Determines if the process has been started
        @param replay_timeout Time to wait before raising a timeout
        @retval               True if the process has been started
        '''
        if self.process:
            pd_cli = ProcessDispatcherServiceProcessClient(process=self.process)
        else:
            pd_cli = ProcessDispatcherServiceClient()
        process_gate = ProcessStateGate(pd_cli.read_process,
                                        self.replay_process_id,
                                        ProcessStateEnum.RUNNING)
        return process_gate.await(replay_timeout)
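# Hedged usage sketch: the same ProcessStateGate pattern gates any scheduled process on a
# target state, not just replay. The helper below is an assumption for illustration; it
# schedules a process and blocks until it reaches RUNNING or the timeout elapses.
def schedule_and_await_running(pd_cli, process_definition_id, timeout=30):
    pid = pd_cli.schedule_process(process_definition_id=process_definition_id)
    gate = ProcessStateGate(pd_cli.read_process, pid, ProcessStateEnum.RUNNING)
    if not gate.await(timeout):
        log.warning('Process %s did not reach RUNNING within %s seconds', pid, timeout)
    return pid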
Example #14
    def setUp(self):
        # Start container

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process=WorkflowServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()
class TestVisualizationServiceIntegration(VisualizationIntegrationTestHelper):

    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        #preload_ion_params(self.container)
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process=VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    def validate_messages(self, msg):
        rdt = RecordDictionaryTool.load_from_granule(msg.body)

        vardict = {}
        vardict['temp'] = get_safe(rdt, 'temp')
        vardict['time'] = get_safe(rdt, 'time')
        print vardict['time']
        print vardict['temp']
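    # Hedged variant (not called by the tests below): in an automated run the printed values
    # could instead be checked, e.g. that the granule actually carried matching 'time' and
    # 'temp' arrays. The method name is an assumption.
    def validate_granule_arrays(self, msg):
        rdt = RecordDictionaryTool.load_from_granule(msg.body)
        temp = get_safe(rdt, 'temp')
        times = get_safe(rdt, 'time')
        self.assertIsNotNone(temp, 'granule is missing temp')
        self.assertIsNotNone(times, 'granule is missing time')
        self.assertEqual(len(times), len(temp), 'time/temp length mismatch')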




    @attr('LOCOINT')
    #@patch.dict('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register': False}}})
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = USER_VISUALIZATION_QUEUE

        xq = self.container.ex_manager.create_xn_queue(user_queue_name)

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name = user_queue_name,
            name = "user visualization queue"
        )

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

#        for x in range(msg_count):
#            mo = subscriber.get_one_msg(timeout=1)
#            print mo.body
#            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])
           # print msgs[x].body



        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)


        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many


        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data



        #Should see more messages in the queue
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 3: %s ' % msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])

        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 4: %s ' % msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)




    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    def test_realtime_visualization(self):

#        #Start up multiple vis service workers if not a CEI launch
#        if not os.getenv('CEI_LAUNCH_TEST', False):
#            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid1)
#            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid2)

        ## Create the Highcharts workflow definition since there is no preload for the test
        #workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        vis_params ={}
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id, visualization_parameters=simplejson.dumps(vis_params))
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]

        result = gevent.event.AsyncResult()

        def get_vis_messages(get_data_count=7):  # Should be an odd number for round-robin processing by the service workers


            get_cnt = 0
            while get_cnt < get_data_count:

                vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
                if (vis_data):
                    self.validate_highcharts_transform_results(vis_data)

                get_cnt += 1
                gevent.sleep(5) # simulates the polling from UI

            result.set(get_cnt)

        gevent.spawn(get_vis_messages)

        result.get(timeout=90)

        #Trying to continue to receive messages in the queue
        gevent.sleep(2.0)  # Send some messages - don't care how many


        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)


        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data

    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    @patch.dict(CFG, {'user_queue_monitor_size': 25})
    @attr('CLEANUP')
    @unittest.skipIf(os.getenv('PYCC_MODE', False),'Not integrated for CEI')
    def test_realtime_visualization_cleanup(self):

#        #Start up multiple vis service workers if not a CEI launch
#        if not os.getenv('CEI_LAUNCH_TEST', False):
#            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid1)
#            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid2)

        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(name=USER_VISUALIZATION_QUEUE, return_columns=['name', 'messages'])
            q_names = [ q['name'] for q in queues if q['name']] #Get a list of only the queue names
            original_queue_count = len(q_names)
        except Exception, e:
            log.warn('Unable to get queue information from broker management plugin: ' + e.message)
            pass

        ## Create the highcharts workflow definition since there is no preload for the test
        #workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)


        #Start up a number of requests - and queues - to start accumulating messages. The test will not clean them up,
        #but instead checks whether the monitoring thread will.
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token1 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token2 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token3 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]

        #Get the default exchange space
        exchange = self.container.ex_manager.default_xs.exchange


        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(name=USER_VISUALIZATION_QUEUE, return_columns=['name', 'messages'])
            q_names = [ q['name'] for q in queues if q['name']] #Get a list of only the queue names

            self.assertIn(exchange + "." + bad_vis_token1, q_names)
            self.assertIn(exchange + "." + bad_vis_token2, q_names)
            self.assertIn(exchange + "." + bad_vis_token3, q_names)
            self.assertIn(exchange + "." + vis_token, q_names)

        except Exception, e:
            log.warn('Unable to get queue information from broker management plugin: ' + e.message)
            pass
Example #16
class TestWorkflowManagementIntegration(VisualizationIntegrationTestHelper):
    def setUp(self):
        # Start container

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        preload_ion_params(self.container)

        #Instantiate a process to represent the test
        process = WorkflowServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(
            node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.pubsubclient = PubsubManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.datasetclient = DatasetManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(
            node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(
            node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_SA_transform_components(self):

        assertions = self.assertTrue

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product(
        )
        data_product_stream_ids.append(ctd_stream_id)

        ###
        ###  Setup the first transformation
        ###

        # Salinity: Data Process Definition
        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition(
        )

        l2_salinity_all_data_process_id, ctd_l2_salinity_output_dp_id = self.create_transform_process(
            ctd_L2_salinity_dprocdef_id, ctd_parsed_data_product_id,
            'salinity')

        ## get the stream id for the transform outputs
        stream_ids, _ = self.rrclient.find_objects(
            ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0)
        sal_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_stream_id)

        ###
        ###  Setup the second transformation
        ###

        # Salinity Doubler: Data Process Definition
        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition(
        )

        salinity_double_data_process_id, salinity_doubler_output_dp_id = self.create_transform_process(
            salinity_doubler_dprocdef_id, ctd_l2_salinity_output_dp_id,
            'salinity')

        stream_ids, _ = self.rrclient.find_objects(
            salinity_doubler_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0)
        sal_dbl_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_dbl_stream_id)

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id,
                                                      data_product_stream_ids)

        #Stop the transform processes
        self.dataprocessclient.deactivate_data_process(
            salinity_double_data_process_id)
        self.dataprocessclient.deactivate_data_process(
            l2_salinity_all_data_process_id)

        #Validate the data from each of the messages along the way
        self.validate_messages(results)

    @attr('LOCOINT')
    @attr('SMOKE')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_transform_workflow(self):

        assertions = self.assertTrue

        log.debug("Building the workflow definition")

        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Salinity_Test_Workflow',
            description='tests a workflow of multiple transform data processes'
        )

        workflow_data_product_name = 'TEST-Workflow_Output_Product'  #Set a specific output product name

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug("Adding a transformation process definition for salinity")
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition(
        )
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=ctd_L2_salinity_dprocdef_id,
            persist_process_output_data=False
        )  #Don't persist the intermediate data product
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug(
            "Adding a transformation process definition for salinity doubler")
        #-------------------------------------------------------------------------------------------------------------------------

        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition(
        )
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=salinity_doubler_dprocdef_id,
        )
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        log.debug("Creating workflow def in the resource registry")
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id,
                                               PRED.hasDataProcessDefinition)
        assertions(len(aids) == 2)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        log.debug("Creating the input data product")
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product(
        )
        data_product_stream_ids.append(ctd_stream_id)

        log.debug("Creating and starting the workflow")
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id,
            ctd_parsed_data_product_id,
            persist_workflow_data_product=True,
            output_data_product_name=workflow_data_product_name,
            timeout=300)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        log.debug("persisting the output product")
        #self.dataproductclient.activate_data_product_persistence(workflow_product_id)
        dataset_ids, _ = self.rrclient.find_objects(workflow_product_id,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        assertions(len(dataset_ids) == 1)
        dataset_id = dataset_ids[0]

        log.debug(
            "Verifying the output data product name matches what was specified in the workflow definition"
        )
        workflow_product = self.rrclient.read(workflow_product_id)
        assertions(
            workflow_product.name.startswith(workflow_data_product_name),
            'Nope: %s != %s' %
            (workflow_product.name, workflow_data_product_name))

        log.debug(
            "Walking the associations to find the appropriate output data streams to validate the messages"
        )

        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 2)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        log.debug("data_product_stream_ids: %s" % data_product_stream_ids)

        log.debug(
            "Starting the output stream listener to monitor to collect messages"
        )
        results = self.start_output_stream_and_listen(ctd_stream_id,
                                                      data_product_stream_ids)

        log.debug("results::: %s" % results)

        log.debug("Stopping the workflow processes")
        self.workflowclient.terminate_data_process_workflow(
            workflow_id, False, timeout=250)  # Should test true at some point

        log.debug("Making sure the Workflow object was removed")
        objs, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(objs) == 0)

        log.debug(
            "Validating the data from each of the messages along the way")
        self.validate_messages(results)

        log.debug(
            "Checking to see if dataset id = %s, was persisted, and that it can be retrieved...."
            % dataset_id)
        self.validate_data_ingest_retrieve(dataset_id)

        log.debug("Cleaning up to make sure delete is correct.")
        self.workflowclient.delete_workflow_definition(workflow_def_id)
        """
        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )
        """

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_highcharts_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='HighCharts_Test_Workflow',
            description=
            'Tests the workflow of converting stream data to HighCharts')

        #Add a transformation process definition
        highcharts_procdef_id = self.create_highcharts_data_process_definition(
        )
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=highcharts_procdef_id,
            persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product(
        )
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id, timeout=60)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id,
                                                      data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id=workflow_id, delete_data_products=False,
            timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_highcharts_transform_results(results)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)
        """
        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)

        assertions(len(workflow_def_ids) == 0 )
        """

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_mpl_graphs_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Mpl_Graphs_Test_Workflow',
            description=
            'Tests the workflow of converting stream data to Matplotlib graphs'
        )

        #Add a transformation process definition
        mpl_graphs_procdef_id = self.create_mpl_graphs_data_process_definition(
        )
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=mpl_graphs_procdef_id,
            persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product(
        )
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id,
            ctd_parsed_data_product_id,
            persist_workflow_data_product=True,
            timeout=60)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id,
                                                      data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id=workflow_id, delete_data_products=False,
            timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(results)

        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids, _ = self.rrclient.find_objects(
            workflow_dp_ids[len(workflow_dp_ids) - 1], PRED.hasDataset,
            RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve_last_data_points(
            ds_ids[0], 10)

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(retrieved_granule)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)
        """
        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )
        """

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_multiple_workflow_instances(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Multiple_Test_Workflow',
            description='Tests the workflow of converting stream data')

        #Add a transformation process definition
        highcharts_procdef_id = self.create_highcharts_data_process_definition(
        )
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=highcharts_procdef_id,
            persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the first input data product
        ctd_stream_id1, ctd_parsed_data_product_id1 = self.create_ctd_input_stream_and_data_product(
            'ctd_parsed1')
        data_product_stream_ids.append(ctd_stream_id1)

        #Create and start the first workflow
        workflow_id1, workflow_product_id1 = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id1, timeout=60)

        #Create the second input data product
        ctd_stream_id2, ctd_parsed_data_product_id2 = self.create_ctd_input_stream_and_data_product(
            'ctd_parsed2')
        data_product_stream_ids.append(ctd_stream_id2)

        #Create and start the second workflow
        workflow_id2, workflow_product_id2 = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id2, timeout=60)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_ids, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 2)

        #Start the first input stream process
        ctd_sim_pid1 = self.start_sinusoidal_input_stream_process(
            ctd_stream_id1)

        #Start the second input stream process
        ctd_sim_pid2 = self.start_simple_input_stream_process(ctd_stream_id2)

        #Start the output stream listener to monitor a set number of messages being sent through the workflows
        results = self.start_output_stream_and_listen(
            None, data_product_stream_ids, message_count_per_stream=5)

        # stop the flow of messages...
        self.process_dispatcher.cancel_process(
            ctd_sim_pid1
        )  # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid2)

        #Stop the first workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id=workflow_id1, delete_data_products=False,
            timeout=60)  # Should test true at some point

        #Stop the second workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id=workflow_id2, delete_data_products=False,
            timeout=60)  # Should test true at some point

        workflow_ids, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 0)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)
        """
Example #17
class TestWorkflowManagementIntegration(VisualizationIntegrationTestHelper):

    def setUp(self):
        # Start container

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process=WorkflowServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_SA_transform_components(self):

        assertions = self.assertTrue

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)


        ###
        ###  Setup the first transformation
        ###

        # Salinity: Data Process Definition
        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()

        l2_salinity_all_data_process_id, ctd_l2_salinity_output_dp_id = self.create_transform_process(ctd_L2_salinity_dprocdef_id,ctd_parsed_data_product_id, 'salinity' )

        ## get the stream id for the transform outputs
        stream_ids, _ = self.rrclient.find_objects(ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0 )
        sal_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_stream_id)


        ###
        ###  Setup the second transformation
        ###

        # Salinity Doubler: Data Process Definition
        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()

        salinity_double_data_process_id, salinity_doubler_output_dp_id = self.create_transform_process(salinity_doubler_dprocdef_id, ctd_l2_salinity_output_dp_id, 'salinity' )

        stream_ids, _ = self.rrclient.find_objects(salinity_doubler_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0 )
        sal_dbl_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_dbl_stream_id)


        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)


        #Stop the transform processes
        self.dataprocessclient.deactivate_data_process(salinity_double_data_process_id)
        self.dataprocessclient.deactivate_data_process(l2_salinity_all_data_process_id)

        #Validate the data from each of the messages along the way
        self.validate_messages(results)


    @attr('LOCOINT')
    @attr('SMOKE')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_transform_workflow(self):

        assertions = self.assertTrue

        log.debug("Building the workflow definition")

        workflow_def_obj = IonObject(RT.WorkflowDefinition,
                                     name='Salinity_Test_Workflow',
                                     description='tests a workflow of multiple transform data processes')

        workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug( "Adding a transformation process definition for salinity")
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep',
                                      data_process_definition_id=ctd_L2_salinity_dprocdef_id,
                                      persist_process_output_data=False)  #Don't persist the intermediate data product
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug( "Adding a transformation process definition for salinity doubler")
        #-------------------------------------------------------------------------------------------------------------------------

        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep',
                                      data_process_definition_id=salinity_doubler_dprocdef_id, )
        workflow_def_obj.workflow_steps.append(workflow_step_obj)
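        # Note: these two steps reuse the same process definitions exercised
        # individually in test_SA_transform_components (salinity, then the
        # salinity doubler); the workflow service is expected to chain them so
        # that the first step's output product feeds the doubler.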

        log.debug( "Creating workflow def in the resource registry")
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 2 )

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        log.debug( "Creating the input data product")
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        log.debug( "Creating and starting the workflow")
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id,
                                                                                            ctd_parsed_data_product_id,
                persist_workflow_data_product=True, output_data_product_name=workflow_data_product_name, timeout=300)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        log.debug( "persisting the output product")
        #self.dataproductclient.activate_data_product_persistence(workflow_product_id)
        dataset_ids,_ = self.rrclient.find_objects(workflow_product_id, PRED.hasDataset, RT.Dataset, True)
        assertions(len(dataset_ids) == 1 )
        dataset_id = dataset_ids[0]
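        # A Dataset association should already exist here because the workflow
        # was created with persist_workflow_data_product=True above, which is
        # presumably why the explicit activate_data_product_persistence call is
        # commented out.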

        log.debug( "Verifying the output data product name matches what was specified in the workflow definition")
        workflow_product = self.rrclient.read(workflow_product_id)
        assertions(workflow_product.name.startswith(workflow_data_product_name), 'Nope: %s != %s' % (workflow_product.name, workflow_data_product_name))

        log.debug( "Walking the associations to find the appropriate output data streams to validate the messages")

        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 2 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        log.debug( "data_product_stream_ids: %s" % data_product_stream_ids)

        log.debug( "Starting the output stream listener to monitor to collect messages")
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        log.debug( "results::: %s" % results)

        log.debug( "Stopping the workflow processes")
        self.workflowclient.terminate_data_process_workflow(workflow_id, False, timeout=250)  # Should test true at some point

        log.debug( "Making sure the Workflow object was removed")
        objs, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(objs) == 0)

        log.debug( "Validating the data from each of the messages along the way")
        self.validate_messages(results)

        log.debug( "Checking to see if dataset id = %s, was persisted, and that it can be retrieved...." % dataset_id)
        self.validate_data_ingest_retrieve(dataset_id)

        log.debug( "Cleaning up to make sure delete is correct.")
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_google_dt_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='GoogleDT_Test_Workflow',description='Tests the workflow of converting stream data to Google DT')

        #Add a transformation process definition
        google_dt_procdef_id = self.create_google_dt_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=google_dt_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=60)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id,delete_data_products=False, timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_google_dt_transform_results(results)

        """
        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids,_ = self.rrclient.find_objects(workflow_dp_ids[len(workflow_dp_ids) - 1], PRED.hasDataset, RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve(ds_ids[0])

        #Validate the data from each of the messages along the way
        self.validate_google_dt_transform_results(retrieved_granule)
        """

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_mpl_graphs_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Mpl_Graphs_Test_Workflow',description='Tests the workflow of converting stream data to Matplotlib graphs')

        #Add a transformation process definition
        mpl_graphs_procdef_id = self.create_mpl_graphs_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=mpl_graphs_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id,
            persist_workflow_data_product=True, timeout=60)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id,delete_data_products=False, timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(results)

        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids,_ = self.rrclient.find_objects(workflow_dp_ids[len(workflow_dp_ids) - 1], PRED.hasDataset, RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve_last_data_points(ds_ids[0], 10)

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(retrieved_granule)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )


    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_multiple_workflow_instances(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Multiple_Test_Workflow',description='Tests the workflow of converting stream data')

        #Add a transformation process definition
        google_dt_procdef_id = self.create_google_dt_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=google_dt_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the first input data product
        ctd_stream_id1, ctd_parsed_data_product_id1 = self.create_ctd_input_stream_and_data_product('ctd_parsed1')
        data_product_stream_ids.append(ctd_stream_id1)

        #Create and start the first workflow
        workflow_id1, workflow_product_id1 = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id1, timeout=60)

        #Create the second input data product
        ctd_stream_id2, ctd_parsed_data_product_id2 = self.create_ctd_input_stream_and_data_product('ctd_parsed2')
        data_product_stream_ids.append(ctd_stream_id2)

        #Create and start the second workflow
        workflow_id2, workflow_product_id2 = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id2, timeout=60)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_ids,_ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 2 )


        #Start the first input stream process
        ctd_sim_pid1 = self.start_sinusoidal_input_stream_process(ctd_stream_id1)

        #Start the second input stream process
        ctd_sim_pid2 = self.start_simple_input_stream_process(ctd_stream_id2)

        #Start the output stream listener to monitor a set number of messages being sent through the workflows
        results = self.start_output_stream_and_listen(None, data_product_stream_ids, message_count_per_stream=5)

        # stop the flow of messages...
        self.process_dispatcher.cancel_process(ctd_sim_pid1) # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid2)

        #Stop the first workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id1,delete_data_products=False, timeout=60)  # Should test true at some point

        #Stop the second workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id2,delete_data_products=False, timeout=60)  # Should test true at some point

        workflow_ids,_ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 0 )

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )

        aid_list = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aid_list) == 0 )


class TestVisualizationServiceIntegration(VisualizationIntegrationTestHelper):

    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        preload_ion_params(self.container)
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process=VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    def validate_messages(self, msgs):
        msg = msgs


        rdt = RecordDictionaryTool.load_from_granule(msg.body)

        vardict = {}
        vardict['temp'] = get_safe(rdt, 'temp')
        vardict['time'] = get_safe(rdt, 'time')
        print vardict['time']
        print vardict['temp']
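        # A stricter validation could assert that the variables were actually
        # present in the granule instead of just printing them, e.g. (sketch only):
        #   self.assertIsNotNone(vardict['time'])
        #   self.assertIsNotNone(vardict['temp'])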




    @attr('LOCOINT')
    #@patch.dict('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register': False}}})
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = USER_VISUALIZATION_QUEUE

        xq = self.container.ex_manager.create_xn_queue(user_queue_name)

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name = user_queue_name,
            name = "user visualization queue"
        )

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

#        for x in range(msg_count):
#            mo = subscriber.get_one_msg(timeout=1)
#            print mo.body
#            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])
           # print msgs[x].body



        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)


        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many


        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data



        #Should see more messages in the queue
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 3: %s ' % msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])

        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 4: %s ' % msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)


    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_multiple_visualization_queue(self):

        # set up a workflow with the salinity transform and the doubler. We will direct the original stream and the doubled stream to queues
        # and test to make sure the subscription to the queues is working correctly
        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Viz_Test_Workflow',description='A workflow to test collection of multiple data products in queues')

        workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name
        #-------------------------------------------------------------------------------------------------------------------------
        #Add a transformation process definition for salinity
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False)  #Don't persist the intermediate data product
        configuration = {'stream_name' : 'salinity'}
        workflow_step_obj.configuration = configuration
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 1 )

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=30)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        # Now for each of the data_product_stream_ids create a queue and pipe their data to the queue


        user_queue_name1 = USER_VISUALIZATION_QUEUE + '1'
        user_queue_name2 = USER_VISUALIZATION_QUEUE + '2'

        # use idempotency to create queues
        xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
        self.addCleanup(xq1.delete)
        xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
        self.addCleanup(xq2.delete)
        xq1.purge()
        xq2.purge()

        # the create_subscription call takes a list of stream_ids so create temp ones

        dp_stream_id1 = list()
        dp_stream_id1.append(data_product_stream_ids[0])
        dp_stream_id2 = list()
        dp_stream_id2.append(data_product_stream_ids[1])

        salinity_subscription_id1 = self.pubsubclient.create_subscription( stream_ids=dp_stream_id1,
            exchange_name = user_queue_name1, name = "user visualization queue1")

        salinity_subscription_id2 = self.pubsubclient.create_subscription( stream_ids=dp_stream_id2,
            exchange_name = user_queue_name2, name = "user visualization queue2")

        # Create subscribers for the output of the queue
        subscriber1 = Subscriber(from_name=xq1)
        subscriber1.initialize()
        subscriber2 = Subscriber(from_name=xq2)
        subscriber2.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id1)
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id2)

        # Start input stream and wait for some time
        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(5.0)  # Send some messages - don't care how many

        msg_count,_ = xq1.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)
        msg_count,_ = xq2.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)

        msgs1 = subscriber1.get_all_msgs(timeout=2)
        msgs2 = subscriber2.get_all_msgs(timeout=2)
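        # Only min(len(msgs1), len(msgs2)) message pairs are compared below,
        # since the two queues may have accumulated different numbers of
        # messages during the same sleep window.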

        for x in range(min(len(msgs1), len(msgs2))):
            msgs1[x].ack()
            msgs2[x].ack()
            self.validate_multiple_vis_queue_messages(msgs1[x].body, msgs2[x].body)

        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        # close the subscription and queues
        subscriber1.close()
        subscriber2.close()

        return

    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    @attr('SMOKE')
    def test_realtime_visualization(self):

#        #Start up multiple vis service workers if not a CEI launch
#        if not os.getenv('CEI_LAUNCH_TEST', False):
#            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid1)
#            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid2)

        # Create the Highcharts workflow definition since there is no preload for the test
        workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        vis_params ={}
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id, visualization_parameters=simplejson.dumps(vis_params))
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]

        result = gevent.event.AsyncResult()

        def get_vis_messages(get_data_count=7):  # Should be an odd number for round-robin processing by the service workers


            get_cnt = 0
            while get_cnt < get_data_count:

                vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
                if (vis_data):
                    self.validate_highcharts_transform_results(vis_data)

                get_cnt += 1
                gevent.sleep(5) # simulates the polling from UI

            result.set(get_cnt)

        gevent.spawn(get_vis_messages)

        result.get(timeout=90)

        #Trying to continue to receive messages in the queue
        gevent.sleep(2.0)  # Send some messages - don't care how many


        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)


        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data

    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    @patch.dict(CFG, {'user_queue_monitor_size': 25})
    @attr('CLEANUP')
    @unittest.skipIf(os.getenv('PYCC_MODE', False),'Not integrated for CEI')
    def test_realtime_visualization_cleanup(self):

#        #Start up multiple vis service workers if not a CEI launch
#        if not os.getenv('CEI_LAUNCH_TEST', False):
#            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid1)
#            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid2)

        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(name=USER_VISUALIZATION_QUEUE, return_columns=['name', 'messages'])
            q_names = [ q['name'] for q in queues if q['name']] #Get a list of only the queue names
            original_queue_count = len(q_names)
        except Exception, e:
            log.warn('Unable to get queue information from broker management plugin: ' + e.message)
            pass

        # Create the highcharts workflow definition since there is no preload for the test
        workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)


        #Start up a number of requests - and queues - to start accumulating messages. The test will not clean them up
        #but instead check to see if the monitoring thread will.
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token1 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token2 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token3 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]
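        # The three "bad" tokens above are deliberately never terminated; with
        # the patched user_queue_monitor_timeout / user_queue_monitor_size
        # settings, the visualization service's monitoring thread (rather than
        # this test) is expected to clean up their queues.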

        #Get the default exchange space
        exchange = self.container.ex_manager.default_xs.exchange


        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(name=USER_VISUALIZATION_QUEUE, return_columns=['name', 'messages'])
            q_names = [ q['name'] for q in queues if q['name']] #Get a list of only the queue names

            self.assertIn(exchange + "." + bad_vis_token1, q_names)
            self.assertIn(exchange + "." + bad_vis_token2, q_names)
            self.assertIn(exchange + "." + bad_vis_token3, q_names)
            self.assertIn(exchange + "." + vis_token, q_names)

        except Exception, e:
            log.warn('Unable to get queue information from broker management plugin: ' + e.message)
            pass


class BootstrapProcessDispatcher(BootstrapPlugin):
    """
    Bootstrap process for process dispatcher.
    """
    def on_initial_bootstrap(self, process, config, **kwargs):
        self.pds_client = ProcessDispatcherServiceProcessClient(
            process=process)
        self.resource_registry = ResourceRegistryServiceProcessClient(
            process=process)
        self.ingestion_worker(process, config)
        self.replay_defs(process, config)
        self.notification_worker(process, config)
        self.registration_worker(process, config)
        self.pydap_server(process, config)
        self.eoi_services(process, config)

    def eoi_services(self, process, config):
        eoi_module = config.get_safe(
            'bootstrap.processes.registration.module',
            'ion.processes.data.registration.eoi_registration_process')
        eoi_class = config.get_safe('bootstrap.processes.registration.class',
                                    'EOIRegistrationProcess')

        process_definition = ProcessDefinition(
            name='eoi_server', description='Process for eoi data sources')
        process_definition.executable['module'] = eoi_module
        process_definition.executable['class'] = eoi_class

        self._create_and_launch(process_definition)

    def pydap_server(self, process, config):
        pydap_module = config.get_safe(
            'bootstrap.processes.pydap.module',
            'ion.processes.data.externalization.lightweight_pydap')
        pydap_class = config.get_safe('bootstrap.processes.pydap.class',
                                      'LightweightPyDAP')

        use_pydap = config.get_safe('bootstrap.launch_pydap', False)

        process_definition = ProcessDefinition(
            name='pydap_server',
            description='Lightweight WSGI Server for PyDAP')
        process_definition.executable['module'] = pydap_module
        process_definition.executable['class'] = pydap_class

        self._create_and_launch(process_definition, use_pydap)

    def registration_worker(self, process, config):
        res, meta = self.resource_registry.find_resources(
            name='registration_worker', restype=RT.ProcessDefinition)
        if len(res):
            return

        registration_module = config.get_safe(
            'bootstrap.processes.registration.module',
            'ion.processes.data.registration.registration_process')
        registration_class = config.get_safe(
            'bootstrap.processes.registration.class', 'RegistrationProcess')
        use_pydap = True

        process_definition = ProcessDefinition(
            name='registration_worker',
            description='For registering datasets with ERDDAP')
        process_definition.executable['module'] = registration_module
        process_definition.executable['class'] = registration_class

        self._create_and_launch(process_definition, use_pydap)

    def _create_and_launch(self, process_definition, conditional=True):
        proc_def_id = self.pds_client.create_process_definition(
            process_definition=process_definition)

        if conditional:
            process_res_id = self.pds_client.create_process(
                process_definition_id=proc_def_id)
            self.pds_client.schedule_process(process_definition_id=proc_def_id,
                                             process_id=process_res_id)
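        # The process definition is always registered above; `conditional` only
        # gates whether a process is actually created and scheduled. For example,
        # the PyDAP server is launched only when the 'bootstrap.launch_pydap'
        # flag is set (see pydap_server above).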

    def ingestion_worker(self, process, config):
        # ingestion
        ingestion_module = config.get_safe(
            'bootstrap.processes.ingestion.module',
            'ion.processes.data.ingestion.science_granule_ingestion_worker')
        ingestion_class = config.get_safe(
            'bootstrap.processes.ingestion.class',
            'ScienceGranuleIngestionWorker')
        ingestion_datastore = config.get_safe(
            'bootstrap.processes.ingestion.datastore_name', 'datasets')
        ingestion_queue = config.get_safe(
            'bootstrap.processes.ingestion.queue', 'science_granule_ingestion')
        ingestion_workers = config.get_safe(
            'bootstrap.processes.ingestion.workers', 1)
        #--------------------------------------------------------------------------------
        # Create ingestion workers
        #--------------------------------------------------------------------------------

        process_definition = ProcessDefinition(
            name='ingestion_worker_process',
            description='Worker transform process for ingestion of datasets')
        process_definition.executable['module'] = ingestion_module
        process_definition.executable['class'] = ingestion_class
        ingestion_procdef_id = self.pds_client.create_process_definition(
            process_definition=process_definition)

        #--------------------------------------------------------------------------------
        # Simulate a HA ingestion worker by creating two of them
        #--------------------------------------------------------------------------------
#        config = DotDict()
#        config.process.datastore_name = ingestion_datastore
#        config.process.queue_name     = ingestion_queue
#
#        for i in xrange(ingestion_workers):
#            self.pds_client.schedule_process(process_definition_id=ingestion_procdef_id, configuration=config)

    def notification_worker(self, process, config):
        # user notifications
        notification_module = config.get_safe(
            'bootstrap.processes.user_notification.module',
            'ion.processes.data.transforms.notification_worker')
        notification_class = config.get_safe(
            'bootstrap.processes.user_notification.class',
            'NotificationWorker')
        notification_workers = config.get_safe(
            'bootstrap.processes.user_notification.workers', 1)

        #--------------------------------------------------------------------------------
        # Create notification workers
        #--------------------------------------------------------------------------------

        # set up the process definition
        process_definition_uns = ProcessDefinition(
            name='notification_worker_process',
            description='Worker transform process for user notifications')
        process_definition_uns.executable['module'] = notification_module
        process_definition_uns.executable['class'] = notification_class
        uns_procdef_id = self.pds_client.create_process_definition(
            process_definition=process_definition_uns)

        config = DotDict()
        config.process.type = 'simple'

        for i in xrange(notification_workers):
            config.process.name = 'notification_worker_%s' % i
            config.process.queue_name = 'notification_worker_queue'
            self.pds_client.schedule_process(
                process_definition_id=uns_procdef_id, configuration=config)

    def replay_defs(self, process, config):
        replay_module = config.get_safe(
            'bootstrap.processes.replay.module',
            'ion.processes.data.replay.replay_process')
        replay_class = config.get_safe('bootstrap.processes.replay.class',
                                       'ReplayProcess')
        #--------------------------------------------------------------------------------
        # Create replay process definition
        #--------------------------------------------------------------------------------

        process_definition = ProcessDefinition(
            name=DataRetrieverService.REPLAY_PROCESS,
            description='Process for the replay of datasets')
        process_definition.executable['module'] = replay_module
        process_definition.executable['class'] = replay_class
        self.pds_client.create_process_definition(
            process_definition=process_definition)

    def on_restart(self, process, config, **kwargs):
        pass


class TestVisualizationServiceIntegration(VisualizationIntegrationTestHelper):
    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        #preload_ion_params(self.container)
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process = VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(
            node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.pubsubclient = PubsubManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.datasetclient = DatasetManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(
            node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(
            node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(
            node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    def validate_messages(self, msgs):
        msg = msgs

        rdt = RecordDictionaryTool.load_from_granule(msg.body)

        vardict = {}
        vardict['temp'] = get_safe(rdt, 'temp')
        vardict['time'] = get_safe(rdt, 'time')
        print vardict['time']
        print vardict['temp']

    @attr('LOCOINT')
    #@patch.dict('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register': False}}})
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product(
        )
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = USER_VISUALIZATION_QUEUE

        xq = self.container.ex_manager.create_xn_queue(user_queue_name)

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name=user_queue_name,
            name="user visualization queue")

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(
            subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

        #        for x in range(msg_count):
        #            mo = subscriber.get_one_msg(timeout=1)
        #            print mo.body
        #            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])
        # print msgs[x].body

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)

        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many

        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

        #Should see more messages in the queue
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 3: %s ' % msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 4: %s ' % msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)

    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    def test_realtime_visualization(self):

        #        #Start up multiple vis service workers if not a CEI launch
        #        if not os.getenv('CEI_LAUNCH_TEST', False):
        #            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
        #            self.addCleanup(self.container.terminate_process, vpid1)
        #            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
        #            self.addCleanup(self.container.terminate_process, vpid2)

        ## Create the Highcharts workflow definition since there is no preload for the test
        #workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product(
        )
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        vis_params = {}
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(
            data_product_id=ctd_parsed_data_product_id,
            visualization_parameters=simplejson.dumps(vis_params))
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]

        result = gevent.event.AsyncResult()

        def get_vis_messages(
            get_data_count=7
        ):  # Should be an odd number for round-robin processing by the service workers

            get_cnt = 0
            while get_cnt < get_data_count:

                vis_data = self.vis_client.get_realtime_visualization_data(
                    vis_token)
                if (vis_data):
                    self.validate_highcharts_transform_results(vis_data)

                get_cnt += 1
                gevent.sleep(5)  # simulates the polling from UI

            result.set(get_cnt)

        gevent.spawn(get_vis_messages)

        result.get(timeout=90)

        #Trying to continue to receive messages in the queue
        gevent.sleep(2.0)  # Send some messages - don't care how many

        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)

        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    @patch.dict(CFG, {'user_queue_monitor_size': 25})
    @attr('CLEANUP')
    @unittest.skipIf(os.getenv('PYCC_MODE', False), 'Not integrated for CEI')
    def test_realtime_visualization_cleanup(self):

        #        #Start up multiple vis service workers if not a CEI launch
        #        if not os.getenv('CEI_LAUNCH_TEST', False):
        #            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
        #            self.addCleanup(self.container.terminate_process, vpid1)
        #            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
        #            self.addCleanup(self.container.terminate_process, vpid2)

        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(
                name=USER_VISUALIZATION_QUEUE,
                return_columns=['name', 'messages'])
            q_names = [q['name'] for q in queues
                       if q['name']]  #Get a list of only the queue names
            original_queue_count = len(q_names)
        except Exception, e:
            log.warn(
                'Unable to get queue information from broker management plugin: '
                + e.message)
            pass

        ## Create the highcharts workflow definition since there is no preload for the test
        #workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product(
        )
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        #Start up a number of requests - and queues - to start accumulating messages. The test will not clean them up
        #but instead check to see if the monitoring thread will.
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(
            data_product_id=ctd_parsed_data_product_id)
        bad_vis_token1 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(
            data_product_id=ctd_parsed_data_product_id)
        bad_vis_token2 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(
            data_product_id=ctd_parsed_data_product_id)
        bad_vis_token3 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(
            data_product_id=ctd_parsed_data_product_id)
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]

        #Get the default exchange space
        exchange = self.container.ex_manager.default_xs.exchange

        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(
                name=USER_VISUALIZATION_QUEUE,
                return_columns=['name', 'messages'])
            q_names = [q['name'] for q in queues
                       if q['name']]  #Get a list of only the queue names

            self.assertIn(exchange + "." + bad_vis_token1, q_names)
            self.assertIn(exchange + "." + bad_vis_token2, q_names)
            self.assertIn(exchange + "." + bad_vis_token3, q_names)
            self.assertIn(exchange + "." + vis_token, q_names)

        except Exception, e:
            log.warn(
                'Unable to get queue information from broker management plugin: '
                + e.message)
            pass


class BootstrapProcessDispatcher(BootstrapPlugin):
    """
    Bootstrap process for process dispatcher.
    """

    def on_initial_bootstrap(self, process, config, **kwargs):
        self.pds_client = ProcessDispatcherServiceProcessClient(process=process)
        self.resource_registry = ResourceRegistryServiceProcessClient(process=process)
        self.ingestion_worker(process,config)
        self.replay_defs(process,config)
        self.notification_worker(process,config)
        self.registration_worker(process,config)
        self.pydap_server(process,config)

    def pydap_server(self, process, config):
        pydap_module = config.get_safe('bootstrap.processes.pydap.module', 'ion.processes.data.externalization.lightweight_pydap')
        pydap_class  = config.get_safe('bootstrap.processes.pydap.class', 'LightweightPyDAP')

        use_pydap = config.get_safe('bootstrap.launch_pydap', False)


        process_definition = ProcessDefinition(
                name = 'pydap_server',
                description = 'Lightweight WSGI Server for PyDAP')
        process_definition.executable['module'] = pydap_module
        process_definition.executable['class'] = pydap_class

        self._create_and_launch(process_definition,use_pydap)



    def registration_worker(self, process, config):
        res, meta = self.resource_registry.find_resources(name='registration_worker', restype=RT.ProcessDefinition)
        if len(res):
            return

        registration_module = config.get_safe('bootstrap.processes.registration.module', 'ion.processes.data.registration.registration_process')
        registration_class  = config.get_safe('bootstrap.processes.registration.class', 'RegistrationProcess')
        use_pydap = config.get_safe('bootstrap.use_pydap', False)


        process_definition = ProcessDefinition(
                name='registration_worker',
                description='For registering datasets with ERDDAP')
        process_definition.executable['module'] = registration_module
        process_definition.executable['class']  = registration_class


        self._create_and_launch(process_definition, use_pydap)

    def _create_and_launch(self, process_definition, conditional=True):
        proc_def_id = self.pds_client.create_process_definition(process_definition=process_definition)

        if conditional:

            process_res_id = self.pds_client.create_process(process_definition_id=proc_def_id)
            self.pds_client.schedule_process(process_definition_id=proc_def_id, process_id=process_res_id)

    def ingestion_worker(self, process, config):
        # ingestion
        ingestion_module    = config.get_safe('bootstrap.processes.ingestion.module','ion.processes.data.ingestion.science_granule_ingestion_worker')
        ingestion_class     = config.get_safe('bootstrap.processes.ingestion.class' ,'ScienceGranuleIngestionWorker')
        ingestion_datastore = config.get_safe('bootstrap.processes.ingestion.datastore_name', 'datasets')
        ingestion_queue     = config.get_safe('bootstrap.processes.ingestion.queue' , 'science_granule_ingestion')
        ingestion_workers   = config.get_safe('bootstrap.processes.ingestion.workers', 1)
        #--------------------------------------------------------------------------------
        # Create ingestion workers
        #--------------------------------------------------------------------------------

        process_definition = ProcessDefinition(
            name='ingestion_worker_process',
            description='Worker transform process for ingestion of datasets')
        process_definition.executable['module']= ingestion_module
        process_definition.executable['class'] = ingestion_class
        ingestion_procdef_id = self.pds_client.create_process_definition(process_definition=process_definition)

        #--------------------------------------------------------------------------------
        # Simulate a HA ingestion worker by creating two of them
        #--------------------------------------------------------------------------------
#        config = DotDict()
#        config.process.datastore_name = ingestion_datastore
#        config.process.queue_name     = ingestion_queue
#
#        for i in xrange(ingestion_workers):
#            self.pds_client.schedule_process(process_definition_id=ingestion_procdef_id, configuration=config)


    def notification_worker(self, process, config):
        # user notifications
        notification_module    = config.get_safe('bootstrap.processes.user_notification.module','ion.processes.data.transforms.notification_worker')
        notification_class     = config.get_safe('bootstrap.processes.user_notification.class' ,'NotificationWorker')
        notification_workers = config.get_safe('bootstrap.processes.user_notification.workers', 1)

        #--------------------------------------------------------------------------------
        # Create notification workers
        #--------------------------------------------------------------------------------

        # set up the process definition
        process_definition_uns = ProcessDefinition(
            name='notification_worker_process',
            description='Worker transform process for user notifications')
        process_definition_uns.executable['module'] = notification_module
        process_definition_uns.executable['class'] = notification_class
        uns_procdef_id = self.pds_client.create_process_definition(process_definition=process_definition_uns)

        config = DotDict()
        config.process.type = 'simple'

        for i in xrange(notification_workers):
            config.process.name = 'notification_worker_%s' % i
            config.process.queue_name = 'notification_worker_queue'
            self.pds_client.schedule_process(process_definition_id=uns_procdef_id, configuration=config)
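        # Each worker gets a unique process name, but all of them consume from the
        # same 'notification_worker_queue', presumably so notifications are
        # load-balanced across the pool.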


    def replay_defs(self, process, config):
        replay_module       = config.get_safe('bootstrap.processes.replay.module', 'ion.processes.data.replay.replay_process')
        replay_class        = config.get_safe('bootstrap.processes.replay.class' , 'ReplayProcess')
        #--------------------------------------------------------------------------------
        # Create replay process definition
        #--------------------------------------------------------------------------------

        process_definition = ProcessDefinition(name=DataRetrieverService.REPLAY_PROCESS, description='Process for the replay of datasets')
        process_definition.executable['module'] = replay_module
        process_definition.executable['class'] = replay_class
        self.pds_client.create_process_definition(process_definition=process_definition)
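        # Only the definition is registered here; replay processes are presumably
        # spawned on demand by the DataRetrieverService, which looks the definition
        # up by the REPLAY_PROCESS name used above.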

    
    def on_restart(self, process, config, **kwargs):
        pass
class TestVisualizationServiceIntegration(VisualizationIntegrationTestHelper):
    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process = VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(
            node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.pubsubclient = PubsubManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.datasetclient = DatasetManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(
            node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(
            node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(
            node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    def validate_messages(self, msg):
        # Validates a single message pulled from the user queue.
        rdt = RecordDictionaryTool.load_from_granule(msg.body)

        vardict = {}
        vardict['temp'] = get_safe(rdt, 'temp')
        vardict['time'] = get_safe(rdt, 'time')
        print vardict['time']
        print vardict['temp']

    @attr('LOCOINT')
    #@patch.dict('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register': False}}})
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = 'user_queue'

        xq = self.container.ex_manager.create_xn_queue(user_queue_name)
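        # create_xn_queue is used idempotently elsewhere in this suite (see
        # test_multiple_visualization_queue), so re-running against an existing
        # queue name is presumably safe.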

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name=user_queue_name,
            name="user visualization queue")

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(
            subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

        #        for x in range(msg_count):
        #            mo = subscriber.get_one_msg(timeout=1)
        #            print mo.body
        #            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])
        # print msgs[x].body

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)

        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many

        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

        #Should see more messages in the queue
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 3: %s ' % msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 4: %s ' % msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)

    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_multiple_visualization_queue(self):

        # Set up a workflow with the salinity transform. Direct the original stream and
        # the derived salinity stream to separate queues and verify that the
        # subscriptions to those queues work correctly.
        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Viz_Test_Workflow',
            description='A workflow to test collection of multiple data products in queues')

        workflow_data_product_name = 'TEST-Workflow_Output_Product'  #Set a specific output product name
        #-------------------------------------------------------------------------------------------------------------------------
        #Add a transformation process definition for salinity
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=ctd_L2_salinity_dprocdef_id,
            persist_process_output_data=False
        )  #Don't persist the intermediate data product
        configuration = {'stream_name': 'salinity'}
        workflow_step_obj.configuration = configuration
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id,
                                               PRED.hasDataProcessDefinition)
        assertions(len(aids) == 1)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id, timeout=30)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        # Now for each of the data_product_stream_ids create a queue and pipe their data to the queue

        user_queue_name1 = 'user_queue_1'
        user_queue_name2 = 'user_queue_2'

        # use idempotency to create queues
        xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
        self.addCleanup(xq1.delete)
        xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
        self.addCleanup(xq2.delete)
        xq1.purge()
        xq2.purge()

        # the create_subscription call takes a list of stream_ids so create temp ones

        dp_stream_id1 = [data_product_stream_ids[0]]
        dp_stream_id2 = [data_product_stream_ids[1]]

        salinity_subscription_id1 = self.pubsubclient.create_subscription(
            stream_ids=dp_stream_id1,
            exchange_name=user_queue_name1,
            name="user visualization queue1")

        salinity_subscription_id2 = self.pubsubclient.create_subscription(
            stream_ids=dp_stream_id2,
            exchange_name=user_queue_name2,
            name="user visualization queue2")

        # Create subscribers for the output of the queue
        subscriber1 = Subscriber(from_name=xq1)
        subscriber1.initialize()
        subscriber2 = Subscriber(from_name=xq2)
        subscriber2.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(
            subscription_id=salinity_subscription_id1)
        self.pubsubclient.activate_subscription(
            subscription_id=salinity_subscription_id2)

        # Start input stream and wait for some time
        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(5.0)  # Send some messages - don't care how many

        msg_count, _ = xq1.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)
        msg_count, _ = xq2.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)

        msgs1 = subscriber1.get_all_msgs(timeout=2)
        msgs2 = subscriber2.get_all_msgs(timeout=2)

        for x in range(min(len(msgs1), len(msgs2))):
            msgs1[x].ack()
            msgs2[x].ack()
            self.validate_multiple_vis_queue_messages(msgs1[x].body,
                                                      msgs2[x].body)

        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        # close the subscription and queues
        subscriber1.close()
        subscriber2.close()

        return

    #@unittest.skip('Skipped because of broken record dictionary work-around')
    def test_realtime_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        #TODO - Need to add workflow creation for google data table
        vis_params = {}
        vis_token = self.vis_client.initiate_realtime_visualization(
            data_product_id=ctd_parsed_data_product_id,
            visualization_parameters=vis_params)
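        # vis_token acts as the handle for this realtime session: it is polled below
        # with get_realtime_visualization_data() and released at the end via
        # terminate_realtime_visualization_data().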

        #Trying to continue to receive messages in the queue
        gevent.sleep(10.0)  # Send some messages - don't care how many

        #TODO - find out what the actual return data type should be
        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
        if vis_data:
            self.validate_google_dt_transform_results(vis_data)

        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many

        #Turning off after everything - since it is more representative of an always on stream of data!
        #todo remove the try except
        try:
            self.process_dispatcher.cancel_process(
                ctd_sim_pid
            )  # kill the ctd simulator process - that is enough data
        except Exception:
            log.warning("cancelling process did not work")

        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)

        if vis_data:
            self.validate_google_dt_transform_results(vis_data)

        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)

    #@unittest.skip('Skipped because of broken record dictionary work-around')
    def test_google_dt_overview_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()

        # start producing data
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        # Generate some data for a few seconds
        gevent.sleep(5.0)

        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

        # Use the data product to test the data retrieval and google dt generation capability of the vis service
        vis_data = self.vis_client.get_visualization_data(
            ctd_parsed_data_product_id)

        # validate the returned data
        self.validate_vis_service_google_dt_results(vis_data)

    #@unittest.skip('Skipped because of broken record dictionary work-around')
    def test_mpl_graphs_overview_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        # Generate some data for a few seconds
        gevent.sleep(5.0)

        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(
            ctd_sim_pid
        )  # kill the ctd simulator process - that is enough data

        # Use the data product to test the data retrieval and matplotlib image generation capability of the vis service
        vis_data = self.vis_client.get_visualization_image(
            ctd_parsed_data_product_id)

        # validate the returned data
        self.validate_vis_service_mpl_graphs_results(vis_data)

        return