Example #1
    def on_start(self):

        rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
        pubsub_cli = PubsubManagementServiceProcessClient(process=self, node=self.container.node)

        # Get the stream(s)
        data_product_id = self.CFG.get_safe('dispatcher.data_product_id','')

        stream_ids,_ = rr_cli.find_objects(subject=data_product_id, predicate=PRED.hasStream, id_only=True)

        log.info('Got Stream Ids: "%s"', stream_ids)
        assert stream_ids, 'No streams found for this data product!'


        # Use the OS pid so each dispatcher instance gets a unique queue name
        exchange_name = 'dispatcher_%s' % os.getpid()
        subscription_id = pubsub_cli.create_subscription(
                name='SampleSubscription', 
                exchange_name=exchange_name,
                stream_ids=stream_ids,
                description='Sample Subscription Description'
                )
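        # Note: no messages are delivered until the subscription is activated (see below)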


        # Cache stream definitions by stream id so each one is fetched only once
        stream_defs = {}

        def message_received(message, stream_route, stream_id):
            granule = message

            stream_id = granule.stream_resource_id  # overrides the callback arg with the id carried in the granule

            data_stream_id = granule.data_stream_id
            data_stream = granule.identifiables[data_stream_id]

            tstamp = get_datetime(data_stream.timestamp.value)

            records = granule.identifiables['record_count'].value
            

            log.info('Received a message from stream %s with time stamp %s and %d records', stream_id, tstamp, records)


            if stream_id not in stream_defs:
                stream_defs[stream_id] = pubsub_cli.find_stream_definition(stream_id, id_only=False).container
            stream_def = stream_defs.get(stream_id)

            sp = PointSupplementStreamParser(stream_definition=stream_def, stream_granule=granule)

            last_data = {}
            for field in sp.list_field_names():
                last_data[field] = sp.get_values(field)[-1]

            log.info('Last values in the message: %s', last_data)


        subscriber = StreamSubscriber(process=self, exchange_name=exchange_name, callback=message_received)
        subscriber.start()

        pubsub_cli.activate_subscription(subscription_id)
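
    # A minimal sketch of how a process like this might be spawned, assuming a
    # running pyon container; the module path and class name are hypothetical:
    #
    #   config = {'dispatcher': {'data_product_id': data_product_id}}
    #   pid = container.spawn_process(name='dispatcher',
    #                                 module='ion.processes.dispatcher',  # assumed path
    #                                 cls='Dispatcher',                   # assumed class
    #                                 config=config)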
Example #2
class TestGovernanceInt(IonIntegrationTestCase):
    def setUp(self):

        # Start container
        self._start_container()

        #Load a deploy file
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process = GovernanceTestProcess()

        #Load system policies after container has started all of the services
        LoadSystemPolicy.op_load_system_policies(process)

        self.rr_client = ResourceRegistryServiceProcessClient(
            node=self.container.node, process=process)

        self.id_client = IdentityManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.pol_client = PolicyManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.org_client = OrgManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.ims_client = InstrumentManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.ems_client = ExchangeManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.ion_org = self.org_client.find_org()

        self.system_actor = self.id_client.find_actor_identity_by_name(
            name=CFG.system.system_actor)
        log.debug('system actor: %s', self.system_actor._id)

        sa_header_roles = get_role_message_headers(
            self.org_client.find_all_roles_by_user(self.system_actor._id))
        self.sa_user_header = {
            'ion-actor-id': self.system_actor._id,
            'ion-actor-roles': sa_header_roles
        }
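
    # Illustrative only: calls made with headers=self.sa_user_header are evaluated
    # by the policy framework as the system actor, while calls made without headers
    # run as the anonymous user, e.g.:
    #
    #   self.org_client.enroll_member(org_id, user_id,
    #                                 headers=self.sa_user_header)  # as system actor
    #   self.org_client.enroll_member(org_id, user_id)              # as anonymous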

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_basic_policy(self):

        #Make sure that the system policies have been loaded
        policy_list, _ = self.rr_client.find_resources(restype=RT.Policy)
        self.assertNotEqual(
            len(policy_list), 0,
            "The system policies have not been loaded into the Resource Registry"
        )

        #Attempt to access an operation in a service which does not have specific policies set
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn(
            'exchange_management(create_exchange_space) has been denied',
            cm.exception.message)

        #Add a new policy to allow the above service call.
        policy_obj = IonObject(
            RT.Policy,
            name='Exchange_Management_Test_Policy',
            definition_type="Service",
            rule=TEST_POLICY_TEXT,
            description=
            'Allow specific operations in the Exchange Management Service for anonymous user'
        )

        test_policy_id = self.pol_client.create_policy(
            policy_obj, headers=self.sa_user_header)
        self.pol_client.add_service_policy('exchange_management',
                                           test_policy_id,
                                           headers=self.sa_user_header)
        log.info('Policy created: %s', policy_obj.name)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The previous attempt at this operation should now be allowed.
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(BadRequest) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn('Arguments not set', cm.exception.message)

        #disable the test policy to try again
        self.pol_client.disable_policy(test_policy_id,
                                       headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The same request that was previously allowed should now be denied
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn(
            'exchange_management(create_exchange_space) has been denied',
            cm.exception.message)

        #now enable the test policy to try again
        self.pol_client.enable_policy(test_policy_id,
                                      headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The previous attempt at this operation should now be allowed.
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(BadRequest) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn('Arguments not set', cm.exception.message)

        self.pol_client.remove_service_policy('exchange_management',
                                              test_policy_id,
                                              headers=self.sa_user_header)
        self.pol_client.delete_policy(test_policy_id,
                                      headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The same request that was previously allowed should now be denied
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn(
            'exchange_management(create_exchange_space) has been denied',
            cm.exception.message)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_org_policy(self):

        #Make sure that the system policies have been loaded
        policy_list, _ = self.rr_client.find_resources(restype=RT.Policy)
        self.assertNotEqual(
            len(policy_list), 0,
            "The system policies have not been loaded into the Resource Registry"
        )

        with self.assertRaises(BadRequest) as cm:
            myorg = self.org_client.read_org()
        self.assertEqual(cm.exception.message,
                         'The org_id parameter is missing')

        user_id, valid_until, registered = self.id_client.signon(
            USER1_CERTIFICATE, True)
        log.debug("user id=" + user_id)

        user_roles = get_role_message_headers(
            self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles}

        #Attempt to enroll a user anonymously - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id, user_id)
        self.assertIn('org_management(enroll_member) has been denied',
                      cm.exception.message)

        #Attempt to let a user enroll themselves - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id,
                                          user_id,
                                          headers=user_header)
        self.assertIn('org_management(enroll_member) has been denied',
                      cm.exception.message)

        #Attempt to enroll the user in the ION Root org as a manager - should not be allowed since
        #registration with the system implies membership in the ROOT Org.
        with self.assertRaises(BadRequest) as cm:
            self.org_client.enroll_member(self.ion_org._id,
                                          user_id,
                                          headers=self.sa_user_header)
        self.assertEqual(
            cm.exception.message,
            'A request to enroll in the root ION Org is not allowed')

        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id)
        self.assertIn('org_management(find_enrolled_users) has been denied',
                      cm.exception.message)

        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id,
                                                        headers=user_header)
        self.assertIn('org_management(find_enrolled_users) has been denied',
                      cm.exception.message)

        users = self.org_client.find_enrolled_users(
            self.ion_org._id, headers=self.sa_user_header)
        self.assertEqual(len(users), 2)
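        # Two members expected: the system actor and the newly signed-on user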

        ## test_org_roles and policies

        roles = self.org_client.find_org_roles(self.ion_org._id)
        self.assertEqual(len(roles), 3)
        self.assertItemsEqual([r.name for r in roles],
                              [MANAGER_ROLE, MEMBER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id,
                                                   self.system_actor._id,
                                                   headers=self.sa_user_header)
        self.assertEqual(len(roles), 3)
        self.assertItemsEqual([r.name for r in roles],
                              [MEMBER_ROLE, MANAGER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id,
                                                   user_id,
                                                   headers=self.sa_user_header)
        self.assertEqual(len(roles), 1)
        self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE])

        with self.assertRaises(NotFound) as nf:
            org2 = self.org_client.find_org(ORG2)
        self.assertIn('The Org with name Org2 does not exist',
                      nf.exception.message)

        org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
        org2_id = self.org_client.create_org(org2, headers=self.sa_user_header)

        org2 = self.org_client.find_org(ORG2)
        self.assertEqual(org2_id, org2._id)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles), 2)
        self.assertItemsEqual([r.name for r in roles],
                              [MANAGER_ROLE, MEMBER_ROLE])

        operator_role = IonObject(RT.UserRole,
                                  name=INSTRUMENT_OPERATOR,
                                  label='Instrument Operator',
                                  description='Instrument Operator')

        #First try to add the user role anonymously
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.add_user_role(org2_id, operator_role)
        self.assertIn('org_management(add_user_role) has been denied',
                      cm.exception.message)

        self.org_client.add_user_role(org2_id,
                                      operator_role,
                                      headers=self.sa_user_header)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles), 3)
        self.assertItemsEqual([r.name for r in roles],
                              [MANAGER_ROLE, MEMBER_ROLE, INSTRUMENT_OPERATOR])

        # test requests for enrollments and roles.

        #First try to find user requests anonymously
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id)
        self.assertIn('org_management(find_requests) has been denied',
                      cm.exception.message)

        #Next try to find user requests as a basic member
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id,
                                                     headers=user_header)
        self.assertIn('org_management(find_requests) has been denied',
                      cm.exception.message)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 0)

        # First try to request a role without being a member
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_role(org2_id,
                                                  user_id,
                                                  INSTRUMENT_OPERATOR,
                                                  headers=user_header)
        self.assertIn(
            'A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)',
            cm.exception.message)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 0)

        req_id = self.org_client.request_enroll(org2_id,
                                                user_id,
                                                headers=user_header)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 1)

        requests = self.org_client.find_user_requests(
            user_id, org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests), 1)

        #User tries requesting enrollment again - this should fail
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_enroll(org2_id,
                                                    user_id,
                                                    headers=user_header)
        self.assertIn(
            'A precondition for this request has not been satisfied: enroll_req_not_exist(org_id,user_id)',
            cm.exception.message)

        #Manager denies the request
        self.org_client.deny_request(org2_id,
                                     req_id,
                                     'To test the deny process',
                                     headers=self.sa_user_header)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 1)

        self.assertEqual(requests[0].status, REQUEST_DENIED)

        #Manager approves request
        self.org_client.approve_request(org2_id,
                                        req_id,
                                        headers=self.sa_user_header)

        users = self.org_client.find_enrolled_users(
            org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users), 0)

        #User Accepts request
        self.org_client.accept_request(org2_id, req_id, headers=user_header)

        users = self.org_client.find_enrolled_users(
            org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users), 1)

        #User tries requesting enrollment again - this should fail
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_enroll(org2_id,
                                                    user_id,
                                                    headers=user_header)
        self.assertIn(
            'A precondition for this request has not been satisfied: is_not_enrolled(org_id,user_id)',
            cm.exception.message)

        req_id = self.org_client.request_role(org2_id,
                                              user_id,
                                              INSTRUMENT_OPERATOR,
                                              headers=user_header)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 2)

        requests = self.org_client.find_requests(org2_id,
                                                 request_status='Open',
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 1)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      headers=user_header)
        self.assertEqual(len(requests), 2)

        requests = self.org_client.find_user_requests(
            user_id, org2_id, request_type=RT.RoleRequest, headers=user_header)
        self.assertEqual(len(requests), 1)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      request_status="Open",
                                                      headers=user_header)
        self.assertEqual(len(requests), 1)

        ia_list, _ = self.rr_client.find_resources(restype=RT.InstrumentAgent)

        self.assertEqual(len(ia_list), 0)

        ia_obj = IonObject(RT.InstrumentAgent,
                           name='Instrument Agent1',
                           description='The first Instrument Agent')

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj)
        self.assertIn(
            'instrument_management(create_instrument_agent) has been denied',
            cm.exception.message)

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj,
                                                    headers=user_header)
        self.assertIn(
            'instrument_management(create_instrument_agent) has been denied',
            cm.exception.message)

        #Manager approves request
        self.org_client.approve_request(org2_id,
                                        req_id,
                                        headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      request_status="Open",
                                                      headers=user_header)
        self.assertEqual(len(requests), 0)

        #User accepts request
        self.org_client.accept_request(org2_id, req_id, headers=user_header)

        #Refresh headers with new role
        user_roles = get_role_message_headers(
            self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles}

        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_obj = IonObject(RT.InstrumentAgent,
                           name='Instrument Agent2',
                           description='The second Instrument Agent')
        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_list, _ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
        self.assertEqual(len(ia_list), 2)

        #First make an acquire resource request with a non-enrolled user.
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_acquire_resource(
                org2_id,
                self.system_actor._id,
                ia_list[0]._id,
                headers=self.sa_user_header)
        self.assertIn(
            'A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)',
            cm.exception.message)

        req_id = self.org_client.request_acquire_resource(org2_id,
                                                          user_id,
                                                          ia_list[0]._id,
                                                          headers=user_header)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 3)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      headers=user_header)
        self.assertEqual(len(requests), 3)

        requests = self.org_client.find_user_requests(
            user_id,
            org2_id,
            request_type=RT.ResourceRequest,
            headers=user_header)
        self.assertEqual(len(requests), 1)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      request_status="Open",
                                                      headers=user_header)
        self.assertEqual(len(requests), 1)

        self.assertEqual(requests[0]._id, req_id)

        #Manager approves Instrument request
        self.org_client.approve_request(org2_id,
                                        req_id,
                                        headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      request_status="Open",
                                                      headers=user_header)
        self.assertEqual(len(requests), 0)

        #User accepts request
        self.org_client.accept_request(org2_id, req_id, headers=user_header)

        #Check commitments
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,
                                                     PRED.hasCommitment,
                                                     RT.ResourceCommitment)
        self.assertEqual(len(commitments), 1)

        commitments, _ = self.rr_client.find_objects(user_id,
                                                     PRED.hasCommitment,
                                                     RT.ResourceCommitment)
        self.assertEqual(len(commitments), 1)

        #Release the resource
        self.org_client.release_resource(
            org2_id,
            user_id,
            ia_list[0]._id,
            headers=self.sa_user_header,
            timeout=15)  #TODO - Refactor release_resource

        #Check commitments
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,
                                                     PRED.hasCommitment,
                                                     RT.ResourceCommitment)
        self.assertEqual(len(commitments), 0)

        commitments, _ = self.rr_client.find_objects(user_id,
                                                     PRED.hasCommitment,
                                                     RT.ResourceCommitment)
        self.assertEqual(len(commitments), 0)
class TestWorkflowManagementIntegration(VisualizationIntegrationTestHelper):

    def setUp(self):
        # Start container

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process = WorkflowServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_SA_transform_components(self):

        assertions = self.assertTrue

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)


        ###
        ###  Setup the first transformation
        ###

        # Salinity: Data Process Definition
        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()

        l2_salinity_all_data_process_id, ctd_l2_salinity_output_dp_id = self.create_transform_process(ctd_L2_salinity_dprocdef_id,ctd_parsed_data_product_id, 'salinity' )

        ## get the stream id for the transform outputs
        stream_ids, _ = self.rrclient.find_objects(ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0 )
        sal_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_stream_id)


        ###
        ###  Setup the second transformation
        ###

        # Salinity Doubler: Data Process Definition
        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()

        salinity_double_data_process_id, salinity_doubler_output_dp_id = self.create_transform_process(salinity_doubler_dprocdef_id, ctd_l2_salinity_output_dp_id, 'salinity' )

        stream_ids, _ = self.rrclient.find_objects(salinity_doubler_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0 )
        sal_dbl_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_dbl_stream_id)


        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)


        #Stop the transform processes
        self.dataprocessclient.deactivate_data_process(salinity_double_data_process_id)
        self.dataprocessclient.deactivate_data_process(l2_salinity_all_data_process_id)

        #Validate the data from each of the messages along the way
        self.validate_messages(results)


    @attr('LOCOINT')
    @attr('SMOKE')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_transform_workflow(self):

        assertions = self.assertTrue

        log.debug("Building the workflow definition")

        workflow_def_obj = IonObject(RT.WorkflowDefinition,
                                     name='Salinity_Test_Workflow',
                                     description='tests a workflow of multiple transform data processes')

        workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug( "Adding a transformation process definition for salinity")
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep',
                                      data_process_definition_id=ctd_L2_salinity_dprocdef_id,
                                      persist_process_output_data=False)  #Don't persist the intermediate data product
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug( "Adding a transformation process definition for salinity doubler")
        #-------------------------------------------------------------------------------------------------------------------------

        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep',
                                      data_process_definition_id=salinity_doubler_dprocdef_id, )
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        log.debug( "Creating workflow def in the resource registry")
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 2 )

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        log.debug( "Creating the input data product")
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        log.debug( "Creating and starting the workflow")
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id,
            ctd_parsed_data_product_id,
            persist_workflow_data_product=True,
            output_data_product_name=workflow_data_product_name,
            timeout=300)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        log.debug( "persisting the output product")
        #self.dataproductclient.activate_data_product_persistence(workflow_product_id)
        dataset_ids,_ = self.rrclient.find_objects(workflow_product_id, PRED.hasDataset, RT.Dataset, True)
        assertions(len(dataset_ids) == 1 )
        dataset_id = dataset_ids[0]

        log.debug( "Verifying the output data product name matches what was specified in the workflow definition")
        workflow_product = self.rrclient.read(workflow_product_id)
        assertions(workflow_product.name.startswith(workflow_data_product_name),
                   'Product name %s does not start with %s' % (workflow_product.name, workflow_data_product_name))

        log.debug( "Walking the associations to find the appropriate output data streams to validate the messages")

        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 2 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        log.debug( "data_product_stream_ids: %s" % data_product_stream_ids)

        log.debug( "Starting the output stream listener to monitor to collect messages")
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        log.debug( "results::: %s" % results)

        log.debug( "Stopping the workflow processes")
        self.workflowclient.terminate_data_process_workflow(workflow_id, False, timeout=250)  # Should test true at some point

        log.debug( "Making sure the Workflow object was removed")
        objs, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(objs) == 0)

        log.debug( "Validating the data from each of the messages along the way")
        self.validate_messages(results)

        log.debug( "Checking to see if dataset id = %s, was persisted, and that it can be retrieved...." % dataset_id)
        self.validate_data_ingest_retrieve(dataset_id)

        log.debug( "Cleaning up to make sure delete is correct.")
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_google_dt_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='GoogleDT_Test_Workflow',description='Tests the workflow of converting stream data to Google DT')

        #Add a transformation process definition
        google_dt_procdef_id = self.create_google_dt_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=google_dt_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=60)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id,delete_data_products=False, timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_google_dt_transform_results(results)

        """
        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids,_ = self.rrclient.find_objects(workflow_dp_ids[len(workflow_dp_ids) - 1], PRED.hasDataset, RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve(ds_ids[0])

        #Validate the data from each of the messages along the way
        self.validate_google_dt_transform_results(retrieved_granule)
        """

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_mpl_graphs_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Mpl_Graphs_Test_Workflow',description='Tests the workflow of converting stream data to Matplotlib graphs')

        #Add a transformation process definition
        mpl_graphs_procdef_id = self.create_mpl_graphs_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=mpl_graphs_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id,
            persist_workflow_data_product=True, timeout=60)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id,delete_data_products=False, timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(results)

        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids,_ = self.rrclient.find_objects(workflow_dp_ids[-1], PRED.hasDataset, RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve_last_data_points(ds_ids[0], 10)

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(retrieved_granule)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )


    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_multiple_workflow_instances(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Multiple_Test_Workflow',description='Tests the workflow of converting stream data')

        #Add a transformation process definition
        google_dt_procdef_id = self.create_google_dt_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=google_dt_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the first input data product
        ctd_stream_id1, ctd_parsed_data_product_id1 = self.create_ctd_input_stream_and_data_product('ctd_parsed1')
        data_product_stream_ids.append(ctd_stream_id1)

        #Create and start the first workflow
        workflow_id1, workflow_product_id1 = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id1, timeout=60)

        #Create the second input data product
        ctd_stream_id2, ctd_parsed_data_product_id2 = self.create_ctd_input_stream_and_data_product('ctd_parsed2')
        data_product_stream_ids.append(ctd_stream_id2)

        #Create and start the second workflow
        workflow_id2, workflow_product_id2 = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id2, timeout=60)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_ids,_ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 2 )


        #Start the first input stream process
        ctd_sim_pid1 = self.start_sinusoidal_input_stream_process(ctd_stream_id1)

        #Start the second input stream process
        ctd_sim_pid2 = self.start_simple_input_stream_process(ctd_stream_id2)

        #Start the output stream listener to monitor a set number of messages being sent through the workflows
        results = self.start_output_stream_and_listen(None, data_product_stream_ids, message_count_per_stream=5)

        # stop the flow of messages...
        self.process_dispatcher.cancel_process(ctd_sim_pid1) # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid2)

        #Stop the first workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id1,delete_data_products=False, timeout=60)  # Should test true at some point

        #Stop the second workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id2,delete_data_products=False, timeout=60)  # Should test true at some point

        workflow_ids,_ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 0 )

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )

        aid_list = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aid_list) == 0 )
Example #4
class VizTransformMatplotlibGraphs(TransformStreamPublisher, TransformEventListener, TransformStreamListener):

    """
    This class is used for instantiating worker processes that have subscriptions to data streams and convert
    incoming data from CDM format to Matplotlib graphs

    """
    # Names of the output streams this transform publishes on; bound to actual
    # stream ids through the 'process.publish_streams' config (see on_start)
    output_bindings = ['graph_image_param_dict']
    event_timer_interval = None


    def on_start(self):
        #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

        self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process = self)

        self.stream_info  = self.CFG.get_safe('process.publish_streams',{})
        self.stream_names = self.stream_info.keys()
        self.stream_ids   = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods = self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(start_time="now" , interval=self._str_to_secs(self.event_timer_interval),
                event_origin=event_origin, event_subtype="")

        super(VizTransformMatplotlibGraphs,self).on_start()
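
    # For reference, a sketch of the process configuration that drives this
    # transform; the keys match the CFG.get_safe() lookups in this class and
    # the values are illustrative only:
    #
    #   config = {
    #       'graph_gen_interval': '10 mins',    # enables the event-timer path
    #       'graph_time_period': '1 hour',      # window of data to retrieve
    #       'in_dp_id': '<data product id>',    # input data product to graph
    #       'parameters': 'temp, salinity',     # comma-separated parameter list
    #       'resolution': 100,                  # extra hint for the algorithm
    #   }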

    # when transform is used as a data process
    def recv_packet(self, packet, in_stream_route, in_stream_id):
        #Check to see if the class instance was set up as an event-triggered transform. If yes, skip the packet
        if self.event_timer_interval:
            return

        log.info('Received packet')
        mpl_data_granule = VizTransformMatplotlibGraphsAlgorithm.execute(packet, params=self.get_stream_definition())
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(mpl_data_granule)

    def get_stream_definition(self):
        stream_id = self.stream_ids[0]
        stream_def = self.pubsub_management.read_stream_definition(stream_id=stream_id)
        return stream_def._id

    def process_event(self, msg, headers):
        # Intentionally a no-op: timer events are handled via interval_timer_callback()
        return

    def interval_timer_callback(self, *args, **kwargs):
        #Find out the input data product to this process
        in_dp_id = self.CFG.get_safe('in_dp_id')

        print " >>>>>>>>>>>>>> IN DP ID from cfg : ", in_dp_id

        # get the dataset_id associated with the data_product. Need it to do the data retrieval
        ds_ids,_ = self.rrclient.find_objects(in_dp_id, PRED.hasDataset, RT.Dataset, True)
        if not ds_ids:
            return None

        # retrieve data for the specified time interval. Set up the query from the passed-in config first
        query = {}

        param_list_str = self.CFG.get_safe('parameters')
        if param_list_str:
            query['parameters'] = param_list_str.split(', ')
            # append time if not present in list of parameters
            if 'time' not in query['parameters']:
                query['parameters'].append('time')


        query['start_time'] = query['end_time'] = ntplib.system_to_ntp_time(time.time()) # Now
        query['stride_time'] = 1
        if self.CFG.get_safe('graph_time_period'):
            query['start_time'] = query['end_time'] - self._str_to_secs(self.CFG.get_safe('graph_time_period'))

        #print " >>>>>>>>>>>>>> QUERY = ", query

        #retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0],{'start_time':start_time,'end_time':end_time})
        retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0], query=query)

        # add extra parameters from the config to the query; the algorithm uses them but data retrieval does not
        if self.CFG.get_safe('resolution'):
            query['resolution'] = self.CFG.get_safe('resolution')

        # send the granule through the Algorithm code to get the matplotlib graphs
        mpl_pdict_id = self.dsm_client.read_parameter_dictionary_by_name('graph_image_param_dict',id_only=True)

        mpl_stream_def = self.pubsub_client.create_stream_definition('mpl', parameter_dictionary_id=mpl_pdict_id)
        fileName = self.CFG.get_safe('graph_time_period')
        mpl_data_granule = VizTransformMatplotlibGraphsAlgorithm.execute(retrieved_granule, config=query, params=mpl_stream_def, fileName=fileName)

        if mpl_data_granule is None:
            return None

        # publish on all specified output streams
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(mpl_data_granule)

        return

    def _str_to_secs(self, time_period):
        # Convert a commonly used time period string (e.g. '2 hours') to seconds
        # by separating the numeric and alphabetic parts of the time period
        time_n = time_period.lower().rstrip('abcdefghijklmnopqrstuvwxyz ')
        time_a = time_period.lower().lstrip('0123456789. ')

        # determine whether the user specified secs, mins, hours, days, weeks, months or years
        if time_a in ('sec', 'secs', 'second', 'seconds'):
            factor = 1
        elif time_a in ('min', 'mins', 'minute', 'minutes'):
            factor = 60
        elif time_a in ('hr', 'hrs', 'hour', 'hours'):
            factor = 60 * 60
        elif time_a in ('day', 'days'):
            factor = 60 * 60 * 24
        elif time_a in ('wk', 'wks', 'week', 'weeks'):
            factor = 60 * 60 * 24 * 7
        elif time_a in ('mon', 'mons', 'month', 'months'):
            factor = 60 * 60 * 24 * 30
        elif time_a in ('yr', 'yrs', 'year', 'years'):
            factor = 60 * 60 * 24 * 365
        else:
            # Fail loudly instead of crashing on float * None below
            raise BadRequest('Unrecognized time period: %s' % time_period)

        return float(time_n) * factor
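
    # Illustrative conversions for _str_to_secs (given the factors above):
    #   self._str_to_secs('30 secs') -> 30.0
    #   self._str_to_secs('2 hours') -> 7200.0
    #   self._str_to_secs('1 week')  -> 604800.0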


    def on_quit(self):

        #Cancel the timer
        if hasattr(self, 'interval_timer_id'):
            self.ssclient.cancel_timer(self.interval_timer_id)

        super(VizTransformMatplotlibGraphs,self).on_quit()
class TestVisualizationServiceIntegration(VisualizationIntegrationTestHelper):

    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        preload_ion_params(self.container)
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process = VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    def validate_messages(self, msgs):
        # Despite the plural name, this helper validates a single message
        msg = msgs

        rdt = RecordDictionaryTool.load_from_granule(msg.body)

        vardict = {}
        vardict['temp'] = get_safe(rdt, 'temp')
        vardict['time'] = get_safe(rdt, 'time')
        log.info('time: %s', vardict['time'])
        log.info('temp: %s', vardict['temp'])




    @attr('LOCOINT')
    #@patch.dict('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register': False}}})
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = USER_VISUALIZATION_QUEUE

        # Declare the named queue on the exchange (queue creation is idempotent)
        xq = self.container.ex_manager.create_xn_queue(user_queue_name)

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name=user_queue_name,
            name='user visualization queue')

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

#        for x in range(msg_count):
#            mo = subscriber.get_one_msg(timeout=1)
#            print mo.body
#            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()
            self.validate_messages(msg)
            # print msg.body



        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)


        #Continue trying to receive messages from the queue
        gevent.sleep(5.0)  # Let more messages accumulate - don't care how many


        #Turning off after everything - since it is more representative of an always-on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data



        #Should see more messages in the queue
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 3: %s ' % msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for m in msgs:
            m.ack()
            self.validate_messages(m)

        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 4: %s ' % msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)

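    def _drain_queue_and_validate(self, subscriber):
        # Hedged helper sketch (not in the original source): drains whatever is
        # currently queued for this subscriber, acks each message, validates it,
        # and reports how many messages were handled.
        msgs = subscriber.get_all_msgs(timeout=2)
        for m in msgs:
            m.ack()
            self.validate_messages(m)
        return len(msgs)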

    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_multiple_visualization_queue(self):

        # set up a workflow with the salinity transform. We will direct the original stream and the derived
        # salinity stream to queues and test that the subscriptions to the queues work correctly
        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Viz_Test_Workflow',description='A workflow to test collection of multiple data products in queues')

        workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name
        #-------------------------------------------------------------------------------------------------------------------------
        #Add a transformation process definition for salinity
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False)  #Don't persist the intermediate data product
        configuration = {'stream_name' : 'salinity'}
        workflow_step_obj.configuration = configuration
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 1 )

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=30)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        # Now for each of the data_product_stream_ids create a queue and pipe their data to the queue


        user_queue_name1 = USER_VISUALIZATION_QUEUE + '1'
        user_queue_name2 = USER_VISUALIZATION_QUEUE + '2'

        # use idempotency to create queues
        xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
        self.addCleanup(xq1.delete)
        xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
        self.addCleanup(xq2.delete)
        xq1.purge()
        xq2.purge()

        # create_subscription takes a list of stream_ids, so wrap each id in its own list

        dp_stream_id1 = [data_product_stream_ids[0]]
        dp_stream_id2 = [data_product_stream_ids[1]]

        salinity_subscription_id1 = self.pubsubclient.create_subscription( stream_ids=dp_stream_id1,
            exchange_name = user_queue_name1, name = "user visualization queue1")

        salinity_subscription_id2 = self.pubsubclient.create_subscription( stream_ids=dp_stream_id2,
            exchange_name = user_queue_name2, name = "user visualization queue2")

        # Create subscribers for the output of the queue
        subscriber1 = Subscriber(from_name=xq1)
        subscriber1.initialize()
        subscriber2 = Subscriber(from_name=xq2)
        subscriber2.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id1)
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id2)

        # Start input stream and wait for some time
        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(5.0)  # Send some messages - don't care how many

        msg_count,_ = xq1.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)
        msg_count,_ = xq2.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)

        msgs1 = subscriber1.get_all_msgs(timeout=2)
        msgs2 = subscriber2.get_all_msgs(timeout=2)

        for m1, m2 in zip(msgs1, msgs2):
            m1.ack()
            m2.ack()
            self.validate_multiple_vis_queue_messages(m1.body, m2.body)

        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        # close the subscription and queues
        subscriber1.close()
        subscriber2.close()

        return
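
    def _make_queue_subscription(self, queue_name, stream_id):
        # Hedged helper sketch (not in the original source): bundles the
        # queue-creation, cleanup-registration, purge, and subscription steps
        # that test_multiple_visualization_queue performs once per queue.
        xq = self.container.ex_manager.create_xn_queue(queue_name)
        self.addCleanup(xq.delete)
        xq.purge()
        subscription_id = self.pubsubclient.create_subscription(
            stream_ids=[stream_id], exchange_name=queue_name, name=queue_name)
        return xq, subscription_id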

    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    @attr('SMOKE')
    def test_realtime_visualization(self):

#        #Start up multiple vis service workers if not a CEI launch
#        if not os.getenv('CEI_LAUNCH_TEST', False):
#            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid1)
#            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid2)

        # Create the Highcharts workflow definition since there is no preload for the test
        workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        vis_params ={}
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id, visualization_parameters=simplejson.dumps(vis_params))
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]

        result = gevent.event.AsyncResult()

        def get_vis_messages(get_data_count=7):  # Should be an odd number for round-robin processing by service workers
            get_cnt = 0
            while get_cnt < get_data_count:

                vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
                if vis_data:
                    self.validate_highcharts_transform_results(vis_data)

                get_cnt += 1
                gevent.sleep(5) # simulates the polling from UI

            result.set(get_cnt)

        gevent.spawn(get_vis_messages)

        result.get(timeout=90)

        #Let a few more messages accumulate before tearing down
        gevent.sleep(2.0)


        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)


        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data
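
    def _poll_realtime_data(self, vis_token, polls=7, interval=5):
        # Hedged helper sketch (not in the original source): the poll-validate
        # loop from test_realtime_visualization, factored out. Each iteration
        # simulates the UI polling the visualization service for fresh data.
        for _ in range(polls):
            vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
            if vis_data:
                self.validate_highcharts_transform_results(vis_data)
            gevent.sleep(interval)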

    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    @patch.dict(CFG, {'user_queue_monitor_size': 25})
    @attr('CLEANUP')
    @unittest.skipIf(os.getenv('PYCC_MODE', False),'Not integrated for CEI')
    def test_realtime_visualization_cleanup(self):

#        #Start up multiple vis service workers if not a CEI launch
#        if not os.getenv('CEI_LAUNCH_TEST', False):
#            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid1)
#            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid2)

        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(name=USER_VISUALIZATION_QUEUE, return_columns=['name', 'messages'])
            q_names = [ q['name'] for q in queues if q['name']] #Get a list of only the queue names
            original_queue_count = len(q_names)
        except Exception, e:
            log.warn('Unable to get queue information from broker management plugin: ' + e.message)

        # Create the highcharts workflow definition since there is no preload for the test
        workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)


        #Start up a number of requests - and queues - to start accumulating messages. The test will not clean them up
        #but instead check to see if the monitoring thread will.
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token1 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token2 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token3 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]

        #Get the default exchange space
        exchange = self.container.ex_manager.default_xs.exchange


        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(name=USER_VISUALIZATION_QUEUE, return_columns=['name', 'messages'])
            q_names = [ q['name'] for q in queues if q['name']] #Get a list of only the queue names

            self.assertIn(exchange + "." + bad_vis_token1, q_names)
            self.assertIn(exchange + "." + bad_vis_token2, q_names)
            self.assertIn(exchange + "." + bad_vis_token3, q_names)
            self.assertIn(exchange + "." + vis_token, q_names)

        except Exception, e:
            log.warn('Unable to get queue information from broker management plugin: ' + e.message)


class VizTransformProcForGoogleDT(TransformDataProcess):
    """
    This class is used for instantiating worker processes that have subscriptions to data streams and convert
    incoming data from CDM format to JSON style Google DataTables

    """
    def on_start(self):
        super(VizTransformProcForGoogleDT, self).on_start()
        self.initDataTableFlag = True

        # need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(
            process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(
            node=self.container.node)

        # extract the various parameters passed
        self.out_stream_id = self.CFG.get('process').get(
            'publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        out_stream_pub_registrar = StreamPublisherRegistrar(
            process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(
            stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        stream_def_resource = self.rr_cli.read(self.stream_def_id)
        self.stream_def = stream_def_resource.container
        self.realtime_flag = False
        if self.CFG.get("realtime_flag") == "True":
            self.realtime_flag = True
        else:
            self.data_product_id_token = self.CFG.get('data_product_id_token')

        # extract the stream_id associated with the DP. Needed later
        stream_ids, _ = self.rr_cli.find_objects(self.data_product_id,
                                                 PRED.hasStream, None, True)
        self.stream_id = stream_ids[0]

        self.dataDescription = []
        self.dataTableContent = []
        self.varTuple = []
        self.total_num_of_records_recvd = 0

    def process(self, packet):

        log.debug('(%s): Received Viz Data Packet' % (self.name))

        element_count_id = 0
        expected_range = []

        psd = PointSupplementStreamParser(stream_definition=self.stream_def,
                                          stream_granule=packet)
        vardict = {}
        arrLen = None
        for varname in psd.list_field_names():
            vardict[varname] = psd.get_values(varname)
            arrLen = len(vardict[varname])  # all fields are assumed to share the same length

        # if it's the first time, init the dataTable
        if self.initDataTableFlag:
            # create data description from the variables in the message
            self.dataDescription = [('time', 'datetime', 'time')]

            # split the data string to extract variable names
            for varname in psd.list_field_names():
                if varname == 'time':
                    continue

                self.dataDescription.append((varname, 'number', varname))

            self.initDataTableFlag = False

        # Add the records to the datatable
        for i in xrange(arrLen):
            varTuple = []

            for varname, _, _ in self.dataDescription:
                val = float(vardict[varname][i])
                if varname == 'time':
                    varTuple.append(datetime.fromtimestamp(val))
                else:
                    varTuple.append(val)

            # Append the tuples to the data table
            self.dataTableContent.append(varTuple)

            if self.realtime_flag:
                # Maintain a sliding window for realtime transform processes:
                # pop from the front until the table is back down to the window size
                realtime_window_size = 100
                while len(self.dataTableContent) > realtime_window_size:
                    self.dataTableContent.pop(0)
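                # (A collections.deque with maxlen=realtime_window_size would be
                # the idiomatic alternative to popping from a list head, at the
                # cost of changing the type of self.dataTableContent.)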

        if not self.realtime_flag:
            # This is the historical view part. Make a note of how many records were received
            data_stream_id = self.stream_def.data_stream_id
            element_count_id = self.stream_def.identifiables[
                data_stream_id].element_count_id
            # From each granule you can check the constraint on the number of records
            expected_range = packet.identifiables[
                element_count_id].constraint.intervals[0]

            # The number of records in a given packet is:
            self.total_num_of_records_recvd += packet.identifiables[
                element_count_id].value

        # submit the Json version of the datatable to the viz service
        if self.realtime_flag:
            # create the google viz data table
            data_table = gviz_api.DataTable(self.dataDescription)
            data_table.LoadData(self.dataTableContent)

            # submit resulting table back using the out stream publisher
            msg = {
                "viz_product_type": "google_realtime_dt",
                "data_product_id": self.data_product_id,
                "data_table": data_table.ToJSonResponse()
            }
            self.out_stream_pub.publish(msg)
        else:
            # Submit table back to the service if we received all the replay data
            if self.total_num_of_records_recvd == (expected_range[1] + 1):
                # If the datatable received was too big, decimate on the fly to a fixed size
                max_google_dt_len = 1024
                if len(self.dataTableContent) > max_google_dt_len:
                    # use float division so the ceiling is computed correctly
                    # under Python 2 integer division
                    decimation_factor = int(
                        math.ceil(
                            len(self.dataTableContent) / float(max_google_dt_len)))

                    # keep every decimation_factor-th row, starting with the first
                    self.dataTableContent = self.dataTableContent[::decimation_factor]

                data_table = gviz_api.DataTable(self.dataDescription)
                data_table.LoadData(self.dataTableContent)

                # submit resulting table back using the out stream publisher
                msg = {
                    "viz_product_type": "google_dt",
                    "data_product_id_token": self.data_product_id_token,
                    "data_table": data_table.ToJSonResponse()
                }
                self.out_stream_pub.publish(msg)
                return

        # clear the shared list for future use
        self.varTuple[:] = []
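

# Hedged standalone sketch (not from the original source) illustrating the
# gviz_api pattern used by VizTransformProcForGoogleDT above: build a table
# description, load row tuples, then serialize to a JSON response string.
# Assumes the google-visualization-python package is installed.
def _gviz_datatable_sketch():
    import gviz_api
    from datetime import datetime

    description = [('time', 'datetime', 'time'), ('temp', 'number', 'temp')]
    data_table = gviz_api.DataTable(description)
    data_table.LoadData([(datetime.fromtimestamp(0), 10.0),
                         (datetime.fromtimestamp(60), 10.5)])
    return data_table.ToJSonResponse()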


class TestVisualizationServiceIntegration(VisualizationIntegrationTestHelper):

    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process=VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    def validate_messages(self, msgs):
        # Despite the plural name, this is invoked with a single message object
        msg = msgs

        rdt = RecordDictionaryTool.load_from_granule(msg.body)

        vardict = {}
        vardict['temp'] = get_safe(rdt, 'temp')
        vardict['time'] = get_safe(rdt, 'time')
        print vardict['time']
        print vardict['temp']

    @attr('LOCOINT')
    #@patch.dict('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register': False}}})
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = 'user_queue'

        xq = self.container.ex_manager.create_xn_queue(user_queue_name)

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name = user_queue_name,
            name = "user visualization queue"
        )

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

#        for x in range(msg_count):
#            mo = subscriber.get_one_msg(timeout=1)
#            print mo.body
#            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for m in msgs:
            m.ack()
            self.validate_messages(m)
            # print m.body



        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)


        #Let more messages accumulate while the simulator is still running
        gevent.sleep(5.0)


        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data



        #Should see more messages in the queue
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 3: %s ' % msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for m in msgs:
            m.ack()
            self.validate_messages(m)

        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 4: %s ' % msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)


    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_multiple_visualization_queue(self):

        # set up a workflow with the salinity transform. We will direct the original stream and the derived
        # salinity stream to queues and test that the subscriptions to the queues work correctly
        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Viz_Test_Workflow',description='A workflow to test collection of multiple data products in queues')

        workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name
        #-------------------------------------------------------------------------------------------------------------------------
        #Add a transformation process definition for salinity
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False)  #Don't persist the intermediate data product
        configuration = {'stream_name' : 'salinity'}
        workflow_step_obj.configuration = configuration
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 1 )

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=30)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        # Now for each of the data_product_stream_ids create a queue and pipe their data to the queue


        user_queue_name1 = 'user_queue_1'
        user_queue_name2 = 'user_queue_2'

        # use idempotency to create queues
        xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
        self.addCleanup(xq1.delete)
        xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
        self.addCleanup(xq2.delete)
        xq1.purge()
        xq2.purge()

        # create_subscription takes a list of stream_ids, so wrap each id in its own list

        dp_stream_id1 = [data_product_stream_ids[0]]
        dp_stream_id2 = [data_product_stream_ids[1]]

        salinity_subscription_id1 = self.pubsubclient.create_subscription( stream_ids=dp_stream_id1,
            exchange_name = user_queue_name1, name = "user visualization queue1")

        salinity_subscription_id2 = self.pubsubclient.create_subscription( stream_ids=dp_stream_id2,
            exchange_name = user_queue_name2, name = "user visualization queue2")

        # Create subscribers for the output of the queue
        subscriber1 = Subscriber(from_name=xq1)
        subscriber1.initialize()
        subscriber2 = Subscriber(from_name=xq2)
        subscriber2.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id1)
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id2)

        # Start input stream and wait for some time
        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(5.0)  # Send some messages - don't care how many

        msg_count,_ = xq1.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)
        msg_count,_ = xq2.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)

        msgs1 = subscriber1.get_all_msgs(timeout=2)
        msgs2 = subscriber2.get_all_msgs(timeout=2)

        for m1, m2 in zip(msgs1, msgs2):
            m1.ack()
            m2.ack()
            self.validate_multiple_vis_queue_messages(m1.body, m2.body)

        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        # close the subscription and queues
        subscriber1.close()
        subscriber2.close()

        return


    #@unittest.skip('Skipped because of broken record dictionary work-around')
    def test_realtime_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)


        #TODO - Need to add workflow creation for google data table
        vis_params ={}
        vis_token = self.vis_client.initiate_realtime_visualization(data_product_id=ctd_parsed_data_product_id, visualization_parameters=vis_params)

        #Let the simulator publish into the realtime queue for a while
        gevent.sleep(10.0)

        #TODO - find out what the actual return data type should be
        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
        if vis_data:
            self.validate_google_dt_transform_results(vis_data)

        #Let a few more messages accumulate in the queue
        gevent.sleep(5.0)


        #Turning off after everything - since it is more representative of an always on stream of data!
        #todo remove the try except
        try:
            self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data
        except:
            log.warning("cancelling process did not work")

        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)

        if vis_data:
            self.validate_google_dt_transform_results(vis_data)

        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)


    #@unittest.skip('Skipped because of broken record dictionary work-around')
    def test_google_dt_overview_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()

        # start producing data
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        # Generate some data for a few seconds
        gevent.sleep(5.0)

        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data

        # Use the data product to test the data retrieval and google dt generation capability of the vis service
        vis_data = self.vis_client.get_visualization_data(ctd_parsed_data_product_id)

        # validate the returned data
        self.validate_vis_service_google_dt_results(vis_data)


    #@unittest.skip('Skipped because of broken record dictionary work-around')
    def test_mpl_graphs_overview_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        # Generate some data for a few seconds
        gevent.sleep(5.0)

        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data

        # Use the data product to test the data retrieval and image (matplotlib) generation capability of the vis service
        vis_data = self.vis_client.get_visualization_image(ctd_parsed_data_product_id)

        # validate the returned data
        self.validate_vis_service_mpl_graphs_results(vis_data)

        return
Example #8
class TestGovernanceInt(IonIntegrationTestCase):



    def setUp(self):

        # Start container
        self._start_container()

        #Load a deploy file
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process=GovernanceTestProcess()


        #Load system policies after container has started all of the services
        LoadSystemPolicy.op_load_system_policies(process)

        self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

        self.id_client = IdentityManagementServiceProcessClient(node=self.container.node, process=process)

        self.pol_client = PolicyManagementServiceProcessClient(node=self.container.node, process=process)

        self.org_client = OrgManagementServiceProcessClient(node=self.container.node, process=process)

        self.ims_client = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)

        self.ems_client = ExchangeManagementServiceProcessClient(node=self.container.node, process=process)

        self.ion_org = self.org_client.find_org()


        self.system_actor = self.id_client.find_actor_identity_by_name(name=CFG.system.system_actor)
        log.debug('system actor:' + self.system_actor._id)


        sa_header_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(self.system_actor._id))
        self.sa_user_header = {'ion-actor-id': self.system_actor._id, 'ion-actor-roles': sa_header_roles }

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_basic_policy(self):

        #Make sure that the system policies have been loaded
        policy_list,_ = self.rr_client.find_resources(restype=RT.Policy)
        self.assertNotEqual(len(policy_list),0,"The system policies have not been loaded into the Resource Registry")


        #Attempt to access an operation in a service which does not have specific policies set
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)

        #Add a new policy to allow the above service call.
        policy_obj = IonObject(RT.Policy, name='Exchange_Management_Test_Policy', definition_type="Service", rule=TEST_POLICY_TEXT,
            description='Allow specific operations in the Exchange Management Service for anonymous user')

        test_policy_id = self.pol_client.create_policy(policy_obj, headers=self.sa_user_header)
        self.pol_client.add_service_policy('exchange_management', test_policy_id, headers=self.sa_user_header)
        log.info('Policy created: ' + policy_obj.name)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The previous attempt at this operation should now be allowed.
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(BadRequest) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'Arguments not set',cm.exception.message)

        #disable the test policy to try again
        self.pol_client.disable_policy(test_policy_id, headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The same request that was previously allowed should now be denied
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)

        #now enable the test policy to try again
        self.pol_client.enable_policy(test_policy_id, headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The previous attempt at this operation should now be allowed.
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(BadRequest) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'Arguments not set',cm.exception.message)

        self.pol_client.remove_service_policy('exchange_management', test_policy_id, headers=self.sa_user_header)
        self.pol_client.delete_policy(test_policy_id, headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The same request that was previously allowed should now be denied
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)

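    def _assert_denied(self, op_callable, expected_fragment):
        # Hedged helper sketch (not in the original source): factors out the
        # deny-assertion pattern repeated throughout these governance tests,
        # e.g. self._assert_denied(
        #          lambda: self.ems_client.create_exchange_space(es_obj),
        #          'exchange_management(create_exchange_space) has been denied')
        with self.assertRaises(Unauthorized) as cm:
            op_callable()
        self.assertIn(expected_fragment, cm.exception.message)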


    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_org_policy(self):

        #Make sure that the system policies have been loaded
        policy_list,_ = self.rr_client.find_resources(restype=RT.Policy)
        self.assertNotEqual(len(policy_list),0,"The system policies have not been loaded into the Resource Registry")


        with self.assertRaises(BadRequest) as cm:
            myorg = self.org_client.read_org()
        self.assertTrue(cm.exception.message == 'The org_id parameter is missing')


        user_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True)
        log.debug( "user id=" + user_id)

        user_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles }

        #Attempt to enroll a user anonymously - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id,user_id)
        self.assertIn( 'org_management(enroll_member) has been denied',cm.exception.message)

        #Attempt to let a user enroll themselves - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id,user_id, headers=user_header)
        self.assertIn( 'org_management(enroll_member) has been denied',cm.exception.message)

        #Attempt to enroll the user in the ION Root org as a manager - should not be allowed since
        #registration with the system implies membership in the ROOT Org.
        with self.assertRaises(BadRequest) as cm:
            self.org_client.enroll_member(self.ion_org._id,user_id, headers=self.sa_user_header)
        self.assertTrue(cm.exception.message == 'A request to enroll in the root ION Org is not allowed')

        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id)
        self.assertIn('org_management(find_enrolled_users) has been denied',cm.exception.message)


        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id, headers=user_header)
        self.assertIn( 'org_management(find_enrolled_users) has been denied',cm.exception.message)

        users = self.org_client.find_enrolled_users(self.ion_org._id, headers=self.sa_user_header)
        self.assertEqual(len(users),2)


        ## test_org_roles and policies

        roles = self.org_client.find_org_roles(self.ion_org._id)
        self.assertEqual(len(roles),3)
        self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id, self.system_actor._id, headers=self.sa_user_header)
        self.assertEqual(len(roles),3)
        self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE, MANAGER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id, user_id, headers=self.sa_user_header)
        self.assertEqual(len(roles),1)
        self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE])


        with self.assertRaises(NotFound) as nf:
            org2 = self.org_client.find_org(ORG2)
        self.assertIn('The Org with name Org2 does not exist',nf.exception.message)

        org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
        org2_id = self.org_client.create_org(org2, headers=self.sa_user_header)

        org2 = self.org_client.find_org(ORG2)
        self.assertEqual(org2_id, org2._id)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles),2)
        self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE])


        operator_role = IonObject(RT.UserRole, name=INSTRUMENT_OPERATOR,label='Instrument Operator', description='Instrument Operator')

        #First try to add the user role anonymously
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.add_user_role(org2_id, operator_role)
        self.assertIn('org_management(add_user_role) has been denied',cm.exception.message)

        self.org_client.add_user_role(org2_id, operator_role, headers=self.sa_user_header)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles),3)
        self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE,  INSTRUMENT_OPERATOR])


        # test requests for enrollments and roles.

        #First try to find user requests anonymously
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id)
        self.assertIn('org_management(find_requests) has been denied',cm.exception.message)


        #Next try to find user requests as a basic member
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id, headers=user_header)
        self.assertIn('org_management(find_requests) has been denied',cm.exception.message)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),0)

        # First try to request a role without being a member
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header )
        self.assertIn('A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)',cm.exception.message)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),0)

        req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header )

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        #User tries requesting enrollment again - this should fail
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header )
        self.assertIn('A precondition for this request has not been satisfied: enroll_req_not_exist(org_id,user_id)',cm.exception.message)

        #Manager denies the request
        self.org_client.deny_request(org2_id,req_id,'To test the deny process', headers=self.sa_user_header)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        self.assertEqual(requests[0].status, REQUEST_DENIED)

        #Manager approves request
        self.org_client.approve_request(org2_id,req_id, headers=self.sa_user_header)

        users = self.org_client.find_enrolled_users(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users),0)

        #User Accepts request
        self.org_client.accept_request(org2_id,req_id,  headers=user_header)

        users = self.org_client.find_enrolled_users(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users),1)

        #User tries requesting enrollment again - this should fail
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header )
        self.assertIn('A precondition for this request has not been satisfied: is_not_enrolled(org_id,user_id)',cm.exception.message)


        req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header )

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),2)

        requests = self.org_client.find_requests(org2_id,request_status='Open', headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
        self.assertEqual(len(requests),2)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.RoleRequest, headers=user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),1)

        ia_list,_ = self.rr_client.find_resources(restype=RT.InstrumentAgent)

        self.assertEqual(len(ia_list),0)

        ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The first Instrument Agent')

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj)
        self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj, headers=user_header)
        self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)

        #Manager approves request
        self.org_client.approve_request(org2_id,req_id, headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),0)

        #User accepts request
        self.org_client.accept_request(org2_id, req_id, headers=user_header)

        #Refresh headers with new role
        user_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles }

        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent2', description='The second Instrument Agent')
        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_list,_ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
        self.assertEqual(len(ia_list),2)

        #First make an acquire-resource request with a non-enrolled user.
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_acquire_resource(org2_id,self.system_actor._id,ia_list[0]._id , headers=self.sa_user_header)
        self.assertIn('A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)',cm.exception.message)

        req_id = self.org_client.request_acquire_resource(org2_id,user_id,ia_list[0]._id , headers=user_header)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),3)

        requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
        self.assertEqual(len(requests),3)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.ResourceRequest, headers=user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),1)

        self.assertEqual(requests[0]._id, req_id)

        #Manager approves Instrument request
        self.org_client.approve_request(org2_id,req_id, headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),0)

        #User accepts request
        self.org_client.accept_request(org2_id,req_id, headers=user_header)

        #Check commitments
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),1)

        commitments, _ = self.rr_client.find_objects(user_id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),1)

        #Release the resource
        self.org_client.release_resource(org2_id,user_id ,ia_list[0]._id, headers=self.sa_user_header,timeout=15)  #TODO - Refactor release_resource

        #Check commitments
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),0)

        commitments, _ = self.rr_client.find_objects(user_id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),0)
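
    def _approve_and_accept(self, org_id, req_id, user_header):
        # Hedged helper sketch (not in the original source): the
        # approve-then-accept handshake exercised repeatedly above. A manager
        # approves the request, then the requesting user accepts it.
        self.org_client.approve_request(org_id, req_id, headers=self.sa_user_header)
        self.org_client.accept_request(org_id, req_id, headers=user_header)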
Example #9
class TestGovernanceInt(IonIntegrationTestCase):



    def setUp(self):

        # Start container
        self._start_container()

        #Load a deploy file that also loads basic policy.
        self.container.start_rel_from_url('res/deploy/r2gov.yml')

        process=FakeProcess()

        self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

        self.id_client = IdentityManagementServiceProcessClient(node=self.container.node, process=process)

        self.org_client = OrgManagementServiceProcessClient(node=self.container.node, process=process)

        self.ims_client = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)

        self.ion_org = self.org_client.find_org()


        self.system_actor = self.id_client.find_actor_identity_by_name(name=CFG.system.system_actor)
        log.debug('system actor:' + self.system_actor._id)


        sa_header_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(self.system_actor._id))
        self.sa_user_header = {'ion-actor-id': self.system_actor._id, 'ion-actor-roles': sa_header_roles }



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    @unittest.skip("Not working on buildbot for some reason but works on Mac")
    def test_org_policy(self):

        with self.assertRaises(BadRequest) as cm:
            myorg = self.org_client.read_org()
        self.assertTrue(cm.exception.message == 'The org_id parameter is missing')


        user_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True)
        log.debug( "user id=" + user_id)

        user_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles }

        #Attempt to enroll a user anonymously - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id,user_id)
        self.assertIn( 'org_management(enroll_member) has been denied',cm.exception.message)

        #Attempt to let a user enroll themselves - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id,user_id, headers=user_header)
        self.assertIn( 'org_management(enroll_member) has been denied',cm.exception.message)

        #Attempt to enroll the user in the ION Root org as a manager - should not be allowed since
        #registration with the system implies membership in the ROOT Org.
        with self.assertRaises(BadRequest) as cm:
            self.org_client.enroll_member(self.ion_org._id,user_id, headers=self.sa_user_header)
        self.assertTrue(cm.exception.message == 'A request to enroll in the root ION Org is not allowed')

        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id)
        self.assertIn('org_management(find_enrolled_users) has been denied',cm.exception.message)


        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id, headers=user_header)
        self.assertIn( 'org_management(find_enrolled_users) has been denied',cm.exception.message)

        users = self.org_client.find_enrolled_users(self.ion_org._id, headers=self.sa_user_header)
        self.assertEqual(len(users),2)


        ## test_org_roles and policies

        roles = self.org_client.find_org_roles(self.ion_org._id)
        self.assertEqual(len(roles),3)
        self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id, self.system_actor._id, headers=self.sa_user_header)
        self.assertEqual(len(roles),3)
        self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE, MANAGER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id, user_id, headers=self.sa_user_header)
        self.assertEqual(len(roles),1)
        self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE])


        with self.assertRaises(NotFound) as nf:
            org2 = self.org_client.find_org(ORG2)
        self.assertIn('The Org with name Org2 does not exist',nf.exception.message)

        org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
        org2_id = self.org_client.create_org(org2, headers=self.sa_user_header)

        org2 = self.org_client.find_org(ORG2)
        self.assertEqual(org2_id, org2._id)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles),2)
        self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE])


        operator_role = IonObject(RT.UserRole, name=INSTRUMENT_OPERATOR,label='Instrument Operator', description='Instrument Operator')

        #First try to add the user role anonymously
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.add_user_role(org2_id, operator_role)
        self.assertIn('org_management(add_user_role) has been denied',cm.exception.message)

        self.org_client.add_user_role(org2_id, operator_role, headers=self.sa_user_header)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles),3)
        self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE,  INSTRUMENT_OPERATOR])


        # test requests for enrollments and roles.

        #First try to find user requests anonymously
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id)
        self.assertIn('org_management(find_requests) has been denied',cm.exception.message)


        #Next try to find user requests as a basic member
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id, headers=user_header)
        self.assertIn('org_management(find_requests) has been denied',cm.exception.message)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),0)

        # First try to request a role without being a member
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header )
        self.assertIn('A precondition for this request has not been satisfied: is_enrolled(org_id,user_id) == True',cm.exception.message)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),0)

        req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header )

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        #User tries requesting enrollment again - this should fail
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header )
        self.assertIn('A precondition for this request has not been satisfied: enroll_req_exists(org_id,user_id) == False',cm.exception.message)

        #Manager denies the request
        self.org_client.deny_request(org2_id,req_id,'To test the deny process', headers=self.sa_user_header)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        self.assertEqual(requests[0].status, REQUEST_DENIED)

        #Manager approves request
        self.org_client.approve_request(org2_id,req_id, headers=self.sa_user_header)

        users = self.org_client.find_enrolled_users(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users),0)

        #User accepts request
        self.org_client.accept_request(org2_id,req_id,  headers=user_header)

        users = self.org_client.find_enrolled_users(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users),1)

        req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header )

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),2)

        requests = self.org_client.find_requests(org2_id,request_status='Open', headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
        self.assertEqual(len(requests),2)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.RoleRequest, headers=user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),1)

        ia_list = self.ims_client.find_instrument_agents()
        self.assertEqual(len(ia_list),0)

        ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The first Instrument Agent')

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj)
        self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj, headers=user_header)
        self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)

        #Manager approves request
        self.org_client.approve_request(org2_id,req_id, headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),0)

        #User accepts request
        self.org_client.accept_request(org2_id, req_id, headers=user_header)

        #Refresh headers with new role
        user_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles }

        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent2', description='The second Instrument Agent')
        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_list = self.ims_client.find_instrument_agents()
        self.assertEqual(len(ia_list),2)

        #First make an acquire-resource request with a non-enrolled user.
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_acquire_resource(org2_id, self.system_actor._id, ia_list[0]._id, headers=self.sa_user_header)
        self.assertIn('A precondition for this request has not been satisfied: is_enrolled(org_id,user_id) == True',cm.exception.message)

        req_id = self.org_client.request_acquire_resource(org2_id, user_id, ia_list[0]._id, headers=user_header)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),3)

        requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
        self.assertEqual(len(requests),3)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.ResourceRequest, headers=user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),1)

        self.assertEqual(requests[0]._id, req_id)

        #Manager approves Instrument request
        self.org_client.approve_request(org2_id,req_id, headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),0)

        #User accepts request
        self.org_client.accept_request(org2_id,req_id, headers=user_header)

        #Check commitments
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),1)

        commitments, _ = self.rr_client.find_objects(user_id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),1)

        #Release the resource
        self.org_client.release_resource(org2_id, user_id, ia_list[0]._id, headers=self.sa_user_header)

        #Check commitments
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),0)

        commitments, _ = self.rr_client.find_objects(user_id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),0)
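
The enroll, role, and resource negotiations above all follow the same three-step lifecycle: a user files a request, a manager approves or denies it, and the change only takes effect once the user accepts. Below is a minimal sketch of that flow reusing the client and header conventions from the test; the helper name and argument order are illustrative, not part of the service API.

def negotiate_enrollment(org_client, org_id, user_id, user_header, manager_header):
    # the service checks its preconditions here (e.g. no duplicate enrollment request)
    req_id = org_client.request_enroll(org_id, user_id, headers=user_header)

    # a manager decides; as the test shows, even a denied request can later be approved
    org_client.approve_request(org_id, req_id, headers=manager_header)

    # enrollment becomes effective only after the user accepts the approved request
    org_client.accept_request(org_id, req_id, headers=user_header)
    return req_id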
示例#10
0
    requests = org_client.find_user_requests(
        user._id, org2_id,
        headers={'ion-actor-id': user._id, 'ion-actor-roles': user_header_roles})
    log.info("User Requests count: %d" % len(requests))
    for r in requests:
        log.info('User Request: ' + str(r))

    if req_id is not None:
        commitments, _ = rr_client.find_objects(ia_list[0]._id,
                                                PRED.hasCommitment,
                                                RT.ResourceCommitment)
        log.info("Resource Commitments: %d" % len(commitments))

        for c in commitments:
            log.info('Commitments: ' + str(c))

        log.info("Release resource commitment")
        org_client.release_resource(org2_id,
                                    user._id,
                                    ia_list[0]._id,
                                    headers={
                                        'ion-actor-id': system_actor._id,
                                        'ion-actor-roles': sa_header_roles
                                    })
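
Commitments are ordinary resource-registry associations, so the release shown above can be verified from either end of the hasCommitment predicate. A small sketch under that assumption, using the same rr_client and org_client handles as the snippet; the helper name is hypothetical.

def release_and_verify(rr_client, org_client, org_id, user_id, resource_id, manager_header):
    org_client.release_resource(org_id, user_id, resource_id, headers=manager_header)

    # after release, neither the resource nor the user should hold a ResourceCommitment
    by_resource, _ = rr_client.find_objects(resource_id, PRED.hasCommitment, RT.ResourceCommitment)
    by_user, _ = rr_client.find_objects(user_id, PRED.hasCommitment, RT.ResourceCommitment)
    return len(by_resource) == 0 and len(by_user) == 0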
示例#11
0
    for r in requests:
        log.info('User Request: ' +str(r))

    log.info("User accepts request")
    if req_id is not None:
        org_client.accept_request(org2_id,req_id, headers={'ion-actor-id': user._id, 'ion-actor-roles': user_header_roles })

    requests = org_client.find_user_requests(user._id, org2_id, headers={'ion-actor-id': user._id, 'ion-actor-roles': user_header_roles })
    log.info("User Requests count: %d" % len(requests))
    for r in requests:
        log.info('User Request: ' +str(r))



    if req_id is not None:
        commitments, _ = rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.ResourceCommitment)
        log.info("Resource Commitments: %d" % len(commitments))

        for c in commitments:
            log.info('Commitments: ' +str(c))

        log.info("Release resource commitment")
        org_client.release_resource(org2_id,user._id,ia_list[0]._id, headers={'ion-actor-id': system_actor._id, 'ion-actor-roles': sa_header_roles })


        commitments, _ = rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.ResourceCommitment)
        log.info("Resource Commitments: %d" % len(commitments))
        for c in commitments:
            log.info('Commitments: ' +str(c))
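
Each hand-built headers dict in these snippets is just the ion-actor-id/ion-actor-roles pair. A convenience sketch for rebuilding it after a role change, using the same helpers seen in the governance test above (get_role_message_headers and find_all_roles_by_user); the wrapper itself is hypothetical.

def build_actor_header(org_client, actor_id):
    # roles must be refreshed after a role request is accepted, otherwise policy
    # keeps being evaluated against the stale roles baked into the old headers
    roles = get_role_message_headers(org_client.find_all_roles_by_user(actor_id))
    return {'ion-actor-id': actor_id, 'ion-actor-roles': roles}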

示例#12
0
class VizTransformMatplotlibGraphs(TransformStreamPublisher,
                                   TransformEventListener,
                                   TransformStreamListener):
    """
    This class instantiates worker processes that subscribe to data streams and convert
    incoming data from CDM format to Matplotlib graphs

    """
    output_bindings = ['graph_image_param_dict']
    event_timer_interval = None

    def on_start(self):
        #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

        self.pubsub_management = PubsubManagementServiceProcessClient(
            process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(
            process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process=self)

        self.stream_info = self.CFG.get_safe('process.publish_streams', {})
        self.stream_names = self.stream_info.keys()
        self.stream_ids = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods = self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent",
                                  callback=self.interval_timer_callback,
                                  origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(
                start_time="now",
                interval=self._str_to_secs(self.event_timer_interval),
                event_origin=event_origin,
                event_subtype="")

        super(VizTransformMatplotlibGraphs, self).on_start()

    # when the transform is used as a data process
    def recv_packet(self, packet, in_stream_route, in_stream_id):
        #Check to see if the class instance was set up as an event-triggered transform. If yes, skip the packet
        if self.event_timer_interval:
            return

        log.info('Received packet')
        mpl_data_granule = VizTransformMatplotlibGraphsAlgorithm.execute(
            packet, params=self.get_stream_definition())
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(mpl_data_granule)

    def get_stream_definition(self):
        stream_id = self.stream_ids[0]
        stream_def = self.pubsub_management.read_stream_definition(
            stream_id=stream_id)
        return stream_def._id

    def process_event(self, msg, headers):
        return

    def interval_timer_callback(self, *args, **kwargs):
        #Find the input data product for this process
        in_dp_id = self.CFG.get_safe('in_dp_id')

        print " >>>>>>>>>>>>>> IN DP ID from cfg : ", in_dp_id

        # get the dataset_id associated with the data_product. Need it to do the data retrieval
        ds_ids, _ = self.rrclient.find_objects(in_dp_id, PRED.hasDataset,
                                               RT.Dataset, True)
        if ds_ids is None or not ds_ids:
            return None

        # retrieve data for the specified time interval. Set up the query from the passed config first
        query = {}

        param_list_str = self.CFG.get_safe('parameters')
        if param_list_str:
            query['parameters'] = param_list_str.split(', ')
            # append time if not present in list of parameters
            if 'time' not in query['parameters']:
                query['parameters'].append('time')

        query['start_time'] = query['end_time'] = ntplib.system_to_ntp_time(
            time.time())  # Now
        query['stride_time'] = 1
        if self.CFG.get_safe('graph_time_period'):
            query['start_time'] = query['end_time'] - self._str_to_secs(
                self.CFG.get_safe('graph_time_period'))

        #print " >>>>>>>>>>>>>> QUERY = ", query

        #retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0],{'start_time':start_time,'end_time':end_time})
        retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0],
                                                                query=query)

        # add extra parameters from the config to the query; they are not needed by data retrieval
        if self.CFG.get_safe('resolution'):
            query['resolution'] = self.CFG.get_safe('resolution')

        # send the granule through the Algorithm code to get the matplotlib graphs
        mpl_pdict_id = self.dsm_client.read_parameter_dictionary_by_name(
            'graph_image_param_dict', id_only=True)

        mpl_stream_def = self.pubsub_client.create_stream_definition(
            'mpl', parameter_dictionary_id=mpl_pdict_id)
        fileName = self.CFG.get_safe('graph_time_period')
        mpl_data_granule = VizTransformMatplotlibGraphsAlgorithm.execute(
            retrieved_granule,
            config=query,
            params=mpl_stream_def,
            fileName=fileName)

        if mpl_data_granule is None:
            return None

        # publish on all specified output streams
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(mpl_data_granule)

        return

    def _str_to_secs(self, time_period):
        # this method converts commonly used time periods to their equivalent in seconds
        # separate the alphabetic and numeric parts of the time period
        time_n = time_period.lower().rstrip('abcdefghijklmnopqrstuvwxyz ')
        time_a = time_period.lower().lstrip('0123456789. ')

        # determine whether the user specified secs, mins, hours, days, weeks, months or years
        factor = None
        if time_a in ('sec', 'secs', 'second', 'seconds'):
            factor = 1
        elif time_a in ('min', 'mins', 'minute', 'minutes'):
            factor = 60
        elif time_a in ('hr', 'hrs', 'hour', 'hours'):
            factor = 60 * 60
        elif time_a in ('day', 'days'):
            factor = 60 * 60 * 24
        elif time_a in ('wk', 'wks', 'week', 'weeks'):
            factor = 60 * 60 * 24 * 7
        elif time_a in ('mon', 'mons', 'month', 'months'):
            factor = 60 * 60 * 24 * 30
        elif time_a in ('yr', 'yrs', 'year', 'years'):
            factor = 60 * 60 * 24 * 365

        # guard against unrecognized units instead of failing on None arithmetic below
        if factor is None:
            raise BadRequest('Unrecognized time period: %s' % time_period)

        time_period_secs = float(time_n) * factor
        return time_period_secs

    def on_quit(self):

        #Cancel the timer
        if hasattr(self, 'interval_timer_id'):
            self.ssclient.cancel_timer(self.interval_timer_id)

        super(VizTransformMatplotlibGraphs, self).on_quit()
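
The unit handling in _str_to_secs can also be read as a lookup table. The sketch below mirrors the factors used by the method above (months as 30 days, years as 365 days); it is a standalone illustration, not part of the class.

_TIME_FACTORS = {
    1: ('sec', 'secs', 'second', 'seconds'),
    60: ('min', 'mins', 'minute', 'minutes'),
    60 * 60: ('hr', 'hrs', 'hour', 'hours'),
    60 * 60 * 24: ('day', 'days'),
    60 * 60 * 24 * 7: ('wk', 'wks', 'week', 'weeks'),
    60 * 60 * 24 * 30: ('mon', 'mons', 'month', 'months'),
    60 * 60 * 24 * 365: ('yr', 'yrs', 'year', 'years'),
}

def str_to_secs(time_period):
    # same alphabetic/numeric split as the method above
    time_n = time_period.lower().rstrip('abcdefghijklmnopqrstuvwxyz ')
    time_a = time_period.lower().lstrip('0123456789. ')
    for factor, aliases in _TIME_FACTORS.items():
        if time_a in aliases:
            return float(time_n) * factor
    raise ValueError('Unrecognized time period: %s' % time_period)

# e.g. str_to_secs('2 hrs') -> 7200.0 and str_to_secs('1 day') -> 86400.0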
示例#13
0
class VizTransformProcForGoogleDT(TransformDataProcess):

    """
    This class instantiates worker processes that subscribe to data streams and convert
    incoming data from CDM format to JSON-style Google DataTables

    """
    def on_start(self):
        super(VizTransformProcForGoogleDT,self).on_start()
        self.initDataTableFlag = True

        # need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(node=self.container.node)

        # extract the various parameters passed
        self.out_stream_id = self.CFG.get('process').get('publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        out_stream_pub_registrar = StreamPublisherRegistrar(process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        stream_def_resource = self.rr_cli.read(self.stream_def_id)
        self.stream_def = stream_def_resource.container
        self.realtime_flag = False
        if self.CFG.get("realtime_flag") == "True":
            self.realtime_flag = True
        else:
            self.data_product_id_token = self.CFG.get('data_product_id_token')


        # extract the stream_id associated with the DP. Needed later
        stream_ids,_ = self.rr_cli.find_objects(self.data_product_id, PRED.hasStream, None, True)
        self.stream_id = stream_ids[0]

        self.dataDescription = []
        self.dataTableContent = []
        self.varTuple = []
        self.total_num_of_records_recvd = 0


    def process(self, packet):

        log.debug('(%s): Received Viz Data Packet' % (self.name) )

        element_count_id = 0
        expected_range = []

        psd = PointSupplementStreamParser(stream_definition=self.stream_def, stream_granule=packet)
        vardict = {}
        arrLen = None
        for varname in psd.list_field_names():
            vardict[varname] = psd.get_values(varname)
            arrLen = len(vardict[varname])


        #if it's the first time, init the dataTable
        if self.initDataTableFlag:
            # create data description from the variables in the message
            self.dataDescription = [('time', 'datetime', 'time')]

            # add a column description for each non-time variable in the message
            for varname in psd.list_field_names():
                if varname == 'time':
                    continue

                self.dataDescription.append((varname, 'number', varname))

            self.initDataTableFlag = False


        # Add the records to the datatable
        for i in xrange(arrLen):
            varTuple = []

            for varname,_,_ in self.dataDescription:
                val = float(vardict[varname][i])
                if varname == 'time':
                    varTuple.append(datetime.fromtimestamp(val))
                else:
                    varTuple.append(val)

            # Append the tuples to the data table
            self.dataTableContent.append(varTuple)

            if self.realtime_flag:
                # Maintain a sliding window for realtime transform processes
                realtime_window_size = 100
                if len(self.dataTableContent) > realtime_window_size:
                    # always pop the first element till window size is what we want
                    while len(self.dataTableContent) > realtime_window_size:
                        self.dataTableContent.pop(0)


        if not self.realtime_flag:
            # This is the historical view part. Make a note of how many records were received
            data_stream_id = self.stream_def.data_stream_id
            element_count_id = self.stream_def.identifiables[data_stream_id].element_count_id
            # From each granule you can check the constraint on the number of records
            expected_range = packet.identifiables[element_count_id].constraint.intervals[0]

            # The number of records in a given packet is:
            self.total_num_of_records_recvd += packet.identifiables[element_count_id].value


        # submit the JSON version of the datatable to the viz service
        if self.realtime_flag:
            # create the google viz data table
            data_table = gviz_api.DataTable(self.dataDescription)
            data_table.LoadData(self.dataTableContent)

            # submit resulting table back using the out stream publisher
            msg = {"viz_product_type": "google_realtime_dt",
                   "data_product_id": self.data_product_id,
                   "data_table": data_table.ToJSonResponse() }
            self.out_stream_pub.publish(msg)
        else:
            # Submit table back to the service if we received all the replay data
            if self.total_num_of_records_recvd == (expected_range[1] + 1):
                # If the datatable received was too big, decimate on the fly to a fixed size
                max_google_dt_len = 1024
                if len(self.dataTableContent) > max_google_dt_len:
                    # use float division so the ceiling rounds up; integer division
                    # would truncate first and under-decimate the table
                    decimation_factor = int(math.ceil(len(self.dataTableContent) / float(max_google_dt_len)))

                    # walk backwards so pops do not shift the indices still to visit
                    for i in xrange(len(self.dataTableContent) - 1, 0, -1):
                        if i % decimation_factor == 0:
                            continue
                        self.dataTableContent.pop(i)

                data_table = gviz_api.DataTable(self.dataDescription)
                data_table.LoadData(self.dataTableContent)

                # submit resulting table back using the out stream publisher
                msg = {"viz_product_type": "google_dt",
                       "data_product_id_token": self.data_product_id_token,
                       "data_table": data_table.ToJSonResponse() }
                self.out_stream_pub.publish(msg)
                return


        # clear the tuple for future use
        self.varTuple[:] = []
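
The decimation step in process() is a stride selection: keep row 0 and every decimation_factor-th row so the table fits the size limit. A non-destructive sketch of the same logic (the method above pops rows in place instead):

import math

def decimate(rows, max_len=1024):
    if len(rows) <= max_len:
        return rows
    # float division so the ceiling rounds up instead of truncating first
    stride = int(math.ceil(len(rows) / float(max_len)))
    return rows[::stride]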
示例#14
0
class TestVisualizationServiceIntegration(VisualizationIntegrationTestHelper):
    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process = VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(
            node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.pubsubclient = PubsubManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.datasetclient = DatasetManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(
            node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(
            node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(
            node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    def validate_messages(self, msgs):
        msg = msgs

        rdt = RecordDictionaryTool.load_from_granule(msg.body)

        vardict = {}
        vardict['temp'] = get_safe(rdt, 'temp')
        vardict['time'] = get_safe(rdt, 'time')
        print vardict['time']
        print vardict['temp']

    @attr('LOCOINT')
    #@patch.dict('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register': False}}})
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = 'user_queue'

        xq = self.container.ex_manager.create_xn_queue(user_queue_name)

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name=user_queue_name,
            name="user visualization queue")

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(
            subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

        #        for x in range(msg_count):
        #            mo = subscriber.get_one_msg(timeout=1)
        #            print mo.body
        #            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])
        # print msgs[x].body

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)

        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many

        #Turning off after everything - since it is more representative of an always on stream of data!
        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        #Should see more messages in the queue
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 3: %s ' % msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 4: %s ' % msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)

    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_multiple_visualization_queue(self):

        # set up a workflow with the salinity transform and the doubler. We will direct the original stream and the doubled stream to queues
        # and test to make sure the subscription to the queues is working correctly
        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Viz_Test_Workflow',
            description='A workflow to test collection of multiple data products in queues')

        workflow_data_product_name = 'TEST-Workflow_Output_Product'  #Set a specific output product name
        #-------------------------------------------------------------------------------------------------------------------------
        #Add a transformation process definition for salinity
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=ctd_L2_salinity_dprocdef_id,
            persist_process_output_data=False
        )  #Don't persist the intermediate data product
        configuration = {'stream_name': 'salinity'}
        workflow_step_obj.configuration = configuration
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id,
                                               PRED.hasDataProcessDefinition)
        assertions(len(aids) == 1)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id, timeout=30)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        # Now for each of the data_product_stream_ids create a queue and pipe their data to the queue

        user_queue_name1 = 'user_queue_1'
        user_queue_name2 = 'user_queue_2'

        # use idempotency to create queues
        xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
        self.addCleanup(xq1.delete)
        xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
        self.addCleanup(xq2.delete)
        xq1.purge()
        xq2.purge()

        # the create_subscription call takes a list of stream_ids so create temp ones

        dp_stream_id1 = list()
        dp_stream_id1.append(data_product_stream_ids[0])
        dp_stream_id2 = list()
        dp_stream_id2.append(data_product_stream_ids[1])

        salinity_subscription_id1 = self.pubsubclient.create_subscription(
            stream_ids=dp_stream_id1,
            exchange_name=user_queue_name1,
            name="user visualization queue1")

        salinity_subscription_id2 = self.pubsubclient.create_subscription(
            stream_ids=dp_stream_id2,
            exchange_name=user_queue_name2,
            name="user visualization queue2")

        # Create subscribers for the output of the queue
        subscriber1 = Subscriber(from_name=xq1)
        subscriber1.initialize()
        subscriber2 = Subscriber(from_name=xq2)
        subscriber2.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(
            subscription_id=salinity_subscription_id1)
        self.pubsubclient.activate_subscription(
            subscription_id=salinity_subscription_id2)

        # Start input stream and wait for some time
        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(5.0)  # Send some messages - don't care how many

        msg_count, _ = xq1.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)
        msg_count, _ = xq2.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)

        msgs1 = subscriber1.get_all_msgs(timeout=2)
        msgs2 = subscriber2.get_all_msgs(timeout=2)

        for x in range(min(len(msgs1), len(msgs2))):
            msgs1[x].ack()
            msgs2[x].ack()
            self.validate_multiple_vis_queue_messages(msgs1[x].body,
                                                      msgs2[x].body)

        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        # close the subscription and queues
        subscriber1.close()
        subscriber2.close()

        return

    #@unittest.skip('Skipped because of broken record dictionary work-around')
    def test_realtime_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        #TODO - Need to add workflow creation for google data table
        vis_params = {}
        vis_token = self.vis_client.initiate_realtime_visualization(
            data_product_id=ctd_parsed_data_product_id,
            visualization_parameters=vis_params)

        #Trying to continue to receive messages in the queue
        gevent.sleep(10.0)  # Send some messages - don't care how many

        #TODO - find out what the actual return data type should be
        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
        if vis_data:
            self.validate_google_dt_transform_results(vis_data)

        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many

        #Turning off after everything - since it is more representative of an always on stream of data!
        #todo remove the try except
        try:
            # kill the ctd simulator process - that is enough data
            self.process_dispatcher.cancel_process(ctd_sim_pid)
        except Exception:
            log.warning("cancelling process did not work")

        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)

        if vis_data:
            self.validate_google_dt_transform_results(vis_data)

        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)

    #@unittest.skip('Skipped because of broken record dictionary work-around')
    def test_google_dt_overview_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()

        # start producing data
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        # Generate some data for a few seconds
        gevent.sleep(5.0)

        #Turning off after everything - since it is more representative of an always on stream of data!
        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        # Use the data product to test the data retrieval and google dt generation capability of the vis service
        vis_data = self.vis_client.get_visualization_data(
            ctd_parsed_data_product_id)

        # validate the returned data
        self.validate_vis_service_google_dt_results(vis_data)

    #@unittest.skip('Skipped because of broken record dictionary work-around')
    def test_mpl_graphs_overview_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        # Generate some data for a few seconds
        gevent.sleep(5.0)

        #Turning off after everything - since it is more representative of an always on stream of data!
        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        # Use the data product to test the data retrieval and google dt generation capability of the vis service
        vis_data = self.vis_client.get_visualization_image(
            ctd_parsed_data_product_id)

        # validate the returned data
        self.validate_vis_service_mpl_graphs_results(vis_data)

        return
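
The validate_messages helper above shows the general recipe for inspecting a queued message: load the granule in its body into a RecordDictionaryTool and read named parameter arrays out of it. A compact sketch of that recipe; the default field names are just the ones this test happens to use.

def extract_fields(msg, fields=('time', 'temp')):
    rdt = RecordDictionaryTool.load_from_granule(msg.body)
    # get_safe returns None rather than raising when a field is absent
    return dict((f, get_safe(rdt, f)) for f in fields)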
示例#15
0
class TestWorkflowManagementIntegration(VisualizationIntegrationTestHelper):
    def setUp(self):
        # Start container

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        preload_ion_params(self.container)

        #Instantiate a process to represent the test
        process = WorkflowServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(
            node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.pubsubclient = PubsubManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.datasetclient = DatasetManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(
            node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(
            node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(
            node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_SA_transform_components(self):

        assertions = self.assertTrue

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        ###
        ###  Setup the first transformation
        ###

        # Salinity: Data Process Definition
        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()

        l2_salinity_all_data_process_id, ctd_l2_salinity_output_dp_id = self.create_transform_process(
            ctd_L2_salinity_dprocdef_id, ctd_parsed_data_product_id,
            'salinity')

        ## get the stream id for the transform outputs
        stream_ids, _ = self.rrclient.find_objects(
            ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0)
        sal_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_stream_id)

        ###
        ###  Setup the second transformation
        ###

        # Salinity Doubler: Data Process Definition
        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()

        salinity_double_data_process_id, salinity_doubler_output_dp_id = self.create_transform_process(
            salinity_doubler_dprocdef_id, ctd_l2_salinity_output_dp_id,
            'salinity')

        stream_ids, _ = self.rrclient.find_objects(
            salinity_doubler_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0)
        sal_dbl_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_dbl_stream_id)

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id,
                                                      data_product_stream_ids)

        #Stop the transform processes
        self.dataprocessclient.deactivate_data_process(
            salinity_double_data_process_id)
        self.dataprocessclient.deactivate_data_process(
            l2_salinity_all_data_process_id)

        #Validate the data from each of the messages along the way
        self.validate_messages(results)

    @attr('LOCOINT')
    @attr('SMOKE')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_transform_workflow(self):

        assertions = self.assertTrue

        log.debug("Building the workflow definition")

        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Salinity_Test_Workflow',
            description='tests a workflow of multiple transform data processes'
        )

        workflow_data_product_name = 'TEST-Workflow_Output_Product'  #Set a specific output product name

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug("Adding a transformation process definition for salinity")
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=ctd_L2_salinity_dprocdef_id,
            persist_process_output_data=False
        )  #Don't persist the intermediate data product
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug(
            "Adding a transformation process definition for salinity doubler")
        #-------------------------------------------------------------------------------------------------------------------------

        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=salinity_doubler_dprocdef_id,
        )
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        log.debug("Creating workflow def in the resource registry")
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id,
                                               PRED.hasDataProcessDefinition)
        assertions(len(aids) == 2)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        log.debug("Creating the input data product")
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        log.debug("Creating and starting the workflow")
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id,
            ctd_parsed_data_product_id,
            persist_workflow_data_product=True,
            output_data_product_name=workflow_data_product_name,
            timeout=300)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        log.debug("persisting the output product")
        #self.dataproductclient.activate_data_product_persistence(workflow_product_id)
        dataset_ids, _ = self.rrclient.find_objects(workflow_product_id,
                                                    PRED.hasDataset,
                                                    RT.Dataset, True)
        assertions(len(dataset_ids) == 1)
        dataset_id = dataset_ids[0]

        log.debug(
            "Verifying the output data product name matches what was specified in the workflow definition"
        )
        workflow_product = self.rrclient.read(workflow_product_id)
        assertions(
            workflow_product.name.startswith(workflow_data_product_name),
            'Nope: %s != %s' %
            (workflow_product.name, workflow_data_product_name))

        log.debug(
            "Walking the associations to find the appropriate output data streams to validate the messages"
        )

        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 2)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        log.debug("data_product_stream_ids: %s" % data_product_stream_ids)

        log.debug(
            "Starting the output stream listener to monitor to collect messages"
        )
        results = self.start_output_stream_and_listen(ctd_stream_id,
                                                      data_product_stream_ids)

        log.debug("results::: %s" % results)

        log.debug("Stopping the workflow processes")
        self.workflowclient.terminate_data_process_workflow(
            workflow_id, False, timeout=250)  # Should test true at some point

        log.debug("Making sure the Workflow object was removed")
        objs, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(objs) == 0)

        log.debug(
            "Validating the data from each of the messages along the way")
        self.validate_messages(results)

        log.debug(
            "Checking to see if dataset id = %s, was persisted, and that it can be retrieved...."
            % dataset_id)
        self.validate_data_ingest_retrieve(dataset_id)

        log.debug("Cleaning up to make sure delete is correct.")
        self.workflowclient.delete_workflow_definition(workflow_def_id)
        """
        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )
        """

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_highcharts_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='HighCharts_Test_Workflow',
            description='Tests the workflow of converting stream data to HighCharts')

        #Add a transformation process definition
        highcharts_procdef_id = self.create_highcharts_data_process_definition()
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=highcharts_procdef_id,
            persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id, timeout=60)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id,
                                                      data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id=workflow_id, delete_data_products=False,
            timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_highcharts_transform_results(results)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)
        """
        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)

        assertions(len(workflow_def_ids) == 0 )
        """

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_mpl_graphs_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Mpl_Graphs_Test_Workflow',
            description='Tests the workflow of converting stream data to Matplotlib graphs')

        #Add a transformation process definition
        mpl_graphs_procdef_id = self.create_mpl_graphs_data_process_definition()
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=mpl_graphs_procdef_id,
            persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id,
            ctd_parsed_data_product_id,
            persist_workflow_data_product=True,
            timeout=60)

        workflow_output_ids, _ = self.rrclient.find_subjects(
            RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids, _ = self.rrclient.find_objects(
            workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream,
                                                       None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id,
                                                      data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id=workflow_id, delete_data_products=False,
            timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(results)

        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids, _ = self.rrclient.find_objects(
            workflow_dp_ids[-1], PRED.hasDataset, RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve_last_data_points(
            ds_ids[0], 10)

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(retrieved_granule)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)
        """
        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )
        """

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_multiple_workflow_instances(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(
            RT.WorkflowDefinition,
            name='Multiple_Test_Workflow',
            description='Tests the workflow of converting stream data')

        #Add a transformation process definition
        highcharts_procdef_id = self.create_highcharts_data_process_definition()
        workflow_step_obj = IonObject(
            'DataProcessWorkflowStep',
            data_process_definition_id=highcharts_procdef_id,
            persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(
            workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the first input data product
        ctd_stream_id1, ctd_parsed_data_product_id1 = self.create_ctd_input_stream_and_data_product(
            'ctd_parsed1')
        data_product_stream_ids.append(ctd_stream_id1)

        #Create and start the first workflow
        workflow_id1, workflow_product_id1 = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id1, timeout=60)

        #Create the second input data product
        ctd_stream_id2, ctd_parsed_data_product_id2 = self.create_ctd_input_stream_and_data_product(
            'ctd_parsed2')
        data_product_stream_ids.append(ctd_stream_id2)

        #Create and start the second workflow
        workflow_id2, workflow_product_id2 = self.workflowclient.create_data_process_workflow(
            workflow_def_id, ctd_parsed_data_product_id2, timeout=60)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_ids, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 2)

        #Start the first input stream process
        ctd_sim_pid1 = self.start_sinusoidal_input_stream_process(
            ctd_stream_id1)

        #Start the second input stream process
        ctd_sim_pid2 = self.start_simple_input_stream_process(ctd_stream_id2)

        #Start the output stream listener to monitor a set number of messages being sent through the workflows
        results = self.start_output_stream_and_listen(
            None, data_product_stream_ids, message_count_per_stream=5)

        # stop the flow of messages...
        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid1)
        self.process_dispatcher.cancel_process(ctd_sim_pid2)

        #Stop the first workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id=workflow_id1, delete_data_products=False,
            timeout=60)  # Should test true at some point

        #Stop the second workflow processes
        self.workflowclient.terminate_data_process_workflow(
            workflow_id=workflow_id2, delete_data_products=False,
            timeout=60)  # Should test true at some point

        workflow_ids, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 0)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)
        """