def delete_indexes(self):
    """Delete every search index known to this system.

    Covers the standard ElasticSearch indexes, the CouchDB indexes, and the
    two system-wide "edge" indexes (resources and events). Each index name is
    resolved to resource ids through the resource registry, then removed via
    the index management service.
    """
    ims_cli = IndexManagementServiceProcessClient(process=self)
    rr_cli = ResourceRegistryServiceProcessClient(process=self)

    def _purge(name, restype):
        # Find every registered index resource matching (name, type)
        # and delete each one through the index management service.
        index_ids, _ = rr_cli.find_resources(name=name, restype=restype,
                                             id_only=True)
        for index_id in index_ids:
            ims_cli.delete_index(index_id)

    #--------------------------------------------------------------------------------
    # Standard Indexes (only the key/name is needed; the mapped value is unused)
    #--------------------------------------------------------------------------------
    for index in STD_INDEXES:
        _purge(index, RT.ElasticSearchIndex)

    #--------------------------------------------------------------------------------
    # CouchDB Indexes
    #--------------------------------------------------------------------------------
    for index in COUCHDB_INDEXES:
        _purge(index, RT.CouchDBIndex)

    #--------------------------------------------------------------------------------
    # Edge Indexes (system-wide resources and events indexes)
    #--------------------------------------------------------------------------------
    _purge('%s_resources_index' % self.sysname, RT.ElasticSearchIndex)
    _purge('%s_events_index' % self.sysname, RT.ElasticSearchIndex)
def delete_indexes(self):
    """Delete every search index known to this system.

    Builds one worklist covering the standard ElasticSearch indexes, the
    CouchDB indexes, and the two system-wide "edge" indexes, then resolves
    each name through the resource registry and deletes the matching index
    resources via the index management service.
    """
    ims_cli = IndexManagementServiceProcessClient(process=self)
    rr_cli = ResourceRegistryServiceProcessClient(process=self)

    # Assemble (index name, resource type) pairs. Only the dict keys are
    # needed here; the mapped values were previously bound and ignored.
    targets = [(name, RT.ElasticSearchIndex) for name in STD_INDEXES]
    targets += [(name, RT.CouchDBIndex) for name in COUCHDB_INDEXES]
    # Edge indexes: system-wide resources and events indexes.
    targets.append(('%s_resources_index' % self.sysname, RT.ElasticSearchIndex))
    targets.append(('%s_events_index' % self.sysname, RT.ElasticSearchIndex))

    for name, restype in targets:
        index_ids, _ = rr_cli.find_resources(name=name, restype=restype,
                                             id_only=True)
        for index_id in index_ids:
            ims_cli.delete_index(index_id)
def list_resources_by_type(resource_type):
    """Gateway endpoint: list all registry resources of the given type.

    Returns a gateway JSON response wrapping the resource list on success,
    or a gateway error response built from the raised exception on failure.
    """
    # NOTE: the original declared an unused local (result = None); removed.
    client = ResourceRegistryServiceProcessClient(
        node=Container.instance.node, process=service_gateway_instance)
    try:
        # Resource type names are not in unicode, so coerce the (possibly
        # unicode) request parameter before querying the registry.
        res_list, _ = client.find_resources(
            restype=convert_unicode(resource_type))
        return json_response({GATEWAY_RESPONSE: res_list})
    except Exception as e:
        return build_error_response(e)
def list_resources_by_type(resource_type):
    """Return a JSON string of all registry resources of the given type.

    On failure, returns an "Error: <message>" string instead of raising.
    """
    ret = None
    client = ResourceRegistryServiceProcessClient(
        node=Container.instance.node, process=service_gateway_instance)
    try:
        res_list, _ = client.find_resources(restype=resource_type)
        # Materialize the resources into a plain list before serializing
        # (replaces the original manual append loop).
        ret = simplejson.dumps(list(res_list), default=ion_object_encoder)
    except Exception as e:
        ret = "Error: %s" % e
    # Bug fix: the original computed `ret` but never returned it, so every
    # caller received None regardless of success or failure.
    return ret
def find_resources_by_type(resource_type):
    """Gateway endpoint: find all registry resources of *resource_type*.

    Validates the requesting actor, queries the resource registry, and
    returns a gateway JSON response; any exception is converted into a
    gateway error response.
    """
    try:
        rr_client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node,
            process=service_gateway_instance)

        # Establish who is asking and check expiry; this also supplies the
        # governance headers for the registry call.
        ion_actor_id, expiry = get_governance_info_from_request()
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)

        # The registry expects a plain (non-unicode) resource type name.
        resources, _ = rr_client.find_resources(
            restype=convert_unicode(resource_type))

        return gateway_json_response(resources)
    except Exception as ex:
        return build_error_response(ex)
def find_resources_by_type(resource_type):
    """Gateway endpoint: find all registry resources of *resource_type*.

    Validates the requesting actor, queries the resource registry, and
    returns a gateway JSON response; any exception is converted into a
    gateway error response.
    """
    try:
        rr_client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node,
            process=service_gateway_instance)

        # Establish who is asking and check expiry; this also supplies the
        # governance headers for the registry call.
        ion_actor_id, expiry = get_governance_info_from_request()
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)

        # Resource type names are not in unicode — coerce to str first.
        resources, _ = rr_client.find_resources(restype=str(resource_type))

        return gateway_json_response(resources)
    except Exception as ex:
        return build_error_response(ex)
class TestWorkflowManagementIntegration(VisualizationIntegrationTestHelper): def setUp(self): # Start container self._start_container() self.container.start_rel_from_url('res/deploy/r2deploy.yml') # simulate preloading preload_ion_params(self.container) #Instantiate a process to represent the test process = WorkflowServiceTestProcess() # Now create client to DataProductManagementService self.rrclient = ResourceRegistryServiceProcessClient( node=self.container.node, process=process) self.damsclient = DataAcquisitionManagementServiceProcessClient( node=self.container.node, process=process) self.pubsubclient = PubsubManagementServiceProcessClient( node=self.container.node, process=process) self.ingestclient = IngestionManagementServiceProcessClient( node=self.container.node, process=process) self.imsclient = InstrumentManagementServiceProcessClient( node=self.container.node, process=process) self.dataproductclient = DataProductManagementServiceProcessClient( node=self.container.node, process=process) self.dataprocessclient = DataProcessManagementServiceProcessClient( node=self.container.node, process=process) self.datasetclient = DatasetManagementServiceProcessClient( node=self.container.node, process=process) self.workflowclient = WorkflowManagementServiceProcessClient( node=self.container.node, process=process) self.process_dispatcher = ProcessDispatcherServiceProcessClient( node=self.container.node, process=process) self.data_retriever = DataRetrieverServiceProcessClient( node=self.container.node, process=process) self.ctd_stream_def = SBE37_CDM_stream_definition() @attr('LOCOINT') @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Not integrated for CEI') def test_SA_transform_components(self): assertions = self.assertTrue #The list of data product streams to monitor data_product_stream_ids = list() #Create the input data product ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product( ) 
data_product_stream_ids.append(ctd_stream_id) ### ### Setup the first transformation ### # Salinity: Data Process Definition ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition( ) l2_salinity_all_data_process_id, ctd_l2_salinity_output_dp_id = self.create_transform_process( ctd_L2_salinity_dprocdef_id, ctd_parsed_data_product_id, 'salinity') ## get the stream id for the transform outputs stream_ids, _ = self.rrclient.find_objects( ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True) assertions(len(stream_ids) > 0) sal_stream_id = stream_ids[0] data_product_stream_ids.append(sal_stream_id) ### ### Setup the second transformation ### # Salinity Doubler: Data Process Definition salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition( ) salinity_double_data_process_id, salinity_doubler_output_dp_id = self.create_transform_process( salinity_doubler_dprocdef_id, ctd_l2_salinity_output_dp_id, 'salinity') stream_ids, _ = self.rrclient.find_objects( salinity_doubler_output_dp_id, PRED.hasStream, None, True) assertions(len(stream_ids) > 0) sal_dbl_stream_id = stream_ids[0] data_product_stream_ids.append(sal_dbl_stream_id) #Start the output stream listener to monitor and collect messages results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids) #Stop the transform processes self.dataprocessclient.deactivate_data_process( salinity_double_data_process_id) self.dataprocessclient.deactivate_data_process( l2_salinity_all_data_process_id) #Validate the data from each of the messages along the way self.validate_messages(results) @attr('LOCOINT') @attr('SMOKE') @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Not integrated for CEI') def test_transform_workflow(self): assertions = self.assertTrue log.debug("Building the workflow definition") workflow_def_obj = IonObject( RT.WorkflowDefinition, name='Salinity_Test_Workflow', description='tests a workflow of multiple transform data processes' ) 
workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name #------------------------------------------------------------------------------------------------------------------------- log.debug("Adding a transformation process definition for salinity") #------------------------------------------------------------------------------------------------------------------------- ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition( ) workflow_step_obj = IonObject( 'DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False ) #Don't persist the intermediate data product workflow_def_obj.workflow_steps.append(workflow_step_obj) #------------------------------------------------------------------------------------------------------------------------- log.debug( "Adding a transformation process definition for salinity doubler") #------------------------------------------------------------------------------------------------------------------------- salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition( ) workflow_step_obj = IonObject( 'DataProcessWorkflowStep', data_process_definition_id=salinity_doubler_dprocdef_id, ) workflow_def_obj.workflow_steps.append(workflow_step_obj) log.debug("Creating workflow def in the resource registry") workflow_def_id = self.workflowclient.create_workflow_definition( workflow_def_obj) aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition) assertions(len(aids) == 2) #The list of data product streams to monitor data_product_stream_ids = list() log.debug("Creating the input data product") ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product( ) data_product_stream_ids.append(ctd_stream_id) log.debug("Creating and starting the workflow") workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow( workflow_def_id, 
ctd_parsed_data_product_id, persist_workflow_data_product=True, output_data_product_name=workflow_data_product_name, timeout=300) workflow_output_ids, _ = self.rrclient.find_subjects( RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True) assertions(len(workflow_output_ids) == 1) log.debug("persisting the output product") #self.dataproductclient.activate_data_product_persistence(workflow_product_id) dataset_ids, _ = self.rrclient.find_objects(workflow_product_id, PRED.hasDataset, RT.Dataset, True) assertions(len(dataset_ids) == 1) dataset_id = dataset_ids[0] log.debug( "Verifying the output data product name matches what was specified in the workflow definition" ) workflow_product = self.rrclient.read(workflow_product_id) assertions( workflow_product.name.startswith(workflow_data_product_name), 'Nope: %s != %s' % (workflow_product.name, workflow_data_product_name)) log.debug( "Walking the associations to find the appropriate output data streams to validate the messages" ) workflow_dp_ids, _ = self.rrclient.find_objects( workflow_id, PRED.hasDataProduct, RT.DataProduct, True) assertions(len(workflow_dp_ids) == 2) for dp_id in workflow_dp_ids: stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True) assertions(len(stream_ids) == 1) data_product_stream_ids.append(stream_ids[0]) log.debug("data_product_stream_ids: %s" % data_product_stream_ids) log.debug( "Starting the output stream listener to monitor to collect messages" ) results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids) log.debug("results::: %s" % results) log.debug("Stopping the workflow processes") self.workflowclient.terminate_data_process_workflow( workflow_id, False, timeout=250) # Should test true at some point log.debug("Making sure the Workflow object was removed") objs, _ = self.rrclient.find_resources(restype=RT.Workflow) assertions(len(objs) == 0) log.debug( "Validating the data from each of the messages along the way") 
self.validate_messages(results) log.debug( "Checking to see if dataset id = %s, was persisted, and that it can be retrieved...." % dataset_id) self.validate_data_ingest_retrieve(dataset_id) log.debug("Cleaning up to make sure delete is correct.") self.workflowclient.delete_workflow_definition(workflow_def_id) """ workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition) assertions(len(workflow_def_ids) == 0 ) """ @attr('LOCOINT') @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Not integrated for CEI') def test_highcharts_transform_workflow(self): assertions = self.assertTrue # Build the workflow definition workflow_def_obj = IonObject( RT.WorkflowDefinition, name='HighCharts_Test_Workflow', description= 'Tests the workflow of converting stream data to HighCharts') #Add a transformation process definition highcharts_procdef_id = self.create_highcharts_data_process_definition( ) workflow_step_obj = IonObject( 'DataProcessWorkflowStep', data_process_definition_id=highcharts_procdef_id, persist_process_output_data=False) workflow_def_obj.workflow_steps.append(workflow_step_obj) #Create it in the resource registry workflow_def_id = self.workflowclient.create_workflow_definition( workflow_def_obj) #The list of data product streams to monitor data_product_stream_ids = list() #Create the input data product ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product( ) data_product_stream_ids.append(ctd_stream_id) #Create and start the workflow workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow( workflow_def_id, ctd_parsed_data_product_id, timeout=60) workflow_output_ids, _ = self.rrclient.find_subjects( RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True) assertions(len(workflow_output_ids) == 1) #Walk the associations to find the appropriate output data streams to validate the messages workflow_dp_ids, _ = self.rrclient.find_objects( workflow_id, PRED.hasDataProduct, 
RT.DataProduct, True) assertions(len(workflow_dp_ids) == 1) for dp_id in workflow_dp_ids: stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True) assertions(len(stream_ids) == 1) data_product_stream_ids.append(stream_ids[0]) #Start the output stream listener to monitor and collect messages results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids) #Stop the workflow processes self.workflowclient.terminate_data_process_workflow( workflow_id=workflow_id, delete_data_products=False, timeout=60) # Should test true at some point #Validate the data from each of the messages along the way self.validate_highcharts_transform_results(results) #Cleanup to make sure delete is correct. self.workflowclient.delete_workflow_definition(workflow_def_id) """ workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition) assertions(len(workflow_def_ids) == 0 ) """ @attr('LOCOINT') @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Not integrated for CEI') def test_mpl_graphs_transform_workflow(self): assertions = self.assertTrue # Build the workflow definition workflow_def_obj = IonObject( RT.WorkflowDefinition, name='Mpl_Graphs_Test_Workflow', description= 'Tests the workflow of converting stream data to Matplotlib graphs' ) #Add a transformation process definition mpl_graphs_procdef_id = self.create_mpl_graphs_data_process_definition( ) workflow_step_obj = IonObject( 'DataProcessWorkflowStep', data_process_definition_id=mpl_graphs_procdef_id, persist_process_output_data=False) workflow_def_obj.workflow_steps.append(workflow_step_obj) #Create it in the resource registry workflow_def_id = self.workflowclient.create_workflow_definition( workflow_def_obj) #The list of data product streams to monitor data_product_stream_ids = list() #Create the input data product ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product( ) data_product_stream_ids.append(ctd_stream_id) #Create and start 
the workflow workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow( workflow_def_id, ctd_parsed_data_product_id, persist_workflow_data_product=True, timeout=60) workflow_output_ids, _ = self.rrclient.find_subjects( RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True) assertions(len(workflow_output_ids) == 1) #Walk the associations to find the appropriate output data streams to validate the messages workflow_dp_ids, _ = self.rrclient.find_objects( workflow_id, PRED.hasDataProduct, RT.DataProduct, True) assertions(len(workflow_dp_ids) == 1) for dp_id in workflow_dp_ids: stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True) assertions(len(stream_ids) == 1) data_product_stream_ids.append(stream_ids[0]) #Start the output stream listener to monitor and collect messages results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids) #Stop the workflow processes self.workflowclient.terminate_data_process_workflow( workflow_id=workflow_id, delete_data_products=False, timeout=60) # Should test true at some point #Validate the data from each of the messages along the way self.validate_mpl_graphs_transform_results(results) # Check to see if ingestion worked. Extract the granules from data_retrieval. # First find the dataset associated with the output dp product ds_ids, _ = self.rrclient.find_objects( workflow_dp_ids[len(workflow_dp_ids) - 1], PRED.hasDataset, RT.Dataset, True) retrieved_granule = self.data_retriever.retrieve_last_data_points( ds_ids[0], 10) #Validate the data from each of the messages along the way self.validate_mpl_graphs_transform_results(retrieved_granule) #Cleanup to make sure delete is correct. 
self.workflowclient.delete_workflow_definition(workflow_def_id) """ workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition) assertions(len(workflow_def_ids) == 0 ) """ @attr('LOCOINT') @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Not integrated for CEI') def test_multiple_workflow_instances(self): assertions = self.assertTrue # Build the workflow definition workflow_def_obj = IonObject( RT.WorkflowDefinition, name='Multiple_Test_Workflow', description='Tests the workflow of converting stream data') #Add a transformation process definition highcharts_procdef_id = self.create_highcharts_data_process_definition( ) workflow_step_obj = IonObject( 'DataProcessWorkflowStep', data_process_definition_id=highcharts_procdef_id, persist_process_output_data=False) workflow_def_obj.workflow_steps.append(workflow_step_obj) #Create it in the resource registry workflow_def_id = self.workflowclient.create_workflow_definition( workflow_def_obj) #The list of data product streams to monitor data_product_stream_ids = list() #Create the first input data product ctd_stream_id1, ctd_parsed_data_product_id1 = self.create_ctd_input_stream_and_data_product( 'ctd_parsed1') data_product_stream_ids.append(ctd_stream_id1) #Create and start the first workflow workflow_id1, workflow_product_id1 = self.workflowclient.create_data_process_workflow( workflow_def_id, ctd_parsed_data_product_id1, timeout=60) #Create the second input data product ctd_stream_id2, ctd_parsed_data_product_id2 = self.create_ctd_input_stream_and_data_product( 'ctd_parsed2') data_product_stream_ids.append(ctd_stream_id2) #Create and start the second workflow workflow_id2, workflow_product_id2 = self.workflowclient.create_data_process_workflow( workflow_def_id, ctd_parsed_data_product_id2, timeout=60) #Walk the associations to find the appropriate output data streams to validate the messages workflow_ids, _ = self.rrclient.find_resources(restype=RT.Workflow) assertions(len(workflow_ids) == 2) 
#Start the first input stream process ctd_sim_pid1 = self.start_sinusoidal_input_stream_process( ctd_stream_id1) #Start the second input stream process ctd_sim_pid2 = self.start_simple_input_stream_process(ctd_stream_id2) #Start the output stream listener to monitor a set number of messages being sent through the workflows results = self.start_output_stream_and_listen( None, data_product_stream_ids, message_count_per_stream=5) # stop the flow of messages... self.process_dispatcher.cancel_process( ctd_sim_pid1 ) # kill the ctd simulator process - that is enough data self.process_dispatcher.cancel_process(ctd_sim_pid2) #Stop the first workflow processes self.workflowclient.terminate_data_process_workflow( workflow_id=workflow_id1, delete_data_products=False, timeout=60) # Should test true at some point #Stop the second workflow processes self.workflowclient.terminate_data_process_workflow( workflow_id=workflow_id2, delete_data_products=False, timeout=60) # Should test true at some point workflow_ids, _ = self.rrclient.find_resources(restype=RT.Workflow) assertions(len(workflow_ids) == 0) #Cleanup to make sure delete is correct. self.workflowclient.delete_workflow_definition(workflow_def_id) """
class TestGovernanceInt(IonIntegrationTestCase): def setUp(self): # Start container self._start_container() #Load a deploy file self.container.start_rel_from_url('res/deploy/r2deploy.yml') #Instantiate a process to represent the test process = GovernanceTestProcess() #Load system policies after container has started all of the services LoadSystemPolicy.op_load_system_policies(process) self.rr_client = ResourceRegistryServiceProcessClient( node=self.container.node, process=process) self.id_client = IdentityManagementServiceProcessClient( node=self.container.node, process=process) self.pol_client = PolicyManagementServiceProcessClient( node=self.container.node, process=process) self.org_client = OrgManagementServiceProcessClient( node=self.container.node, process=process) self.ims_client = InstrumentManagementServiceProcessClient( node=self.container.node, process=process) self.ems_client = ExchangeManagementServiceProcessClient( node=self.container.node, process=process) self.ion_org = self.org_client.find_org() self.system_actor = self.id_client.find_actor_identity_by_name( name=CFG.system.system_actor) log.debug('system actor:' + self.system_actor._id) sa_header_roles = get_role_message_headers( self.org_client.find_all_roles_by_user(self.system_actor._id)) self.sa_user_header = { 'ion-actor-id': self.system_actor._id, 'ion-actor-roles': sa_header_roles } @attr('LOCOINT') @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Not integrated for CEI') def test_basic_policy(self): #Make sure that the system policies have been loaded policy_list, _ = self.rr_client.find_resources(restype=RT.Policy) self.assertNotEqual( len(policy_list), 0, "The system policies have not been loaded into the Resource Registry" ) #Attempt to access an operation in service which does not have specific policies set es_obj = IonObject(RT.ExchangeSpace, description='ION test XS', name='ioncore2') with self.assertRaises(Unauthorized) as cm: self.ems_client.create_exchange_space(es_obj) 
self.assertIn( 'exchange_management(create_exchange_space) has been denied', cm.exception.message) #Add a new policy to allow the the above service call. policy_obj = IonObject( RT.Policy, name='Exchange_Management_Test_Policy', definition_type="Service", rule=TEST_POLICY_TEXT, description= 'Allow specific operations in the Exchange Management Service for anonymous user' ) test_policy_id = self.pol_client.create_policy( policy_obj, headers=self.sa_user_header) self.pol_client.add_service_policy('exchange_management', test_policy_id, headers=self.sa_user_header) log.info('Policy created: ' + policy_obj.name) gevent.sleep(2) # Wait for events to be fired and policy updated #The previous attempt at this operations should now be allowed. es_obj = IonObject(RT.ExchangeSpace, description='ION test XS', name='ioncore2') with self.assertRaises(BadRequest) as cm: self.ems_client.create_exchange_space(es_obj) self.assertIn('Arguments not set', cm.exception.message) #disable the test policy to try again self.pol_client.disable_policy(test_policy_id, headers=self.sa_user_header) gevent.sleep(2) # Wait for events to be fired and policy updated #The same request that previously was allowed should not be denied es_obj = IonObject(RT.ExchangeSpace, description='ION test XS', name='ioncore2') with self.assertRaises(Unauthorized) as cm: self.ems_client.create_exchange_space(es_obj) self.assertIn( 'exchange_management(create_exchange_space) has been denied', cm.exception.message) #now enable the test policy to try again self.pol_client.enable_policy(test_policy_id, headers=self.sa_user_header) gevent.sleep(2) # Wait for events to be fired and policy updated #The previous attempt at this operations should now be allowed. 
es_obj = IonObject(RT.ExchangeSpace, description='ION test XS', name='ioncore2') with self.assertRaises(BadRequest) as cm: self.ems_client.create_exchange_space(es_obj) self.assertIn('Arguments not set', cm.exception.message) self.pol_client.remove_service_policy('exchange_management', test_policy_id, headers=self.sa_user_header) self.pol_client.delete_policy(test_policy_id, headers=self.sa_user_header) gevent.sleep(2) # Wait for events to be fired and policy updated #The same request that previously was allowed should not be denied es_obj = IonObject(RT.ExchangeSpace, description='ION test XS', name='ioncore2') with self.assertRaises(Unauthorized) as cm: self.ems_client.create_exchange_space(es_obj) self.assertIn( 'exchange_management(create_exchange_space) has been denied', cm.exception.message) @attr('LOCOINT') @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Not integrated for CEI') def test_org_policy(self): #Make sure that the system policies have been loaded policy_list, _ = self.rr_client.find_resources(restype=RT.Policy) self.assertNotEqual( len(policy_list), 0, "The system policies have not been loaded into the Resource Registry" ) with self.assertRaises(BadRequest) as cm: myorg = self.org_client.read_org() self.assertTrue( cm.exception.message == 'The org_id parameter is missing') user_id, valid_until, registered = self.id_client.signon( USER1_CERTIFICATE, True) log.debug("user id=" + user_id) user_roles = get_role_message_headers( self.org_client.find_all_roles_by_user(user_id)) user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles} #Attempt to enroll a user anonymously - should not be allowed with self.assertRaises(Unauthorized) as cm: self.org_client.enroll_member(self.ion_org._id, user_id) self.assertIn('org_management(enroll_member) has been denied', cm.exception.message) #Attempt to let a user enroll themselves - should not be allowed with self.assertRaises(Unauthorized) as cm: self.org_client.enroll_member(self.ion_org._id, 
user_id, headers=user_header) self.assertIn('org_management(enroll_member) has been denied', cm.exception.message) #Attept to enroll the user in the ION Root org as a manager - should not be allowed since #registration with the system implies membership in the ROOT Org. with self.assertRaises(BadRequest) as cm: self.org_client.enroll_member(self.ion_org._id, user_id, headers=self.sa_user_header) self.assertTrue( cm.exception.message == 'A request to enroll in the root ION Org is not allowed') with self.assertRaises(Unauthorized) as cm: users = self.org_client.find_enrolled_users(self.ion_org._id) self.assertIn('org_management(find_enrolled_users) has been denied', cm.exception.message) with self.assertRaises(Unauthorized) as cm: users = self.org_client.find_enrolled_users(self.ion_org._id, headers=user_header) self.assertIn('org_management(find_enrolled_users) has been denied', cm.exception.message) users = self.org_client.find_enrolled_users( self.ion_org._id, headers=self.sa_user_header) self.assertEqual(len(users), 2) ## test_org_roles and policies roles = self.org_client.find_org_roles(self.ion_org._id) self.assertEqual(len(roles), 3) self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE, ION_MANAGER]) roles = self.org_client.find_roles_by_user(self.ion_org._id, self.system_actor._id, headers=self.sa_user_header) self.assertEqual(len(roles), 3) self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE, MANAGER_ROLE, ION_MANAGER]) roles = self.org_client.find_roles_by_user(self.ion_org._id, user_id, headers=self.sa_user_header) self.assertEqual(len(roles), 1) self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE]) with self.assertRaises(NotFound) as nf: org2 = self.org_client.find_org(ORG2) self.assertIn('The Org with name Org2 does not exist', nf.exception.message) org2 = IonObject(RT.Org, name=ORG2, description='A second Org') org2_id = self.org_client.create_org(org2, headers=self.sa_user_header) org2 = 
self.org_client.find_org(ORG2) self.assertEqual(org2_id, org2._id) roles = self.org_client.find_org_roles(org2_id) self.assertEqual(len(roles), 2) self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE]) operator_role = IonObject(RT.UserRole, name=INSTRUMENT_OPERATOR, label='Instrument Operator', description='Instrument Operator') #First try to add the user role anonymously with self.assertRaises(Unauthorized) as cm: self.org_client.add_user_role(org2_id, operator_role) self.assertIn('org_management(add_user_role) has been denied', cm.exception.message) self.org_client.add_user_role(org2_id, operator_role, headers=self.sa_user_header) roles = self.org_client.find_org_roles(org2_id) self.assertEqual(len(roles), 3) self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE, INSTRUMENT_OPERATOR]) # test requests for enrollments and roles. #First try to find user requests anonymously with self.assertRaises(Unauthorized) as cm: requests = self.org_client.find_requests(org2_id) self.assertIn('org_management(find_requests) has been denied', cm.exception.message) #Next try to find user requests as as a basic member with self.assertRaises(Unauthorized) as cm: requests = self.org_client.find_requests(org2_id, headers=user_header) self.assertIn('org_management(find_requests) has been denied', cm.exception.message) requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header) self.assertEqual(len(requests), 0) # First try to request a role without being a member with self.assertRaises(BadRequest) as cm: req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header) self.assertIn( 'A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)', cm.exception.message) requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header) self.assertEqual(len(requests), 0) req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header) requests = 
self.org_client.find_requests(org2_id, headers=self.sa_user_header) self.assertEqual(len(requests), 1) requests = self.org_client.find_user_requests( user_id, org2_id, headers=self.sa_user_header) self.assertEqual(len(requests), 1) #User tried requesting enrollment again - this should fail with self.assertRaises(BadRequest) as cm: req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header) self.assertIn( 'A precondition for this request has not been satisfied: enroll_req_not_exist(org_id,user_id)', cm.exception.message) #Manager denies the request self.org_client.deny_request(org2_id, req_id, 'To test the deny process', headers=self.sa_user_header) requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header) self.assertEqual(len(requests), 1) self.assertEqual(requests[0].status, REQUEST_DENIED) #Manager approves request self.org_client.approve_request(org2_id, req_id, headers=self.sa_user_header) users = self.org_client.find_enrolled_users( org2_id, headers=self.sa_user_header) self.assertEqual(len(users), 0) #User Accepts request self.org_client.accept_request(org2_id, req_id, headers=user_header) users = self.org_client.find_enrolled_users( org2_id, headers=self.sa_user_header) self.assertEqual(len(users), 1) #User tried requesting enrollment again - this should fail with self.assertRaises(BadRequest) as cm: req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header) self.assertIn( 'A precondition for this request has not been satisfied: is_not_enrolled(org_id,user_id)', cm.exception.message) req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header) requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header) self.assertEqual(len(requests), 2) requests = self.org_client.find_requests(org2_id, request_status='Open', headers=self.sa_user_header) self.assertEqual(len(requests), 1) requests = self.org_client.find_user_requests(user_id, org2_id, 
headers=user_header) self.assertEqual(len(requests), 2) requests = self.org_client.find_user_requests( user_id, org2_id, request_type=RT.RoleRequest, headers=user_header) self.assertEqual(len(requests), 1) requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header) self.assertEqual(len(requests), 1) ia_list, _ = self.rr_client.find_resources(restype=RT.InstrumentAgent) self.assertEqual(len(ia_list), 0) ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The first Instrument Agent') with self.assertRaises(Unauthorized) as cm: self.ims_client.create_instrument_agent(ia_obj) self.assertIn( 'instrument_management(create_instrument_agent) has been denied', cm.exception.message) with self.assertRaises(Unauthorized) as cm: self.ims_client.create_instrument_agent(ia_obj, headers=user_header) self.assertIn( 'instrument_management(create_instrument_agent) has been denied', cm.exception.message) #Manager approves request self.org_client.approve_request(org2_id, req_id, headers=self.sa_user_header) requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header) self.assertEqual(len(requests), 0) #User accepts request self.org_client.accept_request(org2_id, req_id, headers=user_header) #Refresh headers with new role user_roles = get_role_message_headers( self.org_client.find_all_roles_by_user(user_id)) user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles} self.ims_client.create_instrument_agent(ia_obj, headers=user_header) ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent2', description='The second Instrument Agent') self.ims_client.create_instrument_agent(ia_obj, headers=user_header) ia_list, _ = self.rr_client.find_resources(restype=RT.InstrumentAgent) self.assertEqual(len(ia_list), 2) #First make a acquire resource request with an non-enrolled user. 
with self.assertRaises(BadRequest) as cm: req_id = self.org_client.request_acquire_resource( org2_id, self.system_actor._id, ia_list[0]._id, headers=self.sa_user_header) self.assertIn( 'A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)', cm.exception.message) req_id = self.org_client.request_acquire_resource(org2_id, user_id, ia_list[0]._id, headers=user_header) requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header) self.assertEqual(len(requests), 3) requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header) self.assertEqual(len(requests), 3) requests = self.org_client.find_user_requests( user_id, org2_id, request_type=RT.ResourceRequest, headers=user_header) self.assertEqual(len(requests), 1) requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header) self.assertEqual(len(requests), 1) self.assertEqual(requests[0]._id, req_id) #Manager approves Instrument request self.org_client.approve_request(org2_id, req_id, headers=self.sa_user_header) requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header) self.assertEqual(len(requests), 0) #User accepts request self.org_client.accept_request(org2_id, req_id, headers=user_header) #Check commitments commitments, _ = self.rr_client.find_objects(ia_list[0]._id, PRED.hasCommitment, RT.ResourceCommitment) self.assertEqual(len(commitments), 1) commitments, _ = self.rr_client.find_objects(user_id, PRED.hasCommitment, RT.ResourceCommitment) self.assertEqual(len(commitments), 1) #Release the resource self.org_client.release_resource( org2_id, user_id, ia_list[0]._id, headers=self.sa_user_header, timeout=15) #TODO - Refactor release_resource #Check commitments commitments, _ = self.rr_client.find_objects(ia_list[0]._id, PRED.hasCommitment, RT.ResourceCommitment) self.assertEqual(len(commitments), 0) commitments, _ = self.rr_client.find_objects(user_id, 
PRED.hasCommitment, RT.ResourceCommitment) self.assertEqual(len(commitments), 0)
class ExchangeManager(object):
    """
    Manager object for the CC to manage Exchange related resources.

    Maintains local XS/XP/XN objects (AMQP exchanges / queues) and, when the
    Exchange Management Service is available and auto-registration is enabled,
    mirrors them as resources in the Resource Registry.
    """

    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [self.create_xs,
                              self.create_xp,
                              self.create_xn_service,
                              self.create_xn_process,
                              self.create_xn_queue]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs         = ExchangeSpace(self, ION_ROOT_XS)
        self._xs_cache          = {}        # caching of xs names to RR objects
        self._default_xs_obj    = None      # default XS registry object
        self.org_id             = None

        # mappings
        self.xs_by_name = { ION_ROOT_XS: self.default_xs }      # friendly named XS to XSO
        self.xn_by_name = {}                                    # friendly named XN to XNO
        # xn by xs is a property

        self._chan = None

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client = ExchangeManagementServiceProcessClient(process=self.container)
        self._rr_client  = ResourceRegistryServiceProcessClient(process=self.container)

        # TODO: Do more initializing here, not in container

    @property
    def xn_by_xs(self):
        """
        Get a list of XNs associated by XS (friendly name).
        """
        ret = {}
        for xnname, xn in self.xn_by_name.iteritems():
            xsn = xn._xs._exchange
            if not xsn in ret:
                ret[xsn] = []
            ret[xsn].append(xn)
        return ret

    def _get_xs_obj(self, name=ION_ROOT_XS):
        """
        Gets a resource-registry represented XS, either via cache or RR request.

        Returns None (after a warning) if exactly one matching XS resource
        could not be found.
        """
        if name in self._xs_cache:
            return self._xs_cache[name]

        xs_objs, _ = self._rr_client.find_resources(RT.ExchangeSpace, name=name)
        if not len(xs_objs) == 1:
            log.warn("Could not find RR XS object with name: %s", name)
            return None

        self._xs_cache[name] = xs_objs[0]
        return xs_objs[0]

    def start(self):
        """
        Establishes the broker connection and acquires the raw channel used by
        the declare/delete transport methods. Returns (node, ioloop).
        """
        log.debug("ExchangeManager starting ...")

        # Establish connection to broker
        # @TODO: raise error if sux
        node, ioloop = messaging.make_node()

        self._transport = AMQPTransport.get_instance()
        self._client    = self._get_channel(node)

        # Declare root exchange
        #self.default_xs.ensure_exists(self._get_channel())
        return node, ioloop

    def _ems_available(self):
        """
        Returns True if the EMS is (likely) available and the auto_register CFG entry is True.

        Has the side effect of bootstrapping the org_id and default_xs's id/rev
        from the RR. Therefore, cannot be a property.
        """
        if CFG.container.get('exchange', {}).get('auto_register', False):
            # ok now make sure it's in the directory
            svc_de = self.container.directory.lookup('/Services/exchange_management')
            if svc_de is not None:
                if not self.org_id:
                    # find the default Org
                    org_ids = self._rr_client.find_resources(RT.Org, id_only=True)
                    if not (len(org_ids) and len(org_ids[0]) == 1):
                        log.warn("EMS available but could not find Org")
                        return False

                    self.org_id = org_ids[0][0]
                    log.debug("Bootstrapped Container exchange manager with org id: %s", self.org_id)
                return True

        return False

    def _get_channel(self, node):
        """
        Get a raw channel to be used by all the ensure_exists methods.
        """
        assert self.container

        # @TODO: needs lock, but so do all these methods
        if not self._chan:
            self._chan = blocking_cb(node.client.channel, 'on_open_callback')

        return self._chan

    def create_xs(self, name, use_ems=True, exchange_type='topic', durable=False, auto_delete=True):
        """
        Creates an ExchangeSpace, registering it via EMS when available,
        otherwise declaring it directly on the broker.
        """
        log.debug("ExchangeManager.create_xs: %s", name)
        xs = ExchangeSpace(self, name, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

        self.xs_by_name[name] = xs

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xs")
            # create a RR object
            xso = ResExchangeSpace(name=name)
            xso_id = self._ems_client.create_exchange_space(xso, self.org_id)
            log.debug("Created RR XS object, id: %s", xso_id)
        else:
            xs.declare()

        return xs

    def delete_xs(self, xs, use_ems=True):
        """
        Deletes an ExchangeSpace via EMS (plus cache eviction) or directly.

        @type xs    ExchangeSpace
        """
        log.debug("ExchangeManager.delete_xs: %s", xs)

        name = xs._exchange             # @TODO this feels wrong
        del self.xs_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xs")
            xso = self._get_xs_obj(name)
            self._ems_client.delete_exchange_space(xso._id)
            del self._xs_cache[name]
        else:
            xs.delete()

    def create_xp(self, name, xs=None, use_ems=True, **kwargs):
        """
        Creates an ExchangePoint within an ExchangeSpace (default XS if none
        given), registering via EMS when available.
        """
        log.debug("ExchangeManager.create_xp: %s", name)
        xs = xs or self.default_xs
        xp = ExchangePoint(self, name, xs, **kwargs)

        # put in xn_by_name anyway
        self.xn_by_name[name] = xp

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xp")
            # create an RR object
            xpo = ResExchangePoint(name=name, topology_type=xp._xptype)
            xpo_id = self._ems_client.create_exchange_point(xpo, self._get_xs_obj(xs._exchange)._id)        # @TODO: _exchange is wrong
        else:
            xp.declare()

        return xp

    def delete_xp(self, xp, use_ems=True):
        """
        Deletes an ExchangePoint, via EMS (looked up in RR by name) or directly.
        """
        log.debug("ExchangeManager.delete_xp: name=%s", 'TODO')     #xp.build_xname())

        name = xp._exchange             # @TODO: not right
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xp")
            # find the XP object via RR
            xpo_ids = self._rr_client.find_resources(RT.ExchangePoint, name=name, id_only=True)
            if not (len(xpo_ids) and len(xpo_ids[0]) == 1):
                # bail out instead of raising IndexError on xpo_ids[0][0] below
                log.warn("Could not find XP in RR with name of %s", name)
                return

            xpo_id = xpo_ids[0][0]
            self._ems_client.delete_exchange_point(xpo_id)
        else:
            xp.delete()

    def _create_xn(self, xn_type, name, xs=None, use_ems=True, **kwargs):
        """
        Shared factory for the three XN flavors ('service', 'process', 'queue').

        Raises StandardError for an unknown xn_type.
        """
        xs = xs or self.default_xs
        log.debug("ExchangeManager._create_xn: type: %s, name=%s, xs=%s, kwargs=%s", xn_type, name, xs, kwargs)

        if xn_type == "service":
            xn = ExchangeNameService(self, name, xs, **kwargs)
        elif xn_type == "process":
            xn = ExchangeNameProcess(self, name, xs, **kwargs)
        elif xn_type == "queue":
            xn = ExchangeNameQueue(self, name, xs, **kwargs)
        else:
            raise StandardError("Unknown XN type: %s" % xn_type)

        self.xn_by_name[name] = xn

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xn")
            xno = ResExchangeName(name=name, xn_type=xn.xn_type)
            self._ems_client.declare_exchange_name(xno, self._get_xs_obj(xs._exchange)._id)     # @TODO: exchange is wrong
        else:
            xn.declare()

        return xn

    def create_xn_service(self, name, xs=None, **kwargs):
        return self._create_xn('service', name, xs=xs, **kwargs)

    def create_xn_process(self, name, xs=None, **kwargs):
        return self._create_xn('process', name, xs=xs, **kwargs)

    def create_xn_queue(self, name, xs=None, **kwargs):
        return self._create_xn('queue', name, xs=xs, **kwargs)

    def delete_xn(self, xn, use_ems=False):
        """
        Deletes an ExchangeName, via EMS (looked up in RR by name) or directly.
        """
        log.debug("ExchangeManager.delete_xn: name=%s", "TODO")     #xn.build_xlname())

        name = xn._queue                # @TODO feels wrong
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xn")
            # find the XN object via RR?
            xno_ids = self._rr_client.find_resources(RT.ExchangeName, name=name, id_only=True)
            if not (len(xno_ids) and len(xno_ids[0]) == 1):
                # bail out instead of raising IndexError on xno_ids[0][0] below
                log.warn("Could not find XN in RR with name of %s", name)
                return

            xno_id = xno_ids[0][0]
            self._ems_client.undeclare_exchange_name(xno_id)        # "canonical name" currently understood to be RR id
        else:
            xn.delete()

    def stop(self, *args, **kwargs):
        log.debug("ExchangeManager stopping ...")

    # transport implementations - XOTransport objects call here
    def declare_exchange(self, exchange, exchange_type='topic', durable=False, auto_delete=True):
        log.info("ExchangeManager.declare_exchange")
        self._transport.declare_exchange_impl(self._client, exchange, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

    def delete_exchange(self, exchange, **kwargs):
        log.info("ExchangeManager.delete_exchange")
        self._transport.delete_exchange_impl(self._client, exchange, **kwargs)

    def declare_queue(self, queue, durable=False, auto_delete=True):
        log.info("ExchangeManager.declare_queue")
        return self._transport.declare_queue_impl(self._client, queue, durable=durable, auto_delete=auto_delete)

    def delete_queue(self, queue, **kwargs):
        log.info("ExchangeManager.delete_queue")
        self._transport.delete_queue_impl(self._client, queue, **kwargs)

    def bind(self, exchange, queue, binding):
        log.info("ExchangeManager.bind")
        self._transport.bind_impl(self._client, exchange, queue, binding)

    def unbind(self, exchange, queue, binding):
        log.info("ExchangeManager.unbind")
        self._transport.unbind_impl(self._client, exchange, queue, binding)
class TestWorkflowManagementIntegration(VisualizationIntegrationTestHelper):
    """
    Integration tests for the Workflow Management Service: building workflow
    definitions from data-process steps, starting/terminating workflow
    instances, and validating the data products/streams they produce.
    """

    def setUp(self):
        # Start container
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process=WorkflowServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_SA_transform_components(self):
        """
        Chains two transforms (L2 salinity, then salinity doubler) manually —
        without a workflow — and validates the messages on every stream.
        """

        assertions = self.assertTrue

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        ###
        ###  Setup the first transformation
        ###

        # Salinity: Data Process Definition
        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()

        l2_salinity_all_data_process_id, ctd_l2_salinity_output_dp_id = self.create_transform_process(ctd_L2_salinity_dprocdef_id,ctd_parsed_data_product_id, 'salinity' )

        ## get the stream id for the transform outputs
        stream_ids, _ = self.rrclient.find_objects(ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0 )
        sal_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_stream_id)

        ###
        ###  Setup the second transformation
        ###

        # Salinity Doubler: Data Process Definition
        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()

        # second transform consumes the first transform's output product
        salinity_double_data_process_id, salinity_doubler_output_dp_id = self.create_transform_process(salinity_doubler_dprocdef_id, ctd_l2_salinity_output_dp_id, 'salinity' )

        stream_ids, _ = self.rrclient.find_objects(salinity_doubler_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0 )
        sal_dbl_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_dbl_stream_id)

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        #Stop the transform processes
        self.dataprocessclient.deactivate_data_process(salinity_double_data_process_id)
        self.dataprocessclient.deactivate_data_process(l2_salinity_all_data_process_id)

        #Validate the data from each of the messages along the way
        self.validate_messages(results)

    @attr('LOCOINT')
    @attr('SMOKE')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_transform_workflow(self):
        """
        Runs the same two-step salinity transform chain as a managed workflow:
        verifies persistence of the output product, the output product name,
        message validation, ingest/retrieve, and full cleanup on delete.
        """

        assertions = self.assertTrue

        log.debug("Building the workflow definition")

        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Salinity_Test_Workflow',description='tests a workflow of multiple transform data processes')

        workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug( "Adding a transformation process definition for salinity")
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False)  #Don't persist the intermediate data product
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug( "Adding a transformation process definition for salinity doubler")
        #-------------------------------------------------------------------------------------------------------------------------

        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=salinity_doubler_dprocdef_id, )
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        log.debug( "Creating workflow def in the resource registry")
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        # one association per workflow step
        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 2 )

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        log.debug( "Creating the input data product")
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        log.debug( "Creating and starting the workflow")
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id,
                persist_workflow_data_product=True, output_data_product_name=workflow_data_product_name, timeout=300)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        log.debug( "persisting the output product")
        #self.dataproductclient.activate_data_product_persistence(workflow_product_id)
        dataset_ids,_ = self.rrclient.find_objects(workflow_product_id, PRED.hasDataset, RT.Dataset, True)
        assertions(len(dataset_ids) == 1 )
        dataset_id = dataset_ids[0]

        log.debug( "Verifying the output data product name matches what was specified in the workflow definition")
        workflow_product = self.rrclient.read(workflow_product_id)
        assertions(workflow_product.name.startswith(workflow_data_product_name), 'Nope: %s != %s' % (workflow_product.name, workflow_data_product_name))

        log.debug( "Walking the associations to find the appropriate output data streams to validate the messages")

        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 2 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        log.debug( "data_product_stream_ids: %s" % data_product_stream_ids)

        log.debug( "Starting the output stream listener to monitor to collect messages")
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        log.debug( "results::: %s" % results)

        log.debug( "Stopping the workflow processes")
        self.workflowclient.terminate_data_process_workflow(workflow_id, False, timeout=250)  # Should test true at some point

        log.debug( "Making sure the Workflow object was removed")
        objs, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(objs) == 0)

        log.debug( "Validating the data from each of the messages along the way")
        self.validate_messages(results)

        log.debug( "Checking to see if dataset id = %s, was persisted, and that it can be retrieved...." % dataset_id)
        self.validate_data_ingest_retrieve(dataset_id)

        log.debug( "Cleaning up to make sure delete is correct.")
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_google_dt_transform_workflow(self):
        """
        Single-step workflow converting stream data to a Google DataTable;
        validates the transform output and cleanup.
        """

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='GoogleDT_Test_Workflow',description='Tests the workflow of converting stream data to Google DT')

        #Add a transformation process definition
        google_dt_procdef_id = self.create_google_dt_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=google_dt_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=60)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id,delete_data_products=False, timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_google_dt_transform_results(results)

        """
        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids,_ = self.rrclient.find_objects(workflow_dp_ids[len(workflow_dp_ids) - 1], PRED.hasDataset, RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve(ds_ids[0])

        #Validate the data from each of the messages along the way
        self.validate_google_dt_transform_results(retrieved_granule)
        """

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_mpl_graphs_transform_workflow(self):
        """
        Single-step workflow converting stream data to Matplotlib graphs;
        validates live results plus the last persisted data points, then cleans up.
        """

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Mpl_Graphs_Test_Workflow',description='Tests the workflow of converting stream data to Matplotlib graphs')

        #Add a transformation process definition
        mpl_graphs_procdef_id = self.create_mpl_graphs_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=mpl_graphs_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id,
            persist_workflow_data_product=True, timeout=60)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id,delete_data_products=False, timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(results)

        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids,_ = self.rrclient.find_objects(workflow_dp_ids[len(workflow_dp_ids) - 1], PRED.hasDataset, RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve_last_data_points(ds_ids[0], 10)

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(retrieved_granule)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_multiple_workflow_instances(self):
        """
        Starts two instances of the same workflow definition on two different
        input products, then terminates both and verifies all resources
        (workflows, definition, associations) are removed.
        """

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Multiple_Test_Workflow',description='Tests the workflow of converting stream data')

        #Add a transformation process definition
        google_dt_procdef_id = self.create_google_dt_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=google_dt_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the first input data product
        ctd_stream_id1, ctd_parsed_data_product_id1 = self.create_ctd_input_stream_and_data_product('ctd_parsed1')
        data_product_stream_ids.append(ctd_stream_id1)

        #Create and start the first workflow
        workflow_id1, workflow_product_id1 = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id1, timeout=60)

        #Create the second input data product
        ctd_stream_id2, ctd_parsed_data_product_id2 = self.create_ctd_input_stream_and_data_product('ctd_parsed2')
        data_product_stream_ids.append(ctd_stream_id2)

        #Create and start the second workflow
        workflow_id2, workflow_product_id2 = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id2, timeout=60)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_ids,_ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 2 )

        #Start the first input stream process
        ctd_sim_pid1 = self.start_sinusoidal_input_stream_process(ctd_stream_id1)

        #Start the second input stream process
        ctd_sim_pid2 = self.start_simple_input_stream_process(ctd_stream_id2)

        #Start the output stream listener to monitor a set number of messages being sent through the workflows
        results = self.start_output_stream_and_listen(None, data_product_stream_ids, message_count_per_stream=5)

        # stop the flow of messages...
        self.process_dispatcher.cancel_process(ctd_sim_pid1) # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid2)

        #Stop the first workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id1,delete_data_products=False, timeout=60)  # Should test true at some point

        #Stop the second workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id2,delete_data_products=False, timeout=60)  # Should test true at some point

        workflow_ids,_ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 0 )

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )

        # deleting the definition should also have removed its step associations
        aid_list = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aid_list) == 0 )
class ExchangeManager(object):
    """
    Manager object for the CC to manage Exchange related resources.

    Owns the broker connection(s) (one node/ioloop pair per entry in
    CFG.container.messaging.server), a raw AMQP channel used by the transport
    methods, and the mappings of friendly-named Exchange Spaces (XS) and
    Exchange Names (XN) to their exchange objects.  When the Exchange
    Management Service (EMS) is available and auto_register is configured,
    create/delete operations go through the EMS; otherwise they declare/delete
    directly against the broker.

    NOTE(review): another definition of this class appears elsewhere in this
    file; in Python the definition executed last shadows the earlier ones.
    """
    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [self.create_xs,
                              self.create_xp,
                              self.create_xn_service,
                              self.create_xn_process,
                              self.create_xn_queue]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs = ExchangeSpace(self, ION_ROOT_XS)
        self._xs_cache = {}              # caching of xs names to RR objects
        self._default_xs_obj = None      # default XS registry object
        self.org_id = None               # lazily bootstrapped by _ems_available()

        # mappings
        self.xs_by_name = { ION_ROOT_XS: self.default_xs }      # friendly named XS to XSO
        self.xn_by_name = {}                                    # friendly named XN to XNO
        # xn by xs is a property

        self._chan = None                # lazily opened raw channel (see _get_channel)

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client = ExchangeManagementServiceProcessClient(process=self.container)
        self._rr_client = ResourceRegistryServiceProcessClient(process=self.container)

        # mapping of node/ioloop runner by connection name (in config, named via container.messaging.server keys)
        self._nodes = {}
        self._ioloops = {}

        self._client = None              # channel used by the *_impl transport calls
        self._transport = None           # AMQPTransport singleton, set in start()

        self._default_xs_declared = False

    def start(self):
        """
        Open every broker connection configured in CFG.container.messaging.server.

        Each connection is started with a zero timeout and then waited on (up
        to 15s) via the node's ready event; nodes that fail to come up are
        discarded.  Raises ExchangeManagerError if a config key is missing or
        if no connection at all could be started.
        """
        log.debug("ExchangeManager.start")

        total_count = 0

        def handle_failure(name, node):
            log.warn("Node %s could not be started", name)
            node.ready.set()        # let it fall out below

        # Establish connection(s) to broker
        for name, cfgkey in CFG.container.messaging.server.iteritems():
            if not cfgkey:
                continue

            if cfgkey not in CFG.server:
                raise ExchangeManagerError("Config key %s (name: %s) (from CFG.container.messaging.server) not in CFG.server" % (cfgkey, name))

            total_count += 1
            log.debug("Starting connection: %s", name)

            # start it with a zero timeout so it comes right back to us
            try:
                node, ioloop = messaging.make_node(CFG.server[cfgkey], name, 0)

                # install a finished handler directly on the ioloop just for this startup period
                # (bound and unlinked within this same iteration, so the loop-variable
                # capture in the lambda is safe here)
                fail_handle = lambda _: handle_failure(name, node)
                ioloop.link(fail_handle)

                # wait for the node ready event, with a large timeout just in case
                node_ready = node.ready.wait(timeout=15)

                # remove the finished handler, we don't care about it here
                ioloop.unlink(fail_handle)

                # only add to our list if we started successfully
                if not node.running:
                    ioloop.kill()      # make sure ioloop dead
                else:
                    self._nodes[name] = node
                    self._ioloops[name] = ioloop

            except socket.error as e:
                log.warn("Could not start connection %s due to socket error, continuing", name)

        fail_count = total_count - len(self._nodes)
        if fail_count > 0 or total_count == 0:
            if fail_count == total_count:
                raise ExchangeManagerError("No node connection was able to start (%d nodes attempted, %d nodes failed)" % (total_count, fail_count))

            log.warn("Some nodes could not be started, ignoring for now")   # @TODO change when ready

        self._transport = AMQPTransport.get_instance()
        # NOTE(review): 'priviledged' (sic) is looked up as-is in the node map;
        # the config key must use the same spelling or the first node is used.
        self._client = self._get_channel(self._nodes.get('priviledged', self._nodes.values()[0]))        # @TODO

        log.debug("Started %d connections (%s)", len(self._nodes), ",".join(self._nodes.iterkeys()))

    def stop(self, *args, **kwargs):
        """
        Stop all broker connections: stop each node, kill its ioloop greenlet,
        then run the client ioloop until the connection fully closes.
        """
        # ##############
        # HACK HACK HACK
        #
        # It appears during shutdown that when a channel is closed, it's not FULLY closed by the pika connection
        # until the next round of _handle_events. We have to yield here to let that happen, in order to have close
        # work fine without blowing up.
        # ##############
        time.sleep(0.1)
        # ##############
        # /HACK
        # ##############

        log.debug("ExchangeManager.stopping (%d connections)", len(self._nodes))

        for name in self._nodes:
            self._nodes[name].stop_node()
            self._ioloops[name].kill()
            self._nodes[name].client.ioloop.start()     # loop until connection closes

        # @TODO undeclare root xs?? need to know if last container
        #self.default_xs.delete()

    @property
    def default_node(self):
        """ Returns the default node connection (the 'primary' node if present,
        otherwise the first available, otherwise None). """
        if 'primary' in self._nodes:
            return self._nodes['primary']
        elif len(self._nodes):
            log.warn("No primary connection, returning first available")
            return self._nodes.values()[0]

        return None

    @property
    def xn_by_xs(self):
        """ Get a list of XNs associated by XS (friendly name). """
        ret = {}
        for xnname, xn in self.xn_by_name.iteritems():
            xsn = xn._xs._exchange
            if not xsn in ret:
                ret[xsn] = []
            ret[xsn].append(xn)

        return ret

    def _get_xs_obj(self, name=ION_ROOT_XS):
        """ Gets a resource-registry represented XS, either via cache or RR request.
        Returns None (after a warning) when the RR does not hold exactly one match. """
        if name in self._xs_cache:
            return self._xs_cache[name]

        xs_objs, _ = self._rr_client.find_resources(RT.ExchangeSpace, name=name)
        if not len(xs_objs) == 1:
            log.warn("Could not find RR XS object with name: %s", name)
            return None

        self._xs_cache[name] = xs_objs[0]
        return xs_objs[0]

    def _ems_available(self):
        """
        Returns True if the EMS is (likely) available and the auto_register CFG entry is True.

        Has the side effect of bootstrapping the org_id and default_xs's id/rev from the RR.
        Therefore, cannot be a property.
        """
        if CFG.container.get('exchange', {}).get('auto_register', False):
            # ok now make sure it's in the directory
            svc_de = self.container.directory.lookup('/Services/exchange_management')
            if svc_de is not None:
                if not self.org_id:
                    # find the default Org
                    org_ids = self._rr_client.find_resources(RT.Org, id_only=True)
                    if not (len(org_ids) and len(org_ids[0]) == 1):
                        log.warn("EMS available but could not find Org")
                        return False

                    self.org_id = org_ids[0][0]
                    log.debug("Bootstrapped Container exchange manager with org id: %s", self.org_id)
                return True

        return False

    def _get_channel(self, node):
        """ Get a raw channel to be used by all the ensure_exists methods.
        Opened lazily on first use and cached on self._chan. """
        assert self.container

        # @TODO: needs lock, but so do all these methods
        if not self._chan:
            self._chan = blocking_cb(node.client.channel, 'on_open_callback')

        return self._chan

    def create_xs(self, name, use_ems=True, exchange_type='topic', durable=False, auto_delete=True):
        """ Create an ExchangeSpace, via EMS when available else by direct declare. """
        log.debug("ExchangeManager.create_xs: %s", name)
        xs = ExchangeSpace(self, name, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

        self.xs_by_name[name] = xs

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xs")
            # create a RR object
            xso = ResExchangeSpace(name=name)
            xso_id = self._ems_client.create_exchange_space(xso, self.org_id)

            log.debug("Created RR XS object, id: %s", xso_id)
        else:
            xs.declare()

        return xs

    def delete_xs(self, xs, use_ems=True):
        """
        Delete an ExchangeSpace, via EMS when available else by direct delete.
        @type xs    ExchangeSpace
        """
        log.debug("ExchangeManager.delete_xs: %s", xs)

        name = xs._exchange     # @TODO this feels wrong
        del self.xs_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xs")
            xso = self._get_xs_obj(name)
            # NOTE(review): _get_xs_obj can return None; this would raise
            # AttributeError on xso._id in that case — confirm intended.
            self._ems_client.delete_exchange_space(xso._id)
            del self._xs_cache[name]
        else:
            xs.delete()

    def create_xp(self, name, xs=None, use_ems=True, **kwargs):
        """ Create an ExchangePoint on the given (or default) ExchangeSpace. """
        log.debug("ExchangeManager.create_xp: %s", name)
        xs = xs or self.default_xs
        xp = ExchangePoint(self, name, xs, **kwargs)

        # put in xn_by_name anyway
        self.xn_by_name[name] = xp

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xp")
            # create an RR object
            xpo = ResExchangePoint(name=name, topology_type=xp._xptype)
            xpo_id = self._ems_client.create_exchange_point(xpo, self._get_xs_obj(xs._exchange)._id)        # @TODO: _exchange is wrong
        else:
            xp.declare()

        return xp

    def delete_xp(self, xp, use_ems=True):
        """ Delete an ExchangePoint, via EMS when available else by direct delete. """
        log.debug("ExchangeManager.delete_xp: name=%s", 'TODO') #xp.build_xname())

        name = xp._exchange # @TODO: not right
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xp")
            # find the XP object via RR
            xpo_ids = self._rr_client.find_resources(RT.ExchangePoint, name=name, id_only=True)
            if not (len(xpo_ids) and len(xpo_ids[0]) == 1):
                log.warn("Could not find XP in RR with name of %s", name)

            # NOTE(review): the warning above does not return; if no XP was
            # found this next line raises IndexError — confirm intended.
            xpo_id = xpo_ids[0][0]
            self._ems_client.delete_exchange_point(xpo_id)
        else:
            xp.delete()

    def _create_xn(self, xn_type, name, xs=None, use_ems=True, **kwargs):
        """ Shared factory for the create_xn_* methods; xn_type selects the XN class. """
        xs = xs or self.default_xs
        log.debug("ExchangeManager._create_xn: type: %s, name=%s, xs=%s, kwargs=%s", xn_type, name, xs, kwargs)

        if xn_type == "service":
            xn = ExchangeNameService(self, name, xs, **kwargs)
        elif xn_type == "process":
            xn = ExchangeNameProcess(self, name, xs, **kwargs)
        elif xn_type == "queue":
            xn = ExchangeNameQueue(self, name, xs, **kwargs)
        else:
            raise StandardError("Unknown XN type: %s" % xn_type)

        self.xn_by_name[name] = xn

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xn")
            xno = ResExchangeName(name=name, xn_type=xn.xn_type)
            self._ems_client.declare_exchange_name(xno, self._get_xs_obj(xs._exchange)._id)     # @TODO: exchange is wrong
        else:
            xn.declare()

        return xn

    def create_xn_service(self, name, xs=None, **kwargs):
        """ Create a service-type ExchangeName. """
        return self._create_xn('service', name, xs=xs, **kwargs)

    def create_xn_process(self, name, xs=None, **kwargs):
        """ Create a process-type ExchangeName. """
        return self._create_xn('process', name, xs=xs, **kwargs)

    def create_xn_queue(self, name, xs=None, **kwargs):
        """ Create a queue-type ExchangeName. """
        return self._create_xn('queue', name, xs=xs, **kwargs)

    def delete_xn(self, xn, use_ems=False):
        """ Delete an ExchangeName; note use_ems defaults to False here,
        unlike the other delete_* methods. """
        log.debug("ExchangeManager.delete_xn: name=%s", "TODO") #xn.build_xlname())

        name = xn._queue # @TODO feels wrong
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xn")
            # find the XN object via RR?
            xno_ids = self._rr_client.find_resources(RT.ExchangeName, name=name, id_only=True)
            if not (len(xno_ids) and len(xno_ids[0]) == 1):
                log.warn("Could not find XN in RR with name of %s", name)

            # NOTE(review): same warn-then-index pattern as delete_xp; raises
            # IndexError when the XN is not found — confirm intended.
            xno_id = xno_ids[0][0]
            self._ems_client.undeclare_exchange_name(xno_id)        # "canonical name" currently understood to be RR id
        else:
            xn.delete()

    def _ensure_default_declared(self):
        """
        Ensures we declared the default exchange space.
        Needed by most exchange object calls, so each one calls here.
        """
        if not self._default_xs_declared:
            log.debug("ExchangeManager._ensure_default_declared, declaring default xs")
            self._default_xs_declared = True
            self.default_xs.declare()

    # transport implementations - XOTransport objects call here
    def declare_exchange(self, exchange, exchange_type='topic', durable=False, auto_delete=True):
        """ Declare an exchange on the raw channel (default XS declared first). """
        log.info("ExchangeManager.declare_exchange")
        self._ensure_default_declared()
        self._transport.declare_exchange_impl(self._client, exchange, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

    def delete_exchange(self, exchange, **kwargs):
        """ Delete an exchange on the raw channel. """
        log.info("ExchangeManager.delete_exchange")
        self._ensure_default_declared()
        self._transport.delete_exchange_impl(self._client, exchange, **kwargs)

    def declare_queue(self, queue, durable=False, auto_delete=False):
        """ Declare a queue on the raw channel; returns the declared queue name. """
        log.info("ExchangeManager.declare_queue (queue %s, durable %s, AD %s)", queue, durable, auto_delete)
        self._ensure_default_declared()
        return self._transport.declare_queue_impl(self._client, queue, durable=durable, auto_delete=auto_delete)

    def delete_queue(self, queue, **kwargs):
        """ Delete a queue on the raw channel. """
        log.info("ExchangeManager.delete_queue")
        self._ensure_default_declared()
        self._transport.delete_queue_impl(self._client, queue, **kwargs)

    def bind(self, exchange, queue, binding):
        """ Bind a queue to an exchange with the given binding key. """
        log.info("ExchangeManager.bind")
        self._ensure_default_declared()
        self._transport.bind_impl(self._client, exchange, queue, binding)

    def unbind(self, exchange, queue, binding):
        """ Remove a queue/exchange binding. """
        log.info("ExchangeManager.unbind")
        self._ensure_default_declared()
        self._transport.unbind_impl(self._client, exchange, queue, binding)

    def get_stats(self, queue):
        """ Return transport-level stats for a queue. """
        log.info("ExchangeManager.get_stats")
        self._ensure_default_declared()
        return self._transport.get_stats(self._client, queue)

    def purge(self, queue):
        """ Purge all messages from a queue. """
        log.info("ExchangeManager.purge")
        self._ensure_default_declared()
        self._transport.purge(self._client, queue)
class BootstrapProcessDispatcher(BootstrapPlugin):
    """
    Bootstrap plugin for the process dispatcher (PD).

    On initial bootstrap, registers process definitions with the PD for the
    core data processes — ingestion workers, replay, notification workers,
    the ERDDAP registration worker, the PyDAP server and the EOI
    registration service — and conditionally launches them.
    """
    def on_initial_bootstrap(self, process, config, **kwargs):
        """
        One-time bootstrap entry point.

        @param process  bootstrap process used to construct service clients
        @param config   configuration object supporting get_safe() dotted keys
        """
        self.pds_client = ProcessDispatcherServiceProcessClient(
            process=process)
        self.resource_registry = ResourceRegistryServiceProcessClient(
            process=process)

        self.ingestion_worker(process, config)
        self.replay_defs(process, config)
        self.notification_worker(process, config)
        self.registration_worker(process, config)
        self.pydap_server(process, config)
        self.eoi_services(process, config)

    def eoi_services(self, process, config):
        """Register and launch the EOI data-source registration process."""
        # BUGFIX: these lookups previously read the
        # 'bootstrap.processes.registration.*' keys — the same keys consumed by
        # registration_worker() below — so overriding the registration worker's
        # module/class in deployment config would silently reconfigure the EOI
        # service too. EOI now has its own config namespace; the defaults are
        # unchanged, so out-of-the-box behavior is identical.
        eoi_module = config.get_safe(
            'bootstrap.processes.eoi.module',
            'ion.processes.data.registration.eoi_registration_process')
        eoi_class = config.get_safe('bootstrap.processes.eoi.class',
                                    'EOIRegistrationProcess')

        process_definition = ProcessDefinition(
            name='eoi_server',
            description='Process for eoi data sources')
        process_definition.executable['module'] = eoi_module
        process_definition.executable['class'] = eoi_class

        self._create_and_launch(process_definition)

    def pydap_server(self, process, config):
        """Register the lightweight PyDAP WSGI server; launch only when
        bootstrap.launch_pydap is set."""
        pydap_module = config.get_safe(
            'bootstrap.processes.pydap.module',
            'ion.processes.data.externalization.lightweight_pydap')
        pydap_class = config.get_safe('bootstrap.processes.pydap.class',
                                      'LightweightPyDAP')
        use_pydap = config.get_safe('bootstrap.launch_pydap', False)

        process_definition = ProcessDefinition(
            name='pydap_server',
            description='Lightweight WSGI Server for PyDAP')
        process_definition.executable['module'] = pydap_module
        process_definition.executable['class'] = pydap_class

        self._create_and_launch(process_definition, use_pydap)

    def registration_worker(self, process, config):
        """Register and launch the ERDDAP registration worker, unless a
        definition with that name already exists in the resource registry."""
        res, meta = self.resource_registry.find_resources(
            name='registration_worker', restype=RT.ProcessDefinition)
        if len(res):
            # already bootstrapped on a previous run; nothing to do
            return
        registration_module = config.get_safe(
            'bootstrap.processes.registration.module',
            'ion.processes.data.registration.registration_process')
        registration_class = config.get_safe(
            'bootstrap.processes.registration.class', 'RegistrationProcess')
        # Always launched here; older revisions of this plugin honored the
        # 'bootstrap.use_pydap' flag instead.
        use_pydap = True

        process_definition = ProcessDefinition(
            name='registration_worker',
            description='For registering datasets with ERDDAP')
        process_definition.executable['module'] = registration_module
        process_definition.executable['class'] = registration_class

        self._create_and_launch(process_definition, use_pydap)

    def _create_and_launch(self, process_definition, conditional=True):
        """
        Create the PD process definition; when `conditional` is truthy, also
        create a process resource for it and schedule it for launch.
        """
        proc_def_id = self.pds_client.create_process_definition(
            process_definition=process_definition)
        if conditional:
            process_res_id = self.pds_client.create_process(
                process_definition_id=proc_def_id)
            self.pds_client.schedule_process(process_definition_id=proc_def_id,
                                             process_id=process_res_id)

    def ingestion_worker(self, process, config):
        """Register the science-granule ingestion worker definition.
        Only the definition is created; worker scheduling is disabled below."""
        # ingestion
        ingestion_module = config.get_safe(
            'bootstrap.processes.ingestion.module',
            'ion.processes.data.ingestion.science_granule_ingestion_worker')
        ingestion_class = config.get_safe(
            'bootstrap.processes.ingestion.class',
            'ScienceGranuleIngestionWorker')
        # The datastore/queue/worker-count values are read but currently only
        # used by the disabled scheduling block below.
        ingestion_datastore = config.get_safe(
            'bootstrap.processes.ingestion.datastore_name', 'datasets')
        ingestion_queue = config.get_safe(
            'bootstrap.processes.ingestion.queue', 'science_granule_ingestion')
        ingestion_workers = config.get_safe(
            'bootstrap.processes.ingestion.workers', 1)

        #--------------------------------------------------------------------------------
        # Create ingestion workers
        #--------------------------------------------------------------------------------

        process_definition = ProcessDefinition(
            name='ingestion_worker_process',
            description='Worker transform process for ingestion of datasets')
        process_definition.executable['module'] = ingestion_module
        process_definition.executable['class'] = ingestion_class
        ingestion_procdef_id = self.pds_client.create_process_definition(
            process_definition=process_definition)

        #--------------------------------------------------------------------------------
        # Simulate a HA ingestion worker by creating two of them
        #--------------------------------------------------------------------------------
        # config = DotDict()
        # config.process.datastore_name = ingestion_datastore
        # config.process.queue_name = ingestion_queue
        #
        # for i in xrange(ingestion_workers):
        #     self.pds_client.schedule_process(process_definition_id=ingestion_procdef_id, configuration=config)

    def notification_worker(self, process, config):
        """Register the user-notification worker definition and schedule the
        configured number of worker processes."""
        # user notifications
        notification_module = config.get_safe(
            'bootstrap.processes.user_notification.module',
            'ion.processes.data.transforms.notification_worker')
        notification_class = config.get_safe(
            'bootstrap.processes.user_notification.class',
            'NotificationWorker')
        notification_workers = config.get_safe(
            'bootstrap.processes.user_notification.workers', 1)

        #--------------------------------------------------------------------------------
        # Create notification workers
        #--------------------------------------------------------------------------------

        # set up the process definition
        process_definition_uns = ProcessDefinition(
            name='notification_worker_process',
            description='Worker transform process for user notifications')
        process_definition_uns.executable['module'] = notification_module
        process_definition_uns.executable['class'] = notification_class
        uns_procdef_id = self.pds_client.create_process_definition(
            process_definition=process_definition_uns)

        # One DotDict is reused and mutated per worker; this rebinds the
        # 'config' parameter, which is no longer read past this point.
        config = DotDict()
        config.process.type = 'simple'

        for i in xrange(notification_workers):
            config.process.name = 'notification_worker_%s' % i
            config.process.queue_name = 'notification_worker_queue'
            self.pds_client.schedule_process(
                process_definition_id=uns_procdef_id, configuration=config)

    def replay_defs(self, process, config):
        """Register the dataset replay process definition (definition only,
        never auto-launched here)."""
        replay_module = config.get_safe(
            'bootstrap.processes.replay.module',
            'ion.processes.data.replay.replay_process')
        replay_class = config.get_safe('bootstrap.processes.replay.class',
                                       'ReplayProcess')
        #--------------------------------------------------------------------------------
        # Create replay process definition
        #--------------------------------------------------------------------------------

        process_definition = ProcessDefinition(
            name=DataRetrieverService.REPLAY_PROCESS,
            description='Process for the replay of datasets')
        process_definition.executable['module'] = replay_module
        process_definition.executable['class'] = replay_class
        self.pds_client.create_process_definition(
            process_definition=process_definition)

    def on_restart(self, process, config, **kwargs):
        """No restart work: all bootstrap actions here are one-time only."""
        pass
class ExchangeManager(object):
    """
    Manager object for the CC to manage Exchange related resources.

    This (earlier) variant holds a single broker node: start() creates one
    connection via messaging.make_node() and returns it to the caller.
    Create/delete operations go through the Exchange Management Service (EMS)
    when it is available and auto_register is configured; otherwise they
    declare/delete directly on the broker.

    NOTE(review): another definition of this class appears elsewhere in this
    file; in Python the definition executed last shadows the earlier ones.
    """
    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [self.create_xs,
                              self.create_xp,
                              self.create_xn_service,
                              self.create_xn_process,
                              self.create_xn_queue]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs = ExchangeSpace(self, ION_ROOT_XS)
        self._xs_cache = {}              # caching of xs names to RR objects
        self._default_xs_obj = None      # default XS registry object
        self.org_id = None               # lazily bootstrapped by _ems_available()

        # mappings
        self.xs_by_name = { ION_ROOT_XS: self.default_xs }      # friendly named XS to XSO
        self.xn_by_name = {}                                    # friendly named XN to XNO
        # xn by xs is a property

        self._chan = None                # lazily opened raw channel (see _get_channel)

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client = ExchangeManagementServiceProcessClient(
            process=self.container)
        self._rr_client = ResourceRegistryServiceProcessClient(
            process=self.container)

        # TODO: Do more initializing here, not in container

    @property
    def xn_by_xs(self):
        """ Get a list of XNs associated by XS (friendly name). """
        ret = {}
        for xnname, xn in self.xn_by_name.iteritems():
            xsn = xn._xs._exchange
            if not xsn in ret:
                ret[xsn] = []
            ret[xsn].append(xn)

        return ret

    def _get_xs_obj(self, name=ION_ROOT_XS):
        """ Gets a resource-registry represented XS, either via cache or RR request.
        Returns None (after a warning) when the RR does not hold exactly one match. """
        if name in self._xs_cache:
            return self._xs_cache[name]

        xs_objs, _ = self._rr_client.find_resources(RT.ExchangeSpace, name=name)
        if not len(xs_objs) == 1:
            log.warn("Could not find RR XS object with name: %s", name)
            return None

        self._xs_cache[name] = xs_objs[0]
        return xs_objs[0]

    def start(self):
        """ Establish the single broker connection and raw channel;
        returns the (node, ioloop) pair to the caller. """
        log.debug("ExchangeManager starting ...")

        # Establish connection to broker
        # @TODO: raise error if sux
        node, ioloop = messaging.make_node()

        self._transport = AMQPTransport.get_instance()
        self._client = self._get_channel(node)

        # Declare root exchange
        #self.default_xs.ensure_exists(self._get_channel())
        return node, ioloop

    def _ems_available(self):
        """
        Returns True if the EMS is (likely) available and the auto_register CFG entry is True.

        Has the side effect of bootstrapping the org_id and default_xs's id/rev from the RR.
        Therefore, cannot be a property.
        """
        if CFG.container.get('exchange', {}).get('auto_register', False):
            # ok now make sure it's in the directory
            svc_de = self.container.directory.lookup(
                '/Services/exchange_management')
            if svc_de is not None:
                if not self.org_id:
                    # find the default Org
                    org_ids = self._rr_client.find_resources(RT.Org, id_only=True)
                    if not (len(org_ids) and len(org_ids[0]) == 1):
                        log.warn("EMS available but could not find Org")
                        return False

                    self.org_id = org_ids[0][0]
                    log.debug(
                        "Bootstrapped Container exchange manager with org id: %s",
                        self.org_id)
                return True

        return False

    def _get_channel(self, node):
        """ Get a raw channel to be used by all the ensure_exists methods.
        Opened lazily on first use and cached on self._chan. """
        assert self.container

        # @TODO: needs lock, but so do all these methods
        if not self._chan:
            self._chan = blocking_cb(node.client.channel, 'on_open_callback')

        return self._chan

    def create_xs(self, name, use_ems=True, exchange_type='topic', durable=False, auto_delete=True):
        """ Create an ExchangeSpace, via EMS when available else by direct declare. """
        log.debug("ExchangeManager.create_xs: %s", name)
        xs = ExchangeSpace(self, name, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

        self.xs_by_name[name] = xs

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xs")
            # create a RR object
            xso = ResExchangeSpace(name=name)
            xso_id = self._ems_client.create_exchange_space(xso, self.org_id)

            log.debug("Created RR XS object, id: %s", xso_id)
        else:
            xs.declare()

        return xs

    def delete_xs(self, xs, use_ems=True):
        """
        Delete an ExchangeSpace, via EMS when available else by direct delete.
        @type xs    ExchangeSpace
        """
        log.debug("ExchangeManager.delete_xs: %s", xs)

        name = xs._exchange     # @TODO this feels wrong
        del self.xs_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xs")
            xso = self._get_xs_obj(name)
            # NOTE(review): _get_xs_obj can return None; this would raise
            # AttributeError on xso._id in that case — confirm intended.
            self._ems_client.delete_exchange_space(xso._id)
            del self._xs_cache[name]
        else:
            xs.delete()

    def create_xp(self, name, xs=None, use_ems=True, **kwargs):
        """ Create an ExchangePoint on the given (or default) ExchangeSpace. """
        log.debug("ExchangeManager.create_xp: %s", name)
        xs = xs or self.default_xs
        xp = ExchangePoint(self, name, xs, **kwargs)

        # put in xn_by_name anyway
        self.xn_by_name[name] = xp

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xp")
            # create an RR object
            xpo = ResExchangePoint(name=name, topology_type=xp._xptype)
            xpo_id = self._ems_client.create_exchange_point(
                xpo, self._get_xs_obj(
                    xs._exchange)._id)  # @TODO: _exchange is wrong
        else:
            xp.declare()

        return xp

    def delete_xp(self, xp, use_ems=True):
        """ Delete an ExchangePoint, via EMS when available else by direct delete. """
        log.debug("ExchangeManager.delete_xp: name=%s", 'TODO') #xp.build_xname())

        name = xp._exchange # @TODO: not right
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xp")
            # find the XP object via RR
            xpo_ids = self._rr_client.find_resources(RT.ExchangePoint, name=name, id_only=True)
            if not (len(xpo_ids) and len(xpo_ids[0]) == 1):
                log.warn("Could not find XP in RR with name of %s", name)

            # NOTE(review): the warning above does not return; if no XP was
            # found this next line raises IndexError — confirm intended.
            xpo_id = xpo_ids[0][0]
            self._ems_client.delete_exchange_point(xpo_id)
        else:
            xp.delete()

    def _create_xn(self, xn_type, name, xs=None, use_ems=True, **kwargs):
        """ Shared factory for the create_xn_* methods; xn_type selects the XN class. """
        xs = xs or self.default_xs
        log.debug(
            "ExchangeManager._create_xn: type: %s, name=%s, xs=%s, kwargs=%s",
            xn_type, name, xs, kwargs)

        if xn_type == "service":
            xn = ExchangeNameService(self, name, xs, **kwargs)
        elif xn_type == "process":
            xn = ExchangeNameProcess(self, name, xs, **kwargs)
        elif xn_type == "queue":
            xn = ExchangeNameQueue(self, name, xs, **kwargs)
        else:
            raise StandardError("Unknown XN type: %s" % xn_type)

        self.xn_by_name[name] = xn

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xn")
            xno = ResExchangeName(name=name, xn_type=xn.xn_type)
            self._ems_client.declare_exchange_name(
                xno, self._get_xs_obj(xs._exchange)._id)  # @TODO: exchange is wrong
        else:
            xn.declare()

        return xn

    def create_xn_service(self, name, xs=None, **kwargs):
        """ Create a service-type ExchangeName. """
        return self._create_xn('service', name, xs=xs, **kwargs)

    def create_xn_process(self, name, xs=None, **kwargs):
        """ Create a process-type ExchangeName. """
        return self._create_xn('process', name, xs=xs, **kwargs)

    def create_xn_queue(self, name, xs=None, **kwargs):
        """ Create a queue-type ExchangeName. """
        return self._create_xn('queue', name, xs=xs, **kwargs)

    def delete_xn(self, xn, use_ems=False):
        """ Delete an ExchangeName; note use_ems defaults to False here,
        unlike the other delete_* methods. """
        log.debug("ExchangeManager.delete_xn: name=%s", "TODO") #xn.build_xlname())

        name = xn._queue # @TODO feels wrong
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xn")
            # find the XN object via RR?
            xno_ids = self._rr_client.find_resources(RT.ExchangeName, name=name, id_only=True)
            if not (len(xno_ids) and len(xno_ids[0]) == 1):
                log.warn("Could not find XN in RR with name of %s", name)

            # NOTE(review): same warn-then-index pattern as delete_xp; raises
            # IndexError when the XN is not found — confirm intended.
            xno_id = xno_ids[0][0]
            self._ems_client.undeclare_exchange_name(
                xno_id)  # "canonical name" currently understood to be RR id
        else:
            xn.delete()

    def stop(self, *args, **kwargs):
        """ No teardown in this variant; the node/ioloop returned by start()
        are owned (and stopped) by the caller. """
        log.debug("ExchangeManager stopping ...")

    # transport implementations - XOTransport objects call here
    def declare_exchange(self, exchange, exchange_type='topic', durable=False, auto_delete=True):
        """ Declare an exchange on the raw channel. """
        log.info("ExchangeManager.declare_exchange")
        self._transport.declare_exchange_impl(self._client, exchange, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

    def delete_exchange(self, exchange, **kwargs):
        """ Delete an exchange on the raw channel. """
        log.info("ExchangeManager.delete_exchange")
        self._transport.delete_exchange_impl(self._client, exchange, **kwargs)

    def declare_queue(self, queue, durable=False, auto_delete=True):
        """ Declare a queue on the raw channel; returns the declared queue name.
        NOTE(review): auto_delete defaults to True here (the other variant of
        this class defaults it to False) — confirm which is intended. """
        log.info("ExchangeManager.declare_queue")
        return self._transport.declare_queue_impl(self._client, queue, durable=durable, auto_delete=auto_delete)

    def delete_queue(self, queue, **kwargs):
        """ Delete a queue on the raw channel. """
        log.info("ExchangeManager.delete_queue")
        self._transport.delete_queue_impl(self._client, queue, **kwargs)

    def bind(self, exchange, queue, binding):
        """ Bind a queue to an exchange with the given binding key. """
        log.info("ExchangeManager.bind")
        self._transport.bind_impl(self._client, exchange, queue, binding)

    def unbind(self, exchange, queue, binding):
        """ Remove a queue/exchange binding. """
        log.info("ExchangeManager.unbind")
        self._transport.unbind_impl(self._client, exchange, queue, binding)
class BootstrapProcessDispatcher(BootstrapPlugin):
    """
    Bootstrap plugin for the process dispatcher (PD).

    Registers process definitions for the core data processes (ingestion,
    replay, notification, registration, PyDAP) and conditionally launches
    them when the system is first bootstrapped.
    """

    def on_initial_bootstrap(self, process, config, **kwargs):
        """Build the service clients, then run every bootstrap step in order."""
        self.pds_client = ProcessDispatcherServiceProcessClient(process=process)
        self.resource_registry = ResourceRegistryServiceProcessClient(process=process)

        bootstrap_steps = (self.ingestion_worker,
                           self.replay_defs,
                           self.notification_worker,
                           self.registration_worker,
                           self.pydap_server)
        for step in bootstrap_steps:
            step(process, config)

    def pydap_server(self, process, config):
        """Register the lightweight PyDAP WSGI server; launch it only when
        bootstrap.launch_pydap is set."""
        module_path = config.get_safe('bootstrap.processes.pydap.module',
                                      'ion.processes.data.externalization.lightweight_pydap')
        class_name = config.get_safe('bootstrap.processes.pydap.class',
                                     'LightweightPyDAP')
        should_launch = config.get_safe('bootstrap.launch_pydap', False)

        defn = ProcessDefinition(name='pydap_server',
                                 description='Lightweight WSGI Server for PyDAP')
        defn.executable['module'] = module_path
        defn.executable['class'] = class_name

        self._create_and_launch(defn, should_launch)

    def registration_worker(self, process, config):
        """Register the ERDDAP registration worker, unless a definition with
        that name already exists in the resource registry."""
        existing, _ = self.resource_registry.find_resources(
            name='registration_worker', restype=RT.ProcessDefinition)
        if existing:
            return  # already bootstrapped on a previous run

        module_path = config.get_safe('bootstrap.processes.registration.module',
                                      'ion.processes.data.registration.registration_process')
        class_name = config.get_safe('bootstrap.processes.registration.class',
                                     'RegistrationProcess')
        should_launch = config.get_safe('bootstrap.use_pydap', False)

        defn = ProcessDefinition(name='registration_worker',
                                 description='For registering datasets with ERDDAP')
        defn.executable['module'] = module_path
        defn.executable['class'] = class_name

        self._create_and_launch(defn, should_launch)

    def _create_and_launch(self, process_definition, conditional=True):
        """Create the PD definition; when `conditional` is truthy, also create
        a process resource for it and schedule it for launch."""
        proc_def_id = self.pds_client.create_process_definition(
            process_definition=process_definition)
        if not conditional:
            return
        process_res_id = self.pds_client.create_process(
            process_definition_id=proc_def_id)
        self.pds_client.schedule_process(process_definition_id=proc_def_id,
                                         process_id=process_res_id)

    def ingestion_worker(self, process, config):
        """Register the science-granule ingestion worker definition.
        Only the definition is created; worker scheduling stays disabled."""
        module_path = config.get_safe('bootstrap.processes.ingestion.module',
                                      'ion.processes.data.ingestion.science_granule_ingestion_worker')
        class_name = config.get_safe('bootstrap.processes.ingestion.class',
                                     'ScienceGranuleIngestionWorker')
        # Read for completeness; only consumed by the disabled scheduling
        # block noted below.
        ingestion_datastore = config.get_safe(
            'bootstrap.processes.ingestion.datastore_name', 'datasets')
        ingestion_queue = config.get_safe(
            'bootstrap.processes.ingestion.queue', 'science_granule_ingestion')
        ingestion_workers = config.get_safe(
            'bootstrap.processes.ingestion.workers', 1)

        defn = ProcessDefinition(
            name='ingestion_worker_process',
            description='Worker transform process for ingestion of datasets')
        defn.executable['module'] = module_path
        defn.executable['class'] = class_name
        ingestion_procdef_id = self.pds_client.create_process_definition(
            process_definition=defn)

        # NOTE: scheduling of the (HA) ingestion workers is intentionally left
        # disabled here; only the process definition is registered.

    def notification_worker(self, process, config):
        """Register the user-notification worker definition and schedule the
        configured number of worker processes."""
        module_path = config.get_safe('bootstrap.processes.user_notification.module',
                                      'ion.processes.data.transforms.notification_worker')
        class_name = config.get_safe('bootstrap.processes.user_notification.class',
                                     'NotificationWorker')
        worker_count = config.get_safe(
            'bootstrap.processes.user_notification.workers', 1)

        defn = ProcessDefinition(
            name='notification_worker_process',
            description='Worker transform process for user notifications')
        defn.executable['module'] = module_path
        defn.executable['class'] = class_name
        uns_procdef_id = self.pds_client.create_process_definition(
            process_definition=defn)

        # A single DotDict is reused and mutated for each scheduled worker.
        worker_config = DotDict()
        worker_config.process.type = 'simple'
        for index in xrange(worker_count):
            worker_config.process.name = 'notification_worker_%s' % index
            worker_config.process.queue_name = 'notification_worker_queue'
            self.pds_client.schedule_process(process_definition_id=uns_procdef_id,
                                             configuration=worker_config)

    def replay_defs(self, process, config):
        """Register the dataset replay process definition (definition only,
        never auto-launched here)."""
        module_path = config.get_safe('bootstrap.processes.replay.module',
                                      'ion.processes.data.replay.replay_process')
        class_name = config.get_safe('bootstrap.processes.replay.class',
                                     'ReplayProcess')

        defn = ProcessDefinition(name=DataRetrieverService.REPLAY_PROCESS,
                                 description='Process for the replay of datasets')
        defn.executable['module'] = module_path
        defn.executable['class'] = class_name
        self.pds_client.create_process_definition(process_definition=defn)

    def on_restart(self, process, config, **kwargs):
        """No restart work: all bootstrap actions here are one-time only."""
        pass
class ExchangeManager(object):
    """
    Manager object for the CC (Capability Container) to manage Exchange related resources.

    Owns the broker node connections, a raw AMQP channel used for declare/delete
    operations, and friendly-name mappings of ExchangeSpaces (XS) and
    ExchangeNames/Points (XN/XP). When the Exchange Management Service (EMS) is
    available and auto-registration is enabled, create/delete operations are
    routed through the EMS so they are also recorded in the Resource Registry.
    """

    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [self.create_xs,
                              self.create_xp,
                              self.create_xn_service,
                              self.create_xn_process,
                              self.create_xn_queue]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs         = ExchangeSpace(self, ION_ROOT_XS)
        self._xs_cache          = {}        # caching of xs names to RR objects
        self._default_xs_obj    = None      # default XS registry object
        self.org_id             = None

        # mappings
        self.xs_by_name = {ION_ROOT_XS: self.default_xs}    # friendly named XS to XSO
        self.xn_by_name = {}                                # friendly named XN to XNO
        # xn by xs is a property

        self._chan = None

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client = ExchangeManagementServiceProcessClient(process=self.container)
        self._rr_client = ResourceRegistryServiceProcessClient(process=self.container)

        # mapping of node/ioloop runner by connection name (in config, named via container.messaging.server keys)
        self._nodes = {}
        self._ioloops = {}

        self._client = None
        self._transport = None

        self._default_xs_declared = False

    def start(self):
        """
        Establishes all configured broker connections (CFG.container.messaging.server),
        tolerating partial failure as long as at least one node starts, then acquires
        the raw channel used for subsequent exchange/queue operations.

        @raises ExchangeManagerError  if a config key is unknown or no node could start.
        """
        log.debug("ExchangeManager.start")

        total_count = 0

        def handle_failure(name, node):
            log.warn("Node %s could not be started", name)
            node.ready.set()        # let it fall out below

        # Establish connection(s) to broker
        for name, cfgkey in CFG.container.messaging.server.iteritems():
            if not cfgkey:
                continue

            if cfgkey not in CFG.server:
                raise ExchangeManagerError("Config key %s (name: %s) (from CFG.container.messaging.server) not in CFG.server" % (cfgkey, name))

            total_count += 1
            log.debug("Starting connection: %s", name)

            # start it with a zero timeout so it comes right back to us
            try:
                node, ioloop = messaging.make_node(CFG.server[cfgkey], name, 0)

                # install a finished handler directly on the ioloop just for this startup period
                fail_handle = lambda _: handle_failure(name, node)
                ioloop.link(fail_handle)

                # wait for the node ready event, with a large timeout just in case
                node.ready.wait(timeout=15)

                # remove the finished handler, we don't care about it here
                ioloop.unlink(fail_handle)

                # only add to our list if we started successfully
                if not node.running:
                    ioloop.kill()      # make sure ioloop dead
                else:
                    self._nodes[name] = node
                    self._ioloops[name] = ioloop

            except socket.error:
                log.warn("Could not start connection %s due to socket error, continuing", name)

        fail_count = total_count - len(self._nodes)
        if fail_count > 0 or total_count == 0:
            if fail_count == total_count:
                raise ExchangeManagerError("No node connection was able to start (%d nodes attempted, %d nodes failed)" % (total_count, fail_count))

            log.warn("Some nodes could not be started, ignoring for now")   # @TODO change when ready

        self._transport = AMQPTransport.get_instance()
        # NOTE(review): 'priviledged' (sic) appears to be the actual config key used
        # elsewhere in the system - confirm before "fixing" the spelling.
        self._client = self._get_channel(self._nodes.get('priviledged', self._nodes.values()[0]))      # @TODO

        log.debug("Started %d connections (%s)", len(self._nodes), ",".join(self._nodes.iterkeys()))

    def stop(self, *args, **kwargs):
        """
        Stops all node connections and their ioloops, blocking until each
        connection is fully closed.
        """
        # ##############
        # HACK HACK HACK
        #
        # It appears during shutdown that when a channel is closed, it's not FULLY closed by the pika connection
        # until the next round of _handle_events. We have to yield here to let that happen, in order to have close
        # work fine without blowing up.
        # ##############
        time.sleep(0.1)
        # ##############
        # /HACK
        # ##############

        log.debug("ExchangeManager.stopping (%d connections)", len(self._nodes))

        for name in self._nodes:
            self._nodes[name].stop_node()
            self._ioloops[name].kill()
            self._nodes[name].client.ioloop.start()     # loop until connection closes

        # @TODO undeclare root xs?? need to know if last container
        #self.default_xs.delete()

    @property
    def default_node(self):
        """
        Returns the default node connection: the one named 'primary' if present,
        otherwise the first available, otherwise None.
        """
        if 'primary' in self._nodes:
            return self._nodes['primary']
        elif len(self._nodes):
            log.warn("No primary connection, returning first available")
            return self._nodes.values()[0]

        return None

    @property
    def xn_by_xs(self):
        """
        Get a list of XNs associated by XS (friendly name).
        """
        ret = {}
        for xnname, xn in self.xn_by_name.iteritems():
            xsn = xn._xs._exchange
            if not xsn in ret:
                ret[xsn] = []
            ret[xsn].append(xn)

        return ret

    def _get_xs_obj(self, name=ION_ROOT_XS):
        """
        Gets a resource-registry represented XS, either via cache or RR request.
        Returns None (after a warning) if exactly one match is not found.
        """
        if name in self._xs_cache:
            return self._xs_cache[name]

        xs_objs, _ = self._rr_client.find_resources(RT.ExchangeSpace, name=name)
        if not len(xs_objs) == 1:
            log.warn("Could not find RR XS object with name: %s", name)
            return None

        self._xs_cache[name] = xs_objs[0]
        return xs_objs[0]

    def _ems_available(self):
        """
        Returns True if the EMS is (likely) available and the auto_register CFG entry is True.

        Has the side effect of bootstrapping the org_id and default_xs's id/rev from the RR.
        Therefore, cannot be a property.
        """
        if CFG.container.get('exchange', {}).get('auto_register', False):
            # ok now make sure it's in the directory
            svc_de = self.container.directory.lookup('/Services/exchange_management')
            if svc_de is not None:
                if not self.org_id:
                    # find the default Org
                    org_ids = self._rr_client.find_resources(RT.Org, id_only=True)
                    if not (len(org_ids) and len(org_ids[0]) == 1):
                        log.warn("EMS available but could not find Org")
                        return False

                    self.org_id = org_ids[0][0]
                    log.debug("Bootstrapped Container exchange manager with org id: %s", self.org_id)
                return True

        return False

    def _get_channel(self, node):
        """
        Get a raw channel to be used by all the ensure_exists methods.
        """
        assert self.container

        # @TODO: needs lock, but so do all these methods
        if not self._chan:
            self._chan = blocking_cb(node.client.channel, 'on_open_callback')

        return self._chan

    def create_xs(self, name, use_ems=True, exchange_type='topic', durable=False, auto_delete=True):
        """
        Creates an ExchangeSpace, registering it through the EMS when available,
        otherwise declaring it directly on the broker.
        """
        log.debug("ExchangeManager.create_xs: %s", name)
        xs = ExchangeSpace(self, name, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

        self.xs_by_name[name] = xs

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xs")
            # create a RR object
            xso = ResExchangeSpace(name=name)
            xso_id = self._ems_client.create_exchange_space(xso, self.org_id)

            log.debug("Created RR XS object, id: %s", xso_id)
        else:
            xs.declare()

        return xs

    def delete_xs(self, xs, use_ems=True):
        """
        @type xs    ExchangeSpace
        """
        log.debug("ExchangeManager.delete_xs: %s", xs)

        name = xs._exchange     # @TODO this feels wrong
        del self.xs_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xs")
            xso = self._get_xs_obj(name)
            self._ems_client.delete_exchange_space(xso._id)
            del self._xs_cache[name]
        else:
            xs.delete()

    def create_xp(self, name, xs=None, use_ems=True, **kwargs):
        """
        Creates an ExchangePoint in the given (or default) ExchangeSpace.
        """
        log.debug("ExchangeManager.create_xp: %s", name)
        xs = xs or self.default_xs
        xp = ExchangePoint(self, name, xs, **kwargs)

        # put in xn_by_name anyway
        self.xn_by_name[name] = xp

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xp")
            # create an RR object
            xpo = ResExchangePoint(name=name, topology_type=xp._xptype)
            xpo_id = self._ems_client.create_exchange_point(xpo, self._get_xs_obj(xs._exchange)._id)        # @TODO: _exchange is wrong
        else:
            xp.declare()

        return xp

    def delete_xp(self, xp, use_ems=True):
        log.debug("ExchangeManager.delete_xp: name=%s", 'TODO') #xp.build_xname())

        name = xp._exchange     # @TODO: not right
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xp")
            # find the XP object via RR
            xpo_ids = self._rr_client.find_resources(RT.ExchangePoint, name=name, id_only=True)
            if not (len(xpo_ids) and len(xpo_ids[0]) == 1):
                log.warn("Could not find XP in RR with name of %s", name)
                # BUG FIX: previously fell through and indexed an empty result,
                # raising IndexError; bail out after the warning instead.
                return

            xpo_id = xpo_ids[0][0]
            self._ems_client.delete_exchange_point(xpo_id)
        else:
            xp.delete()

    def _create_xn(self, xn_type, name, xs=None, use_ems=True, **kwargs):
        """
        Common factory for the three XN flavors ('service', 'process', 'queue').

        @raises StandardError   if xn_type is not one of the known flavors.
        """
        xs = xs or self.default_xs
        log.debug("ExchangeManager._create_xn: type: %s, name=%s, xs=%s, kwargs=%s", xn_type, name, xs, kwargs)

        if xn_type == "service":
            xn = ExchangeNameService(self, name, xs, **kwargs)
        elif xn_type == "process":
            xn = ExchangeNameProcess(self, name, xs, **kwargs)
        elif xn_type == "queue":
            xn = ExchangeNameQueue(self, name, xs, **kwargs)
        else:
            raise StandardError("Unknown XN type: %s" % xn_type)

        self.xn_by_name[name] = xn

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xn")
            xno = ResExchangeName(name=name, xn_type=xn.xn_type)
            self._ems_client.declare_exchange_name(xno, self._get_xs_obj(xs._exchange)._id)     # @TODO: exchange is wrong
        else:
            xn.declare()

        return xn

    def create_xn_service(self, name, xs=None, **kwargs):
        return self._create_xn('service', name, xs=xs, **kwargs)

    def create_xn_process(self, name, xs=None, **kwargs):
        return self._create_xn('process', name, xs=xs, **kwargs)

    def create_xn_queue(self, name, xs=None, **kwargs):
        return self._create_xn('queue', name, xs=xs, **kwargs)

    def delete_xn(self, xn, use_ems=False):
        log.debug("ExchangeManager.delete_xn: name=%s", "TODO") #xn.build_xlname())

        name = xn._queue                # @TODO feels wrong
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xn")
            # find the XN object via RR?
            xno_ids = self._rr_client.find_resources(RT.ExchangeName, name=name, id_only=True)
            if not (len(xno_ids) and len(xno_ids[0]) == 1):
                log.warn("Could not find XN in RR with name of %s", name)
                # BUG FIX: previously fell through and indexed an empty result,
                # raising IndexError; bail out after the warning instead.
                return

            xno_id = xno_ids[0][0]
            self._ems_client.undeclare_exchange_name(xno_id)        # "canonical name" currently understood to be RR id
        else:
            xn.delete()

    def _ensure_default_declared(self):
        """
        Ensures we declared the default exchange space.
        Needed by most exchange object calls, so each one calls here.
        """
        if not self._default_xs_declared:
            log.debug("ExchangeManager._ensure_default_declared, declaring default xs")
            self._default_xs_declared = True
            self.default_xs.declare()

    # transport implementations - XOTransport objects call here
    def declare_exchange(self, exchange, exchange_type='topic', durable=False, auto_delete=True):
        log.info("ExchangeManager.declare_exchange")
        self._ensure_default_declared()
        self._transport.declare_exchange_impl(self._client, exchange, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

    def delete_exchange(self, exchange, **kwargs):
        log.info("ExchangeManager.delete_exchange")
        self._ensure_default_declared()
        self._transport.delete_exchange_impl(self._client, exchange, **kwargs)

    def declare_queue(self, queue, durable=False, auto_delete=False):
        log.info("ExchangeManager.declare_queue (queue %s, durable %s, AD %s)", queue, durable, auto_delete)
        self._ensure_default_declared()
        return self._transport.declare_queue_impl(self._client, queue, durable=durable, auto_delete=auto_delete)

    def delete_queue(self, queue, **kwargs):
        log.info("ExchangeManager.delete_queue")
        self._ensure_default_declared()
        self._transport.delete_queue_impl(self._client, queue, **kwargs)

    def bind(self, exchange, queue, binding):
        log.info("ExchangeManager.bind")
        self._ensure_default_declared()
        self._transport.bind_impl(self._client, exchange, queue, binding)

    def unbind(self, exchange, queue, binding):
        log.info("ExchangeManager.unbind")
        self._ensure_default_declared()
        self._transport.unbind_impl(self._client, exchange, queue, binding)

    def get_stats(self, queue):
        log.info("ExchangeManager.get_stats")
        self._ensure_default_declared()
        return self._transport.get_stats(self._client, queue)

    def purge(self, queue):
        log.info("ExchangeManager.purge")
        self._ensure_default_declared()
        self._transport.purge(self._client, queue)
class GovernanceController(object): def __init__(self, container): log.debug('GovernanceController.__init__()') self.container = container self.enabled = False self.interceptor_by_name_dict = dict() self.interceptor_order = [] self.policy_decision_point_manager = None self.governance_dispatcher = None def start(self): log.debug("GovernanceController starting ...") config = CFG.interceptor.interceptors.governance.config if config is None: config['enabled'] = False if "enabled" in config: self.enabled = config["enabled"] log.debug("GovernanceInterceptor enabled: %s" % str(self.enabled)) self.event_subscriber = None if self.enabled: self.initialize_from_config(config) self.event_subscriber = EventSubscriber( event_type="ResourceModifiedEvent", origin_type="Policy", callback=self.policy_event_callback) self.event_subscriber.activate() self.rr_client = ResourceRegistryServiceProcessClient( node=self.container.node, process=self.container) self.policy_client = PolicyManagementServiceProcessClient( node=self.container.node, process=self.container) def initialize_from_config(self, config): self.governance_dispatcher = GovernanceDispatcher() self.policy_decision_point_manager = PolicyDecisionPointManager() if 'interceptor_order' in config: self.interceptor_order = config['interceptor_order'] if 'governance_interceptors' in config: gov_ints = config['governance_interceptors'] for name in gov_ints: interceptor_def = gov_ints[name] # Instantiate and put in by_name array parts = interceptor_def["class"].split('.') modpath = ".".join(parts[:-1]) classname = parts[-1] module = __import__(modpath, fromlist=[classname]) classobj = getattr(module, classname) classinst = classobj() # Put in by_name_dict for possible re-use self.interceptor_by_name_dict[name] = classinst def stop(self): log.debug("GovernanceController stopping ...") if self.event_subscriber is not None: self.event_subscriber.deactivate() def process_incoming_message(self, invocation): self.process_message(invocation, 
self.interceptor_order, 'incoming') return self.governance_dispatcher.handle_incoming_message(invocation) def process_outgoing_message(self, invocation): self.process_message(invocation, reversed(self.interceptor_order), 'outgoing') return self.governance_dispatcher.handle_outgoing_message(invocation) def process_message(self, invocation, interceptor_list, method): for int_name in interceptor_list: class_inst = self.interceptor_by_name_dict[int_name] getattr(class_inst, method)(invocation) return invocation def policy_event_callback(self, *args, **kwargs): policy_event = args[0] log.debug("Policy modified: %s" % policy_event.origin) self.trigger_policy_update(policy_event.origin) #This is a function which allows for a manual update of the policies as well. def trigger_policy_update(self, policy_id): try: #TODO - Find a better way to work with org_id - use ION Org for now. ion_org, _ = self.rr_client.find_resources( restype=RT.Org, name=CFG.system.root_org) resource_list, _ = self.rr_client.find_subjects( "", PRED.hasPolicy, policy_id) for res in resource_list: #TODO - may figure out a better way to get the name of the Resource Type - or maybe this is ok resource_type = res.__class__.__name__ #log.debug("Resource Type: %s" % resource_type) if resource_type == 'ServiceDefinition': policy_rules = self.policy_client.get_active_service_policy_rules( ion_org[0]._id, res.name) self.update_resource_policy(res.name, policy_rules) elif resource_type == 'Org': self.update_all_resource_policy(res._id) else: policy_rules = self.policy_client.get_active_resource_policy_rules( res._id) self.update_resource_policy(res._id, policy_rules) except Exception, e: log.error(e.message)
class TestGovernanceInt(IonIntegrationTestCase):
    """
    Integration tests for governance/policy enforcement: service access control,
    Org enrollment, role requests and resource acquisition/commitments.

    NOTE(review): these tests depend on live container services and on exact
    denial/error message strings produced by the policy interceptors.
    """

    def setUp(self):
        # Start container
        self._start_container()

        #Load a deploy file
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process=GovernanceTestProcess()

        #Load system policies after container has started all of the services
        LoadSystemPolicy.op_load_system_policies(process)

        # Service clients used throughout the tests
        self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.id_client = IdentityManagementServiceProcessClient(node=self.container.node, process=process)
        self.pol_client = PolicyManagementServiceProcessClient(node=self.container.node, process=process)
        self.org_client = OrgManagementServiceProcessClient(node=self.container.node, process=process)
        self.ims_client = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.ems_client = ExchangeManagementServiceProcessClient(node=self.container.node, process=process)

        self.ion_org = self.org_client.find_org()
        self.system_actor = self.id_client.find_actor_identity_by_name(name=CFG.system.system_actor)
        log.debug('system actor:' + self.system_actor._id)

        # Message headers carrying the system actor's identity and roles,
        # used to make privileged service calls in the tests below.
        sa_header_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(self.system_actor._id))
        self.sa_user_header = {'ion-actor-id': self.system_actor._id, 'ion-actor-roles': sa_header_roles }

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_basic_policy(self):
        """Verify a service policy can be created, disabled, re-enabled and removed,
        toggling access to an Exchange Management operation each time."""

        #Make sure that the system policies have been loaded
        policy_list,_ = self.rr_client.find_resources(restype=RT.Policy)
        self.assertNotEqual(len(policy_list),0,"The system policies have not been loaded into the Resource Registry")

        #Attempt to access an operation in service which does not have specific policies set
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)

        #Add a new policy to allow the above service call.
        policy_obj = IonObject(RT.Policy, name='Exchange_Management_Test_Policy', definition_type="Service", rule=TEST_POLICY_TEXT,
            description='Allow specific operations in the Exchange Management Service for anonymous user')

        test_policy_id = self.pol_client.create_policy(policy_obj, headers=self.sa_user_header)
        self.pol_client.add_service_policy('exchange_management', test_policy_id, headers=self.sa_user_header)
        log.info('Policy created: ' + policy_obj.name)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The previous attempt at this operation should now be allowed
        #(BadRequest rather than Unauthorized proves the call got past policy).
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(BadRequest) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'Arguments not set',cm.exception.message)

        #disable the test policy to try again
        self.pol_client.disable_policy(test_policy_id, headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The same request that previously was allowed should now be denied
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)

        #now enable the test policy to try again
        self.pol_client.enable_policy(test_policy_id, headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The previous attempt at this operation should now be allowed.
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(BadRequest) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'Arguments not set',cm.exception.message)

        self.pol_client.remove_service_policy('exchange_management', test_policy_id, headers=self.sa_user_header)
        self.pol_client.delete_policy(test_policy_id, headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The same request that previously was allowed should now be denied
        es_obj = IonObject(RT.ExchangeSpace, description= 'ION test XS', name='ioncore2' )
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn( 'exchange_management(create_exchange_space) has been denied',cm.exception.message)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_org_policy(self):
        """Walk through Org enrollment, role-request and resource-acquisition
        workflows, checking that each step is allowed/denied per policy."""

        #Make sure that the system policies have been loaded
        policy_list,_ = self.rr_client.find_resources(restype=RT.Policy)
        self.assertNotEqual(len(policy_list),0,"The system policies have not been loaded into the Resource Registry")

        with self.assertRaises(BadRequest) as cm:
            myorg = self.org_client.read_org()
        self.assertTrue(cm.exception.message == 'The org_id parameter is missing')

        # Register a new user and build identity headers for it
        user_id, valid_until, registered = self.id_client.signon(USER1_CERTIFICATE, True)
        log.debug( "user id=" + user_id)
        user_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles }

        #Attempt to enroll a user anonymously - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id,user_id)
        self.assertIn( 'org_management(enroll_member) has been denied',cm.exception.message)

        #Attempt to let a user enroll themselves - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id,user_id, headers=user_header)
        self.assertIn( 'org_management(enroll_member) has been denied',cm.exception.message)

        #Attempt to enroll the user in the ION Root org as a manager - should not be allowed since
        #registration with the system implies membership in the ROOT Org.
        with self.assertRaises(BadRequest) as cm:
            self.org_client.enroll_member(self.ion_org._id,user_id, headers=self.sa_user_header)
        self.assertTrue(cm.exception.message == 'A request to enroll in the root ION Org is not allowed')

        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id)
        self.assertIn('org_management(find_enrolled_users) has been denied',cm.exception.message)

        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id, headers=user_header)
        self.assertIn( 'org_management(find_enrolled_users) has been denied',cm.exception.message)

        users = self.org_client.find_enrolled_users(self.ion_org._id, headers=self.sa_user_header)
        self.assertEqual(len(users),2)

        ## test_org_roles and policies

        roles = self.org_client.find_org_roles(self.ion_org._id)
        self.assertEqual(len(roles),3)
        self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id, self.system_actor._id, headers=self.sa_user_header)
        self.assertEqual(len(roles),3)
        self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE, MANAGER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id, user_id, headers=self.sa_user_header)
        self.assertEqual(len(roles),1)
        self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE])

        # Create a second Org to exercise enrollment/role flows outside ION Root
        with self.assertRaises(NotFound) as nf:
            org2 = self.org_client.find_org(ORG2)
        self.assertIn('The Org with name Org2 does not exist',nf.exception.message)

        org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
        org2_id = self.org_client.create_org(org2, headers=self.sa_user_header)

        org2 = self.org_client.find_org(ORG2)
        self.assertEqual(org2_id, org2._id)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles),2)
        self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE])

        operator_role = IonObject(RT.UserRole, name=INSTRUMENT_OPERATOR,label='Instrument Operator', description='Instrument Operator')

        #First try to add the user role anonymously
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.add_user_role(org2_id, operator_role)
        self.assertIn('org_management(add_user_role) has been denied',cm.exception.message)

        self.org_client.add_user_role(org2_id, operator_role, headers=self.sa_user_header)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles),3)
        self.assertItemsEqual([r.name for r in roles], [MANAGER_ROLE, MEMBER_ROLE, INSTRUMENT_OPERATOR])

        # test requests for enrollments and roles.

        #First try to find user requests anonymously
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id)
        self.assertIn('org_management(find_requests) has been denied',cm.exception.message)

        #Next try to find user requests as a basic member
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id, headers=user_header)
        self.assertIn('org_management(find_requests) has been denied',cm.exception.message)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),0)

        # First try to request a role without being a member
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header )
        self.assertIn('A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)',cm.exception.message)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),0)

        req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header )

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        #User tried requesting enrollment again - this should fail
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header )
        self.assertIn('A precondition for this request has not been satisfied: enroll_req_not_exist(org_id,user_id)',cm.exception.message)

        #Manager denies the request
        self.org_client.deny_request(org2_id,req_id,'To test the deny process', headers=self.sa_user_header)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),1)
        self.assertEqual(requests[0].status, REQUEST_DENIED)

        #Manager approves request
        self.org_client.approve_request(org2_id,req_id, headers=self.sa_user_header)

        # Enrollment is not effective until the user accepts
        users = self.org_client.find_enrolled_users(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users),0)

        #User Accepts request
        self.org_client.accept_request(org2_id,req_id, headers=user_header)

        users = self.org_client.find_enrolled_users(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users),1)

        #User tried requesting enrollment again - this should fail
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header )
        self.assertIn('A precondition for this request has not been satisfied: is_not_enrolled(org_id,user_id)',cm.exception.message)

        req_id = self.org_client.request_role(org2_id, user_id, INSTRUMENT_OPERATOR, headers=user_header )

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),2)

        requests = self.org_client.find_requests(org2_id,request_status='Open', headers=self.sa_user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
        self.assertEqual(len(requests),2)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.RoleRequest, headers=user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),1)

        # Instrument agent creation is denied until the operator role is granted
        ia_list,_ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
        self.assertEqual(len(ia_list),0)

        ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The first Instrument Agent')

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj)
        self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj, headers=user_header)
        self.assertIn('instrument_management(create_instrument_agent) has been denied',cm.exception.message)

        #Manager approves request
        self.org_client.approve_request(org2_id,req_id, headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),0)

        #User accepts request
        self.org_client.accept_request(org2_id, req_id, headers=user_header)

        #Refresh headers with new role
        user_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles }

        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent2', description='The second Instrument Agent')
        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_list,_ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
        self.assertEqual(len(ia_list),2)

        #First make an acquire resource request with a non-enrolled user.
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_acquire_resource(org2_id,self.system_actor._id,ia_list[0]._id , headers=self.sa_user_header)
        self.assertIn('A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)',cm.exception.message)

        req_id = self.org_client.request_acquire_resource(org2_id,user_id,ia_list[0]._id , headers=user_header)

        requests = self.org_client.find_requests(org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests),3)

        requests = self.org_client.find_user_requests(user_id, org2_id, headers=user_header)
        self.assertEqual(len(requests),3)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_type=RT.ResourceRequest, headers=user_header)
        self.assertEqual(len(requests),1)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),1)

        self.assertEqual(requests[0]._id, req_id)

        #Manager approves Instrument request
        self.org_client.approve_request(org2_id,req_id, headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id, org2_id, request_status="Open", headers=user_header)
        self.assertEqual(len(requests),0)

        #User accepts request
        self.org_client.accept_request(org2_id,req_id, headers=user_header)

        #Check commitments on both the resource and the user side
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),1)

        commitments, _ = self.rr_client.find_objects(user_id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),1)

        #Release the resource
        self.org_client.release_resource(org2_id,user_id ,ia_list[0]._id, headers=self.sa_user_header,timeout=15)  #TODO - Refactor release_resource

        #Check commitments are gone after release
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),0)

        commitments, _ = self.rr_client.find_objects(user_id,PRED.hasCommitment, RT.ResourceCommitment)
        self.assertEqual(len(commitments),0)
ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent2', description='The second Instrument Agent') ims_client.create_instrument_agent(ia_obj, headers={ 'ion-actor-id': user._id, 'ion-actor-roles': header_roles }) log.info('Instrument Agents') rr_client = ResourceRegistryServiceProcessClient(node=container.node, process=process) ia_list, _ = rr_client.find_resources(restype=RT.InstrumentAgent) for ia_obj in ia_list: log.info(str(ia_obj)) def test_requests(container, process=FakeProcess()): org_client = OrgManagementServiceProcessClient(node=container.node, process=process) ion_org = org_client.find_org() id_client = IdentityManagementServiceProcessClient(node=container.node, process=process) rr_client = ResourceRegistryServiceProcessClient(node=container.node, process=process)
org_client.grant_role(ion_org._id, user._id, 'INSTRUMENT_OPERATOR', headers={'ion-actor-id': system_actor._id, 'ion-actor-roles': header_roles }) header_roles = get_role_message_headers(org_client.find_all_roles_by_user(user._id)) ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent1', description='The first Instrument Agent') ims_client.create_instrument_agent(ia_obj, headers={'ion-actor-id': user._id, 'ion-actor-roles': header_roles }) ia_obj = IonObject(RT.InstrumentAgent, name='Instrument Agent2', description='The second Instrument Agent') ims_client.create_instrument_agent(ia_obj, headers={'ion-actor-id': user._id, 'ion-actor-roles': header_roles }) log.info( 'Instrument Agents') rr_client = ResourceRegistryServiceProcessClient(node=container.node, process=process) ia_list,_ = rr_client.find_resources(restype=RT.InstrumentAgent) for ia_obj in ia_list: log.info( str(ia_obj)) def test_requests(container, process=FakeProcess()): org_client = OrgManagementServiceProcessClient(node=container.node, process=process) ion_org = org_client.find_org() id_client = IdentityManagementServiceProcessClient(node=container.node, process=process) rr_client = ResourceRegistryServiceProcessClient(node=container.node, process=process) system_actor = id_client.find_actor_identity_by_name(name=CFG.system.system_actor) log.info('system actor:' + system_actor._id)