Example #1
    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [
            self.create_xs, self.create_xp, self.create_xn_service,
            self.create_xn_process, self.create_xn_queue
        ]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs = ExchangeSpace(self, ION_ROOT_XS)
        self._xs_cache = {}  # caching of xs names to RR objects
        self._default_xs_obj = None  # default XS registry object
        self.org_id = None

        # mappings
        self.xs_by_name = {
            ION_ROOT_XS: self.default_xs
        }  # friendly named XS to XSO
        self.xn_by_name = {}  # friendly named XN to XNO
        # xn by xs is a property

        self._chan = None

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client = ExchangeManagementServiceProcessClient(
            process=self.container)
        self._rr_client = ResourceRegistryServiceProcessClient(
            process=self.container)
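Note: the loop above publishes each callable onto the Container under its own name. A minimal, self-contained sketch of that pattern (Host and Manager here are illustrative stand-ins, not pyon classes):

# Illustrative only: mirrors the setattr loop in the example above.
class Host(object):
    pass

class Manager(object):
    def __init__(self, host):
        self.host = host
        self.api = [self.create_xs, self.create_xn_queue]
        # Publish each callable onto the host under its own __name__
        for call in self.api:
            setattr(self.host, call.__name__, call)

    def create_xs(self, name):
        return 'xs:%s' % name

    def create_xn_queue(self, name):
        return 'xn:%s' % name

host = Host()
Manager(host)
print host.create_xs('ioncore')  # -> 'xs:ioncore'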
Example #2
    def delete_indexes(self):

        ims_cli = IndexManagementServiceProcessClient(process=self)
        rr_cli  = ResourceRegistryServiceProcessClient(process=self)

        #--------------------------------------------------------------------------------
        # Standard Indexes
        #--------------------------------------------------------------------------------

        for index, resources in STD_INDEXES.iteritems():
            index_ids, _ = rr_cli.find_resources(name=index, restype=RT.ElasticSearchIndex, id_only=True)
            for index_id in index_ids:
                ims_cli.delete_index(index_id)

        #--------------------------------------------------------------------------------
        # CouchDB Indexes
        #--------------------------------------------------------------------------------

        for index, datastore in COUCHDB_INDEXES.iteritems():
            index_ids, _ = rr_cli.find_resources(name=index, restype=RT.CouchDBIndex, id_only=True)
            for index_id in index_ids:
                ims_cli.delete_index(index_id)

        #--------------------------------------------------------------------------------
        # Edge Indexes
        #--------------------------------------------------------------------------------

        index_ids, _ = rr_cli.find_resources(name='%s_resources_index' % self.sysname, restype=RT.ElasticSearchIndex, id_only=True)
        for index_id in index_ids:
            ims_cli.delete_index(index_id)

        index_ids, _ = rr_cli.find_resources(name='%s_events_index' % self.sysname, restype=RT.ElasticSearchIndex, id_only=True)
        for index_id in index_ids:
            ims_cli.delete_index(index_id)
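Note: the three blocks above repeat the same find-then-delete loop. A possible helper (hypothetical, built only from the find_resources/delete_index calls already shown):

def _delete_named_indexes(rr_cli, ims_cli, name, restype):
    # Look up index resources by name/type, then delete each one by id.
    index_ids, _ = rr_cli.find_resources(name=name, restype=restype, id_only=True)
    for index_id in index_ids:
        ims_cli.delete_index(index_id)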
Example #3
    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        preload_ion_params(self.container)
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process = VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient = PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient = DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()
Example #4
    def on_start(self):
        super(VizTransformProcForMatplotlibGraphs, self).on_start()
        #assert len(self.streams)==1
        self.initDataFlag = True
        self.graph_data = {}  # Stores a dictionary of variables : [List of values]

        # Need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(
            process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(
            node=self.container.node)

        # extract the various parameters passed to the transform process
        self.out_stream_id = self.CFG.get('process').get(
            'publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        #stream_route = self.pubsub_cli.register_producer(stream_id=self.out_stream_id)
        out_stream_pub_registrar = StreamPublisherRegistrar(
            process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(
            stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        self.stream_def = self.rr_cli.read(self.stream_def_id)

        # Start the thread responsible for keeping track of time and generating graphs
        # Mutex for ensuring proper concurrent communications between threads
        self.lock = RLock()
        self.rendering_proc = Greenlet(self.rendering_thread)
        self.rendering_proc.start()
Example #5
    def on_start(self):

        self.query = self.CFG.get_safe('process.query', {})

        self.delivery_format = self.CFG.get_safe('process.delivery_format', {})
        self.datastore_name = self.CFG.get_safe('process.datastore_name', 'dm_datastore')

        definition_id = self.delivery_format.get('definition_id')
        rrsc = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
        definition = rrsc.read(definition_id)
        self.definition = definition.container

        self.fields = self.delivery_format.get('fields', None)

        self.view_name = self.CFG.get_safe('process.view_name', 'datasets/dataset_by_id')
        self.key_id = self.CFG.get_safe('process.key_id')
        self.stream_id = self.CFG.get_safe('process.publish_streams.output')

        if not self.stream_id:
            raise Inconsistent('The replay process requires a stream id. Invalid configuration!')

        self.data_stream_id = self.definition.data_stream_id
        self.encoding_id = self.definition.identifiables[self.data_stream_id].encoding_id
        self.element_type_id = self.definition.identifiables[self.data_stream_id].element_type_id
        self.element_count_id = self.definition.identifiables[self.data_stream_id].element_count_id
        self.data_record_id = self.definition.identifiables[self.element_type_id].data_record_id
        self.field_ids = self.definition.identifiables[self.data_record_id].field_ids
        self.domain_ids = self.definition.identifiables[self.data_record_id].domain_ids
        self.time_id = self.definition.identifiables[self.domain_ids[0]].temporal_coordinate_vector_id
Example #6
    def start(self):

        log.debug("GovernanceController starting ...")

        config = CFG.interceptor.interceptors.governance.config

        if config is None:
            config = {'enabled': False}

        if "enabled" in config:
            self.enabled = config["enabled"]

        log.debug("GovernanceInterceptor enabled: %s" % str(self.enabled))

        self.event_subscriber = None

        if self.enabled:
            self.initialize_from_config(config)

            self.event_subscriber = EventSubscriber(
                event_type="ResourceModifiedEvent",
                origin_type="Policy",
                callback=self.policy_event_callback)
            self.event_subscriber.activate()

            self.rr_client = ResourceRegistryServiceProcessClient(
                node=self.container.node, process=self.container)
            self.policy_client = PolicyManagementServiceProcessClient(
                node=self.container.node, process=self.container)
Example #7
def create_attachment():

    try:
        resource_id = str(request.form.get('resource_id', ''))
        fil = request.files['file']
        content = fil.read()

        # build attachment
        attachment = Attachment(
            name=str(request.form['attachment_name']),
            description=str(request.form['attachment_description']),
            attachment_type=int(request.form['attachment_type']),
            content_type=str(request.form['attachment_content_type']),
            content=content)

        rr_client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node, process=service_gateway_instance)
        ret = rr_client.create_attachment(resource_id=resource_id,
                                          attachment=attachment)

        ret_obj = {'attachment_id': ret}

        return json_response(ret_obj)

    except Exception as e:
        log.exception("Error creating attachment")
        return build_error_response(e)
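Note: a hypothetical client-side call for the handler above, assuming a Flask-style route mounted at a URL such as /ion-service/attachment (the field names mirror the request.form/request.files keys the handler reads):

import requests  # third-party HTTP client, used here only for illustration

resp = requests.post('http://localhost:5000/ion-service/attachment',  # assumed URL
                     data={'resource_id': 'abc123',
                           'attachment_name': 'notes.txt',
                           'attachment_description': 'site notes',
                           'attachment_type': '1',
                           'attachment_content_type': 'text/plain'},
                     files={'file': open('notes.txt', 'rb')})
print resp.json()  # expects an attachment id in the JSON body on success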
Example #8
    def on_start(self):

        rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
        pubsub_cli = PubsubManagementServiceProcessClient(process=self, node=self.container.node)

        # Get the stream(s)
        data_product_id = self.CFG.get_safe('dispatcher.data_product_id', '')

        stream_ids, _ = rr_cli.find_objects(subject=data_product_id, predicate=PRED.hasStream, id_only=True)

        log.info('Got Stream Ids: "%s"', stream_ids)
        assert stream_ids, 'No streams found for this data product!'


        exchange_name = 'dispatcher_%s' % str(os.getpid())
        subscription_id = pubsub_cli.create_subscription(
                name='SampleSubscription', 
                exchange_name=exchange_name,
                stream_ids=stream_ids,
                description='Sample Subscription Description'
                )


        stream_defs = {}

        def message_received(message, stream_route, stream_id):
            granule = message

            stream_id = granule.stream_resource_id

            data_stream_id = granule.data_stream_id
            data_stream = granule.identifiables[data_stream_id]

            tstamp = get_datetime(data_stream.timestamp.value)

            records = granule.identifiables['record_count'].value
            

            log.info('Received a message from stream %s with time stamp %s and %d records' % (stream_id, tstamp, records))


            if stream_id not in stream_defs:
                stream_defs[stream_id] = pubsub_cli.find_stream_definition(stream_id, id_only=False).container
            stream_def = stream_defs.get(stream_id)

            sp = PointSupplementStreamParser(stream_definition=stream_def, stream_granule=granule)

            last_data = {}
            for field in sp.list_field_names():
                last_data[field] = sp.get_values(field)[-1]

            log.info('Last values in the message: %s' % str(last_data))


        subscriber = StreamSubscriber(process=self, exchange_name=exchange_name, callback=message_received)
        subscriber.start()

        pubsub_cli.activate_subscription(subscription_id)
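Note: condensed, the subscription lifecycle in this example is create_subscription -> start a StreamSubscriber -> activate_subscription. A sketch using only the calls shown above (the callback body is a placeholder):

def start_listening(process, pubsub_cli, stream_ids, exchange_name):
    subscription_id = pubsub_cli.create_subscription(
        name='SampleSubscription',
        exchange_name=exchange_name,
        stream_ids=stream_ids,
        description='Sample Subscription Description')

    def message_received(message, stream_route, stream_id):
        log.info('Got a granule on stream %s', stream_id)  # placeholder handler

    subscriber = StreamSubscriber(process=process, exchange_name=exchange_name,
                                  callback=message_received)
    subscriber.start()

    pubsub_cli.activate_subscription(subscription_id)
    return subscription_id, subscriber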
Example #9
def delete_attachment(attachment_id):
    try:
        rr_client = ResourceRegistryServiceProcessClient(process=service_gateway_instance)
        ret = rr_client.delete_attachment(attachment_id)
        return gateway_json_response(ret)

    except Exception as e:
        log.exception("Error deleting attachment")
        return build_error_response(e)
Example #10
def delete_attachment(attachment_id):
    try:
        rr_client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node, process=service_gateway_instance)
        ret = rr_client.delete_attachment(attachment_id)
        return gateway_json_response(ret)

    except Exception as e:
        log.exception("Error deleting attachment")
        return build_error_response(e)
Example #11
    def on_initial_bootstrap(self, process, config, **kwargs):
        self.pds_client = ProcessDispatcherServiceProcessClient(
            process=process)
        self.resource_registry = ResourceRegistryServiceProcessClient(
            process=process)
        self.ingestion_worker(process, config)
        self.replay_defs(process, config)
        self.notification_worker(process, config)
        self.registration_worker(process, config)
        self.pydap_server(process, config)
Example #12
def get_attachment(attachment_id):

    try:
        # Create client to interface with the viz service
        rr_client = ResourceRegistryServiceProcessClient(process=service_gateway_instance)
        attachment = rr_client.read_attachment(attachment_id, include_content=True)

        return service_gateway_app.response_class(attachment.content, mimetype=attachment.content_type)

    except Exception as e:
        return build_error_response(e)
Example #13
def get_attachment(attachment_id):

    try:
        # Create client to interface with the viz service
        rr_client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
        attachment = rr_client.read_attachment(attachment_id)

        return app.response_class(attachment.content, mimetype=attachment.content_type)

    except Exception as e:
        return build_error_response(e)
Example #14
def list_resources_by_type(resource_type):

    client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
    try:
        #Resource Types are not in unicode
        res_list, _ = client.find_resources(restype=convert_unicode(resource_type))

        return json_response({GATEWAY_RESPONSE: res_list})

    except Exception as e:
        return build_error_response(e)
Example #15
    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
Example #16
def get_resource(resource_id):

    ret = None
    client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
    if resource_id != '':
        try:
            result = client.read(resource_id)
            if not result:
                raise NotFound("No resource found for id: %s" % resource_id)

            ret = simplejson.dumps(result, default=ion_object_encoder)

        except Exception as e:
            ret = "Error: %s" % e

    return ret
Example #17
def get_attachment(attachment_id):

    try:
        # Create client to interface with the viz service
        rr_client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node, process=service_gateway_instance)
        attachment = rr_client.read_attachment(attachment_id,
                                               include_content=True)

        return service_gateway_app.response_class(
            attachment.content, mimetype=attachment.content_type)

    except Exception as e:
        return build_error_response(e)
Example #18
def list_resources_by_type(resource_type):

    ret = None

    client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
    try:
        res_list, _ = client.find_resources(restype=resource_type)
        result = []
        for res in res_list:
            result.append(res)

        ret = simplejson.dumps(result, default=ion_object_encoder)
    except Exception as e:
        ret = "Error: %s" % e

    return ret
Example #19
def find_resources_by_type(resource_type):
    try:
        client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)

        #Validate requesting user and expiry and add governance headers
        ion_actor_id, expiry = get_governance_info_from_request()
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)

        #Resource Types are not in unicode
        res_list, _ = client.find_resources(restype=convert_unicode(resource_type))

        return gateway_json_response(res_list)

    except Exception as e:
        return build_error_response(e)
Example #20
def get_resource(resource_id):

    result = None
    client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
    if resource_id != '':
        try:
            #Database object IDs are not unicode
            result = client.read(convert_unicode(resource_id))
            if not result:
                raise NotFound("No resource found for id: %s " % resource_id)

            return json_response({GATEWAY_RESPONSE: result})

        except Exception as e:
            return build_error_response(e)
Example #21
    def on_start(self):
        super(VizTransformProcForGoogleDT, self).on_start()
        self.initDataTableFlag = True

        # need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(node=self.container.node)

        # extract the various parameters passed
        self.out_stream_id = self.CFG.get('process').get('publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        out_stream_pub_registrar = StreamPublisherRegistrar(process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        stream_def_resource = self.rr_cli.read(self.stream_def_id)
        self.stream_def = stream_def_resource.container
        self.realtime_flag = False
        if self.CFG.get("realtime_flag") == "True":
            self.realtime_flag = True
        else:
            self.data_product_id_token = self.CFG.get('data_product_id_token')


        # extract the stream_id associated with the DP. Needed later
        stream_ids, _ = self.rr_cli.find_objects(self.data_product_id, PRED.hasStream, None, True)
        self.stream_id = stream_ids[0]

        self.dataDescription = []
        self.dataTableContent = []
        self.varTuple = []
        self.total_num_of_records_recvd = 0
Example #22
    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [self.create_xs,
                              self.create_xp,
                              self.create_xn_service,
                              self.create_xn_process,
                              self.create_xn_queue]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs         = ExchangeSpace(self, ION_ROOT_XS)
        self._xs_cache          = {}        # caching of xs names to RR objects
        self._default_xs_obj    = None      # default XS registry object
        self.org_id             = None

        # mappings
        self.xs_by_name = { ION_ROOT_XS: self.default_xs }      # friendly named XS to XSO
        self.xn_by_name = {}                                    # friendly named XN to XNO
        # xn by xs is a property

        self._chan = None

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client    = ExchangeManagementServiceProcessClient(process=self.container)
        self._rr_client     = ResourceRegistryServiceProcessClient(process=self.container)
Example #23
    def on_start(self):
        #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

        self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process=self)

        self.stream_info  = self.CFG.get_safe('process.publish_streams',{})
        self.stream_names = self.stream_info.keys()
        self.stream_ids   = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods = self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(start_time="now" , interval=self._str_to_secs(self.event_timer_interval),
                event_origin=event_origin, event_subtype="")

        super(VizTransformMatplotlibGraphs, self).on_start()
Example #24
    def setUp(self):

        # Start container
        self._start_container()

        #Load a deploy file that also loads basic policy.
        self.container.start_rel_from_url('res/deploy/r2gov.yml')

        process = FakeProcess()

        self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

        self.id_client = IdentityManagementServiceProcessClient(node=self.container.node, process=process)

        self.org_client = OrgManagementServiceProcessClient(node=self.container.node, process=process)

        self.ims_client = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)

        self.ion_org = self.org_client.find_org()


        self.system_actor = self.id_client.find_actor_identity_by_name(name=CFG.system.system_actor)
        log.debug('system actor:' + self.system_actor._id)


        sa_header_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(self.system_actor._id))
        self.sa_user_header = {'ion-actor-id': self.system_actor._id, 'ion-actor-roles': sa_header_roles }
Example #25
    def on_start(self):
        super(VizTransformProcForMatplotlibGraphs, self).on_start()
        #assert len(self.streams)==1
        self.initDataFlag = True
        self.graph_data = {} # Stores a dictionary of variables : [List of values]

        # Need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(node=self.container.node)

        # extract the various parameters passed to the transform process
        self.out_stream_id = self.CFG.get('process').get('publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        #stream_route = self.pubsub_cli.register_producer(stream_id=self.out_stream_id)
        out_stream_pub_registrar = StreamPublisherRegistrar(process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        self.stream_def = self.rr_cli.read(self.stream_def_id)

        # Start the thread responsible for keeping track of time and generating graphs
        # Mutex for ensuring proper concurrent communications between threads
        self.lock = RLock()
        self.rendering_proc = Greenlet(self.rendering_thread)
        self.rendering_proc.start()
Example #26
    def on_initial_bootstrap(self, process, config, **kwargs):
        self.pds_client = ProcessDispatcherServiceProcessClient(process=process)
        self.resource_registry = ResourceRegistryServiceProcessClient(process=process)
        self.ingestion_worker(process, config)
        self.replay_defs(process, config)
        self.notification_worker(process, config)
        self.registration_worker(process, config)
Example #27
def find_resources_by_type(resource_type):
    try:
        client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node, process=service_gateway_instance)

        #Validate requesting user and expiry and add governance headers
        ion_actor_id, expiry = get_governance_info_from_request()
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)

        #Resource Types are not in unicode
        res_list, _ = client.find_resources(restype=str(resource_type))

        return gateway_json_response(res_list)

    except Exception as e:
        return build_error_response(e)
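Note: Example 27 above, like Examples 19, 30 and 32, repeats the same validate-then-respond scaffolding. A hypothetical decorator that factors it out, using only the gateway helpers already shown (get_governance_info_from_request, validate_request, gateway_json_response, build_error_response):

def gateway_op(func):
    # Wrap a gateway operation with governance validation and error handling.
    def wrapper(*args, **kwargs):
        try:
            ion_actor_id, expiry = get_governance_info_from_request()
            validate_request(ion_actor_id, expiry)
            return gateway_json_response(func(*args, **kwargs))
        except Exception as e:
            return build_error_response(e)
    return wrapper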
Example #28
def resolve_org_negotiation():
    try:
        payload              = request.form['payload']
        json_params          = json_loads(str(payload))

        ion_actor_id, expiry = get_governance_info_from_request('serviceRequest', json_params)
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)
        headers              = build_message_headers(ion_actor_id, expiry)

        # extract negotiation-specific data (convert from unicode just in case - these are machine generated and unicode specific
        # chars are unexpected)
        verb                 = str(json_params['verb'])
        originator           = str(json_params['originator'])
        negotiation_id       = str(json_params['negotiation_id'])
        reason               = str(json_params.get('reason', ''))

        proposal_status = None
        if verb.lower() == "accept":
            proposal_status = ProposalStatusEnum.ACCEPTED
        elif verb.lower() == "reject":
            proposal_status = ProposalStatusEnum.REJECTED

        proposal_originator = None
        if originator.lower() == "consumer":
            proposal_originator = ProposalOriginatorEnum.CONSUMER
        elif originator.lower() == "provider":
            proposal_originator = ProposalOriginatorEnum.PROVIDER

        rr_client = ResourceRegistryServiceProcessClient(process=service_gateway_instance)
        negotiation = rr_client.read(negotiation_id, headers=headers)

        new_negotiation_sap = Negotiation.create_counter_proposal(negotiation, proposal_status, proposal_originator)

        org_client = OrgManagementServiceProcessClient(process=service_gateway_instance)
        resp = org_client.negotiate(new_negotiation_sap, headers=headers)

        # update reason if it exists
        if reason:
            # reload negotiation because it has changed
            negotiation = rr_client.read(negotiation_id, headers=headers)
            negotiation.reason = reason
            rr_client.update(negotiation)

        return gateway_json_response(resp)

    except Exception as e:
        return build_error_response(e)
Example #29
def create_attachment():

    try:
        payload = request.form['payload']
        json_params = simplejson.loads(str(payload))

        ion_actor_id, expiry = get_governance_info_from_request(
            'serviceRequest', json_params)
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)
        headers = build_message_headers(ion_actor_id, expiry)

        data_params = json_params['serviceRequest']['params']
        resource_id = str(data_params.get('resource_id', ''))
        fil = request.files['file']
        content = fil.read()

        keywords = []
        keywords_str = data_params.get('keywords', '')
        if keywords_str.strip():
            keywords = [str(x.strip()) for x in keywords_str.split(',')]

        created_by = data_params.get('attachment_created_by', 'unknown user')
        modified_by = data_params.get('attachment_modified_by', 'unknown user')

        # build attachment
        attachment = Attachment(
            name=str(data_params['attachment_name']),
            description=str(data_params['attachment_description']),
            attachment_type=int(data_params['attachment_type']),
            content_type=str(data_params['attachment_content_type']),
            keywords=keywords,
            created_by=created_by,
            modified_by=modified_by,
            content=content)

        rr_client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node, process=service_gateway_instance)
        ret = rr_client.create_attachment(resource_id=resource_id,
                                          attachment=attachment,
                                          headers=headers)

        return gateway_json_response(ret)

    except Exception as e:
        log.exception("Error creating attachment")
        return build_error_response(e)
Example #30
def get_resource(resource_id):
    try:
        client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)

        # Validate requesting user and expiry and add governance headers
        ion_actor_id, expiry = get_governance_info_from_request()
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)

        # Database object IDs are not unicode
        result = client.read(convert_unicode(resource_id))
        if not result:
            raise NotFound("No resource found for id: %s " % resource_id)

        return gateway_json_response(result)

    except Exception as e:
        return build_error_response(e)
Example #31
    def setUp(self):

        # Start container
        self._start_container()

        #Load a deploy file
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process = GovernanceTestProcess()

        #Load system policies after container has started all of the services
        LoadSystemPolicy.op_load_system_policies(process)

        self.rr_client = ResourceRegistryServiceProcessClient(
            node=self.container.node, process=process)

        self.id_client = IdentityManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.pol_client = PolicyManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.org_client = OrgManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.ims_client = InstrumentManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.ems_client = ExchangeManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.ion_org = self.org_client.find_org()

        self.system_actor = self.id_client.find_actor_identity_by_name(
            name=CFG.system.system_actor)
        log.debug('system actor:' + self.system_actor._id)

        sa_header_roles = get_role_message_headers(
            self.org_client.find_all_roles_by_user(self.system_actor._id))
        self.sa_user_header = {
            'ion-actor-id': self.system_actor._id,
            'ion-actor-roles': sa_header_roles
        }
Example #32
def get_resource(resource_id):

    try:
        client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node, process=service_gateway_instance)

        #Validate requesting user and expiry and add governance headers
        ion_actor_id, expiry = get_governance_info_from_request()
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)

        #Database object IDs are not unicode
        result = client.read(str(resource_id))
        if not result:
            raise NotFound("No resource found for id: %s " % resource_id)

        return gateway_json_response(result)

    except Exception as e:
        return build_error_response(e)
Example #33
def create_attachment():

    try:
        payload = request.form["payload"]
        json_params = simplejson.loads(str(payload))

        ion_actor_id, expiry = get_governance_info_from_request("serviceRequest", json_params)
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)
        headers = build_message_headers(ion_actor_id, expiry)

        data_params = json_params["serviceRequest"]["params"]
        resource_id = str(data_params.get("resource_id", ""))
        fil = request.files["file"]
        content = fil.read()

        keywords = []
        keywords_str = data_params.get("keywords", "")
        if keywords_str.strip():
            keywords = [str(x.strip()) for x in keywords_str.split(",")]

        created_by = data_params.get("attachment_created_by", "unknown user")
        modified_by = data_params.get("attachment_modified_by", "unknown user")

        # build attachment
        attachment = Attachment(
            name=str(data_params["attachment_name"]),
            description=str(data_params["attachment_description"]),
            attachment_type=int(data_params["attachment_type"]),
            content_type=str(data_params["attachment_content_type"]),
            keywords=keywords,
            created_by=created_by,
            modified_by=modified_by,
            content=content,
        )

        rr_client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
        ret = rr_client.create_attachment(resource_id=resource_id, attachment=attachment, headers=headers)

        return gateway_json_response(ret)

    except Exception as e:
        log.exception("Error creating attachment")
        return build_error_response(e)
Example #34
    def on_start(self):

        self.query = self.CFG.get_safe('process.query', {})

        self.delivery_format = self.CFG.get_safe('process.delivery_format', {})
        self.datastore_name = self.CFG.get_safe('process.datastore_name',
                                                'dm_datastore')

        definition_id = self.delivery_format.get('definition_id')
        rrsc = ResourceRegistryServiceProcessClient(process=self,
                                                    node=self.container.node)
        definition = rrsc.read(definition_id)
        self.definition = definition.container

        self.fields = self.delivery_format.get('fields', None)

        self.view_name = self.CFG.get_safe('process.view_name',
                                           'datasets/dataset_by_id')
        self.key_id = self.CFG.get_safe('process.key_id')
        self.stream_id = self.CFG.get_safe('process.publish_streams.output')

        if not self.stream_id:
            raise Inconsistent(
                'The replay process requires a stream id. Invalid configuration!'
            )

        self.data_stream_id = self.definition.data_stream_id
        self.encoding_id = self.definition.identifiables[
            self.data_stream_id].encoding_id
        self.element_type_id = self.definition.identifiables[
            self.data_stream_id].element_type_id
        self.element_count_id = self.definition.identifiables[
            self.data_stream_id].element_count_id
        self.data_record_id = self.definition.identifiables[
            self.element_type_id].data_record_id
        self.field_ids = self.definition.identifiables[
            self.data_record_id].field_ids
        self.domain_ids = self.definition.identifiables[
            self.data_record_id].domain_ids
        self.time_id = self.definition.identifiables[
            self.domain_ids[0]].temporal_coordinate_vector_id
Example #35
def create_attachment():

    try:
        payload              = request.form['payload']
        json_params          = json_loads(str(payload))

        ion_actor_id, expiry = get_governance_info_from_request('serviceRequest', json_params)
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)
        headers              = build_message_headers(ion_actor_id, expiry)

        data_params          = json_params['serviceRequest']['params']
        resource_id          = str(data_params.get('resource_id', ''))
        fil                  = request.files['file']
        content              = fil.read()

        keywords             = []
        keywords_str         = data_params.get('keywords', '')
        if keywords_str.strip():
            keywords = [str(x.strip()) for x in keywords_str.split(',')]

        created_by           = data_params.get('attachment_created_by', 'unknown user')
        modified_by          = data_params.get('attachment_modified_by', 'unknown user')

        # build attachment
        attachment           = Attachment(name=str(data_params['attachment_name']),
                                          description=str(data_params['attachment_description']),
                                          attachment_type=int(data_params['attachment_type']),
                                          content_type=str(data_params['attachment_content_type']),
                                          keywords=keywords,
                                          created_by=created_by,
                                          modified_by=modified_by,
                                          content=content)

        rr_client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
        ret = rr_client.create_attachment(resource_id=resource_id, attachment=attachment, headers=headers)

        return gateway_json_response(ret)

    except Exception as e:
        log.exception("Error creating attachment")
        return build_error_response(e)
Example #36
    def on_start(self):
        super(VizTransformProcForGoogleDT, self).on_start()
        self.initDataTableFlag = True

        # need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(
            process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(
            node=self.container.node)

        # extract the various parameters passed
        self.out_stream_id = self.CFG.get('process').get(
            'publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        out_stream_pub_registrar = StreamPublisherRegistrar(
            process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(
            stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        stream_def_resource = self.rr_cli.read(self.stream_def_id)
        self.stream_def = stream_def_resource.container
        self.realtime_flag = False
        if self.CFG.get("realtime_flag") == "True":
            self.realtime_flag = True
        else:
            self.data_product_id_token = self.CFG.get('data_product_id_token')

        # extract the stream_id associated with the DP. Needed later
        stream_ids, _ = self.rr_cli.find_objects(self.data_product_id,
                                                 PRED.hasStream, None, True)
        self.stream_id = stream_ids[0]

        self.dataDescription = []
        self.dataTableContent = []
        self.varTuple = []
        self.total_num_of_records_recvd = 0
Example #37
    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
Example #38
    def on_start(self):
        #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

        self.pubsub_management = PubsubManagementServiceProcessClient(
            process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(
            process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process=self)

        self.stream_info = self.CFG.get_safe('process.publish_streams', {})
        self.stream_names = self.stream_info.keys()
        self.stream_ids = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods = self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent",
                                  callback=self.interval_timer_callback,
                                  origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(
                start_time="now",
                interval=self._str_to_secs(self.event_timer_interval),
                event_origin=event_origin,
                event_subtype="")

        super(VizTransformMatplotlibGraphs, self).on_start()
Example #39
def create_attachment():

    try:
        resource_id        = convert_unicode(request.form.get('resource_id', ''))
        fil                = request.files['file']
        content            = fil.read()

        # build attachment
        attachment         = Attachment(name=convert_unicode(request.form['attachment_name']),
                                        description=convert_unicode(request.form['attachment_description']),
                                        attachment_type=int(request.form['attachment_type']),
                                        content_type=convert_unicode(request.form['attachment_content_type']),
                                        content=content)

        rr_client = ResourceRegistryServiceProcessClient(node=Container.instance.node, process=service_gateway_instance)
        ret = rr_client.create_attachment(resource_id=resource_id, attachment=attachment)

        ret_obj = {'attachment_id': ret}

        return json_response(ret_obj)

    except Exception as e:
        log.exception("Error creating attachment")
        return build_error_response(e)
Example #40
def resolve_org_negotiation():
    try:
        payload = request.form['payload']
        json_params = simplejson.loads(str(payload))

        ion_actor_id, expiry = get_governance_info_from_request(
            'serviceRequest', json_params)
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)
        headers = build_message_headers(ion_actor_id, expiry)

        # extract negotiation-specific data (convert from unicode just in case - these are machine generated and unicode specific
        # chars are unexpected)
        verb = str(json_params['verb'])
        originator = str(json_params['originator'])
        negotiation_id = str(json_params['negotiation_id'])
        reason = str(json_params.get('reason', ''))

        proposal_status = None
        if verb.lower() == "accept":
            proposal_status = ProposalStatusEnum.ACCEPTED
        elif verb.lower() == "reject":
            proposal_status = ProposalStatusEnum.REJECTED

        proposal_originator = None
        if originator.lower() == "consumer":
            proposal_originator = ProposalOriginatorEnum.CONSUMER
        elif originator.lower() == "provider":
            proposal_originator = ProposalOriginatorEnum.PROVIDER

        rr_client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node, process=service_gateway_instance)
        negotiation = rr_client.read(negotiation_id, headers=headers)

        new_negotiation_sap = Negotiation.create_counter_proposal(
            negotiation, proposal_status, proposal_originator)

        org_client = OrgManagementServiceProcessClient(
            node=Container.instance.node, process=service_gateway_instance)
        resp = org_client.negotiate(new_negotiation_sap, headers=headers)

        # update reason if it exists
        if reason:
            # reload negotiation because it has changed
            negotiation = rr_client.read(negotiation_id, headers=headers)
            negotiation.reason = reason
            rr_client.update(negotiation)

        return gateway_json_response(resp)

    except Exception as e:
        return build_error_response(e)
Example #41
    def start(self):

        log.debug("GovernanceController starting ...")

        self.enabled = CFG.get_safe(
            'interceptor.interceptors.governance.config.enabled', False)

        log.info("GovernanceInterceptor enabled: %s" % str(self.enabled))

        self.resource_policy_event_subscriber = None
        self.service_policy_event_subscriber = None

        #containers default to not Org Boundary and ION Root Org
        self._is_container_org_boundary = CFG.get_safe(
            'container.org_boundary', False)
        self._container_org_name = CFG.get_safe(
            'container.org_name', CFG.get_safe('system.root_org', 'ION'))
        self._container_org_id = None
        self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')

        self._is_root_org_container = (
            self._container_org_name == self._system_root_org_name)

        if self.enabled:

            config = CFG.get_safe('interceptor.interceptors.governance.config')

            self.initialize_from_config(config)

            self.resource_policy_event_subscriber = EventSubscriber(
                event_type="ResourcePolicyEvent",
                callback=self.resource_policy_event_callback)
            self.resource_policy_event_subscriber.start()

            self.service_policy_event_subscriber = EventSubscriber(
                event_type="ServicePolicyEvent",
                callback=self.service_policy_event_callback)
            self.service_policy_event_subscriber.start()

            self.rr_client = ResourceRegistryServiceProcessClient(
                node=self.container.node, process=self.container)
            self.policy_client = PolicyManagementServiceProcessClient(
                node=self.container.node, process=self.container)
Example #42
def test_requests(container, process=FakeProcess()):

    org_client = OrgManagementServiceProcessClient(node=container.node,
                                                   process=process)
    ion_org = org_client.find_org()

    id_client = IdentityManagementServiceProcessClient(node=container.node,
                                                       process=process)

    rr_client = ResourceRegistryServiceProcessClient(node=container.node,
                                                     process=process)

    system_actor = id_client.find_actor_identity_by_name(
        name=CFG.system.system_actor)
    log.info('system actor:' + system_actor._id)

    sa_header_roles = get_role_message_headers(
        org_client.find_all_roles_by_user(system_actor._id))

    try:
        user = id_client.find_actor_identity_by_name(
            '/DC=org/DC=cilogon/C=US/O=ProtectNetwork/CN=Roger Unwin A254')
    except Exception:
        raise Inconsistent(
            "The test user is not found; did you seed the data?")

    log.debug('user_id: ' + user._id)
    user_header_roles = get_role_message_headers(
        org_client.find_all_roles_by_user(user._id))

    try:
        org2 = org_client.find_org('Org2')
        org2_id = org2._id
    except NotFound:

        org2 = IonObject(RT.Org, name='Org2', description='A second Org')
        org2_id = org_client.create_org(org2,
                                        headers={
                                            'ion-actor-id': system_actor._id,
                                            'ion-actor-roles': sa_header_roles
                                        })
Example #43
    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [
            self.create_xs, self.create_xp, self.create_xn_service,
            self.create_xn_process, self.create_xn_queue
        ]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs = None
        self._xs_cache = {}  # caching of xs names to RR objects
        self._default_xs_obj = None  # default XS registry object
        self.org_id = None
        self._default_xs_declared = False

        # mappings
        self.xs_by_name = {}  # friendly named XS to XSO
        self.xn_by_name = {}  # friendly named XN to XNO
        # xn by xs is a property

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client = ExchangeManagementServiceProcessClient(
            process=self.container)
        self._rr_client = ResourceRegistryServiceProcessClient(
            process=self.container)

        # mapping of node/ioloop runner by connection name (in config, named via container.messaging.server keys)
        self._nodes = {}
        self._ioloops = {}

        # public toggle switch for if EMS should be used by default
        self.use_ems = True
Example #44
    def setUp(self):

        # Start container
        self._start_container()

        #Load a deploy file
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process = GovernanceTestProcess()


        #Load system policies after container has started all of the services
        LoadSystemPolicy.op_load_system_policies(process)

        gevent.sleep(1)  # Wait for events to be fired and policy updated

        self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

        self.id_client = IdentityManagementServiceProcessClient(node=self.container.node, process=process)

        self.pol_client = PolicyManagementServiceProcessClient(node=self.container.node, process=process)

        self.org_client = OrgManagementServiceProcessClient(node=self.container.node, process=process)

        self.ims_client = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)

        self.ems_client = ExchangeManagementServiceProcessClient(node=self.container.node, process=process)

        self.ion_org = self.org_client.find_org()


        self.system_actor = self.id_client.find_actor_identity_by_name(name=CFG.system.system_actor)
        log.debug('system actor:' + self.system_actor._id)


        sa_header_roles = get_role_message_headers(self.org_client.find_all_roles_by_user(self.system_actor._id))
        self.sa_user_header = {'ion-actor-id': self.system_actor._id, 'ion-actor-roles': sa_header_roles }
Example #45
    def delete_indexes(self):

        ims_cli = IndexManagementServiceProcessClient(process=self)
        rr_cli = ResourceRegistryServiceProcessClient(process=self)

        #--------------------------------------------------------------------------------
        # Standard Indexes
        #--------------------------------------------------------------------------------

        for index, resources in STD_INDEXES.iteritems():
            index_ids, _ = rr_cli.find_resources(name=index,
                                                 restype=RT.ElasticSearchIndex,
                                                 id_only=True)
            for index_id in index_ids:
                ims_cli.delete_index(index_id)

        #--------------------------------------------------------------------------------
        # CouchDB Indexes
        #--------------------------------------------------------------------------------

        for index, datastore in COUCHDB_INDEXES.iteritems():
            index_ids, _ = rr_cli.find_resources(name=index,
                                                 restype=RT.CouchDBIndex,
                                                 id_only=True)
            for index_id in index_ids:
                ims_cli.delete_index(index_id)

        #--------------------------------------------------------------------------------
        # Edge Indexes
        #--------------------------------------------------------------------------------

        index_ids, _ = rr_cli.find_resources(name='%s_resources_index' %
                                             self.sysname,
                                             restype=RT.ElasticSearchIndex,
                                             id_only=True)
        for index_id in index_ids:
            ims_cli.delete_index(index_id)

        index_ids, _ = rr_cli.find_resources(name='%s_events_index' %
                                             self.sysname,
                                             restype=RT.ElasticSearchIndex,
                                             id_only=True)
        for index_id in index_ids:
            ims_cli.delete_index(index_id)
Example #46
def upload_data(dataproduct_id):
    upload_folder = FileSystem.get_url(FS.TEMP, 'uploads')
    try:

        rr_client = ResourceRegistryServiceProcessClient(
            node=Container.instance.node, process=service_gateway_instance)
        object_store = Container.instance.object_store

        try:
            rr_client.read(str(dataproduct_id))
        except BadRequest:
            raise BadRequest('Unknown DataProduct ID %s' % dataproduct_id)

        # required fields
        upload = request.files['file']  # <input type=file name="file">

        # determine filetype
        filetype = _check_magic(upload)
        upload.seek(0)  # return to beginning for save

        if upload and filetype is not None:

            # upload file - run filename through werkzeug.secure_filename
            filename = secure_filename(upload.filename)
            path = os.path.join(upload_folder, filename)
            upload_time = time.time()
            upload.save(path)

            # register upload
            file_upload_context = {
                # TODO add dataproduct_id
                'name': 'User uploaded file %s' % filename,
                'filename': filename,
                'filetype': filetype,
                'path': path,
                'upload_time': upload_time,
                'status': 'File uploaded to server'
            }
            fuc_id, _ = object_store.create_doc(file_upload_context)

            # client to process dispatch
            pd_client = ProcessDispatcherServiceClient()

            # create process definition
            process_definition = ProcessDefinition(
                name='upload_data_processor',
                executable={
                    'module': 'ion.processes.data.upload.upload_data_processing',
                    'class': 'UploadDataProcessing'
                })
            process_definition_id = pd_client.create_process_definition(
                process_definition)
            # create process
            process_id = pd_client.create_process(process_definition_id)
            # schedule process
            config = DotDict()
            config.process.fuc_id = fuc_id
            config.process.dp_id = dataproduct_id
            pid = pd_client.schedule_process(process_definition_id,
                                             process_id=process_id,
                                             configuration=config)
            log.info('UploadDataProcessing process created %s' % pid)
            # response - only the FileUploadContext ID is returned for UX display
            resp = {'fuc_id': fuc_id}
            return gateway_json_response(resp)

        raise BadRequest('Invalid Upload')

    except Exception as e:
        return build_error_response(e)
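
A minimal sketch of how upload_data might be exposed, assuming the service gateway
registers it as a Flask route (the app object and route path are illustrative, not
the actual gateway wiring):

from flask import Flask

app = Flask(__name__)

@app.route('/ion-service/upload/data/<dataproduct_id>', methods=['POST'])
def upload_data_route(dataproduct_id):
    # upload_data reads the multipart file itself from request.files['file']
    return upload_data(dataproduct_id)
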
Example No. 47
class TestGovernanceInt(IonIntegrationTestCase):
    def setUp(self):

        # Start container
        self._start_container()

        #Load a deploy file
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process = GovernanceTestProcess()

        #Load system policies after container has started all of the services
        LoadSystemPolicy.op_load_system_policies(process)

        self.rr_client = ResourceRegistryServiceProcessClient(
            node=self.container.node, process=process)

        self.id_client = IdentityManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.pol_client = PolicyManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.org_client = OrgManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.ims_client = InstrumentManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.ems_client = ExchangeManagementServiceProcessClient(
            node=self.container.node, process=process)

        self.ion_org = self.org_client.find_org()

        self.system_actor = self.id_client.find_actor_identity_by_name(
            name=CFG.system.system_actor)
        log.debug('system actor:' + self.system_actor._id)

        sa_header_roles = get_role_message_headers(
            self.org_client.find_all_roles_by_user(self.system_actor._id))
        self.sa_user_header = {
            'ion-actor-id': self.system_actor._id,
            'ion-actor-roles': sa_header_roles
        }

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_basic_policy(self):

        #Make sure that the system policies have been loaded
        policy_list, _ = self.rr_client.find_resources(restype=RT.Policy)
        self.assertNotEqual(
            len(policy_list), 0,
            "The system policies have not been loaded into the Resource Registry"
        )

        #Attempt to access an operation in a service which does not have specific policies set
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn(
            'exchange_management(create_exchange_space) has been denied',
            cm.exception.message)

        #Add a new policy to allow the above service call.
        policy_obj = IonObject(
            RT.Policy,
            name='Exchange_Management_Test_Policy',
            definition_type="Service",
            rule=TEST_POLICY_TEXT,
            description='Allow specific operations in the Exchange Management Service for anonymous user')

        test_policy_id = self.pol_client.create_policy(
            policy_obj, headers=self.sa_user_header)
        self.pol_client.add_service_policy('exchange_management',
                                           test_policy_id,
                                           headers=self.sa_user_header)
        log.info('Policy created: ' + policy_obj.name)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The previous attempt at this operation should now be allowed.
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(BadRequest) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn('Arguments not set', cm.exception.message)

        #disable the test policy to try again
        self.pol_client.disable_policy(test_policy_id,
                                       headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The same request that was previously allowed should now be denied
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn(
            'exchange_management(create_exchange_space) has been denied',
            cm.exception.message)

        #now enable the test policy to try again
        self.pol_client.enable_policy(test_policy_id,
                                      headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The previous attempt at this operation should now be allowed.
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(BadRequest) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn('Arguments not set', cm.exception.message)

        self.pol_client.remove_service_policy('exchange_management',
                                              test_policy_id,
                                              headers=self.sa_user_header)
        self.pol_client.delete_policy(test_policy_id,
                                      headers=self.sa_user_header)

        gevent.sleep(2)  # Wait for events to be fired and policy updated

        #The same request that was previously allowed should now be denied
        es_obj = IonObject(RT.ExchangeSpace,
                           description='ION test XS',
                           name='ioncore2')
        with self.assertRaises(Unauthorized) as cm:
            self.ems_client.create_exchange_space(es_obj)
        self.assertIn(
            'exchange_management(create_exchange_space) has been denied',
            cm.exception.message)
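
    # The service-policy lifecycle exercised above, reduced to its call pattern
    # (a sketch reusing the clients from setUp; each step is followed by a short
    # wait for the policy-update events, as in the test):
    #
    #   test_policy_id = self.pol_client.create_policy(policy_obj, headers=self.sa_user_header)
    #   self.pol_client.add_service_policy('exchange_management', test_policy_id, headers=self.sa_user_header)  # allowed
    #   self.pol_client.disable_policy(test_policy_id, headers=self.sa_user_header)   # denied again
    #   self.pol_client.enable_policy(test_policy_id, headers=self.sa_user_header)    # allowed again
    #   self.pol_client.remove_service_policy('exchange_management', test_policy_id, headers=self.sa_user_header)
    #   self.pol_client.delete_policy(test_policy_id, headers=self.sa_user_header)    # back to default deny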

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Not integrated for CEI')
    def test_org_policy(self):

        #Make sure that the system policies have been loaded
        policy_list, _ = self.rr_client.find_resources(restype=RT.Policy)
        self.assertNotEqual(
            len(policy_list), 0,
            "The system policies have not been loaded into the Resource Registry"
        )

        with self.assertRaises(BadRequest) as cm:
            myorg = self.org_client.read_org()
        self.assertTrue(
            cm.exception.message == 'The org_id parameter is missing')

        user_id, valid_until, registered = self.id_client.signon(
            USER1_CERTIFICATE, True)
        log.debug("user id=" + user_id)

        user_roles = get_role_message_headers(
            self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles}

        #Attempt to enroll a user anonymously - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id, user_id)
        self.assertIn('org_management(enroll_member) has been denied',
                      cm.exception.message)

        #Attempt to let a user enroll themselves - should not be allowed
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.enroll_member(self.ion_org._id,
                                          user_id,
                                          headers=user_header)
        self.assertIn('org_management(enroll_member) has been denied',
                      cm.exception.message)

        #Attempt to enroll the user in the ION Root org as a manager - should not be allowed since
        #registration with the system implies membership in the ROOT Org.
        with self.assertRaises(BadRequest) as cm:
            self.org_client.enroll_member(self.ion_org._id,
                                          user_id,
                                          headers=self.sa_user_header)
        self.assertTrue(
            cm.exception.message ==
            'A request to enroll in the root ION Org is not allowed')

        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id)
        self.assertIn('org_management(find_enrolled_users) has been denied',
                      cm.exception.message)

        with self.assertRaises(Unauthorized) as cm:
            users = self.org_client.find_enrolled_users(self.ion_org._id,
                                                        headers=user_header)
        self.assertIn('org_management(find_enrolled_users) has been denied',
                      cm.exception.message)

        users = self.org_client.find_enrolled_users(
            self.ion_org._id, headers=self.sa_user_header)
        self.assertEqual(len(users), 2)

        ## test_org_roles and policies

        roles = self.org_client.find_org_roles(self.ion_org._id)
        self.assertEqual(len(roles), 3)
        self.assertItemsEqual([r.name for r in roles],
                              [MANAGER_ROLE, MEMBER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id,
                                                   self.system_actor._id,
                                                   headers=self.sa_user_header)
        self.assertEqual(len(roles), 3)
        self.assertItemsEqual([r.name for r in roles],
                              [MEMBER_ROLE, MANAGER_ROLE, ION_MANAGER])

        roles = self.org_client.find_roles_by_user(self.ion_org._id,
                                                   user_id,
                                                   headers=self.sa_user_header)
        self.assertEqual(len(roles), 1)
        self.assertItemsEqual([r.name for r in roles], [MEMBER_ROLE])

        with self.assertRaises(NotFound) as nf:
            org2 = self.org_client.find_org(ORG2)
        self.assertIn('The Org with name Org2 does not exist',
                      nf.exception.message)

        org2 = IonObject(RT.Org, name=ORG2, description='A second Org')
        org2_id = self.org_client.create_org(org2, headers=self.sa_user_header)

        org2 = self.org_client.find_org(ORG2)
        self.assertEqual(org2_id, org2._id)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles), 2)
        self.assertItemsEqual([r.name for r in roles],
                              [MANAGER_ROLE, MEMBER_ROLE])

        operator_role = IonObject(RT.UserRole,
                                  name=INSTRUMENT_OPERATOR,
                                  label='Instrument Operator',
                                  description='Instrument Operator')

        #First try to add the user role anonymously
        with self.assertRaises(Unauthorized) as cm:
            self.org_client.add_user_role(org2_id, operator_role)
        self.assertIn('org_management(add_user_role) has been denied',
                      cm.exception.message)

        self.org_client.add_user_role(org2_id,
                                      operator_role,
                                      headers=self.sa_user_header)

        roles = self.org_client.find_org_roles(org2_id)
        self.assertEqual(len(roles), 3)
        self.assertItemsEqual([r.name for r in roles],
                              [MANAGER_ROLE, MEMBER_ROLE, INSTRUMENT_OPERATOR])

        # test requests for enrollments and roles.

        #First try to find user requests anonymously
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id)
        self.assertIn('org_management(find_requests) has been denied',
                      cm.exception.message)

        #Next try to find user requests as a basic member
        with self.assertRaises(Unauthorized) as cm:
            requests = self.org_client.find_requests(org2_id,
                                                     headers=user_header)
        self.assertIn('org_management(find_requests) has been denied',
                      cm.exception.message)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 0)

        # First try to request a role without being a member
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_role(org2_id,
                                                  user_id,
                                                  INSTRUMENT_OPERATOR,
                                                  headers=user_header)
        self.assertIn(
            'A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)',
            cm.exception.message)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 0)

        req_id = self.org_client.request_enroll(org2_id,
                                                user_id,
                                                headers=user_header)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 1)

        requests = self.org_client.find_user_requests(
            user_id, org2_id, headers=self.sa_user_header)
        self.assertEqual(len(requests), 1)

        #User tries requesting enrollment again - this should fail
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_enroll(org2_id,
                                                    user_id,
                                                    headers=user_header)
        self.assertIn(
            'A precondition for this request has not been satisfied: enroll_req_not_exist(org_id,user_id)',
            cm.exception.message)

        #Manager denies the request
        self.org_client.deny_request(org2_id,
                                     req_id,
                                     'To test the deny process',
                                     headers=self.sa_user_header)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 1)

        self.assertEqual(requests[0].status, REQUEST_DENIED)

        #Manager approves request
        self.org_client.approve_request(org2_id,
                                        req_id,
                                        headers=self.sa_user_header)

        users = self.org_client.find_enrolled_users(
            org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users), 0)

        #User Accepts request
        self.org_client.accept_request(org2_id, req_id, headers=user_header)

        users = self.org_client.find_enrolled_users(
            org2_id, headers=self.sa_user_header)
        self.assertEqual(len(users), 1)

        #User tries requesting enrollment again - this should fail
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_enroll(org2_id,
                                                    user_id,
                                                    headers=user_header)
        self.assertIn(
            'A precondition for this request has not been satisfied: is_not_enrolled(org_id,user_id)',
            cm.exception.message)

        req_id = self.org_client.request_role(org2_id,
                                              user_id,
                                              INSTRUMENT_OPERATOR,
                                              headers=user_header)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 2)

        requests = self.org_client.find_requests(org2_id,
                                                 request_status='Open',
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 1)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      headers=user_header)
        self.assertEqual(len(requests), 2)

        requests = self.org_client.find_user_requests(
            user_id, org2_id, request_type=RT.RoleRequest, headers=user_header)
        self.assertEqual(len(requests), 1)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      request_status="Open",
                                                      headers=user_header)
        self.assertEqual(len(requests), 1)

        ia_list, _ = self.rr_client.find_resources(restype=RT.InstrumentAgent)

        self.assertEqual(len(ia_list), 0)

        ia_obj = IonObject(RT.InstrumentAgent,
                           name='Instrument Agent1',
                           description='The first Instrument Agent')

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj)
        self.assertIn(
            'instrument_management(create_instrument_agent) has been denied',
            cm.exception.message)

        with self.assertRaises(Unauthorized) as cm:
            self.ims_client.create_instrument_agent(ia_obj,
                                                    headers=user_header)
        self.assertIn(
            'instrument_management(create_instrument_agent) has been denied',
            cm.exception.message)

        #Manager approves request
        self.org_client.approve_request(org2_id,
                                        req_id,
                                        headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      request_status="Open",
                                                      headers=user_header)
        self.assertEqual(len(requests), 0)

        #User accepts request
        self.org_client.accept_request(org2_id, req_id, headers=user_header)

        #Refresh headers with new role
        user_roles = get_role_message_headers(
            self.org_client.find_all_roles_by_user(user_id))
        user_header = {'ion-actor-id': user_id, 'ion-actor-roles': user_roles}

        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_obj = IonObject(RT.InstrumentAgent,
                           name='Instrument Agent2',
                           description='The second Instrument Agent')
        self.ims_client.create_instrument_agent(ia_obj, headers=user_header)

        ia_list, _ = self.rr_client.find_resources(restype=RT.InstrumentAgent)
        self.assertEqual(len(ia_list), 2)

        #First make an acquire-resource request with a non-enrolled user.
        with self.assertRaises(BadRequest) as cm:
            req_id = self.org_client.request_acquire_resource(
                org2_id,
                self.system_actor._id,
                ia_list[0]._id,
                headers=self.sa_user_header)
        self.assertIn(
            'A precondition for this request has not been satisfied: is_enrolled(org_id,user_id)',
            cm.exception.message)

        req_id = self.org_client.request_acquire_resource(org2_id,
                                                          user_id,
                                                          ia_list[0]._id,
                                                          headers=user_header)

        requests = self.org_client.find_requests(org2_id,
                                                 headers=self.sa_user_header)
        self.assertEqual(len(requests), 3)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      headers=user_header)
        self.assertEqual(len(requests), 3)

        requests = self.org_client.find_user_requests(
            user_id,
            org2_id,
            request_type=RT.ResourceRequest,
            headers=user_header)
        self.assertEqual(len(requests), 1)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      request_status="Open",
                                                      headers=user_header)
        self.assertEqual(len(requests), 1)

        self.assertEqual(requests[0]._id, req_id)

        #Manager approves Instrument request
        self.org_client.approve_request(org2_id,
                                        req_id,
                                        headers=self.sa_user_header)

        requests = self.org_client.find_user_requests(user_id,
                                                      org2_id,
                                                      request_status="Open",
                                                      headers=user_header)
        self.assertEqual(len(requests), 0)

        #User accepts request
        self.org_client.accept_request(org2_id, req_id, headers=user_header)

        #Check commitments
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,
                                                     PRED.hasCommitment,
                                                     RT.ResourceCommitment)
        self.assertEqual(len(commitments), 1)

        commitments, _ = self.rr_client.find_objects(user_id,
                                                     PRED.hasCommitment,
                                                     RT.ResourceCommitment)
        self.assertEqual(len(commitments), 1)

        #Release the resource
        self.org_client.release_resource(
            org2_id,
            user_id,
            ia_list[0]._id,
            headers=self.sa_user_header,
            timeout=15)  #TODO - Refactor release_resource

        #Check commitments
        commitments, _ = self.rr_client.find_objects(ia_list[0]._id,
                                                     PRED.hasCommitment,
                                                     RT.ResourceCommitment)
        self.assertEqual(len(commitments), 0)

        commitments, _ = self.rr_client.find_objects(user_id,
                                                     PRED.hasCommitment,
                                                     RT.ResourceCommitment)
        self.assertEqual(len(commitments), 0)
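
# The org negotiation steps above all follow the same three-party protocol
# (member requests, manager approves or denies, member accepts); a compact
# sketch using the same clients and headers as the test:
#
#   req_id = self.org_client.request_enroll(org2_id, user_id, headers=user_header)   # member requests
#   self.org_client.approve_request(org2_id, req_id, headers=self.sa_user_header)    # manager approves
#   self.org_client.accept_request(org2_id, req_id, headers=user_header)             # member accepts
#   # only after both approve and accept does the enrollment/role/commitment take effect
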
class TestVisualizationServiceIntegration(VisualizationIntegrationTestHelper):

    def setUp(self):
        # Start container

        logging.disable(logging.ERROR)
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        # simulate preloading
        preload_ion_params(self.container)
        logging.disable(logging.NOTSET)

        #Instantiate a process to represent the test
        process=VisualizationServiceTestProcess()

        # Now create client to DataProductManagementService
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient =  PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient =  DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
        self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()

    def validate_messages(self, msg):
        rdt = RecordDictionaryTool.load_from_granule(msg.body)

        vardict = {}
        vardict['temp'] = get_safe(rdt, 'temp')
        vardict['time'] = get_safe(rdt, 'time')
        print vardict['time']
        print vardict['temp']

    @attr('LOCOINT')
    #@patch.dict('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register': False}}})
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = USER_VISUALIZATION_QUEUE

        xq = self.container.ex_manager.create_xn_queue(user_queue_name)

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name = user_queue_name,
            name = "user visualization queue"
        )

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

#        for x in range(msg_count):
#            mo = subscriber.get_one_msg(timeout=1)
#            print mo.body
#            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])
            # print msgs[x].body

        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)


        #Let more messages accumulate in the queue before shutting down the input
        gevent.sleep(5.0)


        #Turning off after everything - since it is more representative of an always-on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data

        #Should see more messages in the queue
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 3: %s ' % msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for x in range(len(msgs)):
            msgs[x].ack()
            self.validate_messages(msgs[x])

        #Should be zero after pulling all of the messages.
        msg_count,_ = xq.get_stats()
        log.info('Messages in user queue 4: %s ' % msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)
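
    # The queue-consumption pattern above, reduced to its essential order (a
    # sketch assuming a running container; names as in the test):
    #
    #   xq = self.container.ex_manager.create_xn_queue(queue_name)           # 1. declare the queue
    #   sub_id = self.pubsubclient.create_subscription(stream_ids=stream_ids,
    #                                                  exchange_name=queue_name,
    #                                                  name='user visualization queue')
    #   subscriber = Subscriber(from_name=xq)
    #   subscriber.initialize()                                              # 2. attach a consumer
    #   self.pubsubclient.activate_subscription(subscription_id=sub_id)      # 3. activate only once the queue exists
    #   for msg in subscriber.get_all_msgs(timeout=2):                       # 4. drain and ack
    #       msg.ack()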


    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_multiple_visualization_queue(self):

        # set up a workflow with the salinity transform and the doubler. We will direct the original stream and the doubled stream to queues
        # and test to make sure the subscription to the queues is working correctly
        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Viz_Test_Workflow',description='A workflow to test collection of multiple data products in queues')

        workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name
        #-------------------------------------------------------------------------------------------------------------------------
        #Add a transformation process definition for salinity
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False)  #Don't persist the intermediate data product
        configuration = {'stream_name' : 'salinity'}
        workflow_step_obj.configuration = configuration
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 1 )

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=30)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        # Now for each of the data_product_stream_ids create a queue and pipe their data to the queue


        user_queue_name1 = USER_VISUALIZATION_QUEUE + '1'
        user_queue_name2 = USER_VISUALIZATION_QUEUE + '2'

        # use idempotency to create queues
        xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
        self.addCleanup(xq1.delete)
        xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
        self.addCleanup(xq2.delete)
        xq1.purge()
        xq2.purge()

        # the create_subscription call takes a list of stream_ids so create temp ones

        dp_stream_id1 = list()
        dp_stream_id1.append(data_product_stream_ids[0])
        dp_stream_id2 = list()
        dp_stream_id2.append(data_product_stream_ids[1])

        salinity_subscription_id1 = self.pubsubclient.create_subscription( stream_ids=dp_stream_id1,
            exchange_name = user_queue_name1, name = "user visualization queue1")

        salinity_subscription_id2 = self.pubsubclient.create_subscription( stream_ids=dp_stream_id2,
            exchange_name = user_queue_name2, name = "user visualization queue2")

        # Create subscribers for the output of the queue
        subscriber1 = Subscriber(from_name=xq1)
        subscriber1.initialize()
        subscriber2 = Subscriber(from_name=xq2)
        subscriber2.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id1)
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id2)

        # Start input stream and wait for some time
        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(5.0)  # Send some messages - don't care how many

        msg_count,_ = xq1.get_stats()
        log.info('Messages in user queue 1: %s ' % msg_count)
        msg_count,_ = xq2.get_stats()
        log.info('Messages in user queue 2: %s ' % msg_count)

        msgs1 = subscriber1.get_all_msgs(timeout=2)
        msgs2 = subscriber2.get_all_msgs(timeout=2)

        for x in range(min(len(msgs1), len(msgs2))):
            msgs1[x].ack()
            msgs2[x].ack()
            self.validate_multiple_vis_queue_messages(msgs1[x].body, msgs2[x].body)

        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        # close the subscription and queues
        subscriber1.close()
        subscriber2.close()

        return

    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    @attr('SMOKE')
    def test_realtime_visualization(self):

#        #Start up multiple vis service workers if not a CEI launch
#        if not os.getenv('CEI_LAUNCH_TEST', False):
#            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid1)
#            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid2)

        # Create the Highcharts workflow definition since there is no preload for the test
        workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        vis_params = {}
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id, visualization_parameters=simplejson.dumps(vis_params))
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]

        result = gevent.event.AsyncResult()

        def get_vis_messages(get_data_count=7):  # Should be an odd number for round-robin processing by service workers

            get_cnt = 0
            while get_cnt < get_data_count:

                vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
                if (vis_data):
                    self.validate_highcharts_transform_results(vis_data)

                get_cnt += 1
                gevent.sleep(5) # simulates the polling from UI

            result.set(get_cnt)

        gevent.spawn(get_vis_messages)

        result.get(timeout=90)

        #Let any remaining messages arrive before cleanup
        gevent.sleep(2.0)


        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)


        #Turning off after everything - since it is more representative of an always-on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data

    @patch.dict(CFG, {'user_queue_monitor_timeout': 5})
    @patch.dict(CFG, {'user_queue_monitor_size': 25})
    @attr('CLEANUP')
    @unittest.skipIf(os.getenv('PYCC_MODE', False),'Not integrated for CEI')
    def test_realtime_visualization_cleanup(self):

#        #Start up multiple vis service workers if not a CEI launch
#        if not os.getenv('CEI_LAUNCH_TEST', False):
#            vpid1 = self.container.spawn_process('visualization_service1','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid1)
#            vpid2 = self.container.spawn_process('visualization_service2','ion.services.ans.visualization_service','VisualizationService', CFG )
#            self.addCleanup(self.container.terminate_process, vpid2)

        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(name=USER_VISUALIZATION_QUEUE, return_columns=['name', 'messages'])
            q_names = [ q['name'] for q in queues if q['name']] #Get a list of only the queue names
            original_queue_count = len(q_names)
        except Exception, e:
            log.warn('Unable to get queue information from broker management plugin: ' + e.message)

        # Create the highcharts workflow definition since there is no preload for the test
        workflow_def_id = self.create_highcharts_workflow_def()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)


        #Start up a number of requests - and queues - to start accumulating messages. The test will not clean them up
        #but instead check to see if the monitoring thread will.
        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token1 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token2 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        bad_vis_token3 = ast.literal_eval(vis_token_resp)["rt_query_token"]

        vis_token_resp = self.vis_client.initiate_realtime_visualization_data(data_product_id=ctd_parsed_data_product_id)
        vis_token = ast.literal_eval(vis_token_resp)["rt_query_token"]

        #Get the default exchange space
        exchange = self.container.ex_manager.default_xs.exchange


        #get the list of queues and message counts on the broker for the user vis queues
        try:
            queues = self.container.ex_manager.list_queues(name=USER_VISUALIZATION_QUEUE, return_columns=['name', 'messages'])
            q_names = [ q['name'] for q in queues if q['name']] #Get a list of only the queue names

            self.assertIn(exchange + "." + bad_vis_token1, q_names)
            self.assertIn(exchange + "." + bad_vis_token2, q_names)
            self.assertIn(exchange + "." + bad_vis_token3, q_names)
            self.assertIn(exchange + "." + vis_token, q_names)

        except Exception, e:
            log.warn('Unable to get queue information from broker management plugin: ' + e.message)
Example No. 49
class TestSchedulerService(IonIntegrationTestCase):

    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

    def tearDown(self):
        pass

    def now_utc(self):
        return time.mktime(datetime.datetime.utcnow().timetuple())
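
    # A timezone-safe variant (a sketch, not used by these tests): time.mktime
    # in now_utc above interprets the timetuple as local time, so it is only
    # correct when the host runs in UTC; calendar.timegm treats it as UTC.
    def now_utc_timegm(self):
        import calendar
        return calendar.timegm(datetime.datetime.utcnow().timetuple())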

    def test_create_interval_timer(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_233"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 10
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until two events are published
        gevent.sleep((self.interval_timer_interval * 2) + 1)

        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        #Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

        # Validate the number of timer counts
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))


    def test_system_restart(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_4444"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.on_restart_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 20
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until 1 event is published
        gevent.sleep((self.interval_timer_interval) + 1)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        # Validate the number of events generated
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))

        self.ssclient.on_system_restart()

        # after system restart, validate the timer is restored
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until another event is published
        start_time = datetime.datetime.utcnow()
        gevent.sleep((self.interval_timer_interval * 2) + 1)
        time_diff = (datetime.datetime.utcnow() - start_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        # Validate the number of events generated
        self.assertGreater(self.interval_timer_count, timer_counts)

        #Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

    def on_restart_callback(self, *args, **kwargs):
        self.interval_timer_count += 1
        log.debug("test_scheduler: on_restart_callback: time: " + str(self.now_utc()) + " count: " + str(self.interval_timer_count))

    def test_create_interval_timer_with_end_time(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # Validate no more events are published after end_time expires
        # Validate the timer was canceled after the end_time expires

        self.interval_timer_count_2 = 0
        self.interval_timer_sent_time_2 = 0
        self.interval_timer_received_time_2 = 0
        self.interval_timer_interval_2 = 3

        event_origin = "Interval_Timer_2"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback_with_end_time, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time_2 = start_time + 7
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval_2,
            end_time=self.interval_timer_end_time_2,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time_2 = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait until all events are published
        gevent.sleep((self.interval_timer_end_time_2 - start_time) + self.interval_timer_interval_2 + 1)

        # Validate the number of events generated
        self.assertEqual(self.interval_timer_count_2, 2, "Invalid number of timeouts generated. Number of event: %d Expected: 2 Timer id: %s " %(self.interval_timer_count_2, id))

        # Validate the timer was canceled after the end_time is expired
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

    def interval_timer_callback_with_end_time(self, *args, **kwargs):
        self.interval_timer_received_time_2 = datetime.datetime.utcnow()
        self.interval_timer_count_2 += 1
        time_diff = math.fabs( ((self.interval_timer_received_time_2 - self.interval_timer_sent_time_2).total_seconds())
                               - (self.interval_timer_interval_2 * self.interval_timer_count_2) )
        # Assert expire time is within +-10 seconds
        self.assertTrue(time_diff <= 10)
        log.debug("test_scheduler: interval_timer_callback_with_end_time: time:" + str(self.interval_timer_received_time_2) + " count:" + str(self.interval_timer_count_2))


    def interval_timer_callback(self, *args, **kwargs):
        self.interval_timer_received_time = datetime.datetime.utcnow()
        self.interval_timer_count += 1
        time_diff = math.fabs( ((self.interval_timer_received_time - self.interval_timer_sent_time).total_seconds())
                               - (self.interval_timer_interval * self.interval_timer_count) )
        # Assert expire time is within +-10 seconds
        self.assertTrue(time_diff <= 10)
        log.debug("test_scheduler: interval_timer_callback: time:" + str(self.interval_timer_received_time) + " count:" + str(self.interval_timer_count))

    def test_cancel_single_timer(self):
        # test creating a new timer that is one-time-only

        # create the timer resource
        # create the event listener
        # call scheduler to set the timer

        # create then cancel the timer, verify that event is not received

        # create the timer resource
        # create the event listener
        # call scheduler to set the timer
        # call scheduler to cancel the timer
        # wait until after expiry to verify that event is not sent
        self.single_timer_count = 0
        event_origin = "Time_of_Day"

        sub = EventSubscriber(event_type="TimerEvent", callback=self.single_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        now = datetime.datetime.utcnow() + timedelta(seconds=3)
        times_of_day =[{'hour': str(now.hour),'minute' : str(now.minute), 'second':str(now.second) }]
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day,  expires=self.now_utc()+3, event_origin=event_origin, event_subtype="test")
        self.assertEqual(type(id), str)
        self.ssclient.cancel_timer(id)
        gevent.sleep(3)

        # Validate the event is not generated
        self.assertEqual(self.single_timer_count, 0, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: 0 Timer id: %s " %(self.single_timer_count, id))

    def single_timer_callback (self, *args, **kwargs):
        self.single_timer_count += 1
        log.debug("test_scheduler: single_timer_call_back: time:" + str(self.now_utc()) + " count:" + str(self.single_timer_count))

    def test_create_forever_interval_timer(self):
        # Test creating interval timer that runs forever

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval Timer Forever"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        id = self.ssclient.create_interval_timer(start_time= self.now_utc(), interval=self.interval_timer_interval,
            end_time=-1,
            event_origin=event_origin, event_subtype=event_origin)
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait for 4 events to be published
        gevent.sleep((self.interval_timer_interval * 4) + 1)
        self.ssclient.cancel_timer(id)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)


        # Validate the timer id is invalid once it has been canceled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate events are not generated after canceling the timer
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))

    def test_timeoffday_timer(self):
        # test creating a new timer that is one-time-only
        # create the timer resource
        # get the current time, set the timer to several seconds from current time
        # create the event listener
        # call scheduler to set the timer
        # verify that  event arrival is within one/two seconds of current time

        event_origin = "Time Of Day2"
        self.expire_sec_1 = 4
        self.expire_sec_2 = 5
        self.tod_count = 0
        expire1 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_1)
        expire2 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_2)
        # Create two timers
        times_of_day =[{'hour': str(expire1.hour),'minute' : str(expire1.minute), 'second':str(expire1.second) },
                       {'hour': str(expire2.hour),'minute' : str(expire2.minute), 'second':str(expire2.second)}]

        sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # Expires in two days
        expires = time.mktime((datetime.datetime.utcnow() + timedelta(days=2)).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait until all events are generated
        gevent.sleep(9)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.expire_sec_1) + math.floor(time_diff/self.expire_sec_2)

        # After waiting, validate only 2 events are generated.
        self.assertEqual(self.tod_count, 2, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.tod_count, timer_counts, id))

        # Cancel the timer
        self.ssclient.cancel_timer(id)

    def tod_callback(self, *args, **kwargs):
        tod_receive_time = datetime.datetime.utcnow()
        self.tod_count += 1
        if self.tod_count == 1:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_1)
            self.assertTrue(time_diff <= 2)
        elif self.tod_count == 2:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_2)
            self.assertTrue(time_diff <= 2)
        log.debug("test_scheduler: tod_callback: time:" + str(tod_receive_time) + " count:" + str(self.tod_count))

    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_quit_stops_timers(self):

        ar = AsyncResult()
        def cb(*args, **kwargs):
            ar.set(args)

            self.interval_timer_count += 1

        event_origin = "test_quitter"
        sub = EventSubscriber(event_type="TimerEvent", callback=cb, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        tid = self.ssclient.create_interval_timer(start_time="now",
                                                  interval=1,
                                                  event_origin=event_origin)

        # wait until at least one scheduled message
        ar.get(timeout=5)

        # shut it down!
        p = self.container.proc_manager.procs_by_name['scheduler']
        self.container.terminate_process(p.id)

        # assert empty
        self.assertEquals(p.schedule_entries, {})
Exemplo n.º 50
0
class TestWorkflowManagementIntegration(VisualizationIntegrationTestHelper):

    def setUp(self):
        # Start container

        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        #Instantiate a process to represent the test
        process=WorkflowServiceTestProcess()

        # Now create clients for the services these tests exercise
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
        self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
        self.pubsubclient = PubsubManagementServiceProcessClient(node=self.container.node, process=process)
        self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
        self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
        self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
        self.datasetclient = DatasetManagementServiceProcessClient(node=self.container.node, process=process)
        self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
        self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)

        self.ctd_stream_def = SBE37_CDM_stream_definition()



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_SA_transform_components(self):

        assertions = self.assertTrue

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)


        ###
        ###  Setup the first transformation
        ###

        # Salinity: Data Process Definition
        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()

        l2_salinity_all_data_process_id, ctd_l2_salinity_output_dp_id = self.create_transform_process(ctd_L2_salinity_dprocdef_id,ctd_parsed_data_product_id, 'salinity' )

        ## get the stream id for the transform outputs
        stream_ids, _ = self.rrclient.find_objects(ctd_l2_salinity_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0 )
        sal_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_stream_id)


        ###
        ###  Setup the second transformation
        ###

        # Salinity Doubler: Data Process Definition
        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()

        salinity_double_data_process_id, salinity_doubler_output_dp_id = self.create_transform_process(salinity_doubler_dprocdef_id, ctd_l2_salinity_output_dp_id, 'salinity' )

        stream_ids, _ = self.rrclient.find_objects(salinity_doubler_output_dp_id, PRED.hasStream, None, True)
        assertions(len(stream_ids) > 0 )
        sal_dbl_stream_id = stream_ids[0]
        data_product_stream_ids.append(sal_dbl_stream_id)


        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)


        #Stop the transform processes
        self.dataprocessclient.deactivate_data_process(salinity_double_data_process_id)
        self.dataprocessclient.deactivate_data_process(l2_salinity_all_data_process_id)

        #Validate the data from each of the messages along the way
        self.validate_messages(results)


    @attr('LOCOINT')
    @attr('SMOKE')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_transform_workflow(self):

        assertions = self.assertTrue

        log.debug("Building the workflow definition")

        workflow_def_obj = IonObject(RT.WorkflowDefinition,
                                     name='Salinity_Test_Workflow',
                                     description='tests a workflow of multiple transform data processes')

        workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug( "Adding a transformation process definition for salinity")
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep',
                                      data_process_definition_id=ctd_L2_salinity_dprocdef_id,
                                      persist_process_output_data=False)  #Don't persist the intermediate data product
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #-------------------------------------------------------------------------------------------------------------------------
        log.debug( "Adding a transformation process definition for salinity doubler")
        #-------------------------------------------------------------------------------------------------------------------------

        salinity_doubler_dprocdef_id = self.create_salinity_doubler_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep',
                                      data_process_definition_id=salinity_doubler_dprocdef_id)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        log.debug( "Creating workflow def in the resource registry")
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 2 )

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        log.debug( "Creating the input data product")
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        log.debug( "Creating and starting the workflow")
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id,
            ctd_parsed_data_product_id,
            persist_workflow_data_product=True,
            output_data_product_name=workflow_data_product_name,
            timeout=300)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        log.debug( "persisting the output product")
        #self.dataproductclient.activate_data_product_persistence(workflow_product_id)
        dataset_ids,_ = self.rrclient.find_objects(workflow_product_id, PRED.hasDataset, RT.Dataset, True)
        assertions(len(dataset_ids) == 1 )
        dataset_id = dataset_ids[0]

        log.debug( "Verifying the output data product name matches what was specified in the workflow definition")
        workflow_product = self.rrclient.read(workflow_product_id)
        assertions(workflow_product.name.startswith(workflow_data_product_name), 'Output product name mismatch: %s does not start with %s' % (workflow_product.name, workflow_data_product_name))

        log.debug( "Walking the associations to find the appropriate output data streams to validate the messages")

        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 2 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        log.debug( "data_product_stream_ids: %s" % data_product_stream_ids)

        log.debug( "Starting the output stream listener to monitor to collect messages")
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        log.debug( "results::: %s" % results)

        log.debug( "Stopping the workflow processes")
        self.workflowclient.terminate_data_process_workflow(workflow_id, False, timeout=250)  # Should test true at some point

        log.debug( "Making sure the Workflow object was removed")
        objs, _ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(objs) == 0)

        log.debug( "Validating the data from each of the messages along the way")
        self.validate_messages(results)

        log.debug( "Checking to see if dataset id = %s, was persisted, and that it can be retrieved...." % dataset_id)
        self.validate_data_ingest_retrieve(dataset_id)

        log.debug( "Cleaning up to make sure delete is correct.")
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )
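
    # --- Editor's sketch (not in the original source): the workflow lifecycle
    # exercised by test_transform_workflow, reduced to a parameterized helper.
    # Step definition ids and the input product id are supplied by the caller;
    # the client calls are exactly the ones used above.
    def _sketch_run_workflow(self, step_dprocdef_ids, input_data_product_id):
        workflow_def = IonObject(RT.WorkflowDefinition,
                                 name='Sketch_Workflow',  # placeholder name
                                 description='chains data processes in order')
        for dprocdef_id in step_dprocdef_ids:
            step = IonObject('DataProcessWorkflowStep',
                             data_process_definition_id=dprocdef_id,
                             persist_process_output_data=False)
            workflow_def.workflow_steps.append(step)

        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def)
        workflow_id, product_id = self.workflowclient.create_data_process_workflow(
            workflow_def_id, input_data_product_id, timeout=300)

        # tear-down mirrors the test: stop the processes, then delete the definition
        self.workflowclient.terminate_data_process_workflow(workflow_id, False, timeout=250)
        self.workflowclient.delete_workflow_definition(workflow_def_id)
        return product_id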



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_google_dt_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='GoogleDT_Test_Workflow',description='Tests the workflow of converting stream data to Google DT')

        #Add a transformation process definition
        google_dt_procdef_id = self.create_google_dt_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=google_dt_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=60)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id,delete_data_products=False, timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_google_dt_transform_results(results)

        """
        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids,_ = self.rrclient.find_objects(workflow_dp_ids[len(workflow_dp_ids) - 1], PRED.hasDataset, RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve(ds_ids[0])

        #Validate the data from each of the messages along the way
        self.validate_google_dt_transform_results(retrieved_granule)
        """

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )



    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_mpl_graphs_transform_workflow(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Mpl_Graphs_Test_Workflow',description='Tests the workflow of converting stream data to Matplotlib graphs')

        #Add a transformation process definition
        mpl_graphs_procdef_id = self.create_mpl_graphs_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=mpl_graphs_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id,
            persist_workflow_data_product=True, timeout=60)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1 )

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1 )

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1 )
            data_product_stream_ids.append(stream_ids[0])

        #Start the output stream listener to monitor and collect messages
        results = self.start_output_stream_and_listen(ctd_stream_id, data_product_stream_ids)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id,delete_data_products=False, timeout=60)  # Should test true at some point

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(results)

        # Check to see if ingestion worked. Extract the granules from data_retrieval.
        # First find the dataset associated with the output dp product
        ds_ids,_ = self.rrclient.find_objects(workflow_dp_ids[len(workflow_dp_ids) - 1], PRED.hasDataset, RT.Dataset, True)
        retrieved_granule = self.data_retriever.retrieve_last_data_points(ds_ids[0], 10)

        #Validate the data from each of the messages along the way
        self.validate_mpl_graphs_transform_results(retrieved_granule)

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )


    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),'Not integrated for CEI')
    def test_multiple_workflow_instances(self):

        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Multiple_Test_Workflow',description='Tests the workflow of converting stream data')

        #Add a transformation process definition
        google_dt_procdef_id = self.create_google_dt_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=google_dt_procdef_id, persist_process_output_data=False)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the first input data product
        ctd_stream_id1, ctd_parsed_data_product_id1 = self.create_ctd_input_stream_and_data_product('ctd_parsed1')
        data_product_stream_ids.append(ctd_stream_id1)

        #Create and start the first workflow
        workflow_id1, workflow_product_id1 = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id1, timeout=60)

        #Create the second input data product
        ctd_stream_id2, ctd_parsed_data_product_id2 = self.create_ctd_input_stream_and_data_product('ctd_parsed2')
        data_product_stream_ids.append(ctd_stream_id2)

        #Create and start the second workflow
        workflow_id2, workflow_product_id2 = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id2, timeout=60)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_ids,_ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 2 )


        #Start the first input stream process
        ctd_sim_pid1 = self.start_sinusoidal_input_stream_process(ctd_stream_id1)

        #Start the second input stream process
        ctd_sim_pid2 = self.start_simple_input_stream_process(ctd_stream_id2)

        #Start the output stream listener to monitor a set number of messages being sent through the workflows
        results = self.start_output_stream_and_listen(None, data_product_stream_ids, message_count_per_stream=5)

        # stop the flow of messages...
        self.process_dispatcher.cancel_process(ctd_sim_pid1) # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid2)

        #Stop the first workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id1,delete_data_products=False, timeout=60)  # Should test true at some point

        #Stop the second workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id=workflow_id2,delete_data_products=False, timeout=60)  # Should test true at some point

        workflow_ids,_ = self.rrclient.find_resources(restype=RT.Workflow)
        assertions(len(workflow_ids) == 0 )

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)

        workflow_def_ids,_ = self.rrclient.find_resources(restype=RT.WorkflowDefinition)
        assertions(len(workflow_def_ids) == 0 )

        aid_list = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aid_list) == 0 )
Exemplo n.º 51
0
class ExchangeManager(object):
    """
    Manager object for the CC to manage Exchange related resources.
    """

    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [self.create_xs,
                              self.create_xp,
                              self.create_xn_service,
                              self.create_xn_process,
                              self.create_xn_queue]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs         = ExchangeSpace(self, ION_ROOT_XS)
        self._xs_cache          = {}        # caching of xs names to RR objects
        self._default_xs_obj    = None      # default XS registry object
        self.org_id             = None

        # mappings
        self.xs_by_name = { ION_ROOT_XS: self.default_xs }      # friendly named XS to XSO
        self.xn_by_name = {}                                    # friendly named XN to XNO
        # xn by xs is a property

        self._chan = None

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client    = ExchangeManagementServiceProcessClient(process=self.container)
        self._rr_client     = ResourceRegistryServiceProcessClient(process=self.container)

        # mapping of node/ioloop runner by connection name (in config, named via container.messaging.server keys)
        self._nodes     = {}
        self._ioloops   = {}

        self._client    = None
        self._transport = None

        self._default_xs_declared = False

    def start(self):
        log.debug("ExchangeManager.start")

        total_count = 0

        def handle_failure(name, node):
            log.warn("Node %s could not be started", name)
            node.ready.set()        # let it fall out below

        # Establish connection(s) to broker
        for name, cfgkey in CFG.container.messaging.server.iteritems():
            if not cfgkey:
                continue

            if cfgkey not in CFG.server:
                raise ExchangeManagerError("Config key %s (name: %s) (from CFG.container.messaging.server) not in CFG.server" % (cfgkey, name))

            total_count += 1
            log.debug("Starting connection: %s", name)

            # start it with a zero timeout so it comes right back to us
            try:
                node, ioloop = messaging.make_node(CFG.server[cfgkey], name, 0)

                # install a finished handler directly on the ioloop just for this startup period
                fail_handle = lambda _: handle_failure(name, node)
                ioloop.link(fail_handle)

                # wait for the node ready event, with a large timeout just in case
                node_ready = node.ready.wait(timeout=15)

                # remove the finished handler, we don't care about it here
                ioloop.unlink(fail_handle)

                # only add to our list if we started successfully
                if not node.running:
                    ioloop.kill()      # make sure ioloop dead
                else:
                    self._nodes[name]   = node
                    self._ioloops[name] = ioloop

            except socket.error as e:
                log.warn("Could not start connection %s due to socket error, continuing", name)

        fail_count = total_count - len(self._nodes)
        if fail_count > 0 or total_count == 0:
            if fail_count == total_count:
                raise ExchangeManagerError("No node connection was able to start (%d nodes attempted, %d nodes failed)" % (total_count, fail_count))

            log.warn("Some nodes could not be started, ignoring for now")   # @TODO change when ready

        self._transport = AMQPTransport.get_instance()
        self._client    = self._get_channel(self._nodes.get('priviledged', self._nodes.values()[0]))        # @TODO

        log.debug("Started %d connections (%s)", len(self._nodes), ",".join(self._nodes.iterkeys()))

    def stop(self, *args, **kwargs):
        # ##############
        # HACK HACK HACK
        #
        # It appears during shutdown that when a channel is closed, it's not FULLY closed by the pika connection
        # until the next round of _handle_events. We have to yield here to let that happen, in order to have close
        # work fine without blowing up.
        # ##############
        time.sleep(0.1)
        # ##############
        # /HACK
        # ##############

        log.debug("ExchangeManager.stopping (%d connections)", len(self._nodes))

        for name in self._nodes:
            self._nodes[name].stop_node()
            self._ioloops[name].kill()
            self._nodes[name].client.ioloop.start()     # loop until connection closes

        # @TODO undeclare root xs??  need to know if last container
        #self.default_xs.delete()

    @property
    def default_node(self):
        """
        Returns the default node connection.
        """
        if 'primary' in self._nodes:
            return self._nodes['primary']
        elif len(self._nodes):
            log.warn("No primary connection, returning first available")
            return self._nodes.values()[0]

        return None

    @property
    def xn_by_xs(self):
        """
        Get a list of XNs associated by XS (friendly name).
        """
        ret = {}
        for xnname, xn in self.xn_by_name.iteritems():
            xsn = xn._xs._exchange
            if xsn not in ret:
                ret[xsn] = []
            ret[xsn].append(xn)

        return ret

    def _get_xs_obj(self, name=ION_ROOT_XS):
        """
        Gets a resource-registry represented XS, either via cache or RR request.
        """
        if name in self._xs_cache:
            return self._xs_cache[name]

        xs_objs, _ = self._rr_client.find_resources(RT.ExchangeSpace, name=name)
        if len(xs_objs) != 1:
            log.warn("Could not find RR XS object with name: %s", name)
            return None

        self._xs_cache[name] = xs_objs[0]
        return xs_objs[0]

    def _ems_available(self):
        """
        Returns True if the EMS is (likely) available and the auto_register CFG entry is True.

        Has the side effect of bootstrapping the org_id and default_xs's id/rev from the RR.
        Therefore, cannot be a property.
        """
        if CFG.container.get('exchange', {}).get('auto_register', False):
            # ok now make sure it's in the directory
            svc_de = self.container.directory.lookup('/Services/exchange_management')
            if svc_de is not None:
                if not self.org_id:
                    # find the default Org
                    org_ids = self._rr_client.find_resources(RT.Org, id_only=True)
                    if not (len(org_ids) and len(org_ids[0]) == 1):
                        log.warn("EMS available but could not find Org")
                        return False

                    self.org_id = org_ids[0][0]
                    log.debug("Bootstrapped Container exchange manager with org id: %s", self.org_id)
                return True

        return False

    def _get_channel(self, node):
        """
        Get a raw channel to be used by all the ensure_exists methods.
        """
        assert self.container

        # @TODO: needs lock, but so do all these methods
        if not self._chan:
            self._chan = blocking_cb(node.client.channel, 'on_open_callback')

        return self._chan

    def create_xs(self, name, use_ems=True, exchange_type='topic', durable=False, auto_delete=True):
        log.debug("ExchangeManager.create_xs: %s", name)
        xs = ExchangeSpace(self, name, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

        self.xs_by_name[name] = xs

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xs")
            # create a RR object
            xso = ResExchangeSpace(name=name)
            xso_id = self._ems_client.create_exchange_space(xso, self.org_id)

            log.debug("Created RR XS object, id: %s", xso_id)
        else:
            xs.declare()

        return xs

    def delete_xs(self, xs, use_ems=True):
        """
        @type xs    ExchangeSpace
        """
        log.debug("ExchangeManager.delete_xs: %s", xs)

        name = xs._exchange     # @TODO this feels wrong
        del self.xs_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xs")
            xso = self._get_xs_obj(name)
            self._ems_client.delete_exchange_space(xso._id)
            del self._xs_cache[name]
        else:
            xs.delete()

    def create_xp(self, name, xs=None, use_ems=True, **kwargs):
        log.debug("ExchangeManager.create_xp: %s", name)
        xs = xs or self.default_xs
        xp = ExchangePoint(self, name, xs, **kwargs)

        # put in xn_by_name anyway
        self.xn_by_name[name] = xp

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xp")
            # create an RR object
            xpo = ResExchangePoint(name=name, topology_type=xp._xptype)
            xpo_id = self._ems_client.create_exchange_point(xpo, self._get_xs_obj(xs._exchange)._id)        # @TODO: _exchange is wrong
        else:
            xp.declare()

        return xp

    def delete_xp(self, xp, use_ems=True):
        log.debug("ExchangeManager.delete_xp: name=%s", 'TODO') #xp.build_xname())

        name = xp._exchange # @TODO: not right
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xp")
            # find the XP object via RR
            xpo_ids = self._rr_client.find_resources(RT.ExchangePoint, name=name, id_only=True)
            if not (len(xpo_ids) and len(xpo_ids[0]) == 1):
                log.warn("Could not find XP in RR with name of %s", name)
                return

            xpo_id = xpo_ids[0][0]
            self._ems_client.delete_exchange_point(xpo_id)
        else:
            xp.delete()

    def _create_xn(self, xn_type, name, xs=None, use_ems=True, **kwargs):
        xs = xs or self.default_xs
        log.debug("ExchangeManager._create_xn: type: %s, name=%s, xs=%s, kwargs=%s", xn_type, name, xs, kwargs)

        if xn_type == "service":
            xn = ExchangeNameService(self, name, xs, **kwargs)
        elif xn_type == "process":
            xn = ExchangeNameProcess(self, name, xs, **kwargs)
        elif xn_type == "queue":
            xn = ExchangeNameQueue(self, name, xs, **kwargs)
        else:
            raise StandardError("Unknown XN type: %s" % xn_type)

        self.xn_by_name[name] = xn

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xn")
            xno = ResExchangeName(name=name, xn_type=xn.xn_type)
            self._ems_client.declare_exchange_name(xno, self._get_xs_obj(xs._exchange)._id)     # @TODO: exchange is wrong
        else:
            xn.declare()

        return xn

    def create_xn_service(self, name, xs=None, **kwargs):
        return self._create_xn('service', name, xs=xs, **kwargs)

    def create_xn_process(self, name, xs=None, **kwargs):
        return self._create_xn('process', name, xs=xs, **kwargs)

    def create_xn_queue(self, name, xs=None, **kwargs):
        return self._create_xn('queue', name, xs=xs, **kwargs)

    def delete_xn(self, xn, use_ems=False):
        log.debug("ExchangeManager.delete_xn: name=%s", "TODO") #xn.build_xlname())

        name = xn._queue                # @TODO feels wrong
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xn")
            # find the XN object via RR?
            xno_ids = self._rr_client.find_resources(RT.ExchangeName, name=name, id_only=True)
            if not (len(xno_ids) and len(xno_ids[0]) == 1):
                log.warn("Could not find XN in RR with name of %s", name)
                return

            xno_id = xno_ids[0][0]

            self._ems_client.undeclare_exchange_name(xno_id)        # "canonical name" currently understood to be RR id
        else:
            xn.delete()

    def _ensure_default_declared(self):
        """
        Ensures we declared the default exchange space.
        Needed by most exchange object calls, so each one calls here.
        """
        if not self._default_xs_declared:
            log.debug("ExchangeManager._ensure_default_declared, declaring default xs")
            self._default_xs_declared = True
            self.default_xs.declare()

    # transport implementations - XOTransport objects call here
    def declare_exchange(self, exchange, exchange_type='topic', durable=False, auto_delete=True):
        log.info("ExchangeManager.declare_exchange")
        self._ensure_default_declared()
        self._transport.declare_exchange_impl(self._client, exchange, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)
    def delete_exchange(self, exchange, **kwargs):
        log.info("ExchangeManager.delete_exchange")
        self._ensure_default_declared()
        self._transport.delete_exchange_impl(self._client, exchange, **kwargs)
    def declare_queue(self, queue, durable=False, auto_delete=False):
        log.info("ExchangeManager.declare_queue (queue %s, durable %s, AD %s)", queue, durable, auto_delete)
        self._ensure_default_declared()
        return self._transport.declare_queue_impl(self._client, queue, durable=durable, auto_delete=auto_delete)
    def delete_queue(self, queue, **kwargs):
        log.info("ExchangeManager.delete_queue")
        self._ensure_default_declared()
        self._transport.delete_queue_impl(self._client, queue, **kwargs)
    def bind(self, exchange, queue, binding):
        log.info("ExchangeManager.bind")
        self._ensure_default_declared()
        self._transport.bind_impl(self._client, exchange, queue, binding)
    def unbind(self, exchange, queue, binding):
        log.info("ExchangeManager.unbind")
        self._ensure_default_declared()
        self._transport.unbind_impl(self._client, exchange, queue, binding)
    def get_stats(self, queue):
        log.info("ExchangeManager.get_stats")
        self._ensure_default_declared()
        return self._transport.get_stats(self._client, queue)
    def purge(self, queue):
        log.info("ExchangeManager.purge")
        self._ensure_default_declared()
        self._transport.purge(self._client, queue)
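
A usage sketch for the manager above (editor's addition, not from the source): how a caller might exercise the create/delete API once a container is up. The create_* callables are the ones patched onto the Container in __init__; the `ex_manager` attribute name is an assumption.

# Sketch only -- assumes a started pyon Container; names marked below are guesses.
def exchange_roundtrip(container):
    em = container.ex_manager                      # attribute name assumed
    xs = container.create_xs('example_xs')         # ExchangeSpace (patched in __init__)
    xp = container.create_xp('example_xp', xs=xs)  # ExchangePoint under that XS
    xq = container.create_xn_queue('example_queue', xs=xs)  # queue-type ExchangeName

    # delete_* live on the manager itself, not the Container API
    em.delete_xn(xq)
    em.delete_xp(xp)
    em.delete_xs(xs)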
Exemplo n.º 52
0
class BootstrapProcessDispatcher(BootstrapPlugin):
    """
    Bootstrap process for process dispatcher.
    """
    def on_initial_bootstrap(self, process, config, **kwargs):
        self.pds_client = ProcessDispatcherServiceProcessClient(
            process=process)
        self.resource_registry = ResourceRegistryServiceProcessClient(
            process=process)
        self.ingestion_worker(process, config)
        self.replay_defs(process, config)
        self.notification_worker(process, config)
        self.registration_worker(process, config)
        self.pydap_server(process, config)
        self.eoi_services(process, config)

    def eoi_services(self, process, config):
        eoi_module = config.get_safe(
            'bootstrap.processes.registration.module',
            'ion.processes.data.registration.eoi_registration_process')
        eoi_class = config.get_safe('bootstrap.processes.registration.class',
                                    'EOIRegistrationProcess')

        process_definition = ProcessDefinition(
            name='eoi_server', description='Process for eoi data sources')
        process_definition.executable['module'] = eoi_module
        process_definition.executable['class'] = eoi_class

        self._create_and_launch(process_definition)

    def pydap_server(self, process, config):
        pydap_module = config.get_safe(
            'bootstrap.processes.pydap.module',
            'ion.processes.data.externalization.lightweight_pydap')
        pydap_class = config.get_safe('bootstrap.processes.pydap.class',
                                      'LightweightPyDAP')

        use_pydap = config.get_safe('bootstrap.launch_pydap', False)

        process_definition = ProcessDefinition(
            name='pydap_server',
            description='Lightweight WSGI Server for PyDAP')
        process_definition.executable['module'] = pydap_module
        process_definition.executable['class'] = pydap_class

        self._create_and_launch(process_definition, use_pydap)

    def registration_worker(self, process, config):
        res, meta = self.resource_registry.find_resources(
            name='registration_worker', restype=RT.ProcessDefinition)
        if len(res):
            return

        registration_module = config.get_safe(
            'bootstrap.processes.registration.module',
            'ion.processes.data.registration.registration_process')
        registration_class = config.get_safe(
            'bootstrap.processes.registration.class', 'RegistrationProcess')
        use_pydap = True

        process_definition = ProcessDefinition(
            name='registration_worker',
            description='For registering datasets with ERDDAP')
        process_definition.executable['module'] = registration_module
        process_definition.executable['class'] = registration_class

        self._create_and_launch(process_definition, use_pydap)

    def _create_and_launch(self, process_definition, conditional=True):
        proc_def_id = self.pds_client.create_process_definition(
            process_definition=process_definition)

        if conditional:
            process_res_id = self.pds_client.create_process(
                process_definition_id=proc_def_id)
            self.pds_client.schedule_process(process_definition_id=proc_def_id,
                                             process_id=process_res_id)

    def ingestion_worker(self, process, config):
        # ingestion
        ingestion_module = config.get_safe(
            'bootstrap.processes.ingestion.module',
            'ion.processes.data.ingestion.science_granule_ingestion_worker')
        ingestion_class = config.get_safe(
            'bootstrap.processes.ingestion.class',
            'ScienceGranuleIngestionWorker')
        ingestion_datastore = config.get_safe(
            'bootstrap.processes.ingestion.datastore_name', 'datasets')
        ingestion_queue = config.get_safe(
            'bootstrap.processes.ingestion.queue', 'science_granule_ingestion')
        ingestion_workers = config.get_safe(
            'bootstrap.processes.ingestion.workers', 1)
        #--------------------------------------------------------------------------------
        # Create ingestion workers
        #--------------------------------------------------------------------------------

        process_definition = ProcessDefinition(
            name='ingestion_worker_process',
            description='Worker transform process for ingestion of datasets')
        process_definition.executable['module'] = ingestion_module
        process_definition.executable['class'] = ingestion_class
        ingestion_procdef_id = self.pds_client.create_process_definition(
            process_definition=process_definition)

        #--------------------------------------------------------------------------------
        # Simulate a HA ingestion worker by creating two of them
        #--------------------------------------------------------------------------------
#        config = DotDict()
#        config.process.datastore_name = ingestion_datastore
#        config.process.queue_name     = ingestion_queue
#
#        for i in xrange(ingestion_workers):
#            self.pds_client.schedule_process(process_definition_id=ingestion_procdef_id, configuration=config)

    def notification_worker(self, process, config):
        # user notifications
        notification_module = config.get_safe(
            'bootstrap.processes.user_notification.module',
            'ion.processes.data.transforms.notification_worker')
        notification_class = config.get_safe(
            'bootstrap.processes.user_notification.class',
            'NotificationWorker')
        notification_workers = config.get_safe(
            'bootstrap.processes.user_notification.workers', 1)

        #--------------------------------------------------------------------------------
        # Create notification workers
        #--------------------------------------------------------------------------------

        # set up the process definition
        process_definition_uns = ProcessDefinition(
            name='notification_worker_process',
            description='Worker transform process for user notifications')
        process_definition_uns.executable['module'] = notification_module
        process_definition_uns.executable['class'] = notification_class
        uns_procdef_id = self.pds_client.create_process_definition(
            process_definition=process_definition_uns)

        config = DotDict()
        config.process.type = 'simple'

        for i in xrange(notification_workers):
            config.process.name = 'notification_worker_%s' % i
            config.process.queue_name = 'notification_worker_queue'
            self.pds_client.schedule_process(
                process_definition_id=uns_procdef_id, configuration=config)

    def replay_defs(self, process, config):
        replay_module = config.get_safe(
            'bootstrap.processes.replay.module',
            'ion.processes.data.replay.replay_process')
        replay_class = config.get_safe('bootstrap.processes.replay.class',
                                       'ReplayProcess')
        #--------------------------------------------------------------------------------
        # Create replay process definition
        #--------------------------------------------------------------------------------

        process_definition = ProcessDefinition(
            name=DataRetrieverService.REPLAY_PROCESS,
            description='Process for the replay of datasets')
        process_definition.executable['module'] = replay_module
        process_definition.executable['class'] = replay_class
        self.pds_client.create_process_definition(
            process_definition=process_definition)

    def on_restart(self, process, config, **kwargs):
        pass
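
Every worker above follows the same create-definition-then-schedule recipe. A condensed sketch of that pattern; the 'bootstrap.processes.myworker.*' config keys and defaults below are invented for illustration.

# Sketch of the pattern used throughout BootstrapProcessDispatcher.
def launch_worker(pds_client, config):
    module = config.get_safe('bootstrap.processes.myworker.module',
                             'ion.processes.data.my_worker')   # hypothetical default
    cls = config.get_safe('bootstrap.processes.myworker.class', 'MyWorker')

    process_definition = ProcessDefinition(name='my_worker_process',
                                           description='example worker')
    process_definition.executable['module'] = module
    process_definition.executable['class'] = cls

    proc_def_id = pds_client.create_process_definition(process_definition=process_definition)
    process_res_id = pds_client.create_process(process_definition_id=proc_def_id)
    pds_client.schedule_process(process_definition_id=proc_def_id,
                                process_id=process_res_id)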
Exemplo n.º 53
0
class VizTransformProcForGoogleDT(TransformDataProcess):
    """
    This class is used for instantiating worker processes that have subscriptions to data streams and convert
    incoming data from CDM format to JSON style Google DataTables

    """
    def on_start(self):
        super(VizTransformProcForGoogleDT, self).on_start()
        self.initDataTableFlag = True

        # need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(
            process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(
            node=self.container.node)

        # extract the various parameters passed
        self.out_stream_id = self.CFG.get('process').get(
            'publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        out_stream_pub_registrar = StreamPublisherRegistrar(
            process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(
            stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        stream_def_resource = self.rr_cli.read(self.stream_def_id)
        self.stream_def = stream_def_resource.container
        self.realtime_flag = False
        if self.CFG.get("realtime_flag") == "True":
            self.realtime_flag = True
        else:
            self.data_product_id_token = self.CFG.get('data_product_id_token')

        # extract the stream_id associated with the DP. Needed later
        stream_ids, _ = self.rr_cli.find_objects(self.data_product_id,
                                                 PRED.hasStream, None, True)
        self.stream_id = stream_ids[0]

        self.dataDescription = []
        self.dataTableContent = []
        self.varTuple = []
        self.total_num_of_records_recvd = 0

    def process(self, packet):

        log.debug('(%s): Received Viz Data Packet' % (self.name))

        element_count_id = 0
        expected_range = []

        psd = PointSupplementStreamParser(stream_definition=self.stream_def,
                                          stream_granule=packet)
        vardict = {}
        arrLen = None
        for varname in psd.list_field_names():
            vardict[varname] = psd.get_values(varname)
            arrLen = len(vardict[varname])

        # if it's the first time, init the dataTable
        if self.initDataTableFlag:
            # create data description from the variables in the message
            self.dataDescription = [('time', 'datetime', 'time')]

            # split the data string to extract variable names
            for varname in psd.list_field_names():
                if varname == 'time':
                    continue

                self.dataDescription.append((varname, 'number', varname))

            self.initDataTableFlag = False

        # Add the records to the datatable
        for i in xrange(arrLen):
            varTuple = []

            for varname, _, _ in self.dataDescription:
                val = float(vardict[varname][i])
                if varname == 'time':
                    varTuple.append(datetime.fromtimestamp(val))
                else:
                    varTuple.append(val)

            # Append the tuples to the data table
            self.dataTableContent.append(varTuple)

            if self.realtime_flag:
                # Maintain a sliding window for realtime transform processes
                realtime_window_size = 100
                if len(self.dataTableContent) > realtime_window_size:
                    # always pop the first element till window size is what we want
                    while len(self.dataTableContent) > realtime_window_size:
                        self.dataTableContent.pop(0)

        if not self.realtime_flag:
            # This is the historical view part. Make a note of how many records were received
            data_stream_id = self.stream_def.data_stream_id
            element_count_id = self.stream_def.identifiables[
                data_stream_id].element_count_id
            # From each granule you can check the constraint on the number of records
            expected_range = packet.identifiables[
                element_count_id].constraint.intervals[0]

            # The number of records in a given packet is:
            self.total_num_of_records_recvd += packet.identifiables[
                element_count_id].value

        # submit the Json version of the datatable to the viz service
        if self.realtime_flag:
            # create the google viz data table
            data_table = gviz_api.DataTable(self.dataDescription)
            data_table.LoadData(self.dataTableContent)

            # submit resulting table back using the out stream publisher
            msg = {
                "viz_product_type": "google_realtime_dt",
                "data_product_id": self.data_product_id,
                "data_table": data_table.ToJSonResponse()
            }
            self.out_stream_pub.publish(msg)
        else:
            # Submit table back to the service if we received all the replay data
            if self.total_num_of_records_recvd == (expected_range[1] + 1):
                # If the datatable received was too big, decimate on the fly to a fixed size
                max_google_dt_len = 1024
                if len(self.dataTableContent) > max_google_dt_len:
                    decimation_factor = int(
                        math.ceil(
                            len(self.dataTableContent) / (max_google_dt_len)))

                    tempDataTableContent = []
                    for i in xrange(0, len(self.dataTableContent),
                                    decimation_factor):
                        # check limits
                        if i >= len(self.dataTableContent):
                            break

                        tempDataTableContent.append(self.dataTableContent[i])

                    self.dataTableContent = tempDataTableContent

                data_table = gviz_api.DataTable(self.dataDescription)
                data_table.LoadData(self.dataTableContent)

                # submit resulting table back using the out stream publisher
                msg = {
                    "viz_product_type": "google_dt",
                    "data_product_id_token": self.data_product_id_token,
                    "data_table": data_table.ToJSonResponse()
                }
                self.out_stream_pub.publish(msg)
                return

        # clear the accumulated rows for reuse
        self.varTuple[:] = []
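
Stripped of the stream plumbing, the transform's table handling reduces to the gviz_api calls used above plus stride decimation. A self-contained sketch (editor's addition; the sample rows are fabricated):

import math
import gviz_api

description = [('time', 'number', 'time'), ('salinity', 'number', 'salinity')]
rows = [(t, 30.0 + 0.01 * t) for t in range(5000)]   # fabricated sample data

# Decimate by a fixed stride so the table stays under a size cap,
# mirroring the historical-view branch of process() above.
max_google_dt_len = 1024
if len(rows) > max_google_dt_len:
    decimation_factor = int(math.ceil(len(rows) / float(max_google_dt_len)))
    rows = rows[::decimation_factor]

data_table = gviz_api.DataTable(description)
data_table.LoadData(rows)
payload = data_table.ToJSonResponse()   # same serialization the transform publishes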
Exemplo n.º 54
0
class ExchangeManager(object):
    """
    Manager object for the CC to manage Exchange related resources.
    """
    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [self.create_xs,
                              self.create_xp,
                              self.create_xn_service,
                              self.create_xn_process,
                              self.create_xn_queue]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs         = ExchangeSpace(self, ION_ROOT_XS)
        self._xs_cache          = {}        # caching of xs names to RR objects
        self._default_xs_obj    = None      # default XS registry object
        self.org_id             = None

        # mappings
        self.xs_by_name = { ION_ROOT_XS: self.default_xs }      # friendly named XS to XSO
        self.xn_by_name = {}                                    # friendly named XN to XNO
        # xn by xs is a property

        self._chan = None

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client    = ExchangeManagementServiceProcessClient(process=self.container)
        self._rr_client     = ResourceRegistryServiceProcessClient(process=self.container)

        # TODO: Do more initializing here, not in container

    @property
    def xn_by_xs(self):
        """
        Get a list of XNs associated by XS (friendly name).
        """
        ret = {}
        for xnname, xn in self.xn_by_name.iteritems():
            xsn = xn._xs._exchange
            if xsn not in ret:
                ret[xsn] = []
            ret[xsn].append(xn)

        return ret

    def _get_xs_obj(self, name=ION_ROOT_XS):
        """
        Gets a resource-registry represented XS, either via cache or RR request.
        """
        if name in self._xs_cache:
            return self._xs_cache[name]

        xs_objs, _ = self._rr_client.find_resources(RT.ExchangeSpace, name=name)
        if len(xs_objs) != 1:
            log.warn("Could not find RR XS object with name: %s", name)
            return None

        self._xs_cache[name] = xs_objs[0]
        return xs_objs[0]

    def start(self):
        log.debug("ExchangeManager starting ...")

        # Establish connection to broker
        # @TODO: raise an error if the connection fails
        node, ioloop = messaging.make_node()

        self._transport = AMQPTransport.get_instance()
        self._client    = self._get_channel(node)

        # Declare root exchange
        #self.default_xs.ensure_exists(self._get_channel())
        return node, ioloop

    def _ems_available(self):
        """
        Returns True if the EMS is (likely) available and the auto_register CFG entry is True.

        Has the side effect of bootstrapping the org_id and default_xs's id/rev from the RR.
        Therefore, cannot be a property.
        """
        if CFG.container.get('exchange', {}).get('auto_register', False):
            # ok now make sure it's in the directory
            svc_de = self.container.directory.lookup('/Services/exchange_management')
            if svc_de is not None:
                if not self.org_id:
                    # find the default Org
                    org_ids = self._rr_client.find_resources(RT.Org, id_only=True)
                    if not (len(org_ids) and len(org_ids[0]) == 1):
                        log.warn("EMS available but could not find Org")
                        return False

                    self.org_id = org_ids[0][0]
                    log.debug("Bootstrapped Container exchange manager with org id: %s", self.org_id)
                return True

        return False

    def _get_channel(self, node):
        """
        Get a raw channel to be used by all the ensure_exists methods.
        """
        assert self.container

        # @TODO: needs lock, but so do all these methods
        if not self._chan:
            self._chan = blocking_cb(node.client.channel, 'on_open_callback')

        return self._chan

    def create_xs(self, name, use_ems=True, exchange_type='topic', durable=False, auto_delete=True):
        log.debug("ExchangeManager.create_xs: %s", name)
        xs = ExchangeSpace(self, name, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

        self.xs_by_name[name] = xs

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xs")
            # create a RR object
            xso = ResExchangeSpace(name=name)
            xso_id = self._ems_client.create_exchange_space(xso, self.org_id)

            log.debug("Created RR XS object, id: %s", xso_id)
        else:
            xs.declare()

        return xs

    def delete_xs(self, xs, use_ems=True):
        """
        @type xs    ExchangeSpace
        """
        log.debug("ExchangeManager.delete_xs: %s", xs)

        name = xs._exchange     # @TODO this feels wrong
        del self.xs_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xs")
            xso = self._get_xs_obj(name)
            self._ems_client.delete_exchange_space(xso._id)
            del self._xs_cache[name]
        else:
            xs.delete()

    def create_xp(self, name, xs=None, use_ems=True, **kwargs):
        log.debug("ExchangeManager.create_xp: %s", name)
        xs = xs or self.default_xs
        xp = ExchangePoint(self, name, xs, **kwargs)

        # put in xn_by_name anyway
        self.xn_by_name[name] = xp

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xp")
            # create an RR object
            xpo = ResExchangePoint(name=name, topology_type=xp._xptype)
            xpo_id = self._ems_client.create_exchange_point(xpo, self._get_xs_obj(xs._exchange)._id)        # @TODO: _exchange is wrong
        else:
            xp.declare()

        return xp

    def delete_xp(self, xp, use_ems=True):
        log.debug("ExchangeManager.delete_xp: name=%s", 'TODO') #xp.build_xname())

        name = xp._exchange # @TODO: not right
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xp")
            # find the XP object via RR
            xpo_ids = self._rr_client.find_resources(RT.ExchangePoint, name=name, id_only=True)
            if not (len(xpo_ids) and len(xpo_ids[0]) == 1):
                log.warn("Could not find XP in RR with name of %s", name)
                return

            xpo_id = xpo_ids[0][0]
            self._ems_client.delete_exchange_point(xpo_id)
        else:
            xp.delete()

    def _create_xn(self, xn_type, name, xs=None, use_ems=True, **kwargs):
        xs = xs or self.default_xs
        log.debug("ExchangeManager._create_xn: type: %s, name=%s, xs=%s, kwargs=%s", xn_type, name, xs, kwargs)

        if xn_type == "service":
            xn = ExchangeNameService(self, name, xs, **kwargs)
        elif xn_type == "process":
            xn = ExchangeNameProcess(self, name, xs, **kwargs)
        elif xn_type == "queue":
            xn = ExchangeNameQueue(self, name, xs, **kwargs)
        else:
            raise StandardError("Unknown XN type: %s" % xn_type)

        self.xn_by_name[name] = xn

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xn")
            xno = ResExchangeName(name=name, xn_type=xn.xn_type)
            self._ems_client.declare_exchange_name(xno, self._get_xs_obj(xs._exchange)._id)     # @TODO: exchange is wrong
        else:
            xn.declare()

        return xn

    def create_xn_service(self, name, xs=None, **kwargs):
        return self._create_xn('service', name, xs=xs, **kwargs)

    def create_xn_process(self, name, xs=None, **kwargs):
        return self._create_xn('process', name, xs=xs, **kwargs)

    def create_xn_queue(self, name, xs=None, **kwargs):
        return self._create_xn('queue', name, xs=xs, **kwargs)

    def delete_xn(self, xn, use_ems=False):
        log.debug("ExchangeManager.delete_xn: name=%s", "TODO") #xn.build_xlname())

        name = xn._queue                # @TODO feels wrong
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xn")
            # find the XN object via RR?
            xno_ids = self._rr_client.find_resources(RT.ExchangeName, name=name, id_only=True)
            if not (len(xno_ids) and len(xno_ids[0]) == 1):
                log.warn("Could not find XN in RR with name of %s", name)
                return  # avoid an IndexError below when the XN is not registered

            xno_id = xno_ids[0][0]

            self._ems_client.undeclare_exchange_name(xno_id)        # "canonical name" currently understood to be RR id
        else:
            xn.delete()

    def stop(self, *args, **kwargs):
        log.debug("ExchangeManager stopping ...")

    # transport implementations - XOTransport objects call here
    def declare_exchange(self, exchange, exchange_type='topic', durable=False, auto_delete=True):
        log.info("ExchangeManager.declare_exchange")
        self._transport.declare_exchange_impl(self._client, exchange, exchange_type=exchange_type, durable=durable, auto_delete=auto_delete)

    def delete_exchange(self, exchange, **kwargs):
        log.info("ExchangeManager.delete_exchange")
        self._transport.delete_exchange_impl(self._client, exchange, **kwargs)

    def declare_queue(self, queue, durable=False, auto_delete=True):
        log.info("ExchangeManager.declare_queue")
        return self._transport.declare_queue_impl(self._client, queue, durable=durable, auto_delete=auto_delete)

    def delete_queue(self, queue, **kwargs):
        log.info("ExchangeManager.delete_queue")
        self._transport.delete_queue_impl(self._client, queue, **kwargs)

    def bind(self, exchange, queue, binding):
        log.info("ExchangeManager.bind")
        self._transport.bind_impl(self._client, exchange, queue, binding)

    def unbind(self, exchange, queue, binding):
        log.info("ExchangeManager.unbind")
        self._transport.unbind_impl(self._client, exchange, queue, binding)
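
# --- Editor's illustrative sketch (not part of the original example) ---
# A minimal usage sketch for the ExchangeManager API above, assuming a started
# container; `my_container` and all resource names here are hypothetical.
# With use_ems=False the objects are declared directly against the broker
# instead of going through the Exchange Management Service.
#
#   ex_mgr = ExchangeManager(my_container)
#   node, ioloop = ex_mgr.start()
#
#   xs = ex_mgr.create_xs('my_xs', use_ems=False)              # exchange space
#   xp = ex_mgr.create_xp('my_xp', xs=xs, use_ems=False)       # exchange point
#   xq = ex_mgr.create_xn_queue('my_queue', xs=xs, use_ems=False)
#
#   ex_mgr.delete_xn(xq)
#   ex_mgr.delete_xp(xp, use_ems=False)
#   ex_mgr.delete_xs(xs, use_ems=False)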
Example No. 55
def upload_data(dataproduct_id):
    upload_folder = FileSystem.get_url(FS.TEMP,'uploads')
    try:

        rr_client = ResourceRegistryServiceProcessClient(process=service_gateway_instance)
        object_store = Container.instance.object_store

        try:
            rr_client.read(str(dataproduct_id))
        except BadRequest:
            raise BadRequest('Unknown DataProduct ID %s' % dataproduct_id)

        # required fields
        upload = request.files['file']  # <input type=file name="file">

        # determine filetype
        filetype = _check_magic(upload)
        upload.seek(0)  # return to beginning for save

        if upload and filetype is not None:

            # upload file - run filename through werkzeug.secure_filename
            filename = secure_filename(upload.filename)
            path = os.path.join(upload_folder, filename)
            upload_time = time.time()
            upload.save(path)

            # register upload
            file_upload_context = {
                # TODO add dataproduct_id
                'name':'User uploaded file %s' % filename,
                'filename':filename,
                'filetype':filetype,
                'path':path,
                'upload_time':upload_time,
                'status':'File uploaded to server'
            }
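            # create_doc returns a (doc_id, revision) pair; only the id is needed here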
            fuc_id, _ = object_store.create_doc(file_upload_context)

            # client to process dispatch
            pd_client = ProcessDispatcherServiceClient()

            # create process definition
            process_definition = ProcessDefinition(
                name='upload_data_processor',
                executable={
                    'module':'ion.processes.data.upload.upload_data_processing',
                    'class':'UploadDataProcessing'
                }
            )
            process_definition_id = pd_client.create_process_definition(process_definition)
            # create process
            process_id = pd_client.create_process(process_definition_id)
            # schedule process
            config = DotDict()
            config.process.fuc_id = fuc_id
            config.process.dp_id = dataproduct_id
            pid = pd_client.schedule_process(process_definition_id, process_id=process_id, configuration=config)
            log.info('UploadDataProcessing process created %s' % pid)
            # response - only the FileUploadContext ID is returned for UX display
            resp = {'fuc_id': fuc_id}
            return gateway_json_response(resp)

        raise BadRequest('Invalid Upload')

    except Exception as e:
        return build_error_response(e)
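
# --- Editor's illustrative sketch (not part of the original example) ---
# How a client might exercise the upload endpoint above. The route and base URL
# are assumptions (they do not appear in this snippet); the form field name
# "file" matches request.files['file'] in upload_data().
import requests

def upload_data_file(base_url, dataproduct_id, filepath):
    url = '%s/ion-service/upload/data/%s' % (base_url, dataproduct_id)  # hypothetical route
    with open(filepath, 'rb') as f:
        resp = requests.post(url, files={'file': f})
    resp.raise_for_status()
    return resp.json()  # expected to carry the FileUploadContext id ('fuc_id')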
Example No. 56
class ExchangeManager(object):
    """
    Manager object for the CC to manage Exchange related resources.
    """
    def __init__(self, container):
        log.debug("ExchangeManager initializing ...")
        self.container = container

        # Define the callables that can be added to Container public API
        # @TODO: remove
        self.container_api = [
            self.create_xs, self.create_xp, self.create_xn_service,
            self.create_xn_process, self.create_xn_queue
        ]

        # Add the public callables to Container
        for call in self.container_api:
            setattr(self.container, call.__name__, call)

        self.default_xs = ExchangeSpace(self, ION_ROOT_XS)
        self._xs_cache = {}  # caching of xs names to RR objects
        self._default_xs_obj = None  # default XS registry object
        self.org_id = None

        # mappings
        self.xs_by_name = {
            ION_ROOT_XS: self.default_xs
        }  # friendly named XS to XSO
        self.xn_by_name = {}  # friendly named XN to XNO
        # xn by xs is a property

        self._chan = None

        # @TODO specify our own to_name here so we don't get auto-behavior - tricky chicken/egg
        self._ems_client = ExchangeManagementServiceProcessClient(
            process=self.container)
        self._rr_client = ResourceRegistryServiceProcessClient(
            process=self.container)

        # TODO: Do more initializing here, not in container

    @property
    def xn_by_xs(self):
        """
        Get a list of XNs associated by XS (friendly name).
        """
        ret = {}
        for xnname, xn in self.xn_by_name.iteritems():
            xsn = xn._xs._exchange
            if xsn not in ret:
                ret[xsn] = []
            ret[xsn].append(xn)

        return ret

    def _get_xs_obj(self, name=ION_ROOT_XS):
        """
        Gets a resource-registry represented XS, either via cache or RR request.
        """
        if name in self._xs_cache:
            return self._xs_cache[name]

        xs_objs, _ = self._rr_client.find_resources(RT.ExchangeSpace,
                                                    name=name)
        if len(xs_objs) != 1:
            log.warn("Could not find RR XS object with name: %s", name)
            return None

        self._xs_cache[name] = xs_objs[0]
        return xs_objs[0]

    def start(self):
        log.debug("ExchangeManager starting ...")

        # Establish connection to broker
        # @TODO: raise an error if the connection fails
        node, ioloop = messaging.make_node()

        self._transport = AMQPTransport.get_instance()
        self._client = self._get_channel(node)

        # Declare root exchange
        #self.default_xs.ensure_exists(self._get_channel())
        return node, ioloop

    def _ems_available(self):
        """
        Returns True if the EMS is (likely) available and the auto_register CFG entry is True.

        Has the side effect of bootstrapping the org_id and default_xs's id/rev from the RR.
        Therefore, cannot be a property.
        """
        if CFG.container.get('exchange', {}).get('auto_register', False):
            # ok now make sure it's in the directory
            svc_de = self.container.directory.lookup(
                '/Services/exchange_management')
            if svc_de is not None:
                if not self.org_id:
                    # find the default Org
                    org_ids = self._rr_client.find_resources(RT.Org,
                                                             id_only=True)
                    if not (len(org_ids) and len(org_ids[0]) == 1):
                        log.warn("EMS available but could not find Org")
                        return False

                    self.org_id = org_ids[0][0]
                    log.debug(
                        "Bootstrapped Container exchange manager with org id: %s",
                        self.org_id)
                return True

        return False

    def _get_channel(self, node):
        """
        Get a raw channel to be used by all the ensure_exists methods.
        """
        assert self.container

        # @TODO: needs lock, but so do all these methods
        if not self._chan:
            self._chan = blocking_cb(node.client.channel, 'on_open_callback')

        return self._chan

    def create_xs(self,
                  name,
                  use_ems=True,
                  exchange_type='topic',
                  durable=False,
                  auto_delete=True):
        log.debug("ExchangeManager.create_xs: %s", name)
        xs = ExchangeSpace(self,
                           name,
                           exchange_type=exchange_type,
                           durable=durable,
                           auto_delete=auto_delete)

        self.xs_by_name[name] = xs

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xs")
            # create a RR object
            xso = ResExchangeSpace(name=name)
            xso_id = self._ems_client.create_exchange_space(xso, self.org_id)

            log.debug("Created RR XS object, id: %s", xso_id)
        else:
            xs.declare()

        return xs

    def delete_xs(self, xs, use_ems=True):
        """
        @type xs    ExchangeSpace
        """
        log.debug("ExchangeManager.delete_xs: %s", xs)

        name = xs._exchange  # @TODO this feels wrong
        del self.xs_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xs")
            xso = self._get_xs_obj(name)
            self._ems_client.delete_exchange_space(xso._id)
            del self._xs_cache[name]
        else:
            xs.delete()

    def create_xp(self, name, xs=None, use_ems=True, **kwargs):
        log.debug("ExchangeManager.create_xp: %s", name)
        xs = xs or self.default_xs
        xp = ExchangePoint(self, name, xs, **kwargs)

        # put in xn_by_name anyway
        self.xn_by_name[name] = xp

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xp")
            # create an RR object
            xpo = ResExchangePoint(name=name, topology_type=xp._xptype)
            xpo_id = self._ems_client.create_exchange_point(
                xpo,
                self._get_xs_obj(
                    xs._exchange)._id)  # @TODO: _exchange is wrong
        else:
            xp.declare()

        return xp

    def delete_xp(self, xp, use_ems=True):
        log.debug("ExchangeManager.delete_xp: name=%s",
                  'TODO')  #xp.build_xname())

        name = xp._exchange  # @TODO: not right
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xp")
            # find the XP object via RR
            xpo_ids = self._rr_client.find_resources(RT.ExchangePoint,
                                                     name=name,
                                                     id_only=True)
            if not (len(xpo_ids) and len(xpo_ids[0]) == 1):
                log.warn("Could not find XP in RR with name of %s", name)
                return  # avoid an IndexError below when the XP is not registered

            xpo_id = xpo_ids[0][0]
            self._ems_client.delete_exchange_point(xpo_id)
        else:
            xp.delete()

    def _create_xn(self, xn_type, name, xs=None, use_ems=True, **kwargs):
        xs = xs or self.default_xs
        log.debug(
            "ExchangeManager._create_xn: type: %s, name=%s, xs=%s, kwargs=%s",
            xn_type, name, xs, kwargs)

        if xn_type == "service":
            xn = ExchangeNameService(self, name, xs, **kwargs)
        elif xn_type == "process":
            xn = ExchangeNameProcess(self, name, xs, **kwargs)
        elif xn_type == "queue":
            xn = ExchangeNameQueue(self, name, xs, **kwargs)
        else:
            raise StandardError("Unknown XN type: %s" % xn_type)

        self.xn_by_name[name] = xn

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xn")
            xno = ResExchangeName(name=name, xn_type=xn.xn_type)
            self._ems_client.declare_exchange_name(
                xno,
                self._get_xs_obj(xs._exchange)._id)  # @TODO: exchange is wrong
        else:
            xn.declare()

        return xn

    def create_xn_service(self, name, xs=None, **kwargs):
        return self._create_xn('service', name, xs=xs, **kwargs)

    def create_xn_process(self, name, xs=None, **kwargs):
        return self._create_xn('process', name, xs=xs, **kwargs)

    def create_xn_queue(self, name, xs=None, **kwargs):
        return self._create_xn('queue', name, xs=xs, **kwargs)

    def delete_xn(self, xn, use_ems=False):
        log.debug("ExchangeManager.delete_xn: name=%s",
                  "TODO")  #xn.build_xlname())

        name = xn._queue  # @TODO feels wrong
        del self.xn_by_name[name]

        if use_ems and self._ems_available():
            log.debug("Using EMS to delete_xn")
            # find the XN object via RR?
            xno_ids = self._rr_client.find_resources(RT.ExchangeName,
                                                     name=name,
                                                     id_only=True)
            if not (len(xno_ids) and len(xno_ids[0]) == 1):
                log.warn("Could not find XN in RR with name of %s", name)
                return  # avoid an IndexError below when the XN is not registered

            xno_id = xno_ids[0][0]

            self._ems_client.undeclare_exchange_name(
                xno_id)  # "canonical name" currently understood to be RR id
        else:
            xn.delete()

    def stop(self, *args, **kwargs):
        log.debug("ExchangeManager stopping ...")

    # transport implementations - XOTransport objects call here
    def declare_exchange(self,
                         exchange,
                         exchange_type='topic',
                         durable=False,
                         auto_delete=True):
        log.info("ExchangeManager.declare_exchange")
        self._transport.declare_exchange_impl(self._client,
                                              exchange,
                                              exchange_type=exchange_type,
                                              durable=durable,
                                              auto_delete=auto_delete)

    def delete_exchange(self, exchange, **kwargs):
        log.info("ExchangeManager.delete_exchange")
        self._transport.delete_exchange_impl(self._client, exchange, **kwargs)

    def declare_queue(self, queue, durable=False, auto_delete=True):
        log.info("ExchangeManager.declare_queue")
        return self._transport.declare_queue_impl(self._client,
                                                  queue,
                                                  durable=durable,
                                                  auto_delete=auto_delete)

    def delete_queue(self, queue, **kwargs):
        log.info("ExchangeManager.delete_queue")
        self._transport.delete_queue_impl(self._client, queue, **kwargs)

    def bind(self, exchange, queue, binding):
        log.info("ExchangeManager.bind")
        self._transport.bind_impl(self._client, exchange, queue, binding)

    def unbind(self, exchange, queue, binding):
        log.info("ExchangeManager.unbind")
        self._transport.unbind_impl(self._client, exchange, queue, binding)
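
# --- Editor's illustrative sketch (not part of the original example) ---
# The xn_by_xs property above groups exchange names by the friendly name of
# their exchange space. The same dict-of-lists grouping in miniature, with
# plain strings standing in for XN objects (hypothetical data):
def group_by_space(pairs):
    """pairs: iterable of (xs_name, xn) tuples."""
    ret = {}
    for xs_name, xn in pairs:
        ret.setdefault(xs_name, []).append(xn)  # setdefault avoids the explicit "if not in" check
    return ret

# group_by_space([('ioncore', 'svc_a'), ('ioncore', 'q_b'), ('other_xs', 'svc_c')])
# -> {'ioncore': ['svc_a', 'q_b'], 'other_xs': ['svc_c']}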
Example No. 57
class TestSchedulerService(IonIntegrationTestCase):

    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

    def tearDown(self):
        pass

    def now_utc(self):
        return time.time()

    def test_create_interval_timer(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_233"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 10
        id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until two events are published
        gevent.sleep((self.interval_timer_interval * 2) + 1)

        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts = math.floor(time_diff / self.interval_timer_interval)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

        # Validate the number of timer counts
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeouts: %d Expected timeouts: %d Timer id: %s" % (self.interval_timer_count, timer_counts, id))


    def test_system_restart(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_4444"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.on_restart_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 20
        id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until 1 event is published
        gevent.sleep(self.interval_timer_interval + 1)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts = math.floor(time_diff / self.interval_timer_interval)

        # Validate the number of events generated
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeouts: %d Expected timeouts: %d Timer id: %s" % (self.interval_timer_count, timer_counts, id))

        self.ssclient.on_system_restart()

        # after system restart, validate the timer is restored
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until another event is published
        start_time = datetime.datetime.utcnow()
        gevent.sleep((self.interval_timer_interval * 2) + 1)
        time_diff = (datetime.datetime.utcnow() - start_time).seconds
        timer_counts = math.floor(time_diff / self.interval_timer_interval)

        # Validate the number of events generated
        self.assertGreater(self.interval_timer_count, timer_counts)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

    def on_restart_callback(self, *args, **kwargs):
        self.interval_timer_count += 1
        log.debug("test_scheduler: on_restart_callback: time: " + str(self.now_utc()) + " count: " + str(self.interval_timer_count))

    def test_create_interval_timer_with_end_time(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # Validate no more events are published after end_time expires
        # Validate the timer was canceled after the end_time expires

        self.interval_timer_count_2 = 0
        self.interval_timer_sent_time_2 = 0
        self.interval_timer_received_time_2 = 0
        self.interval_timer_interval_2 = 3

        event_origin = "Interval_Timer_2"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback_with_end_time, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time_2 = start_time + 7
        id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval_2,
            end_time=self.interval_timer_end_time_2,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time_2 = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait until all events are published
        gevent.sleep((self.interval_timer_end_time_2 - start_time) + self.interval_timer_interval_2 + 1)

        # Validate the number of events generated
        self.assertEqual(self.interval_timer_count_2, 2, "Invalid number of timeouts generated. Number of events: %d Expected: 2 Timer id: %s" % (self.interval_timer_count_2, id))

        # Validate the timer was canceled after the end_time is expired
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

    def interval_timer_callback_with_end_time(self, *args, **kwargs):
        self.interval_timer_received_time_2 = datetime.datetime.utcnow()
        self.interval_timer_count_2 += 1
        time_diff = math.fabs( ((self.interval_timer_received_time_2 - self.interval_timer_sent_time_2).total_seconds())
                               - (self.interval_timer_interval_2 * self.interval_timer_count_2) )
        # Assert expire time is within +-10 seconds
        self.assertTrue(time_diff <= 10)
        log.debug("test_scheduler: interval_timer_callback_with_end_time: time:" + str(self.interval_timer_received_time_2) + " count:" + str(self.interval_timer_count_2))


    def interval_timer_callback(self, *args, **kwargs):
        self.interval_timer_received_time = datetime.datetime.utcnow()
        self.interval_timer_count += 1
        time_diff = math.fabs( ((self.interval_timer_received_time - self.interval_timer_sent_time).total_seconds())
                               - (self.interval_timer_interval * self.interval_timer_count) )
        # Assert expire time is within +-10 seconds
        self.assertTrue(time_diff <= 10)
        log.debug("test_scheduler: interval_timer_callback: time:" + str(self.interval_timer_received_time) + " count:" + str(self.interval_timer_count))

    def test_cancel_single_timer(self):
        # test creating a new timer that is one-time-only
        # create then cancel the timer, verify that the event is not received

        # create the timer resource
        # create the event listener
        # call scheduler to set the timer
        # call scheduler to cancel the timer
        # wait until after expiry to verify that the event is not sent
        self.single_timer_count = 0
        event_origin = "Time_of_Day"

        sub = EventSubscriber(event_type="TimerEvent", callback=self.single_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        now = datetime.datetime.utcnow() + timedelta(seconds=3)
        times_of_day = [{'hour': str(now.hour), 'minute': str(now.minute), 'second': str(now.second)}]
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day,  expires=self.now_utc()+3, event_origin=event_origin, event_subtype="test")
        self.assertEqual(type(id), str)
        self.ssclient.cancel_timer(id)
        gevent.sleep(3)

        # Validate the event is not generated
        self.assertEqual(self.single_timer_count, 0, "Invalid number of timeouts generated. Number of timeouts: %d Expected timeouts: 0 Timer id: %s" % (self.single_timer_count, id))

    def single_timer_callback(self, *args, **kwargs):
        self.single_timer_count += 1
        log.debug("test_scheduler: single_timer_callback: time:" + str(self.now_utc()) + " count:" + str(self.single_timer_count))

    def test_create_forever_interval_timer(self):
        # Test creating interval timer that runs forever

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval Timer Forever"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        id = self.ssclient.create_interval_timer(start_time=str(self.now_utc()), interval=self.interval_timer_interval,
            end_time="-1",
            event_origin=event_origin, event_subtype=event_origin)
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait for 4 events to be published
        gevent.sleep((self.interval_timer_interval * 4) + 1)
        self.ssclient.cancel_timer(id)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts = math.floor(time_diff / self.interval_timer_interval)

        # Validate the timer id is invalid once it has been canceled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate events are not generated after canceling the timer
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeouts: %d Expected timeouts: %d Timer id: %s" % (self.interval_timer_count, timer_counts, id))

    def test_timeoffday_timer(self):
        # test creating time-of-day timers
        # create the timer resource
        # get the current time, set the timer to several seconds from current time
        # create the event listener
        # call scheduler to set the timer
        # verify that event arrival is within one/two seconds of the expected time

        event_origin = "Time Of Day2"
        self.expire_sec_1 = 4
        self.expire_sec_2 = 5
        self.tod_count = 0
        expire1 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_1)
        expire2 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_2)
        # Create two timers
        times_of_day = [{'hour': str(expire1.hour), 'minute': str(expire1.minute), 'second': str(expire1.second)},
                        {'hour': str(expire2.hour), 'minute': str(expire2.minute), 'second': str(expire2.second)}]

        sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # Expires in two days
        expires = calendar.timegm((datetime.datetime.utcnow() + timedelta(days=2)).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait until all events are generated
        gevent.sleep(9)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts = math.floor(time_diff / self.expire_sec_1) + math.floor(time_diff / self.expire_sec_2)

        # After waiting, validate only 2 events are generated.
        self.assertEqual(self.tod_count, 2, "Invalid number of timeouts generated. Number of timeouts: %d Expected timeouts: %d Timer id: %s" % (self.tod_count, timer_counts, id))

        # Cancel the timer
        self.ssclient.cancel_timer(id)

    def tod_callback(self, *args, **kwargs):
        tod_receive_time = datetime.datetime.utcnow()
        self.tod_count += 1
        if self.tod_count == 1:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_1)
            self.assertTrue(time_diff <= 2)
        elif self.tod_count == 2:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_2)
            self.assertTrue(time_diff <= 2)
        log.debug("test_scheduler: tod_callback: time:" + str(tod_receive_time) + " count:" + str(self.tod_count))

    def test_timeoffday_timer_in_past_seconds(self):
        # test creating a time-of-day timer whose time of day has already passed today
        # create the timer resource
        # get the current time, set the timer to several seconds before the current time
        # create the event listener
        # call scheduler to set the timer
        # verify that no event is generated before the timer rolls over to the next day

        event_origin = "Time_Of_Day3"
        expire_sec = -4
        self.tod_count2 = 0
        now = datetime.datetime.utcnow()
        expire1 = now + timedelta(seconds=expire_sec)
        # Create one timer
        times_of_day = [{'hour': str(expire1.hour), 'minute': str(expire1.minute), 'second': str(expire1.second)}]

        sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback2, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # Expires in 3 days
        expires = calendar.timegm((datetime.datetime.utcnow() + timedelta(days=3)).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait and see if the any events are generated
        gevent.sleep(5)

        # After waiting, validate no event is generated
        self.assertEqual(self.tod_count2, 0, "Invalid number of timeouts generated. Number of timeouts: %d Expected timeouts: 0 Timer id: %s" % (self.tod_count2, id))

        # Cancel the timer
        self.ssclient.cancel_timer(id)

        # This exercises the following case:
        # Example: the current time is 8:00AM and the user sets up a timer for 6:00AM. Since it is
        #   already 8AM, the scheduler targets 6AM tomorrow, but the expire time is set to 5AM tomorrow
        event_origin = "Time_Of_Day4"
        expire_sec = -4
        self.tod_count2 = 0
        now = datetime.datetime.utcnow()
        expire1 = now + timedelta(seconds=expire_sec)
        times_of_day = [{'hour': str(expire1.hour), 'minute': str(expire1.minute), 'second': str(expire1.second)}]

        sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback2, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # Expires before the first event
        time_delta = timedelta(days=1) + timedelta(seconds=-(abs(expire_sec*2)))   # Notice the minus sign. It expires before the first event
        expires = calendar.timegm((now + time_delta).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        with self.assertRaises(BadRequest):
            id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")

    def tod_callback2(self, *args, **kwargs):
        tod_receive_time = datetime.datetime.utcnow()
        self.tod_count2 += 1
        log.debug("test_scheduler: tod_callback2: time:" + str(tod_receive_time) + " count:" + str(self.tod_count2))

    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_quit_stops_timers(self):

        ar = AsyncResult()
        def cb(*args, **kwargs):
            ar.set(args)
            self.interval_timer_count += 1

        event_origin = "test_quitter"
        sub = EventSubscriber(event_type="TimerEvent", callback=cb, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        tid = self.ssclient.create_interval_timer(start_time="now",
                                                  end_time="-1",
                                                  interval=1,
                                                  event_origin=event_origin)

        # wait until at least one scheduled message
        ar.get(timeout=5)

        # shut it down!
        p = self.container.proc_manager.procs_by_name['scheduler']
        self.container.terminate_process(p.id)

        # assert empty
        self.assertEquals(p.schedule_entries, {})
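
# --- Editor's illustrative sketch (not part of the original example) ---
# The scheduler tests above repeatedly derive the expected number of TimerEvents
# as floor(elapsed_seconds / interval). The same arithmetic as a standalone
# helper (hypothetical; the tests inline it instead):
import math

def expected_timer_counts(elapsed_seconds, interval_seconds):
    return int(math.floor(elapsed_seconds / float(interval_seconds)))

# e.g. after sleeping (interval * 2) + 1 seconds with interval=3:
# expected_timer_counts(7, 3) -> 2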
Example No. 58
class VizTransformProcForMatplotlibGraphs(TransformDataProcess):
    """
    This class is used for instantiating worker processes that have subscriptions to data streams and convert
    incoming data from CDM format to Matplotlib graphs

    """
    def on_start(self):
        super(VizTransformProcForMatplotlibGraphs, self).on_start()
        #assert len(self.streams)==1
        self.initDataFlag = True
        self.graph_data = {}  # stores a dictionary of variable name : [list of values]

        # Need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(
            process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(
            node=self.container.node)

        # extract the various parameters passed to the transform process
        self.out_stream_id = self.CFG.get('process').get(
            'publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        #stream_route = self.pubsub_cli.register_producer(stream_id=self.out_stream_id)
        out_stream_pub_registrar = StreamPublisherRegistrar(
            process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(
            stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        self.stream_def = self.rr_cli.read(self.stream_def_id)

        # Start the thread responsible for keeping track of time and generating graphs
        # Mutex for ensuring proper concurrent communications between threads
        self.lock = RLock()
        self.rendering_proc = Greenlet(self.rendering_thread)
        self.rendering_proc.start()

    def process(self, packet):
        log.debug('(%s): Received Viz Data Packet' % self.name)
        #log.debug('(%s):   - Processing: %s' % (self.name,packet))

        # parse the incoming data
        psd = PointSupplementStreamParser(
            stream_definition=self.stream_def.container, stream_granule=packet)

        # re-arrange incoming data into an easy to parse dictionary
        vardict = {}
        arrLen = None
        for varname in psd.list_field_names():
            vardict[varname] = psd.get_values(varname)
            arrLen = len(vardict[varname])

        if self.initDataFlag:
            # look at the incoming packet and initialize storage for each variable
            with self.lock:
                for varname in psd.list_field_names():
                    self.graph_data[varname] = []

            self.initDataFlag = False

        # If code reached here, the graph data storage has been initialized. Just add values
        # to the list
        with self.lock:
            for varname in psd.list_field_names():
                self.graph_data[varname].extend(vardict[varname])

    def rendering_thread(self):
        from copy import deepcopy
        # Service Client

        # init Matplotlib
        fig = Figure()
        ax = fig.add_subplot(111)
        canvas = FigureCanvas(fig)
        imgInMem = StringIO.StringIO()
        while True:

            # Sleep for a pre-decided interval. Should be specifiable in a YAML file
            gevent.sleep(20)

            # If there's no data, wait
            # The lock ensures the entire vector is copied start to finish, so the data won't change mid-copy
            working_set = None
            with self.lock:
                if len(self.graph_data) == 0:
                    continue
                else:
                    working_set = deepcopy(self.graph_data)

            # For the simple case of testing, let's plot all time-variant variables one at a time
            xAxisVar = 'time'
            xAxisFloatData = working_set[xAxisVar]

            for varName, varData in working_set.iteritems():
                if varName in ('time', 'height', 'longitude', 'latitude'):
                    continue

                yAxisVar = varName
                yAxisFloatData = working_set[varName]

                # Generate the plot

                ax.plot(xAxisFloatData, yAxisFloatData, 'ro')
                ax.set_xlabel(xAxisVar)
                ax.set_ylabel(yAxisVar)
                ax.set_title(yAxisVar + ' vs ' + xAxisVar)
                ax.set_autoscale_on(False)

                # generate filename for the output image
                fileName = yAxisVar + '_vs_' + xAxisVar + '.png'
                # Save the figure to the in memory file
                canvas.print_figure(imgInMem, format="png")
                imgInMem.seek(0)

                # submit resulting table back using the out stream publisher
                msg = {
                    "viz_product_type": "matplotlib_graphs",
                    "data_product_id": self.data_product_id,
                    "image_obj": imgInMem.getvalue(),
                    "image_name": fileName
                }
                self.out_stream_pub.publish(msg)

                # clear the canvas for the next image
                ax.clear()
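
# --- Editor's illustrative sketch (not part of the original example) ---
# The rendering thread above uses the standard Agg pattern: draw onto a Figure,
# then print_figure() into an in-memory buffer instead of a file. A minimal
# standalone version of that pattern (Python 2, matching the example's imports):
import StringIO
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

def render_scatter_png(x, y, xlabel='time', ylabel='value'):
    fig = Figure()
    ax = fig.add_subplot(111)
    canvas = FigureCanvas(fig)
    ax.plot(x, y, 'ro')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title('%s vs %s' % (ylabel, xlabel))
    buf = StringIO.StringIO()
    canvas.print_figure(buf, format='png')  # renders PNG bytes into the buffer
    return buf.getvalue()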