def _register_service(self):

        definition = self.process_definition
        existing_services, _ = self.container.resource_registry.find_resources(
            restype="Service", name=definition.name)

        if len(existing_services) > 0:
            if len(existing_services) > 1:
                log.warning(
                    "There is more than one service object for %s. Using the first one"
                    % definition.name)
            service_id = existing_services[0]._id
        else:
            svc_obj = Service(name=definition.name,
                              exchange_name=definition.name)
            service_id, _ = self.container.resource_registry.create(svc_obj)

        svcdefs, _ = self.container.resource_registry.find_resources(
            restype="ServiceDefinition", name=definition.name)

        if svcdefs:
            try:
                self.container.resource_registry.create_association(
                    service_id, "hasServiceDefinition", svcdefs[0]._id)
            except BadRequest:
                log.warn(
                    "Failed to associate %s Service and ServiceDefinition. It probably exists.",
                    definition.name)
        else:
            log.error("Cannot find ServiceDefinition resource for %s",
                      definition.name)

        return service_id, definition.name
    def build_data_dict(self, rdt):
        np_dict = {}
        
        time_array = rdt[rdt.temporal_parameter]
        if time_array is None:
            raise ValueError("A granule needs a time array")
        for k,v in rdt.iteritems():
            # Sparse values are different and aren't constructed using NumpyParameterData
            if isinstance(rdt.param_type(k), SparseConstantType):
                value = v[0]
                if hasattr(value, 'dtype'):
                    value = np.asscalar(value)
                time_start = np.asscalar(time_array[0])
                np_dict[k] = ConstantOverTime(k, value, time_start=time_start, time_end=None) # From now on
                continue
            elif isinstance(rdt.param_type(k), CategoryType):
                log.warning("Category types temporarily unsupported")
                continue
            elif isinstance(rdt.param_type(k), RecordType):
                value = v
            else:
                value = v

            np_dict[k] = NumpyParameterData(k, value, time_array)

        return np_dict
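The loop above draws a line between sparse constants (one value that holds from a start time onward) and ordinary arrays that must align element-for-element with the time axis. A minimal sketch of that split with plain numpy follows; ConstantValue and ArrayValue are hypothetical stand-ins, not the coverage-model ConstantOverTime / NumpyParameterData classes.

import collections

import numpy as np

# Hypothetical stand-ins for the coverage-model parameter data classes.
ConstantValue = collections.namedtuple('ConstantValue', 'name value time_start')
ArrayValue = collections.namedtuple('ArrayValue', 'name values times')

times = np.array([0.0, 1.0, 2.0])
params = {'lat': np.array([45.0]),                 # sparse: one value for the window
          'temp': np.array([10.1, 10.3, 10.2])}    # dense: one value per time stamp

np_dict = {}
for name, values in params.items():
    if values.size == 1:                           # stand-in for the SparseConstantType check
        np_dict[name] = ConstantValue(name, values.item(), float(times[0]))
    else:
        np_dict[name] = ArrayValue(name, values, times)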
    def _register_service(self):

        definition = self.process_definition
        existing_services, _ = self.container.resource_registry.find_resources(
            restype="Service", name=definition.name)

        if len(existing_services) > 0:
            if len(existing_services) > 1:
                log.warning("There is more than one service object for %s. Using the first one" % definition.name)
            service_id = existing_services[0]._id
        else:
            svc_obj = Service(name=definition.name, exchange_name=definition.name)
            service_id, _ = self.container.resource_registry.create(svc_obj)

        svcdefs, _ = self.container.resource_registry.find_resources(
            restype="ServiceDefinition", name=definition.name)

        if svcdefs:
            try:
                self.container.resource_registry.create_association(
                    service_id, "hasServiceDefinition", svcdefs[0]._id)
            except BadRequest:
                log.warn("Failed to associate %s Service and ServiceDefinition. It probably exists.",
                    definition.name)
        else:
            log.error("Cannot find ServiceDefinition resource for %s",
                    definition.name)

        return service_id, definition.name
    def _register_service(self):
        if not self.process_definition_id:
            log.error("No process definition id. Not registering service")
            return

        if len(self.pds) < 1:
            log.error("Must have at least one PD available to register a service")
            return

        pd_name = self.pds[0]
        pd = ProcessDispatcherServiceClient(to_name=pd_name)
        definition = pd.read_process_definition(self.process_definition_id)

        existing_services, _ = self.container.resource_registry.find_resources(
                restype="Service", name=definition.name)

        if len(existing_services) > 0:
            if len(existing_services) > 1:
                log.warning("There is more than one service object for %s. Using the first one" % definition.name)
            service_id = existing_services[0]._id
        else:
            svc_obj = Service(name=definition.name, exchange_name=definition.name)
            service_id, _ = self.container.resource_registry.create(svc_obj)

        svcdefs, _ = self.container.resource_registry.find_resources(
                restype="ServiceDefinition", name=definition.name)

        if svcdefs:
            self.container.resource_registry.create_association(
                    service_id, "hasServiceDefinition", svcdefs[0]._id)
        else:
            log.error("Cannot find ServiceDefinition resource for %s",
                    definition.name)

        return service_id
Example #5
    def build_data_dict(self, rdt):
        np_dict = {}

        time_array = rdt[rdt.temporal_parameter]
        if time_array is None:
            raise ValueError("A granule needs a time array")
        for k, v in rdt.iteritems():
            # Sparse values are different and aren't constructed using NumpyParameterData
            if isinstance(rdt.param_type(k), SparseConstantType):
                value = v[0]
                if hasattr(value, 'dtype'):
                    value = np.asscalar(value)
                time_start = np.asscalar(time_array[0])
                np_dict[k] = ConstantOverTime(k,
                                              value,
                                              time_start=time_start,
                                              time_end=None)  # From now on
                continue
            elif isinstance(rdt.param_type(k), CategoryType):
                log.warning("Category types temporarily unsupported")
                continue
            elif isinstance(rdt.param_type(k), RecordType):
                value = v
            else:
                value = v

            np_dict[k] = NumpyParameterData(k, value, time_array)

        return np_dict
 def _req_callback(self, result):
     """
     Terrestrial server callback for result receipts.
     Pop pending command, append result and publish.
     """
     log.debug('Terrestrial server got result: %s', str(result))
     
     try:
         id = result['command_id']
         _result = result['result']
         cmd = self._tx_dict.pop(id)
         cmd.time_completed = time.time()
         cmd.result = _result
         if cmd.resource_id:
             origin = cmd.resource_id
         elif cmd.svc_name:
             origin = cmd.svc_name + self._xs_name
         else:
             raise KeyError
         
         self._publisher.publish_event(
                                 event_type='RemoteCommandResult',
                                 command=cmd,
                                 origin=origin)
         log.debug('Published remote result: %s to origin %s.', str(result),
                   str(origin))
     except KeyError:
         log.warning('Error publishing remote result: %s.', str(result))
    def retrieve_function_and_define_args(self, stream_id, dataprocess_id):
        import importlib
        argument_list = {}
        function = ''
        context = {}

        #load the details of this data process
        dataprocess_info = self._dataprocesses[dataprocess_id]


        try:
            #todo: load once into a 'set' of modules?
            #load the associated transform function
            egg_uri = dataprocess_info.get_safe('uri','')
            if egg_uri:
                egg = self.download_egg(egg_uri)
                import pkg_resources
                pkg_resources.working_set.add_entry(egg)
            else:
                log.warning('No uri provided for module in data process definition.')

            module = importlib.import_module(dataprocess_info.get_safe('module', '') )

            function = getattr(module, dataprocess_info.get_safe('function','') )
            arguments = dataprocess_info.get_safe('arguments', '')
            argument_list = dataprocess_info.get_safe('argument_map', {})

            if self.has_context_arg(function,argument_list ):
                context = self.create_context_arg(stream_id, dataprocess_id)

        except ImportError:
            log.error('Error running transform')
        log.debug('retrieve_function_and_define_args  argument_list: %s',argument_list)
        return function, argument_list, context
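The lookup itself is standard-library dynamic loading: optionally add a downloaded egg to the working set, import the module by its dotted name, then fetch the function with getattr. A minimal sketch of that import-and-getattr pattern, using the built-in math module instead of an egg:

import importlib


def load_callable(module_name, function_name):
    # Import the module by its dotted name and fetch the named attribute;
    # a bad name raises ImportError / AttributeError just as the worker would see.
    module = importlib.import_module(module_name)
    return getattr(module, function_name)


func = load_callable('math', 'sqrt')
print(func(16.0))   # 4.0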
Example #8
 def tearDown(self):
     self.waiter.stop()
     try:
         self.container.terminate_process(self._haa_pid)
     except BadRequest:
         log.warning("Couldn't terminate HA Agent in teardown (May have been terminated by a test)")
     self._stop_container()
Example #9
 def acquire_samples(self, max_samples=0):
     log.debug('Orb_DataAgentPlugin.acquire_samples')
     if os.path.exists(self.data_dir):
         files = os.listdir(self.data_dir)
         cols = []
         rows = []
         for f in files:
             fpath = self.data_dir + f
             with open(fpath) as fh:
                 try:
                     pkt = json.load(fh)
                     if not cols:
                         cols = [str(c['chan']) for c in pkt['channels']]              
                     row = self._extract_row(pkt, cols)
                     dims = [len(c) for c in row[:3]]
                     if all(d==400 for d in dims):
                         rows.append(row)
                     else:
                         log.warning('Inconsistent dimensions %s, %s' % (str(dims), fpath))
                     fh.close()
                     os.remove(fpath)
                     log.info('sample: ' + fpath)
                 except Exception as ex:
                     log.warn(ex)
                     log.warn('Incomplete packet %s' % fpath)
                     
         if cols and rows:
             coltypes = {}
             for c in cols:
                 coltypes[c] = '400i4'
             cols.append('time')
             samples = dict(cols=cols, data=rows, coltypes=coltypes)
             return samples
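The parser only assumes that each JSON packet carries a 'channels' list whose entries have at least a 'chan' name (plus whatever _extract_row pulls out of them). A self-contained sketch of reading such a file under that assumed shape; the packet contents are invented for illustration:

import json
import os
import tempfile

# Hypothetical packet with the minimal shape the plugin relies on.
pkt = {'channels': [{'chan': 'HHZ', 'data': list(range(400))},
                    {'chan': 'HHN', 'data': list(range(400))}]}

tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'pkt.json')
with open(path, 'w') as fh:
    json.dump(pkt, fh)

with open(path) as fh:
    loaded = json.load(fh)
cols = [str(c['chan']) for c in loaded['channels']]
print(cols)          # ['HHZ', 'HHN']

os.remove(path)
os.rmdir(tmpdir)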
    def _req_callback(self, result):
        """
        Terrestrial server callback for result receipts.
        Pop pending command, append result and publish.
        """
        log.debug('Terrestrial server got result: %s', str(result))

        try:
            id = result['command_id']
            _result = result['result']
            cmd = self._tx_dict.pop(id)
            cmd.time_completed = time.time()
            cmd.result = _result
            if cmd.resource_id:
                origin = cmd.resource_id
            elif cmd.svc_name:
                origin = cmd.svc_name + self._xs_name
            else:
                raise KeyError

            self._publisher.publish_event(event_type='RemoteCommandResult',
                                          command=cmd,
                                          origin=origin)
            log.debug('Published remote result: %s to origin %s.', str(result),
                      str(origin))
        except KeyError:
            log.warning('Error publishing remote result: %s.', str(result))
Example #11
    def acquire_samples(self, max_samples=0):
        log.debug('Orb_DataAgentPlugin.acquire_samples')
        if os.path.exists(self.data_dir):
            files = os.listdir(self.data_dir)
            cols = []
            rows = []
            for f in files:
                fpath = self.data_dir + f
                with open(fpath) as fh:
                    try:
                        pkt = json.load(fh)
                        if not cols:
                            cols = [str(c['chan']) for c in pkt['channels']]
                        row = self._extract_row(pkt, cols)
                        dims = [len(c) for c in row[:3]]
                        if all(d == 400 for d in dims):
                            rows.append(row)
                        else:
                            log.warning('Inconsistent dimensions %s, %s' %
                                        (str(dims), fpath))
                        fh.close()
                        os.remove(fpath)
                        log.info('sample: ' + fpath)
                    except Exception as ex:
                        log.warn(ex)
                        log.warn('Incomplete packet %s' % fpath)

            if cols and rows:
                coltypes = {}
                for c in cols:
                    coltypes[c] = '400i4'
                cols.append('time')
                samples = dict(cols=cols, data=rows, coltypes=coltypes)
                return samples
Example #12
 def tearDown(self):
     self.waiter.stop()
     try:
         self.container.terminate_process(self._haa_pid)
     except BadRequest:
         log.warning("Couldn't terminate HA Agent in teardown (May have been terminated by a test)")
     self._stop_container()
Example #13
 def _result_complete(self, result):
     """
     """
     if self._client:
         log.debug('Remote endpoint enqueuing result %s.', str(result))
         self._client.enqueue(result)
     else:
         log.warning('Received a result but no client available to transmit.')
    def on_start(self):
        super(NotificationWorker, self).on_start()

        self.smtp_client = setting_up_smtp_client()

        # ------------------------------------------------------------------------------------
        # Start by loading the user info and reverse user info dictionaries
        # ------------------------------------------------------------------------------------

        try:
            self.user_info = self.load_user_info()
            self.reverse_user_info = calculate_reverse_user_info(self.user_info)

            log.info("On start up, notification workers loaded the following user_info dictionary: %s" % self.user_info)
            log.info("The calculated reverse user info: %s" % self.reverse_user_info)

        except NotFound as exc:
            if exc.message.find("users_index") > -1:
                log.warning("Notification workers found on start up that users_index have not been loaded yet.")
            else:
                raise NotFound(exc.message)

        # ------------------------------------------------------------------------------------
        # Create an event subscriber for Reload User Info events
        # ------------------------------------------------------------------------------------

        def reload_user_info(event_msg, headers):
            """
            Callback method for the subscriber to ReloadUserInfoEvent
            """

            notification_id = event_msg.notification_id
            log.info(
                "(Notification worker received a ReloadNotificationEvent. The relevant notification_id is %s"
                % notification_id
            )

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info = calculate_reverse_user_info(self.user_info)
            self.test_hook(self.user_info, self.reverse_user_info)

            log.debug("After a reload, the user_info: %s" % self.user_info)
            log.debug("The recalculated reverse_user_info: %s" % self.reverse_user_info)

        # the subscriber for the ReloadUserInfoEvent
        self.reload_user_info_subscriber = EventSubscriber(event_type="ReloadUserInfoEvent", callback=reload_user_info)
        self.reload_user_info_subscriber.start()

        # ------------------------------------------------------------------------------------
        # Create an event subscriber for all events that are of interest for notifications
        # ------------------------------------------------------------------------------------

        self.event_subscriber = EventSubscriber(queue_name="uns_queue", callback=self.process_event)
        self.event_subscriber.start()
Example #15
 def _result_complete(self, result):
     """
     """
     if self._client:
         log.debug('Remote endpoint enqueuing result %s.', str(result))
         self._client.enqueue(result)
     else:
         log.warning(
             'Received a result but no client available to transmit.')
    def on_sample(self, sample):
        
        try:
            stream_name = sample['stream_name']
            self._stream_buffers[stream_name].insert(0, sample)
            if not self._stream_greenlets[stream_name]:
                self._publish_stream_buffer(stream_name)

        except KeyError:
            log.warning('Instrument agent %s received sample with bad stream name %s.',
                      self._agent._proc_name, stream_name)
 def _stop_pagent(self):
     """
     Stop the port agent.
     """
     if self._pagent:
         pid = self._pagent.get_pid()
         if pid:
             log.info('Stopping pagent pid %i.', pid)
             self._pagent.stop()
         else:
             log.warning('No port agent running.')
    def on_start(self):
        super(NotificationWorker,self).on_start()

        self.reverse_user_info = None
        self.user_info = None

        #------------------------------------------------------------------------------------
        # Start by loading the user info and reverse user info dictionaries
        #------------------------------------------------------------------------------------

        try:
            self.user_info = self.load_user_info()
            self.reverse_user_info =  calculate_reverse_user_info(self.user_info)

            log.debug("On start up, notification workers loaded the following user_info dictionary: %s" % self.user_info)
            log.debug("The calculated reverse user info: %s" % self.reverse_user_info )

        except NotFound as exc:
            if exc.message.find('users_index') > -1:
                log.warning("Notification workers found on start up that users_index have not been loaded yet.")
            else:
                raise NotFound(exc.message)

        #------------------------------------------------------------------------------------
        # Create an event subscriber for Reload User Info events
        #------------------------------------------------------------------------------------

        def reload_user_info(event_msg, headers):
            '''
            Callback method for the subscriber to ReloadUserInfoEvent
            '''

            notification_id =  event_msg.notification_id
            log.debug("(Notification worker received a ReloadNotificationEvent. The relevant notification_id is %s" % notification_id)

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info =  calculate_reverse_user_info(self.user_info)
            self.test_hook(self.user_info, self.reverse_user_info)

            log.debug("After a reload, the user_info: %s" % self.user_info)
            log.debug("The recalculated reverse_user_info: %s" % self.reverse_user_info)

        # the subscriber for the ReloadUserInfoEvent
        self.reload_user_info_subscriber = EventSubscriber(
            event_type="ReloadUserInfoEvent",
            origin='UserNotificationService',
            callback=reload_user_info
        )

        self.add_endpoint(self.reload_user_info_subscriber)
    def on_sample(self, sample):
        
        try:
            stream_name = sample['stream_name']
            self._stream_buffers[stream_name].insert(0, sample)
            if not self._stream_greenlets[stream_name]:
                self._publish_stream_buffer(stream_name)

        except KeyError:
            log.warning('Instrument agent %s received sample with bad stream name %s.',
                      self._agent._proc_name, stream_name)
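Note that if the 'stream_name' lookup itself raises the KeyError, the handler's log call references an unbound stream_name. A hedged rewrite of the same method that avoids that pitfall (a sketch only, assuming the buffer and greenlet dicts share keys):

    def on_sample(self, sample):
        # Sketch: look the stream name up defensively so the warning can
        # always report it, instead of catching KeyError after the fact.
        stream_name = sample.get('stream_name')
        if stream_name not in self._stream_buffers:
            log.warning('Instrument agent %s received sample with bad stream name %s.',
                        self._agent._proc_name, stream_name)
            return
        self._stream_buffers[stream_name].insert(0, sample)
        if not self._stream_greenlets[stream_name]:
            self._publish_stream_buffer(stream_name)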
Example #20
        def reload_user_info(event_msg, headers):
            '''
            Callback method for the subscriber to ReloadUserInfoEvent
            '''

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info =  calculate_reverse_user_info(self.user_info)
            self.test_hook(self.user_info, self.reverse_user_info)
Example #21
    def add_granule(self, stream_id, rdt):
        ''' Appends the granule's data to the coverage and persists it. '''
        if stream_id in self._bad_coverages:
            log.info(
                'Message attempting to be inserted into bad coverage: %s',
                DatasetManagementService._get_coverage_path(
                    self.get_dataset(stream_id)))

        #--------------------------------------------------------------------------------
        # Coverage determination and appending
        #--------------------------------------------------------------------------------
        dataset_id = self.get_dataset(stream_id)
        if not dataset_id:
            log.error('No dataset could be determined on this stream: %s',
                      stream_id)
            return

        try:
            coverage = self.get_coverage(stream_id)
        except IOError as e:
            log.error(
                "Couldn't open coverage: %s",
                DatasetManagementService._get_coverage_path(
                    self.get_dataset(stream_id)))
            raise CorruptionError(e.message)

        if not coverage:
            log.error(
                'Could not persist coverage from granule, coverage is None')
            return
        #--------------------------------------------------------------------------------
        # Actual persistence
        #--------------------------------------------------------------------------------

        if rdt[rdt.temporal_parameter] is None:
            log.warning("Empty granule received")
            return

        # Parse the RDT and set the values in the coverage
        self.insert_values(coverage, rdt, stream_id)

        # Force the data to be flushed
        DatasetManagementService._save_coverage(coverage)

        self.update_metadata(dataset_id, rdt)

        try:
            window = rdt[rdt.temporal_parameter][[0, -1]]
            window = window.tolist()
        except (ValueError, IndexError):
            window = None
        self.dataset_changed(dataset_id, window)
    def cancel(self, process_id):
        self.container.proc_manager.terminate_process(process_id)
        log.debug('PD: Terminated Process (%s)', process_id)
        try:
            self._remove_process(process_id)
        except ValueError:
            log.warning("PD: No record of %s to remove?" % process_id)

        self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
            origin=process_id, origin_type="DispatchedProcess",
            state=ProcessStateEnum.TERMINATE)

        return True
 def get_stored_values(self, lookup_value):
     if not self.new_lookups.empty():
         new_values = self.new_lookups.get()
         self.lookup_docs = new_values + self.lookup_docs
     lookup_value_document_keys = self.lookup_docs
     for key in lookup_value_document_keys:
         try:
             document = self.stored_value_manager.read_value(key)
             if lookup_value in document:
                 return document[lookup_value]
         except NotFound:
             log.warning('Specified lookup document does not exist')
     return None
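Order matters here: keys delivered on the new_lookups queue are drained once and searched ahead of the previously known documents. A stand-alone sketch of that merge-then-scan pattern with a plain dict standing in for StoredValueManager:

try:
    from queue import Queue          # Python 3
except ImportError:
    from Queue import Queue          # Python 2

store = {'doc_a': {'offset': 1.5}, 'doc_b': {'gain': 2.0}}
lookup_docs = ['doc_a']
new_lookups = Queue()
new_lookups.put(['doc_b'])           # freshly announced document keys


def get_stored_value(lookup_value):
    global lookup_docs
    if not new_lookups.empty():
        lookup_docs = new_lookups.get() + lookup_docs   # newest keys are searched first
    for key in lookup_docs:
        document = store.get(key)    # stand-in for StoredValueManager.read_value
        if document and lookup_value in document:
            return document[lookup_value]
    return None


print(get_stored_value('gain'))      # 2.0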
 def get_stored_values(self, lookup_value):
     if not self.new_lookups.empty():
         new_values = self.new_lookups.get()
         self.lookup_docs = new_values + self.lookup_docs
     lookup_value_document_keys = self.lookup_docs
     for key in lookup_value_document_keys:
         try:
             document = self.stored_value_manager.read_value(key)
             if lookup_value in document:
                 return document[lookup_value] 
         except NotFound:
             log.warning('Specified lookup document does not exist')
     return None
Example #25
    def load_data_process(self, stream_id=""):

        dpms_client = DataProcessManagementServiceClient()

        dataprocess_details_list = dpms_client.read_data_process_for_stream(
            stream_id)

        dataprocess_ids = []
        #this returns a list of data process info dicts
        for dataprocess_details in dataprocess_details_list:

            dataprocess_details = DotDict(dataprocess_details or {})
            dataprocess_id = dataprocess_details.dataprocess_id

            #set metrics attributes
            dataprocess_details.granule_counter = 0

            self._dataprocesses[dataprocess_id] = dataprocess_details
            log.debug('load_data_process  dataprocess_id: %s', dataprocess_id)
            log.debug('load_data_process  dataprocess_details: %s',
                      dataprocess_details)

            # validate details
            # if no out-stream info is available, log a warning, but the TF may still publish an event, so proceed
            if not dataprocess_details.out_stream_def or not dataprocess_details.output_param:
                log.warning(
                    'No output stream details provided for data process %s, will not publish a granule',
                    dataprocess_id)

            #add the stream id to the map
            if 'in_stream_id' in dataprocess_details:
                if dataprocess_details['in_stream_id'] in self._streamid_map:
                    (self._streamid_map[dataprocess_details['in_stream_id']]
                     ).append(dataprocess_id)
                else:
                    self._streamid_map[dataprocess_details['in_stream_id']] = [
                        dataprocess_id
                    ]
            #todo: add transform worker id
            self.event_publisher.publish_event(
                origin=dataprocess_id,
                origin_type='DataProcess',
                status=DataProcessStatusType.NORMAL,
                description='data process loaded into transform worker')

            #create a publisher for output stream
            self.create_publisher(dataprocess_id, dataprocess_details)
            dataprocess_ids.append(dataprocess_id)

        return dataprocess_ids
def process_gateway_agent_request(resource_id, operation):
    try:
        if not resource_id:
            raise BadRequest("Am agent resource_id was not found in the URL")
        if operation == '':
            raise BadRequest("An agent operation was not specified in the URL")

        # Ensure there is no unicode
        resource_id = str(resource_id)
        operation = str(operation)

        # Retrieve json data from HTTP Post payload
        json_params = None
        if request.method == "POST":
            payload = request.form['payload']

            json_params = json_loads(str(payload))

            if 'agentRequest' not in json_params:
                raise Inconsistent("The JSON request is missing the 'agentRequest' key in the request")
            if 'agentId' not in json_params['agentRequest']:
                raise Inconsistent("The JSON request is missing the 'agentRequest' key in the request")
            if 'agentOp' not in json_params['agentRequest']:
                raise Inconsistent("The JSON request is missing the 'agentOp' key in the request")
            if json_params['agentRequest']['agentId'] != resource_id:
                raise Inconsistent("Target agent id in the JSON request (%s) does not match agent id in URL (%s)" % (
                    str(json_params['agentRequest']['agentId']), resource_id))
            if json_params['agentRequest']['agentOp'] != operation:
                raise Inconsistent("Target agent operation in the JSON request (%s) does not match agent operation in URL (%s)" % (
                    str(json_params['agentRequest']['agentOp']), operation))

        resource_agent = ResourceAgentClient(resource_id, process=service_gateway_instance)

        param_list = create_parameter_list('agentRequest', 'resource_agent', ResourceAgentProcessClient, operation, json_params)

        # Validate requesting user and expiry and add governance headers
        ion_actor_id, expiry = get_governance_info_from_request('agentRequest', json_params)
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)
        param_list['headers'] = build_message_headers(ion_actor_id, expiry)

        methodToCall = getattr(resource_agent, operation)
        result = methodToCall(**param_list)

        return gateway_json_response(result)

    except Exception, e:
        if isinstance(e, NotFound):
            log.warning('The agent instance for id %s is not found.' % resource_id)
        return build_error_response(e)
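The POST body the gateway validates must carry an 'agentRequest' object whose 'agentId' and 'agentOp' match the resource id and operation in the URL. A minimal payload that passes those checks (the id and operation values are illustrative only):

import json

payload = {
    'agentRequest': {
        'agentId': 'resource_id_123',     # must equal the resource_id in the URL
        'agentOp': 'execute_resource',    # must equal the operation in the URL
    }
}
json_params = json.loads(json.dumps(payload))
assert 'agentRequest' in json_params
assert json_params['agentRequest']['agentId'] == 'resource_id_123'
assert json_params['agentRequest']['agentOp'] == 'execute_resource'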
Example #27
    def tearDown(self):

        new_policy = {'preserve_n': 0}
        self.haa_client.reconfigure_policy(new_policy)

        self.await_ha_state('STEADY')
        self.assertEqual(len(self.get_running_procs()), 0)

        self.waiter.stop()
        try:
            self._kill_haagent()
        except BadRequest:
            log.warning("Couldn't terminate HA Agent in teardown (May have been terminated by a test)")
        self.container.resource_registry.delete(self.service_def_id, del_associations=True)
        self._stop_container()
    def _restart_transform(self, transform_id):
        transform = self.clients.resource_registry.read(transform_id)
        configuration = transform.configuration
        proc_def_ids,other = self.clients.resource_registry.find_objects(subject=transform_id,predicate=PRED.hasProcessDefinition,id_only=True)

        if len(proc_def_ids) < 1:
            log.warning('Transform did not have a correct process definition.')
            return

        pid = self.clients.process_dispatcher.schedule_process(
            process_definition_id=proc_def_ids[0],
            configuration=configuration
        )

        transform.process_id = pid
        self.clients.resource_registry.update(transform)
    def on_sample_mult(self, sample_list):
        """
        Enqueues a list of granules and publishes them
        """
        streams = set()
        for sample in sample_list:
            try:
                stream_name = sample['stream_name']
                self._stream_buffers[stream_name].insert(0, sample)
                streams.add(stream_name)
            except KeyError:
                log.warning('Instrument agent %s received sample with bad stream name %s.',
                          self._agent._proc_name, stream_name)

        for stream_name in streams:
            if not self._stream_greenlets[stream_name]:
                self._publish_stream_buffer(stream_name)
    def add_granule(self,stream_id, rdt):
        ''' Appends the granule's data to the coverage and persists it. '''
        if stream_id in self._bad_coverages:
            log.info('Message attempting to be inserted into bad coverage: %s',
                     DatasetManagementService._get_coverage_path(self.get_dataset(stream_id)))
            
        #--------------------------------------------------------------------------------
        # Coverage determination and appending
        #--------------------------------------------------------------------------------
        dataset_id = self.get_dataset(stream_id)
        if not dataset_id:
            log.error('No dataset could be determined on this stream: %s', stream_id)
            return

        try:
            coverage = self.get_coverage(stream_id)
        except IOError as e:
            log.error("Couldn't open coverage: %s",
                      DatasetManagementService._get_coverage_path(self.get_dataset(stream_id)))
            raise CorruptionError(e.message)

        if not coverage:
            log.error('Could not persist coverage from granule, coverage is None')
            return
        #--------------------------------------------------------------------------------
        # Actual persistence
        #--------------------------------------------------------------------------------

        if rdt[rdt.temporal_parameter] is None:
            log.warning("Empty granule received")
            return

        # Parse the RDT and set the values in the coverage
        self.insert_values(coverage, rdt, stream_id)
        
        # Force the data to be flushed
        DatasetManagementService._save_coverage(coverage)

        self.update_metadata(dataset_id, rdt)

        try:
            window = rdt[rdt.temporal_parameter][[0,-1]]
            window = window.tolist()
        except (ValueError, IndexError):
            window = None
        self.dataset_changed(dataset_id, window)
    def on_sample_mult(self, sample_list):
        """
        Enqueues a list of granules and publishes them
        """
        streams = set()
        for sample in sample_list:
            try:
                stream_name = sample['stream_name']
                self._stream_buffers[stream_name].insert(0, sample)
                streams.add(stream_name)
            except KeyError:
                log.warning('Instrument agent %s received sample with bad stream name %s.',
                          self._agent._proc_name, stream_name)

        for stream_name in streams:
            if not self._stream_greenlets[stream_name]:
                self._publish_stream_buffer(stream_name)
        def reload_user_info(event_msg, headers):
            """
            Callback method for the subscriber to ReloadUserInfoEvent
            """

            notification_id =  event_msg.notification_id
            log.debug("(UNS instance) received a ReloadNotificationEvent. The relevant notification_id is %s" % notification_id)

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info =  calculate_reverse_user_info(self.user_info)

            log.debug("(UNS instance) After a reload, the user_info: %s" % self.user_info)
            log.debug("(UNS instance) The recalculated reverse_user_info: %s" % self.reverse_user_info)
Example #33
    def _results_from_response(self, response, id_only):
        deserializer = IonObjectDeserializer(obj_registry=get_obj_registry())
        if not (response.has_key('hits') and response['hits'].has_key('hits')):
            return []

        hits = response['hits']['hits']

        if len(hits) > 0:
            if len(hits) >= SEARCH_BUFFER_SIZE:
                log.warning("Query results exceeded search buffer limitations")
                self.raise_search_buffer_exceeded()
            if id_only:
                return [str(i['_id']) for i in hits]
            results = map(deserializer.deserialize, hits)
            return results

        else:
            return []
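The response being walked is the usual Elasticsearch envelope: a top-level 'hits' dict containing a 'hits' list whose entries carry '_id' plus the document body. A small sketch of pulling ids out of that shape, with a hand-built envelope for illustration:

response = {'hits': {'hits': [
    {'_id': 'abc123', '_source': {'name': 'CTD parsed'}},
    {'_id': 'def456', '_source': {'name': 'CTD raw'}},
]}}

hits = response.get('hits', {}).get('hits', [])
ids = [str(h['_id']) for h in hits]
print(ids)   # ['abc123', 'def456']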
Example #34
    def _results_from_response(self, response, id_only):
        deserializer = IonObjectDeserializer(obj_registry=get_obj_registry())
        if not (response.has_key('hits') and response['hits'].has_key('hits')):
            return []

        hits = response['hits']['hits']
       
        if len(hits) > 0:
            if len(hits) >= SEARCH_BUFFER_SIZE:
                log.warning("Query results exceeded search buffer limitations")
                self.raise_search_buffer_exceeded()
            if id_only:
                return [str(i['_id']) for i in hits]
            results = map(deserializer.deserialize,hits)
            return results
        
        else:
            return []
Example #35
    def tearDown(self):

        new_policy = {'preserve_n': 0}
        self.haa_client.reconfigure_policy(new_policy)

        self.assertEqual(len(self.get_running_procs()), 0)
        self.await_ha_state('STEADY')

        self.waiter.stop()
        try:
            self._kill_haagent()
        except BadRequest:
            log.warning(
                "Couldn't terminate HA Agent in teardown (May have been terminated by a test)"
            )
        self.container.resource_registry.delete(self.service_def_id,
                                                del_associations=True)
        self._stop_container()
Example #36
        def restart_transform(transform_id):
            transform = self.clients.resource_registry.read(transform_id)
            configuration = transform.configuration
            proc_def_ids, other = self.clients.resource_registry.find_objects(
                subject=transform_id,
                predicate=PRED.hasProcessDefinition,
                id_only=True)

            if len(proc_def_ids) < 1:
                log.warning(
                    'Transform did not have a correct process definition.')
                return

            pid = self.clients.process_dispatcher.schedule_process(
                process_definition_id=proc_def_ids[0],
                configuration=configuration)

            transform.process_id = pid
            self.clients.resource_registry.update(transform)
Example #37
    def acquire_samples(self, max_samples=0):
        log.debug('CDIP_DataAgentPlugin.acquire_samples')

        # Read server, extract last sample.
        data = requests.get(self.streaming_args.url)
        m = None
        for m in re.finditer(pattern, data.text, flags=re.MULTILINE):
            pass
        if not m:
            log.warning('CDIP_DataAgentPlugin.acquire_samples: No data found.')
            return None

        year = int(m.group(1))
        month = int(m.group(2))
        day = int(m.group(3))
        hour = int(m.group(4))
        minute = int(m.group(5))
        Hs = float(m.group(6))
        Tp = float(m.group(7))
        Dp = int(m.group(8))
        Ta = float(m.group(9))
        Temp = float(m.group(10))

        # Create sample.
        # [ntp64_ts, Hs, Tp, Dp, Ta, Temp]
        # ['\xdb\x07\x00,\x00\x00\x00\x00', 2.66, 9.09, 328, 6.67, 12.2]
        dt = datetime.datetime(year, month, day, hour, minute)
        ts = NTP4Time(dt).to_ntp64()
        sample = [ts, Hs, Tp, Dp, Ta, Temp]

        # Compare to last reading.
        if self.last_sample == sample:
            log.debug('CDIP_DataAgentPlugin.acquire_samples: No new data.')
            return None

        # Update, pack and return.
        log.debug('CDIP_DataAgentPlugin.acquire_samples: Got new data.')
        log.debug('CDIP data: %s' % str(sample))
        self.last_sample = sample
        sample_desc = dict(cols=["time", "Hs", "Tp", "Dp", "Ta", "Temp"],
                           data=[sample])
        return sample_desc
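Neither the module-level pattern nor the CDIP page layout is shown in this snippet; the parse simply keeps the last finditer match and converts its ten groups (date parts plus Hs, Tp, Dp, Ta, Temp). A sketch of that keep-the-last-match idiom against a purely hypothetical line format and pattern:

import datetime
import re

# Hypothetical line format and pattern -- the real module-level `pattern` is not shown.
pattern = (r'(\d{4}) (\d{2}) (\d{2}) (\d{2}) (\d{2})\s+'
           r'([\d.]+)\s+([\d.]+)\s+(\d+)\s+([\d.]+)\s+([\d.]+)')
text = ("2014 01 02 03 30   2.66  9.09  328  6.67  12.2\n"
        "2014 01 02 04 00   2.71  9.10  330  6.70  12.1\n")

m = None
for m in re.finditer(pattern, text, flags=re.MULTILINE):
    pass                                  # keep only the last (most recent) match
if m:
    dt = datetime.datetime(*[int(m.group(i)) for i in range(1, 6)])
    sample = [dt, float(m.group(6)), float(m.group(7)), int(m.group(8)),
              float(m.group(9)), float(m.group(10))]
    print(sample)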
Example #38
    def acquire_samples(self, max_samples=0):
        log.debug('CDIP_DataAgentPlugin.acquire_samples')

        # Read server, extract last sample.
        data = requests.get(self.streaming_args.url)
        m = None
        for m in re.finditer(pattern, data.text, flags=re.MULTILINE):
            pass
        if not m:
            log.warning('CDIP_DataAgentPlugin.acquire_samples: No data found.')
            return None

        year = int(m.group(1))
        month = int(m.group(2))
        day = int(m.group(3))
        hour = int(m.group(4))
        minute = int(m.group(5))
        Hs = float(m.group(6))
        Tp = float(m.group(7))
        Dp = int(m.group(8))
        Ta = float(m.group(9))
        Temp = float(m.group(10))

        # Create sample.
        # [ntp64_ts, Hs, Tp, Dp, Ta, Temp]
        # ['\xdb\x07\x00,\x00\x00\x00\x00', 2.66, 9.09, 328, 6.67, 12.2]
        dt = datetime.datetime(year, month, day, hour, minute)
        ts = NTP4Time(dt).to_ntp64()
        sample = [ts, Hs, Tp, Dp, Ta, Temp]

        # Compare to last reading.
        if self.last_sample == sample:
            log.debug('CDIP_DataAgentPlugin.acquire_samples: No new data.')
            return None

        # Update, pack and return.
        log.debug('CDIP_DataAgentPlugin.acquire_samples: Got new data.')
        log.debug('CDIP data: %s' % str(sample))
        self.last_sample = sample
        sample_desc = dict(cols=["time", "Hs", "Tp", "Dp", "Ta", "Temp"],
            data=[sample])
        return sample_desc
Example #39
    def load_user_info(self):
        '''
        Method to load the user info dictionary used by the notification workers and the UNS

        @retval user_info dict
        '''

        users, _ = self.resource_registry.find_resources(restype=RT.UserInfo)

        user_info = {}

        if not users:
            return {}

        for user in users:
            notifications_disabled = False
            notifications_daily_digest = False

            notifications = self.get_user_notifications(user_info_id=user)

            for variable in user.variables:
                if type(variable) is dict and variable.has_key('name'):

                    if variable['name'] == 'notifications_daily_digest':
                        notifications_daily_digest = variable['value']

                    if variable['name'] == 'notifications_disabled':
                        notifications_disabled = variable['value']
                else:
                    log.warning(
                        'Invalid variables attribute on UserInfo instance. UserInfo: %s',
                        user)

            user_info[user._id] = {
                'user_contact': user.contact,
                'notifications': notifications,
                'notifications_daily_digest': notifications_daily_digest,
                'notifications_disabled': notifications_disabled
            }

        return user_info
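Each UserInfo resource carries its notification preferences as a list of {'name': ..., 'value': ...} dicts in user.variables; anything else falls into the warning branch. A stand-alone sketch of extracting the two flags from that shape:

variables = [
    {'name': 'notifications_daily_digest', 'value': True},
    {'name': 'notifications_disabled', 'value': False},
    'unexpected entry',                   # falls into the warning branch
]

notifications_daily_digest = False
notifications_disabled = False
for variable in variables:
    if isinstance(variable, dict) and 'name' in variable:
        if variable['name'] == 'notifications_daily_digest':
            notifications_daily_digest = variable['value']
        if variable['name'] == 'notifications_disabled':
            notifications_disabled = variable['value']
    else:
        print('Invalid variables entry: %r' % (variable,))

print(notifications_daily_digest)        # True
print(notifications_disabled)            # False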
    def on_start(self): #pragma no cover
        #--------------------------------------------------------------------------------
        # Explicit on_start
        #--------------------------------------------------------------------------------

        # Skip TransformStreamListener and go to StreamProcess to avoid the subscriber being created
        # We want explicit management of the thread and subscriber object for ingestion

        TransformStreamProcess.on_start(self)
        
        self.queue_name = self.CFG.get_safe('process.queue_name',self.id)
        self.subscriber = StreamSubscriber(process=self, exchange_name=self.queue_name, callback=self.receive_callback)
        self.thread_lock = RLock()
        
        #--------------------------------------------------------------------------------
        # Normal on_start after this point
        #--------------------------------------------------------------------------------

        BaseIngestionWorker.on_start(self)
        self._rpc_server = self.container.proc_manager._create_listening_endpoint(from_name=self.id, process=self)
        self.add_endpoint(self._rpc_server)

        self.event_publisher = EventPublisher(OT.DatasetModified)
        self.stored_value_manager = StoredValueManager(self.container)

        self.lookup_docs = self.CFG.get_safe('process.lookup_docs',[])
        self.input_product = self.CFG.get_safe('process.input_product','')
        self.qc_enabled = self.CFG.get_safe('process.qc_enabled', True)
        self.ignore_gaps = self.CFG.get_safe('service.ingestion.ignore_gaps', True)
        if not self.ignore_gaps:
            log.warning("Gap handling is not supported in release 2")
        self.ignore_gaps = True
        self.new_lookups = Queue()
        self.lookup_monitor = EventSubscriber(event_type=OT.ExternalReferencesUpdatedEvent, callback=self._add_lookups, auto_delete=True)
        self.add_endpoint(self.lookup_monitor)
        self.qc_publisher = EventPublisher(event_type=OT.ParameterQCEvent)
        self.connection_id = ''
        self.connection_index = None
        
        self.start_listener()
    def test_realtime_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product(
        )
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)

        #TODO - Need to add workflow creation for google data table
        vis_params = {}
        vis_token = self.vis_client.initiate_realtime_visualization(
            data_product_id=ctd_parsed_data_product_id,
            visualization_parameters=vis_params)

        #Trying to continue to receive messages in the queue
        gevent.sleep(10.0)  # Send some messages - don't care how many

        #TODO - find out what the actual return data type should be
        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
        if (vis_data):
            self.validate_google_dt_transform_results(vis_data)

        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many

        #Turning off after everything - since it is more representative of an always on stream of data!
        #todo remove the try except
        try:
            self.process_dispatcher.cancel_process(
                ctd_sim_pid
            )  # kill the ctd simulator process - that is enough data
        except:
            log.warning("cancelling process did not work")

        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)

        if vis_data:
            self.validate_google_dt_transform_results(vis_data)

        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)
    def load_data_process(self, stream_id=""):

        dpms_client = DataProcessManagementServiceClient()

        dataprocess_details_list = dpms_client.read_data_process_for_stream(stream_id)

        dataprocess_ids = []
        #this returns a list of data process info dicts
        for dataprocess_details in dataprocess_details_list:

            dataprocess_details = DotDict(dataprocess_details or {})
            dataprocess_id = dataprocess_details.dataprocess_id

            #set metrics attributes
            dataprocess_details.granule_counter = 0

            self._dataprocesses[dataprocess_id] = dataprocess_details
            log.debug('load_data_process  dataprocess_id: %s', dataprocess_id)
            log.debug('load_data_process  dataprocess_details: %s', dataprocess_details)

            # validate details
            # if no out-stream info is available, log a warning, but the TF may still publish an event, so proceed
            if not dataprocess_details.out_stream_def or not dataprocess_details.output_param:
                log.warning('No output stream details provided for data process %s, will not publish a granule', dataprocess_id)

            #add the stream id to the map
            if 'in_stream_id' in dataprocess_details:
                if dataprocess_details['in_stream_id'] in self._streamid_map:
                    (self._streamid_map[ dataprocess_details['in_stream_id'] ]).append(dataprocess_id)
                else:
                    self._streamid_map[ dataprocess_details['in_stream_id'] ]  = [dataprocess_id]
            #todo: add transform worker id
            self.event_publisher.publish_event(origin=dataprocess_id, origin_type='DataProcess', status=DataProcessStatusType.NORMAL,
                                               description='data process loaded into transform worker')

            #create a publisher for output stream
            self.create_publisher(dataprocess_id, dataprocess_details)
            dataprocess_ids.append(dataprocess_id)

        return dataprocess_ids
Example #43
        def reload_user_info(event_msg, headers):
            '''
            Callback method for the subscriber to ReloadUserInfoEvent
            '''

            notification_id = event_msg.notification_id
            log.debug(
                "(Notification worker received a ReloadNotificationEvent. The relevant notification_id is %s"
                % notification_id)

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info = calculate_reverse_user_info(
                self.user_info)
            self.test_hook(self.user_info, self.reverse_user_info)

            log.debug("After a reload, the user_info: %s" % self.user_info)
            log.debug("The recalculated reverse_user_info: %s" %
                      self.reverse_user_info)
    def load_user_info(self):
        '''
        Method to load the user info dictionary used by the notification workers and the UNS

        @retval user_info dict
        '''

        users, _ = self.resource_registry.find_resources(restype= RT.UserInfo)

        user_info = {}

        if not users:
            return {}

        for user in users:
            notifications = []
            notifications_disabled = False
            notifications_daily_digest = False

            log.debug('load_user_info: user.variables:  %s', user.variables)

            for variable in user.variables:
                if type(variable) is dict and variable.has_key('name'):
                    if variable['name'] == 'notifications':
                        notifications = variable['value']

                    if variable['name'] == 'notifications_daily_digest':
                        notifications_daily_digest = variable['value']

                    if variable['name'] == 'notifications_disabled':
                        notifications_disabled = variable['value']
                else:
                    log.warning('Invalid variables attribute on UserInfo instance. UserInfo: %s', user)

            user_info[user._id] = { 'user_contact' : user.contact, 'notifications' : notifications,
                                    'notifications_daily_digest' : notifications_daily_digest, 'notifications_disabled' : notifications_disabled}


        return user_info
Example #45
    def retrieve_function_and_define_args(self, stream_id, dataprocess_id):
        import importlib
        argument_list = {}
        function = ''
        context = {}

        #load the details of this data process
        dataprocess_info = self._dataprocesses[dataprocess_id]

        try:
            #todo: load once into a 'set' of modules?
            #load the associated transform function
            egg_uri = dataprocess_info.get_safe('uri', '')
            if egg_uri:
                egg = self.download_egg(egg_uri)
                import pkg_resources
                pkg_resources.working_set.add_entry(egg)
            else:
                log.warning(
                    'No uri provided for module in data process definition.')

            module = importlib.import_module(
                dataprocess_info.get_safe('module', ''))

            function = getattr(module,
                               dataprocess_info.get_safe('function', ''))
            arguments = dataprocess_info.get_safe('arguments', '')
            argument_list = dataprocess_info.get_safe('argument_map', {})

            if self.has_context_arg(function, argument_list):
                context = self.create_context_arg(stream_id, dataprocess_id)

        except ImportError:
            log.error('Error running transform')
        log.debug('retrieve_function_and_define_args  argument_list: %s',
                  argument_list)
        return function, argument_list, context
    def test_realtime_visualization(self):

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)


        #TODO - Need to add workflow creation for google data table
        vis_params ={}
        vis_token = self.vis_client.initiate_realtime_visualization(data_product_id=ctd_parsed_data_product_id, visualization_parameters=vis_params)

        #Trying to continue to receive messages in the queue
        gevent.sleep(10.0)  # Send some messages - don't care how many

        #TODO - find out what the actual return data type should be
        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)
        if (vis_data):
            self.validate_google_dt_transform_results(vis_data)

        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many


        #Turning off after everything - since it is more representative of an always on stream of data!
        #todo remove the try except
        try:
            self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data
        except:
            log.warning("cancelling process did not work")

        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)

        if vis_data:
            self.validate_google_dt_transform_results(vis_data)

        # Cleanup
        self.vis_client.terminate_realtime_visualization_data(vis_token)
def process_oms_event():
    if not request.data:
        log.warning('process_oms_event: invalid OMS event payload: %r', request.data)
        return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

    payload = json_loads(str(request.data))
    if not isinstance(payload, list):
        log.warning('process_oms_event: invalid OMS event payload: '
                    'expecting array but got: %r', payload)
        return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

    log.debug('process_oms_event: payload=%s', payload)

    event_publisher = EventPublisher()

    for obj in payload:
        for k in ['event_id', 'platform_id', 'message']:
            if k not in obj:
                log.warning('process_oms_event: invalid OMS event: %r missing. '
                            'Received object: %s', k, obj)
                #return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

        # note that the external event_id is captured in the sub_type field:
        evt = dict(
            event_type     = 'OMSDeviceStatusEvent',
            origin_type    = 'OMS Platform',
            origin         = obj.get('platform_id', 'platform_id NOT PROVIDED'),
            sub_type       = obj.get('event_id', 'event_id NOT PROVIDED'),
            description    = obj.get('message', ''),
            status_details = obj)
        try:
            event_publisher.publish_event(**evt)
            log.debug('process_oms_event: published: %s', evt)

        except Exception as e:
            log.exception('process_oms_event: could not publish OMS event: %s', evt)

    return gateway_json_response(OMS_ACCEPTED_RESPONSE)
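The handler expects the POST body to be a JSON array of event objects, each carrying at least 'event_id', 'platform_id', and 'message'. A minimal sketch of a payload that passes the checks above (all values are illustrative):

import json

body = json.dumps([
    {'event_id': '44.78',
     'platform_id': 'Node1A',
     'message': 'low battery voltage'},
])

payload = json.loads(body)
assert isinstance(payload, list)
for obj in payload:
    for k in ['event_id', 'platform_id', 'message']:
        assert k in obj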
    def test_realtime_visualization(self):
        assertions = self.assertTrue


        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='GoogleDT_Test_Workflow',description='Tests the workflow of converting stream data to Google DT')

        #Add a transformation process definition
        google_dt_procdef_id = self.create_google_dt_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=google_dt_procdef_id, persist_process_output_data=True)
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)


        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=20)


        ctd_sim_pid = self.start_sinusoidal_input_stream_process(ctd_stream_id)


        #TODO - Need to add workflow creation for google data table

        vis_params ={}
        vis_params['in_product_type'] = 'google_dt'
        vis_token = self.vis_client.initiate_realtime_visualization(data_product_id=workflow_product_id, visualization_parameters=vis_params)

        #Trying to continue to receive messages in the queue
        gevent.sleep(10.0)  # Send some messages - don't care how many

        #TODO - find out what the actual return data type should be
        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)

        print vis_data

        # Allow a few more messages to accumulate before shutting things down
        gevent.sleep(5.0)


        # Stop the simulator last, since that better represents an always-on stream of data
        # TODO: remove the try/except
        try:
            self.process_dispatcher.cancel_process(ctd_sim_pid) # kill the ctd simulator process - that is enough data
        except:
            log.warning("cancelling process did not work")
        vis_data = self.vis_client.get_realtime_visualization_data(vis_token)

        print vis_data

        self.vis_client.terminate_realtime_visualization_data(vis_token)

        #Stop the workflow processes
        self.workflowclient.terminate_data_process_workflow(workflow_id, False)  # Should test true at some point

        #Cleanup to make sure delete is correct.
        self.workflowclient.delete_workflow_definition(workflow_def_id)
Example #49
        def server_loop():
            """
            """
            log.debug('Server greenlet started.')
            
            while True:
                                                
                if not self._server:
                    self._start_sock()
                
                # Try to accept incoming message.
                # If nothing available, sleep the greenlet.
                # Never timeout.
                log.debug('Server awaiting request.')
                while True:
                    try:
                        request = self._server.recv_pyobj(NOBLOCK)
                        log.debug('Server got request %s.', str(request))
                        break
                    except ZMQError:
                        gevent.sleep(self._sleep)
    
                # Increment the receive count and retrieve any
                # instrumented test behaviors.
                self.recv_count += 1
                
                # Received a null request corresponding to a zmq interrupt.
                # Abandon request and restart server.
                if not request:
                    log.warning('Server got null request, abandoning request and restarting server socket.')
                    self._stop_sock()
                
                # Valid message received.
                # Try to send the message ack.
                # If socket not available, sleep and retry.
                # If timeout exceeded, abandon request and restart server.
                # If successful, send request to callback.
                else:
                    
                    # Grab the current test behavior if any.
                    test_behavior = self.test_behaviors.get(self.recv_count, None)
                    
                    # Run test behavior if any.
                    if test_behavior:

                        if test_behavior.type == R3PCTestBehavior.delay:
                            gevent.sleep(test_behavior.delay)
                                
                        if test_behavior.type == R3PCTestBehavior.stop:
                            if test_behavior.delay > 0:
                                gevent.sleep(test_behavior.delay)
                            self._stop_sock()
                            break
                        
                        if test_behavior.type == R3PCTestBehavior.restart:
                            if test_behavior.delay > 0:
                                gevent.sleep(test_behavior.delay)
                            self._stop_sock()
                        
                    sendtime = time.time()
                    while self._server:
                        try:
                            log.debug('Server sending ack.')
                            self._server.send_pyobj('OK', NOBLOCK)
                            log.debug('Server ack sent, calling callback.')
                            self._callback(request)
                            break
                        except ZMQError:
                            gevent.sleep(self._sleep)
                            if time.time() - sendtime > self._timeout:
                                log.warning('Server ack failed, abandoning request and restarting server socket.')
                                self._stop_sock()
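
As a side note, here is a self-contained sketch of how the instrumented test behaviors consumed by the loop above could look; Behavior and the type constants are stand-ins, since the real R3PCTestBehavior class is not shown in this snippet.

from collections import namedtuple

# Stand-in for R3PCTestBehavior: the loop above only needs .type and .delay.
Behavior = namedtuple('Behavior', ['type', 'delay'])
DELAY, STOP, RESTART = range(3)   # stand-ins for R3PCTestBehavior.delay/stop/restart

# test_behaviors is keyed by the receive count at which the behavior fires.
test_behaviors = {
    2: Behavior(type=DELAY, delay=1.5),    # delay the ack for the 2nd request
    5: Behavior(type=RESTART, delay=0),    # restart the socket after the 5th
}

recv_count = 2
test_behavior = test_behaviors.get(recv_count, None)
assert test_behavior is not None and test_behavior.type == DELAY
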
Example #50
    def execute_acquire_sample(self, *args):
        """
        Creates a copy of self._dh_config, creates a publisher, and spawns a greenlet to perform a data acquisition cycle
        If args[0] is a dict, any entries keyed with one of the 'PATCHABLE_CONFIG_KEYS' are used to patch the config
        Greenlet binds to BaseDataHandler._acquire_sample and passes the publisher and config
        Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource

        @parameter args First argument can be a config dictionary
        @throws IndexError if first argument is not a dictionary
        @throws ConfigurationError if required members aren't present
        @retval New ResourceAgentState (COMMAND)
        """
        log.debug('Executing acquire_sample: args = {0}'.format(args))

        # Make a copy of the config so modifications don't leak back into self._dh_config
        config = self._dh_config.copy()

        # Patch the config if mods are passed in
        try:
            config_mods = args[0]
            if not isinstance(config_mods, dict):
                raise IndexError()

            log.debug('Configuration modifications provided: {0}'.format(
                config_mods))
            for k in self._params['PATCHABLE_CONFIG_KEYS']:
                p = get_safe(config_mods, k)
                if p is not None:
                    config[k] = p

        except IndexError:
            log.info('No configuration modifications were provided')

        # Verify that there is a stream_id member in the config
        stream_id = get_safe(config, 'stream_id', None)
        if not stream_id:
            raise ConfigurationError(
                'Configuration does not contain required \'stream_id\' member')
        stream_route_param = get_safe(config, 'stream_route', None)

        if not stream_route_param:
            raise ConfigurationError(
                'Configuration does not contain required stream_route member')
        if isinstance(stream_route_param, StreamRoute):
            # process_dispatcher.spawn does not support IonObjects, only JSONable configuration
            # but many existing tests pass stream route directly
            log.warning(
                'stream_route config parameter: use of a StreamRoute object is deprecated, pass dict instead'
            )
            stream_route = stream_route_param
        elif isinstance(stream_route_param, dict):
            # approved method -- pass a dict with appropriate fields to reconstruct IonObject
            log.debug('using stream route: %r', stream_route_param)
            stream_route = IonObject(OT.StreamRoute, stream_route_param)


#            stream_route = IonObject(RT.StreamRoute, exchange_point=stream_route_param['exchange_point'], routing_key=stream_route_param['routing_key'], credentials=stream_route_param['credentials'])
        else:
            raise ConfigurationError(
                'stream_route value should be a dict, not %r' %
                stream_route_param)

        isNew = get_safe(config, 'constraints') is None

        if isNew and not self._semaphore.acquire(blocking=False):
            log.warn('Already acquiring new data - action not duplicated')
            return

        ndc = None
        if isNew:
            # Get the NewDataCheck attachment and add its content to the config
            ext_ds_id = get_safe(config, 'external_dataset_res_id')
            if ext_ds_id:
                ndc = self._find_new_data_check_attachment(ext_ds_id)

        config['new_data_check'] = ndc

        # Create a publisher to pass into the greenlet
        publisher = StandaloneStreamPublisher(stream_id=stream_id,
                                              stream_route=stream_route)

        # Spawn a greenlet to do the data acquisition and publishing
        g = spawn(self._acquire_sample, config, publisher,
                  self._unlock_new_data_callback,
                  self._update_new_data_check_attachment)
        log.debug('** Spawned {0}'.format(g))
        self._glet_queue.append(g)
        return ResourceAgentState.COMMAND, None
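
To make the expectations above concrete, here is a hedged sketch of a merged config as execute_acquire_sample validates it; the ids and routing values are made up, and the field names of the stream_route dict follow the commented-out reconstruction shown above.

# Hypothetical config, as seen after self._dh_config has been patched with any mods.
example_config = {
    'stream_id': 'stream_abc123',              # required; missing -> ConfigurationError
    'stream_route': {                          # dict form is the approved one;
        'exchange_point': 'science_data',      # a StreamRoute object still works but
        'routing_key': 'stream_abc123.data',   # logs a deprecation warning
        'credentials': None,
    },
    # 'constraints' omitted -> treated as a "new data" request, guarded by the
    # semaphore so only one unconstrained acquisition runs at a time.
}
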
Example #51
        def client_loop():
            """
            """
            log.debug('Client greenlet started.')
            
            while True:

                # Start the client socket if necessary.
                # If retries exceeded, end loop.
                if not self._client:
                    self._start_sock()

                # Pop first message from the queue.
                # If empty sleep and retry.                    
                while True:
                    try:
                        request = self._queue[0]
                        break
                    except IndexError:
                        gevent.sleep(self._sleep)
                                                    
                # Send request to server.
                # If unable to send, sleep and retry.
                startsend = time.time()
                while True:
                    try:
                        log.debug('Client sending request %s.', request)                        
                        self._client.send_pyobj(request, NOBLOCK, 0)
                        self.send_count += 1
                        break
                    except ZMQError:
                        log.warning('Client socket not available for send.')
                        gevent.sleep(self._sleep)
                        if time.time() - startsend > self._timeout:
                            log.warning('Client timed out sending, restarting.')                            
                            self._stop_sock()
                            break

                # Grab the current test behavior if any.
                test_behavior = self.test_behaviors.get(self.send_count, None)

                # Run test behavior if any.
                if test_behavior:

                    if test_behavior.type == R3PCTestBehavior.delay:
                        gevent.sleep(test_behavior.delay)
                            
                    if test_behavior.type == R3PCTestBehavior.stop:
                        if test_behavior.delay > 0:
                            gevent.sleep(test_behavior.delay)
                        self._stop_sock()
                        break
                    
                    if test_behavior.type == R3PCTestBehavior.restart:
                        if test_behavior.delay > 0:
                            gevent.sleep(test_behavior.delay)
                        self._stop_sock()

                # Receive ack from server.
                # If unavailable, sleep and retry.
                # If timeout exceeded, increment retry count and restart client.
                # If ack received, zero the retry count and pop the message.
                startrecv = time.time()                
                while self._client:
                    try:
                        log.debug('Client awaiting ack.')
                        reply = self._client.recv_pyobj(NOBLOCK)
                        log.debug('Client received ack: %s for request: %s',
                            reply, request)
                        if not reply:
                            self._stop_sock()
                        else:
                            self._queue.pop(0)
                            self._callback(request)
                        break
                    except ZMQError:
                        log.debug('Client socket unavailable to receive ack.')
                        gevent.sleep(self._sleep)
                        #delta = time.time() - startrecv
                        #log.debug('delta=%f', delta)
                        if time.time() - startrecv > self._timeout:
                            log.warning('Client timed out awaiting ack, restarting.')                            
                            self._stop_sock()
                            break
Example #53
    def on_start(self):
        super(NotificationWorker, self).on_start()

        self.reverse_user_info = None
        self.user_info = None

        #------------------------------------------------------------------------------------
        # Start by loading the user info and reverse user info dictionaries
        #------------------------------------------------------------------------------------

        try:
            self.user_info = self.load_user_info()
            self.reverse_user_info = calculate_reverse_user_info(
                self.user_info)

            log.debug(
                "On start up, notification workers loaded the following user_info dictionary: %s"
                % self.user_info)
            log.debug("The calculated reverse user info: %s" %
                      self.reverse_user_info)

        except NotFound as exc:
            if exc.message.find('users_index') > -1:
                log.warning(
                    "Notification workers found at startup that the users_index has not been loaded yet."
                )
            else:
                raise NotFound(exc.message)

        #------------------------------------------------------------------------------------
        # Create an event subscriber for Reload User Info events
        #------------------------------------------------------------------------------------

        def reload_user_info(event_msg, headers):
            '''
            Callback method for the subscriber to ReloadUserInfoEvent
            '''

            notification_id = event_msg.notification_id
            log.debug(
                "Notification worker received a ReloadUserInfoEvent. The relevant notification_id is %s"
                % notification_id)

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info = calculate_reverse_user_info(
                self.user_info)
            self.test_hook(self.user_info, self.reverse_user_info)

            log.debug("After a reload, the user_info: %s" % self.user_info)
            log.debug("The recalculated reverse_user_info: %s" %
                      self.reverse_user_info)

        # the subscriber for the ReloadUserInfoEvent
        self.reload_user_info_subscriber = EventSubscriber(
            event_type=OT.ReloadUserInfoEvent,
            origin='UserNotificationService',
            callback=reload_user_info)

        self.add_endpoint(self.reload_user_info_subscriber)
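
For context, here is a hedged sketch (not in the original source) of the kind of event that would drive the reload_user_info callback registered above; the notification id is made up, and the call mirrors the EventPublisher usage shown earlier in this document.

# Hypothetical trigger for the subscriber above: the filter matches
# OT.ReloadUserInfoEvent events whose origin is 'UserNotificationService',
# and the callback reads notification_id off the event message.
EventPublisher().publish_event(event_type=OT.ReloadUserInfoEvent,
                               origin='UserNotificationService',
                               notification_id='notification_123')
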