Example #1
    def load_data_process(self, stream_id=""):

        dpms_client = DataProcessManagementServiceClient()

        #this version of the call returns a single data process info dict
        dataprocess_details = dpms_client.read_data_process_for_stream(stream_id)
        dataprocess_details = DotDict(dataprocess_details or {})
        dataprocess_id = dataprocess_details.dataprocess_id

        #set metrics attributes
        dataprocess_details.granule_counter = 0

        self._dataprocesses[dataprocess_id] = dataprocess_details

        #add the stream id to the map
        if 'in_stream_id' in dataprocess_details:
            self._streamid_map.setdefault(dataprocess_details['in_stream_id'], []).append(dataprocess_id)
        #todo: add transform worker id
        self.event_publisher.publish_event(origin=dataprocess_id, origin_type='DataProcess', status=DataProcessStatusType.NORMAL,
                                           description='data process loaded into transform worker')

        #create a publisher for output stream
        self.create_publisher(dataprocess_id, dataprocess_details)

        return [dataprocess_id]
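Both examples lean on a DotDict helper so that keys of the returned details dict can be read and written as attributes (dataprocess_details.dataprocess_id, dataprocess_details.granule_counter = 0). A minimal sketch of such a helper, assuming plain-dict semantics rather than the framework's actual class:

class DotDict(dict):
    """Sketch of a dict whose keys double as attributes."""
    def __getattr__(self, name):
        #only called when normal attribute lookup fails;
        #missing keys read as None so truthiness checks stay falsy
        return self.get(name)
    def __setattr__(self, name, value):
        #attribute assignment stores a plain dict key
        self[name] = value

Returning None for missing keys matters below: Example #2 tests dataprocess_details.out_stream_def without first checking that the key exists.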
Example #2
    def load_data_process(self, stream_id=""):

        dpms_client = DataProcessManagementServiceClient()

        dataprocess_details_list = dpms_client.read_data_process_for_stream(
            stream_id)

        dataprocess_ids = []
        #this returns a list of data process info dicts
        for dataprocess_details in dataprocess_details_list:

            dataprocess_details = DotDict(dataprocess_details or {})
            dataprocess_id = dataprocess_details.dataprocess_id

            #set metrics attributes
            dataprocess_details.granule_counter = 0

            self._dataprocesses[dataprocess_id] = dataprocess_details
            log.debug('load_data_process  dataprocess_id: %s', dataprocess_id)
            log.debug('load_data_process  dataprocess_details: %s',
                      dataprocess_details)

            # validate details
            # if no output stream info is available, log a warning; the transform may still publish an event, so proceed
            if not dataprocess_details.out_stream_def or not dataprocess_details.output_param:
                log.warning(
                    'No output stream details provided for data process %s, will not publish a granule',
                    dataprocess_id)

            #add the stream id to the map
            if 'in_stream_id' in dataprocess_details:
                self._streamid_map.setdefault(dataprocess_details['in_stream_id'], []).append(dataprocess_id)
            #todo: add transform worker id
            self.event_publisher.publish_event(
                origin=dataprocess_id,
                origin_type='DataProcess',
                status=DataProcessStatusType.NORMAL,
                description='data process loaded into transform worker')

            #create a publisher for output stream
            self.create_publisher(dataprocess_id, dataprocess_details)
            dataprocess_ids.append(dataprocess_id)

        return dataprocess_ids
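Once a process is loaded, _streamid_map maps each input stream id to the list of data process ids fed by it, which is what lets a single worker fan one incoming granule out to several processes. A hypothetical sketch of that lookup on receipt of a granule (recv_packet and execute_process are illustrative names, not taken from the original source):

    def recv_packet(self, msg, stream_route, stream_id):
        #fan the granule out to every data process registered for this stream
        for dataprocess_id in self._streamid_map.get(stream_id, []):
            dataprocess_details = self._dataprocesses[dataprocess_id]
            #bump the metrics counter initialized in load_data_process
            dataprocess_details.granule_counter += 1
            #hand the granule to the worker's (assumed) execution hook
            self.execute_process(dataprocess_id, msg)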