def on_start(self):
    """Resolve this producer's stream definition, then delegate startup."""
    self.pdict = None
    configured_stream = self.CFG.get_safe('process.stream_id')
    ps_client = PubsubManagementServiceProcessClient(process=self)
    # Cache the stream definition so publish-time code need not look it up.
    self.stream_def = ps_client.read_stream_definition(stream_id=configured_stream)
    super(BetterDataProducer, self).on_start()
def on_start(self):
    '''
    Starts the process
    '''
    log.info('Replay Process Started')
    super(ReplayProcess,self).on_start()
    # Service clients used to resolve the dataset and output stream definition
    dsm_cli = DatasetManagementServiceProcessClient(process=self)
    pubsub = PubsubManagementServiceProcessClient(process=self)
    # Replay configuration pulled from the process CFG
    self.dataset_id = self.CFG.get_safe('process.dataset_id', None)
    self.delivery_format = self.CFG.get_safe('process.delivery_format',{})
    self.start_time = self.CFG.get_safe('process.query.start_time', None)
    self.end_time = self.CFG.get_safe('process.query.end_time', None)
    self.stride_time = self.CFG.get_safe('process.query.stride_time', None)
    self.parameters = self.CFG.get_safe('process.query.parameters',None)
    self.publish_limit = self.CFG.get_safe('process.query.publish_limit', 10)
    self.tdoa = self.CFG.get_safe('process.query.tdoa',None)
    self.stream_id = self.CFG.get_safe('process.publish_streams.output', '')
    self.stream_def = pubsub.read_stream_definition(stream_id=self.stream_id)
    self.stream_def_id = self.stream_def._id
    self.replay_thread = None
    # Reset playback control events: not publishing, play enabled, not ended
    self.publishing.clear()
    self.play.set()
    self.end.clear()
    if self.dataset_id is None:
        raise BadRequest('dataset_id not specified')
    self.dataset = dsm_cli.read_dataset(self.dataset_id)
    self.pubsub = PubsubManagementServiceProcessClient(process=self)
def on_start(self): pubsub_cli = PubsubManagementServiceProcessClient(process=self, node=self.container.node) # Get the stream(s) stream_id = self.CFG.get_safe('process.stream_id','') query = StreamQuery(stream_ids=[stream_id,]) exchange_name = 'dispatcher_%s' % self.id subscription_id = pubsub_cli.create_subscription( query = query, exchange_name = exchange_name, name = "SampleSubscription", description = "Sample Subscription Description") stream_subscriber = StreamSubscriberRegistrar(process=self, node=self.container.node) def message_received(granule, h): rdt = RecordDictionaryTool.load_from_granule(granule) log.warn('Logging Record Dictionary received in logger subscription \n%s', rdt.pretty_print()) subscriber = stream_subscriber.create_subscriber(exchange_name=exchange_name, callback=message_received) subscriber.start() pubsub_cli.activate_subscription(subscription_id)
def setUp(self):
    """Start a container, preload ION parameters, and build the service
    clients used by the visualization tests."""
    # Start container
    logging.disable(logging.ERROR)
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')
    # simulate preloading
    preload_ion_params(self.container)
    logging.disable(logging.NOTSET)
    #Instantiate a process to represent the test
    process=VisualizationServiceTestProcess()
    # Now create client to DataProductManagementService
    self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
    self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
    self.pubsubclient = PubsubManagementServiceProcessClient(node=self.container.node, process=process)
    self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
    self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
    self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
    self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
    self.datasetclient = DatasetManagementServiceProcessClient(node=self.container.node, process=process)
    self.workflowclient = WorkflowManagementServiceProcessClient(node=self.container.node, process=process)
    self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
    self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
    self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)
    # Canonical CTD stream definition shared by the tests
    self.ctd_stream_def = SBE37_CDM_stream_definition()
def on_start(self):
    '''
    Starts the process
    '''
    log.info('Replay Process Started')
    super(ReplayProcess,self).on_start()
    # Service clients for dataset lookup and stream-definition resolution
    dsm_cli = DatasetManagementServiceProcessClient(process=self)
    pubsub = PubsubManagementServiceProcessClient(process=self)
    # Replay/query configuration from the process CFG
    self.dataset_id = self.CFG.get_safe('process.dataset_id', None)
    self.delivery_format = self.CFG.get_safe('process.delivery_format',{})
    self.start_time = self.CFG.get_safe('process.query.start_time', None)
    self.end_time = self.CFG.get_safe('process.query.end_time', None)
    self.stride_time = self.CFG.get_safe('process.query.stride_time', None)
    self.parameters = self.CFG.get_safe('process.query.parameters',None)
    self.publish_limit = self.CFG.get_safe('process.query.publish_limit', 10)
    self.tdoa = self.CFG.get_safe('process.query.tdoa',None)
    self.stream_id = self.CFG.get_safe('process.publish_streams.output', '')
    self.stream_def = pubsub.read_stream_definition(stream_id=self.stream_id)
    self.stream_def_id = self.stream_def._id
    # Reset playback control events: not publishing, play enabled, not ended
    self.publishing.clear()
    self.play.set()
    self.end.clear()
    if self.dataset_id is None:
        raise BadRequest('dataset_id not specified')
    self.dataset = dsm_cli.read_dataset(self.dataset_id)
    self.pubsub = PubsubManagementServiceProcessClient(process=self)
def on_start(self):
    """Create service clients, validate output streams, and (optionally)
    schedule an interval timer that drives periodic graph generation."""
    # Service clients used by the Matplotlib graph transform
    self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
    self.ssclient = SchedulerServiceProcessClient(process=self)
    self.rrclient = ResourceRegistryServiceProcessClient(process=self)
    self.data_retriever_client = DataRetrieverServiceProcessClient(process=self)
    self.dsm_client = DatasetManagementServiceProcessClient(process=self)
    self.pubsub_client = PubsubManagementServiceProcessClient(process = self)
    self.stream_info = self.CFG.get_safe('process.publish_streams',{})
    self.stream_names = self.stream_info.keys()
    self.stream_ids = self.stream_info.values()
    if not self.stream_names:
        raise BadRequest('MPL Transform has no output streams.')
    graph_time_periods= self.CFG.get_safe('graph_time_periods')
    # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
    self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
    if self.event_timer_interval:
        event_origin = "Interval_Timer_Matplotlib"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        # Timer fires ResourceEvents at the configured interval to trigger graph generation
        self.interval_timer_id = self.ssclient.create_interval_timer(start_time="now" , interval=self._str_to_secs(self.event_timer_interval),
                                                                     event_origin=event_origin, event_subtype="")
    super(VizTransformMatplotlibGraphs,self).on_start()
def on_start(self):
    """Start the transform and cache the definition of its salinity output stream."""
    super(SalinityTransform, self).on_start()
    # The single configured publish stream is the salinity output.
    self.sal_stream = self.CFG.process.publish_streams.values()[0]
    ps_client = PubsubManagementServiceProcessClient(process=self)
    # The stream definition carries the parameter dictionary.
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.sal_stream)
def on_start(self):
    """Find the streams behind the configured data product, subscribe to
    them, and log a summary plus last values of each received granule."""
    rr_cli = ResourceRegistryServiceProcessClient(process=self, node=self.container.node)
    pubsub_cli = PubsubManagementServiceProcessClient(process=self, node=self.container.node)
    # Get the stream(s)
    data_product_id = self.CFG.get_safe('dispatcher.data_product_id','')
    stream_ids,_ = rr_cli.find_objects(subject=data_product_id, predicate=PRED.hasStream, id_only=True)
    log.info('Got Stream Ids: "%s"', stream_ids)
    assert stream_ids, 'No streams found for this data product!'
    # Unique queue per OS process so multiple dispatchers don't collide
    exchange_name = 'dispatcher_%s' % str(os.getpid())
    subscription_id = pubsub_cli.create_subscription(
        name='SampleSubscription',
        exchange_name=exchange_name,
        stream_ids=stream_ids,
        description='Sample Subscription Description'
    )
    # Cache of stream definitions, filled lazily per stream id
    stream_defs = {}
    def message_received(message, stream_route, stream_id):
        granule = message
        stream_id = granule.stream_resource_id
        data_stream_id = granule.data_stream_id
        data_stream = granule.identifiables[data_stream_id]
        tstamp = get_datetime(data_stream.timestamp.value)
        records = granule.identifiables['record_count'].value
        log.info('Received a message from stream %s with time stamp %s and %d records' % (stream_id, tstamp, records))
        if stream_id not in stream_defs:
            # First granule from this stream: look up and cache its definition
            stream_defs[stream_id] = pubsub_cli.find_stream_definition(stream_id, id_only=False).container
        stream_def = stream_defs.get(stream_id)
        sp = PointSupplementStreamParser(stream_definition=stream_def, stream_granule=granule)
        last_data = {}
        for field in sp.list_field_names():
            # Keep only the most recent value of each field for logging
            last_data[field] = sp.get_values(field)[-1]
        log.info('Last values in the message: %s' % str(last_data))
    subscriber = StreamSubscriber(process=self, exchange_name=exchange_name, callback=message_received)
    subscriber.start()
    pubsub_cli.activate_subscription(subscription_id)
def on_start(self):
    """Begin in the dormant state and cache the output stream's definition."""
    super(EventTriggeredTransform_A, self).on_start()
    # The transform stays inactive until its triggering event arrives.
    self.awake = False
    self.cond_stream = self.CFG.process.publish_streams.values()[0]
    ps_client = PubsubManagementServiceProcessClient(process=self)
    # The stream definition carries the parameter dictionary.
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.cond_stream)
def on_start(self):
    """Subscribe this logger to the configured stream, if one is given."""
    super(StreamGranuleLogger, self).on_start()
    ps_client = PubsubManagementServiceProcessClient(
        process=self, node=self.container.node)
    # Get the stream(s); an empty id means nothing to subscribe to.
    stream_id = self.CFG.get_safe('process.stream_id', '')
    if not stream_id:
        return
    self.subscription_id = ps_client.create_subscription(
        '%s_sub' % self.id,
        stream_ids=[stream_id],
        exchange_name=self.queue_name)
    ps_client.activate_subscription(self.subscription_id)
def on_start(self):
    """Emit this process's stream wiring (queue -> routing key) to the DOT logger.

    Pubsub lookups only happen when DOT logging is enabled at INFO level.
    """
    super(TransformDataProcess, self).on_start()
    if dot.isEnabledFor(logging.INFO):
        pubsub_cli = PubsubManagementServiceProcessClient(process=self)
        self.streams = self.CFG.get_safe('process.publish_streams', {})
        # Loop-invariant: sanitize the queue name once, not per stream.
        queue_name = re.sub(r'[ -]', '_', self.queue_name)
        for k, v in self.streams.iteritems():
            stream_route = pubsub_cli.read_stream_route(v)
            # BUG FIX: strip('.stream') removed any of the characters
            # '.', 's', 't', 'r', 'e', 'a', 'm' from both ends of the key
            # (str.strip takes a character set, not a substring). Remove
            # only the literal '.stream' suffix instead.
            routing_key = stream_route.routing_key
            if routing_key.endswith('.stream'):
                routing_key = routing_key[:-len('.stream')]
            dot.info(' %s -> %s' % (queue_name, routing_key))
def on_start(self):
    """Validate the output-stream config and cache the first stream's definition."""
    stream_cfg = self.CFG.get_safe('process.publish_streams', {})
    self.stream_info = stream_cfg
    self.stream_names = stream_cfg.keys()
    self.stream_ids = stream_cfg.values()
    # De Morgan of the usual "not names or not ids" check.
    if not (self.stream_names and self.stream_ids):
        raise BadRequest('HighCharts Transform: No output streams.')
    self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
    self.stream_def = self.pubsub_management.read_stream_definition(
        stream_id=self.stream_ids[0])
    super(VizTransformHighCharts, self).on_start()
def on_start(self):
    """Start the transform; a 'conductivity' output stream is mandatory."""
    super(CTDL1ConductivityTransform, self).on_start()
    streams = self.CFG.process.publish_streams
    if not streams.has_key('conductivity'):
        raise BadRequest("For CTD transforms, please send the stream_id using "
                         "a special keyword (ex: conductivity)")
    self.cond_stream = streams.conductivity
    # The stream definition carries the parameter dictionary.
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.cond_stream)
def on_start(self):
    """Start dormant; resolve the definition of the single output stream."""
    super(EventTriggeredTransform_A, self).on_start()
    self.awake = False
    output_streams = self.CFG.process.publish_streams.values()
    self.cond_stream = output_streams[0]
    # Read the parameter dict from the stream def of the stream
    client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = client.read_stream_definition(
        stream_id=self.cond_stream)
def on_start(self):
    """Verify the Google DT transform has at least one output stream configured."""
    self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
    info = self.CFG.get_safe('process.publish_streams', {})
    self.stream_info = info
    self.stream_names = info.keys()
    self.stream_ids = info.values()
    if not self.stream_names:
        raise BadRequest('Google DT Transform has no output streams.')
    super(VizTransformGoogleDT, self).on_start()
def on_start(self):
    """Start the density transform; a 'density' output stream is mandatory."""
    super(DensityTransform, self).on_start()
    streams = self.CFG.process.publish_streams
    if not streams.has_key('density'):
        raise BadRequest("For CTD transforms, please send the stream_id "
                         "using a special keyword (ex: density)")
    self.dens_stream = streams.density
    # The stream definition carries the parameter dictionary.
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.dens_stream)
def on_start(self):
    """Start the transform; requires the 'seafloor_pressure' output stream."""
    super(PresfL1Transform, self).on_start()
    streams = self.CFG.process.publish_streams
    if not streams.has_key('seafloor_pressure'):
        raise BadRequest("For the PresfL1Transform, please send the stream_id using "
                         "a special keyword (ex: seafloor_pressure)")
    self.pres_stream = streams.seafloor_pressure
    # The stream definition carries the parameter dictionary.
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.pres_stream)
def on_start(self):
    """Start the transform; a 'pressure' output stream is mandatory."""
    super(CTDL1PressureTransform, self).on_start()
    streams = self.CFG.process.publish_streams
    if not streams.has_key('pressure'):
        raise BadRequest("For CTD transforms, please send the stream_id using "
                         "a special keyword (ex: pressure)")
    self.pres_stream = streams.pressure
    # The stream definition carries the parameter dictionary.
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.pres_stream)
def on_start(self):
    """Start the transform; a 'temperature' output stream is mandatory."""
    super(CTDL1TemperatureTransform, self).on_start()
    streams = self.CFG.process.publish_streams
    if not streams.has_key('temperature'):
        raise BadRequest("For CTD transforms, please send the stream_id using a "
                         "special keyword (ex: temperature)")
    self.temp_stream = streams.temperature
    # The stream definition carries the parameter dictionary.
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.temp_stream)
def on_start(self):
    """Log the queue -> routing-key edges to the DOT logger when it is enabled."""
    super(TransformDataProcess, self).on_start()
    if not dot.isEnabledFor(logging.INFO):
        return
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.streams = self.CFG.get_safe('process.publish_streams', {})
    suffix = '.stream'
    for name, sid in self.streams.iteritems():
        route = ps_client.read_stream_route(sid)
        sanitized_queue = re.sub(r'[ -]', '_', self.queue_name)
        # Drop the trailing '.stream' suffix from the routing key for display.
        dot.info(' %s -> %s' % (sanitized_queue, route.routing_key[:-len(suffix)]))
def on_start(self):
    """Determine the salinity output stream and cache its definition."""
    ps_client = PubsubManagementServiceProcessClient(process=self)
    streams = self.CFG.process.publish_streams
    if streams.has_key("salinity"):
        self.sal_stream = streams.salinity
    elif streams.has_key("output"):
        self.sal_stream = streams.output
        # Alias the generic output publisher as the salinity publisher.
        self.salinity = self.output
    self.CFG.process.stream_id = self.sal_stream
    self.stream_id = self.sal_stream
    self.stream_def = ps_client.read_stream_definition(stream_id=self.sal_stream)
    super(SalinityDoubler, self).on_start()
def on_start(self):
    """Cache the L1 stream-definition id and the calibration coefficients."""
    super(CTDBP_L1_Transform, self).on_start()
    self.L1_stream_id = self.CFG.process.publish_streams.values()[0]
    # Read the parameter dict from the stream def of the stream
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition_id = ps_client.read_stream_definition(
        stream_id=self.L1_stream_id)._id
    coeffs = self.CFG.process.calibration_coeffs
    self.temp_calibration_coeffs = coeffs['temp_calibration_coeffs']
    self.pres_calibration_coeffs = coeffs['pres_calibration_coeffs']
    self.cond_calibration_coeffs = coeffs['cond_calibration_coeffs']
def on_start(self):
    """Resolve the L0 output stream's definition and build the params dict."""
    super(CTDBP_L0_all, self).on_start()
    self.L0_stream_id = self.CFG.process.publish_streams.values()[0]
    log.debug("the output stream: %s", self.L0_stream_id)
    client = PubsubManagementServiceProcessClient(process=self)
    self.stream_def_L0 = client.read_stream_definition(stream_id=self.L0_stream_id)
    self.params = {'L0_stream': self.stream_def_L0._id}
    log.debug("the params: %s", self.params)
def on_start(self):
    """Start dormant; the 'output' publish stream must be configured."""
    super(EventTriggeredTransform_B, self).on_start()
    self.awake = False
    streams = self.CFG.process.publish_streams
    if not streams.has_key('output'):
        raise BadRequest("For event triggered transform, please send the stream_id "
                         "using the special keyword, output")
    self.output = streams.output
    # The stream definition carries the parameter dictionary.
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.output)
def on_start(self):
    """Create and activate a subscription for the configured stream id."""
    super(StreamGranuleLogger, self).on_start()
    client = PubsubManagementServiceProcessClient(
        process=self, node=self.container.node)
    configured = self.CFG.get_safe('process.stream_id', '')
    if configured:
        sub_name = '%s_sub' % self.id
        self.subscription_id = client.create_subscription(
            sub_name, stream_ids=[configured], exchange_name=self.queue_name)
        client.activate_subscription(self.subscription_id)
def on_start(self):
    """Look up the L0 output stream definition and record it in params."""
    super(CTDBP_L0_all, self).on_start()
    out_stream = self.CFG.process.publish_streams.values()[0]
    self.L0_stream_id = out_stream
    log.debug("the output stream: %s", self.L0_stream_id)
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_def_L0 = ps_client.read_stream_definition(stream_id=out_stream)
    self.params = {'L0_stream': self.stream_def_L0._id}
    log.debug("the params: %s", self.params)
def on_start(self):
    """Start the splitter; requires the 'absolute_pressure' output stream."""
    super(PresfL0Splitter, self).on_start()
    streams = self.CFG.process.publish_streams
    if not streams.has_key('absolute_pressure'):
        raise BadRequest(
            "For the PresfL0Splitter, please send the stream_id using "
            "a special keyword (ex: absolute_pressure)")
    self.pres_stream = streams.absolute_pressure
    # The stream definition carries the parameter dictionary.
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.pres_stream)
def on_start(self):
    """Pick the salinity output stream, cache its definition, then start."""
    client = PubsubManagementServiceProcessClient(process=self)
    streams = self.CFG.process.publish_streams
    if streams.has_key('salinity'):
        self.sal_stream = streams.salinity
    elif streams.has_key('output'):
        self.sal_stream = streams.output
        # Route salinity publishing through the generic output publisher.
        self.salinity = self.output
    self.CFG.process.stream_id = self.sal_stream
    self.stream_id = self.sal_stream
    self.stream_def = client.read_stream_definition(stream_id=self.sal_stream)
    super(SalinityDoubler, self).on_start()
def on_start(self):
    """Resolve the stream definition, then launch the publish loop on a greenlet."""
    super(SimpleCtdPublisher, self).on_start()
    client = PubsubManagementServiceProcessClient(process=self)
    self.stream_id = self.CFG.get_safe('process.stream_id', {})
    self.interval = self.CFG.get_safe('process.interval', 1.0)
    #self.last_time = self.CFG.get_safe('process.last_time', 0)
    self.stream_def = client.read_stream_definition(stream_id=self.stream_id)
    self.pdict = self.stream_def.parameter_dictionary
    # The loop runs until `finished` is set.
    self.finished = gevent.event.Event()
    self.greenlet = gevent.spawn(self.publish_loop)
    self._stats['publish_count'] = 0
    log.info('SimpleCTDPublisher started, publishing to %s', self.publisher.stream_route.__dict__)
def _get_param_dicts_from_streams(self):
    """Fetch the stream definitions of the three input streams and map them into params."""
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_def_cond = ps_client.read_stream_definition(stream_id=self.cond_stream)
    self.stream_def_pres = ps_client.read_stream_definition(stream_id=self.pres_stream)
    self.stream_def_temp = ps_client.read_stream_definition(stream_id=self.temp_stream)
    # Map each measurement name to its stream-definition id.
    self.params = {
        'conductivity': self.stream_def_cond._id,
        'pressure': self.stream_def_pres._id,
        'temperature': self.stream_def_temp._id,
    }
def on_start(self):
    """Validate the L0_stream keyword and fetch its stream definition."""
    super(CTDBP_L0_all, self).on_start()
    streams = self.CFG.process.publish_streams
    if not streams.has_key('L0_stream'):
        raise BadRequest("For CTD transforms, please send the stream_id for the L0_stream using "
                         "a special keyword (L0_stream)")
    self.L0_stream_id = streams.L0_stream
    log.debug("the output stream: %s", self.L0_stream_id)
    client = PubsubManagementServiceProcessClient(process=self)
    self.stream_def_L0 = client.read_stream_definition(stream_id=self.L0_stream_id)
    self.params = {'L0_stream': self.stream_def_L0._id}
    log.debug("the params: %s", self.params)
def on_start(self):
    """Initialize lookup-value machinery and subscribe to reference updates."""
    TransformDataProcess.on_start(self)
    self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
    self.stored_values = StoredValueManager(self.container)
    cfg = self.CFG
    self.input_data_product_ids = cfg.get_safe('process.input_products', [])
    self.output_data_product_ids = cfg.get_safe('process.output_products', [])
    self.lookup_docs = cfg.get_safe('process.lookup_docs', [])
    # Updated lookup documents queue here until the transform consumes them.
    self.new_lookups = Queue()
    self.lookup_monitor = EventSubscriber(
        event_type=OT.ExternalReferencesUpdatedEvent,
        callback=self._add_lookups,
        auto_delete=True)
    self.lookup_monitor.start()
def on_start(self):
    """Start the CTD publisher: fetch the stream definition, spawn the loop."""
    super(SimpleCtdPublisher, self).on_start()
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_id = self.CFG.get_safe('process.stream_id', {})
    self.interval = self.CFG.get_safe('process.interval', 1.0)
    #self.last_time = self.CFG.get_safe('process.last_time', 0)
    definition = ps_client.read_stream_definition(stream_id=self.stream_id)
    self.stream_def = definition
    self.pdict = definition.parameter_dictionary
    self.finished = gevent.event.Event()
    # Publishing happens on its own greenlet so on_start returns promptly.
    self.greenlet = gevent.spawn(self.publish_loop)
    self._stats['publish_count'] = 0
    log.info('SimpleCTDPublisher started, publishing to %s', self.publisher.stream_route.__dict__)
def on_start(self):
    """Begin asleep; require and resolve the 'output' publish stream."""
    super(EventTriggeredTransform_B, self).on_start()
    self.awake = False
    streams = self.CFG.process.publish_streams
    if not streams.has_key('output'):
        raise BadRequest(
            "For event triggered transform, please send the stream_id "
            "using the special keyword, output")
    self.output = streams.output
    # Read the parameter dict from the stream def of the stream
    client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = client.read_stream_definition(
        stream_id=self.output)
def setUp(self):
    """Start a container and build the service clients used by the
    visualization tests (ION parameter preload is skipped here)."""
    # Start container
    logging.disable(logging.ERROR)
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')
    # simulate preloading
    #preload_ion_params(self.container)
    logging.disable(logging.NOTSET)
    #Instantiate a process to represent the test
    process=VisualizationServiceTestProcess()
    # Now create client to DataProductManagementService
    self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
    self.damsclient = DataAcquisitionManagementServiceProcessClient(node=self.container.node, process=process)
    self.pubsubclient = PubsubManagementServiceProcessClient(node=self.container.node, process=process)
    self.ingestclient = IngestionManagementServiceProcessClient(node=self.container.node, process=process)
    self.imsclient = InstrumentManagementServiceProcessClient(node=self.container.node, process=process)
    self.dataproductclient = DataProductManagementServiceProcessClient(node=self.container.node, process=process)
    self.dataprocessclient = DataProcessManagementServiceProcessClient(node=self.container.node, process=process)
    self.datasetclient = DatasetManagementServiceProcessClient(node=self.container.node, process=process)
    self.process_dispatcher = ProcessDispatcherServiceProcessClient(node=self.container.node, process=process)
    self.data_retriever = DataRetrieverServiceProcessClient(node=self.container.node, process=process)
    self.vis_client = VisualizationServiceProcessClient(node=self.container.node, process=process)
    # Canonical CTD stream definition shared by the tests
    self.ctd_stream_def = SBE37_CDM_stream_definition()
def on_start(self):
    """Resolve the L1 stream definition and load calibration coefficients."""
    super(CTDBP_L1_Transform, self).on_start()
    out_stream = self.CFG.process.publish_streams.values()[0]
    self.L1_stream_id = out_stream
    # Read the parameter dict from the stream def of the stream
    client = PubsubManagementServiceProcessClient(process=self)
    definition = client.read_stream_definition(stream_id=out_stream)
    self.stream_definition_id = definition._id
    calibration = self.CFG.process.calibration_coeffs
    self.temp_calibration_coeffs = calibration['temp_calibration_coeffs']
    self.pres_calibration_coeffs = calibration['pres_calibration_coeffs']
    self.cond_calibration_coeffs = calibration['cond_calibration_coeffs']
class StreamPublisherRegistrar(object):
    """Data Management helper that registers publishers for streams.

    Creates the abstracted endpoint (with a ``publish`` method) for a given
    stream, using the owning process's exchange name.
    """

    def __init__(self, process=None, node=None):
        """Bind to the process's exchange name for publishing to streams."""
        self.process = process
        self.exchange_name = process.id
        self.node = node
        self.pubsub_client = PubsubManagementServiceProcessClient(process=process, node=node)
        xs_dot_xp = CFG.core_xps.science_data
        try:
            # The CFG value must look like "<exchange_space>.<exchange_point>"
            self.XS, self.XP = xs_dot_xp.split('.')
        except ValueError:
            raise PublisherError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)

    def create_publisher(self, stream_id):
        """Register this exchange name as a producer on *stream_id*.

        Returns a StreamPublisher ready to publish (send) messages on the stream.
        """
        log.debug('Creating publisher...')
        # Pubsub hands back the stream route (routing key) for this producer
        route = self.pubsub_client.register_producer(self.exchange_name, stream_id)
        return StreamPublisher(name=(self.XP, route.routing_key), process=self.process, node=self.node)
def on_start(self):
    """Require the L0_stream keyword, then resolve its stream definition."""
    super(CTDBP_L0_all, self).on_start()
    publish_streams = self.CFG.process.publish_streams
    if not publish_streams.has_key('L0_stream'):
        raise BadRequest(
            "For CTD transforms, please send the stream_id for the L0_stream using "
            "a special keyword (L0_stream)")
    self.L0_stream_id = publish_streams.L0_stream
    log.debug("the output stream: %s", self.L0_stream_id)
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_def_L0 = ps_client.read_stream_definition(stream_id=self.L0_stream_id)
    self.params = {'L0_stream': self.stream_def_L0._id}
    log.debug("the params: %s", self.params)
def on_start(self):
    """Start the density transform; latitude and longitude are required config."""
    super(CTDBP_DensityTransform, self).on_start()
    self.dens_stream_id = self.CFG.process.publish_streams.values()[0]
    lat = self.CFG.get_safe('process.lat', None)
    if lat is None:
        raise BadRequest('Latitude is required to determine density')
    lon = self.CFG.get_safe('process.lon', None)
    if lon is None:
        raise BadRequest('Longitude is required to determine density')
    # The stream definition carries the parameter dictionary.
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.dens_stream_id)
    self.params = {
        'stream_def': self.stream_definition._id,
        'lat': lat,
        'lon': lon,
    }
def on_start(self):
    """Validate the L1_stream config, cache its stream-def id and calibration coeffs."""
    super(CTDBP_L1_Transform, self).on_start()
    # Validate the CFG used to launch the transform has all the required fields
    streams = self.CFG.process.publish_streams
    if not streams.has_key('L1_stream'):
        raise BadRequest("For CTDBP transforms, please send the stream_id for the L1_stream using "
                         "a special keyword (L1_stream)")
    self.L1_stream_id = streams.L1_stream
    # Read the parameter dict from the stream def of the stream
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition_id = ps_client.read_stream_definition(
        stream_id=self.L1_stream_id)._id
    coeffs = self.CFG.process.calibration_coeffs
    self.temp_calibration_coeffs = coeffs['temp_calibration_coeffs']
    self.pres_calibration_coeffs = coeffs['pres_calibration_coeffs']
    self.cond_calibration_coeffs = coeffs['cond_calibration_coeffs']
def __init__(self, process=None, node=None):
    """Bind this registrar to the process's exchange name for stream publishing."""
    self.process = process
    self.exchange_name = process.id
    self.node = node
    self.pubsub_client = PubsubManagementServiceProcessClient(
        process=process, node=node)
    sci_data_cfg = CFG.core_xps.science_data
    try:
        # CFG value has the form "<xs>.<xp>"; the XP gets the sys-name prefix.
        self.XS, xp_base = sci_data_cfg.split('.')
        self.XP = '.'.join([bootstrap.get_sys_name(), xp_base])
    except ValueError:
        raise PublisherError(
            'Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % sci_data_cfg)
def __init__(self, process=None, container=None):
    """Bind to the process's exchange name; derive XS/XP base from CFG."""
    self.process = process
    self.exchange_name = process.id
    self.container = container
    self.pubsub_client = PubsubManagementServiceProcessClient(
        process=process, node=container.node)
    sci_data_cfg = CFG.core_xps.science_data
    try:
        # Keep only the exchange-point part; the XS comes from the container.
        _, self.xp_base = sci_data_cfg.split('.')
        self._XS = self.container.ex_manager.default_xs
    except ValueError:
        raise PublisherError(
            'Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % sci_data_cfg)
def on_start(self):
    """Validate the L1_stream config and assemble params for the L1 transform."""
    super(CTDBP_L1_Transform, self).on_start()
    # Validate the CFG used to launch the transform has all the required fields
    streams = self.CFG.process.publish_streams
    if not streams.has_key('L1_stream'):
        raise BadRequest("For CTDBP transforms, please send the stream_id for the L1_stream using "
                         "a special keyword (L1_stream)")
    self.L1_stream_id = streams.L1_stream
    calibration_coeffs = self.CFG.process.calibration_coeffs
    # Read the parameter dict from the stream def of the stream
    ps_client = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = ps_client.read_stream_definition(stream_id=self.L1_stream_id)
    self.params = {
        'stream_def_id': self.stream_definition._id,
        'calibration_coeffs': calibration_coeffs,
    }
def on_start(self):
    """Ensure the Google DT transform has output streams before starting."""
    self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
    stream_cfg = self.CFG.get_safe('process.publish_streams', {})
    self.stream_info = stream_cfg
    self.stream_names = stream_cfg.keys()
    self.stream_ids = stream_cfg.values()
    if not self.stream_names:
        raise BadRequest('Google DT Transform has no output streams.')
    super(VizTransformGoogleDT, self).on_start()
def on_start(self):
    """Start the density transform.

    Requires a 'density' publish stream plus 'process.lat' and 'process.lon'
    in the CFG; raises BadRequest when any of them is missing.
    """
    super(CTDBP_DensityTransform, self).on_start()
    if not self.CFG.process.publish_streams.has_key('density'):
        raise BadRequest("For CTD transforms, please send the stream_id using a special keyword (ex: density)")
    self.dens_stream_id = self.CFG.process.publish_streams.density
    lat = self.CFG.get_safe('process.lat', None)
    if lat is None:
        raise BadRequest('Latitude is required to determine density')
    lon = self.CFG.get_safe('process.lon', None)
    if lon is None:
        # BUG FIX: error message previously misspelled 'Longitude' as 'Lonitude'
        raise BadRequest('Longitude is required to determine density')
    # Read the parameter dict from the stream def of the stream
    pubsub = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = pubsub.read_stream_definition(stream_id=self.dens_stream_id)
    self.params = {'stream_def': self.stream_definition._id, 'lat': lat, 'lon': lon}
def on_start(self):
    """Set up stored-value lookups and an event subscriber for reference updates."""
    TransformDataProcess.on_start(self)
    self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
    self.stored_values = StoredValueManager(self.container)
    self.input_data_product_ids = self.CFG.get_safe('process.input_products', [])
    self.output_data_product_ids = self.CFG.get_safe('process.output_products', [])
    self.lookup_docs = self.CFG.get_safe('process.lookup_docs', [])
    self.new_lookups = Queue()
    # Re-load lookup documents whenever external references change.
    monitor = EventSubscriber(
        event_type=OT.ExternalReferencesUpdatedEvent,
        callback=self._add_lookups,
        auto_delete=True)
    self.lookup_monitor = monitor
    monitor.start()
def __init__(self, process=None, stream_id='', stream_route=None, exchange_point='', routing_key=''):
    '''
    Creates a StreamPublisher which publishes to the specified stream by default and is attached to the specified process.
    @param process The process which the subscriber is to be attached.
    @param stream_id Stream identifier for the publishing stream.
    @param stream_route A StreamRoute corresponding to the stream_id
    @param exchange_point The name of the exchange point, to be used in lieu of stream_route or stream_id
    @param routing_key The routing key to be used in lieu of stream_route or stream_id
    '''
    super(StreamPublisher, self).__init__()
    validate_is_instance(process, BaseService, 'No valid process provided.')
    #--------------------------------------------------------------------------------
    # The important part of publishing is the stream_route and there are three ways
    # to the stream route
    #  - The Route is obtained from Pubsub Management with a stream id.
    #  - The Route is obtained by combining exchange_point and the routing_key
    #    but all other information is lost (credentials, etc.)
    #  - The Route is obtained by being provided directly to __init__
    #--------------------------------------------------------------------------------
    self.stream_id = stream_id
    if stream_id:
        # Regardless of what's passed in for stream_route look it up, prevents mismatching
        pubsub_cli = PubsubManagementServiceProcessClient(process=process, node=process.container.node)
        self.stream_route = pubsub_cli.read_stream_route(stream_id)
    elif not stream_route:
        self.stream_route = None
        if exchange_point and routing_key:
            # Build a route directly from the given exchange point / routing key.
            self.stream_route = StreamRoute(exchange_point=exchange_point, routing_key=routing_key)
        else:
            # Nothing usable was provided: create a brand-new stream on the fly
            # (exchange_point defaults to 'void' when empty).
            pubsub_cli = PubsubManagementServiceProcessClient(process=process, node=process.container.node)
            stream_id, stream_route = pubsub_cli.create_stream(process.id, exchange_point=exchange_point or 'void')
            self.stream_id = stream_id
            self.stream_route = stream_route
    else:
        # An explicit StreamRoute was passed in; use it as-is.
        self.stream_route = stream_route
    validate_is_instance(self.stream_route, StreamRoute, 'No valid stream route provided to publisher.')
    self.container = process.container
    # Declare the exchange point and derive the concrete publish route from it.
    self.xp = self.container.ex_manager.create_xp(self.stream_route.exchange_point)
    self.xp_route = self.xp.create_route(self.stream_route.routing_key)
def on_start(self):
    """
    Starts the process: resolves the cache datastore (name and profile)
    and creates the pubsub client used by the cache.
    """
    super(LastUpdateCache, self).on_start()
    # Couch configuration section from the launch CFG; may be absent (None).
    self.couch_config = self.CFG.get('couch_storage')
    #self.datastore_name = self.couch_config.get('datastore_name','dm_cache')
    self.datastore_name = CACHE_DATASTORE_NAME
    try:
        # Resolve the configured profile name against the DS_PROFILE enum.
        self.datastore_profile = getattr(DataStore.DS_PROFILE, self.couch_config.get('datastore_profile', 'SCIDATA'))
    except AttributeError:
        # Unknown profile name — or couch_config itself is None — fall back to SCIDATA.
        self.datastore_profile = DataStore.DS_PROFILE.SCIDATA
    self.db = self.container.datastore_manager.get_datastore(ds_name=self.datastore_name, profile=self.datastore_profile)
    self.ps_cli = PubsubManagementServiceProcessClient(process=self)
class VizTransformGoogleDT(TransformDataProcess):
    """
    This class is used for converting incoming data from CDM format to JSON
    style Google DataTables.

    Note: One behaviour that this class is expected to achieve specifically is
    to determine if its supposed to work as a realtime transform (exists
    indefinitely and maintains a sliding window of data) or as a replay
    transform (one-shot).

    [2] This transform behaves as an instantaneous forwarder. There is no
    waiting for the entire stream to create the complete datatable. As the
    granules come in, they are translated to the datatable 'components'.
    Components, because we are not creating the actual datatable in this code.
    That's the job of the viz service to put all the components of a datatable
    together in JSON format before sending it to the client.

    [3] The time stamp in the incoming stream can't be converted to the
    datetime object here because the Raw stream definition only expects
    regular primitives (strings, floats, ints etc).

    Usage: https://gist.github.com/3834918
    """

    output_bindings = ['google_dt_components']

    def __init__(self):
        super(VizTransformGoogleDT, self).__init__()

    def on_start(self):
        """Cache the pubsub client and the configured output streams.

        @raise BadRequest when no output streams are configured.
        """
        self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
        self.stream_info = self.CFG.get_safe('process.publish_streams', {})
        # Materialize as lists: dict.keys()/values() are non-indexable views
        # under Python 3 and get_stream_definition() indexes stream_ids.
        self.stream_names = list(self.stream_info.keys())
        self.stream_ids = list(self.stream_info.values())
        if not self.stream_names:
            raise BadRequest('Google DT Transform has no output streams.')
        super(VizTransformGoogleDT, self).on_start()

    def recv_packet(self, packet, in_stream_route, in_stream_id):
        """Translate a granule to datatable components and publish to every output stream."""
        log.info('Received packet')
        outgoing = VizTransformGoogleDTAlgorithm.execute(packet, params=self.get_stream_definition())
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(outgoing)

    def get_stream_definition(self):
        """Return the stream definition id of the first output stream."""
        stream_id = self.stream_ids[0]
        self.stream_def = self.pubsub_management.read_stream_definition(stream_id=stream_id)
        return self.stream_def._id
def on_start(self):
    """
    Starts the transform.

    Validates the launch configuration (density output stream plus the
    latitude/longitude needed by the density computation) and caches the
    stream definition id and the coordinates for the algorithm.

    @raise BadRequest when the density stream, latitude, or longitude is missing.
    """
    super(CTDBP_DensityTransform, self).on_start()

    # NOTE: dict.has_key() was removed in Python 3; the `in` operator works in both.
    if 'density' not in self.CFG.process.publish_streams:
        raise BadRequest("For CTD transforms, please send the stream_id "
                         "using a special keyword (ex: density)")
    self.dens_stream_id = self.CFG.process.publish_streams.density

    lat = self.CFG.get_safe('process.lat', None)
    if lat is None:
        raise BadRequest('Latitude is required to determine density')
    lon = self.CFG.get_safe('process.lon', None)
    if lon is None:
        # Fixed typo in the error message ('Lonitude' -> 'Longitude').
        raise BadRequest('Longitude is required to determine density')

    # Read the parameter dict from the stream def of the stream
    pubsub = PubsubManagementServiceProcessClient(process=self)
    self.stream_definition = pubsub.read_stream_definition(stream_id=self.dens_stream_id)

    self.params = {'stream_def': self.stream_definition._id, 'lat': lat, 'lon': lon}
class VizTransformGoogleDT(TransformDataProcess):
    """
    This class is used for converting incoming data from CDM format to JSON
    style Google DataTables.

    Note: One behaviour that this class is expected to achieve specifically is
    to determine if its supposed to work as a realtime transform (exists
    indefinitely and maintains a sliding window of data) or as a replay
    transform (one-shot).

    [2] This transform behaves as an instantaneous forwarder. There is no
    waiting for the entire stream to create the complete datatable. As the
    granules come in, they are translated to the datatable 'components'.
    Components, because we are not creating the actual datatable in this code.
    That's the job of the viz service to put all the components of a datatable
    together in JSON format before sending it to the client.

    [3] The time stamp in the incoming stream can't be converted to the
    datetime object here because the Raw stream definition only expects
    regular primitives (strings, floats, ints etc).

    Usage: https://gist.github.com/3834918
    """

    output_bindings = ['google_dt_components']

    def __init__(self):
        super(VizTransformGoogleDT, self).__init__()

    def on_start(self):
        """Cache the pubsub client and the configured output streams.

        @raise BadRequest when no output streams are configured.
        """
        self.pubsub_management = PubsubManagementServiceProcessClient(
            process=self)
        self.stream_info = self.CFG.get_safe('process.publish_streams', {})
        # Materialize as lists: dict.keys()/values() are non-indexable views
        # under Python 3 and get_stream_definition() indexes stream_ids.
        self.stream_names = list(self.stream_info.keys())
        self.stream_ids = list(self.stream_info.values())
        if not self.stream_names:
            raise BadRequest('Google DT Transform has no output streams.')
        super(VizTransformGoogleDT, self).on_start()

    def recv_packet(self, packet, in_stream_route, in_stream_id):
        """Translate a granule to datatable components and publish to every output stream."""
        log.info('Received packet')
        outgoing = VizTransformGoogleDTAlgorithm.execute(
            packet, params=self.get_stream_definition())
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(outgoing)

    def get_stream_definition(self):
        """Return the stream definition id of the first output stream."""
        stream_id = self.stream_ids[0]
        self.stream_def = self.pubsub_management.read_stream_definition(
            stream_id=stream_id)
        return self.stream_def._id
def __init__(self, process=None, node=None):
    """
    Use the process's exchange name to publish messages to a stream.

    @param process The owning process; its id becomes the exchange name.
    @param node Messaging node handed to the pubsub client.
    """
    self.process = process
    self.node = node
    self.exchange_name = process.id
    self.pubsub_client = PubsubManagementServiceProcessClient(process=process, node=node)

    # core_xps.science_data must look like "<exchange_space>.<exchange_point>".
    xs_dot_xp = CFG.core_xps.science_data
    pieces = xs_dot_xp.split('.')
    if len(pieces) != 2:
        raise PublisherError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)
    self.XS, self.XP = pieces
def on_start(self):
    """
    Starts the process: builds the service clients, caches the output
    stream configuration, and optionally schedules an interval timer that
    drives periodic graph generation.

    @raise BadRequest when the launch configuration defines no output streams.
    """
    # Service clients used by the transform.
    self.pubsub_management = PubsubManagementServiceProcessClient(
        process=self)
    self.ssclient = SchedulerServiceProcessClient(process=self)
    self.rrclient = ResourceRegistryServiceProcessClient(process=self)
    self.data_retriever_client = DataRetrieverServiceProcessClient(
        process=self)
    self.dsm_client = DatasetManagementServiceProcessClient(process=self)
    self.pubsub_client = PubsubManagementServiceProcessClient(process=self)

    # Output stream configuration from the launch CFG.
    self.stream_info = self.CFG.get_safe('process.publish_streams', {})
    self.stream_names = self.stream_info.keys()
    self.stream_ids = self.stream_info.values()
    if not self.stream_names:
        raise BadRequest('MPL Transform has no output streams.')

    # NOTE(review): the original also read CFG 'graph_time_periods' into an
    # unused local; that dead read (and a commented-out debug print) has been
    # removed.

    # If this is meant to be an event driven process, schedule an event to be
    # generated every few minutes/hours.
    self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
    if self.event_timer_interval:
        event_origin = "Interval_Timer_Matplotlib"
        sub = EventSubscriber(event_type="ResourceEvent",
                              callback=self.interval_timer_callback,
                              origin=event_origin)
        sub.start()

        self.interval_timer_id = self.ssclient.create_interval_timer(
            start_time="now",
            interval=self._str_to_secs(self.event_timer_interval),
            event_origin=event_origin,
            event_subtype="")

    super(VizTransformMatplotlibGraphs, self).on_start()
def on_start(self):
    """Subscribe this dispatcher to its configured stream and log each received granule."""
    pubsub = PubsubManagementServiceProcessClient(
        process=self, node=self.container.node)

    # Build a query over the single stream named in the launch configuration.
    stream_id = self.CFG.get_safe('process.stream_id', '')
    stream_query = StreamQuery(stream_ids=[
        stream_id,
    ])

    # Each dispatcher instance listens on its own exchange name.
    exchange_name = 'dispatcher_%s' % self.id
    subscription_id = pubsub.create_subscription(
        query=stream_query,
        exchange_name=exchange_name,
        name="SampleSubscription",
        description="Sample Subscription Description")

    registrar = StreamSubscriberRegistrar(process=self,
                                          node=self.container.node)

    def message_received(granule, h):
        # Unpack the granule into a record dictionary and log its contents.
        rdt = RecordDictionaryTool.load_from_granule(granule)
        log.warn(
            'Logging Record Dictionary received in logger subscription \n%s',
            rdt.pretty_print())

    listener = registrar.create_subscriber(
        exchange_name=exchange_name, callback=message_received)
    listener.start()

    pubsub.activate_subscription(subscription_id)
def __init__(self, process=None, container=None):
    """
    Use the process's exchange name to publish messages to a stream.

    @param process The owning process; its id becomes the exchange name.
    @param container Capability container whose node is handed to the pubsub client.
    """
    self.process = process
    self.container = container
    self.exchange_name = process.id
    self.pubsub_client = PubsubManagementServiceProcessClient(process=process, node=container.node)

    # core_xps.science_data must look like "<exchange_space>.<exchange_point>";
    # only the exchange-point half is kept, the XS comes from the exchange manager.
    xs_dot_xp = CFG.core_xps.science_data
    try:
        _xs_name, self.xp_base = xs_dot_xp.split('.')
        self._XS = self.container.ex_manager.default_xs
    except ValueError:
        raise PublisherError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)
def on_start(self):
    """Resolve the cache datastore (name and profile) and set up the pubsub client."""
    super(LastUpdateCache, self).on_start()

    self.couch_config = self.CFG.get('couch_storage')
    self.datastore_name = CACHE_DATASTORE_NAME

    # Map the configured profile name onto the DS_PROFILE enum; any lookup
    # failure (unknown name, or missing couch_storage section) falls back to
    # the SCIDATA profile.
    try:
        self.datastore_profile = getattr(
            DataStore.DS_PROFILE,
            self.couch_config.get('datastore_profile', 'SCIDATA'))
    except AttributeError:
        self.datastore_profile = DataStore.DS_PROFILE.SCIDATA

    self.db = self.container.datastore_manager.get_datastore(
        ds_name=self.datastore_name,
        profile=self.datastore_profile)
    self.ps_cli = PubsubManagementServiceProcessClient(process=self)