    def on_initial_bootstrap(self, process, config, **kwargs):

        if os.environ.get('PYCC_MODE'):
            # This environment is an ion integration test
            log.info('PYCC_MODE: skipping qc_post_processor launch')
            return
        if self.process_exists(process, 'qc_post_processor'):
            # Short circuit the bootstrap to make sure not more than one is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)
        self.run_interval = CFG.get_safe('service.qc_processing.run_interval', 24)

        interval_key = uuid4().hex # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(name='qc_post_processor',
            executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)


        timer_id = self.scheduler_service.create_interval_timer(start_time=str(time.time()),
                end_time='-1', #Run FOREVER
                interval=3600*self.run_interval,
                event_origin=interval_key)
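
For context, a hedged sketch of the consumer side of this pattern: the bootstrap above injects interval_key into the worker's configuration and keys the interval timer to the same value, so the scheduled process can subscribe to the scheduler's TimerEvent stream by origin. The real QCPostProcessing class is not shown on this page, so everything below is illustrative; only the interval_key plumbing and the EventSubscriber usage mirror the snippets here, and the import paths and base class are assumptions.

from pyon.public import log                    # assumed convenience import from pyon
from pyon.event.event import EventSubscriber   # assumed import path
from pyon.ion.process import SimpleProcess     # assumed base class, for the sketch only


class QCPostProcessingSketch(SimpleProcess):
    """Hypothetical worker that runs QC each time the scheduler fires."""

    def on_start(self):
        super(QCPostProcessingSketch, self).on_start()
        # interval_key was injected by the bootstrap via config.process.interval_key
        interval_key = self.CFG.get_safe('process.interval_key')
        self.timer_sub = EventSubscriber(event_type='TimerEvent',
                                         origin=interval_key,
                                         callback=self._on_timer)
        self.timer_sub.start()

    def _on_timer(self, *args, **kwargs):
        # placeholder for the actual QC post-processing run
        log.info('QC post-processing triggered by a scheduler TimerEvent')

    def on_quit(self):
        self.timer_sub.stop()
        super(QCPostProcessingSketch, self).on_quit()
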
Example #2
    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
class BootstrapQCPostProcessor(BootstrapPlugin):
    '''
    Sets up one QC Post Processing worker and registers a
    Scheduler Service interval timer that fires every 24 hours.
    '''

    def on_initial_bootstrap(self, process, config, **kwargs):
        if self.process_exists(process, 'qc_post_processor'):
            # Short circuit the bootstrap to make sure not more than one is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)

        interval_key = uuid4().hex # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(name='qc_post_processor',
            executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)


        timer_id = self.scheduler_service.create_interval_timer(start_time=time.time(),
                end_time=-1, #Run FOREVER
                interval=3600*24,
                event_origin=interval_key)

    def process_exists(self, process, name):
        proc_ids, meta = process.container.resource_registry.find_resources(restype=RT.Process, id_only=True)
        return any([name in p['name'] for p in meta if p['name']])
    def on_start(self):
        #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

        self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process = self)

        self.stream_info  = self.CFG.get_safe('process.publish_streams',{})
        self.stream_names = self.stream_info.keys()
        self.stream_ids   = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods= self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(start_time="now" , interval=self._str_to_secs(self.event_timer_interval),
                event_origin=event_origin, event_subtype="")

        super(VizTransformMatplotlibGraphs,self).on_start()
    def on_initial_bootstrap(self, process, config, **kwargs):
        if self.process_exists(process, 'qc_post_processor'):
            # Short circuit the bootstrap to make sure not more than one is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)

        interval_key = uuid4().hex # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(name='qc_post_processor',
            executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)


        timer_id = self.scheduler_service.create_interval_timer(start_time=time.time(),
                end_time=-1, #Run FOREVER
                interval=3600*24,
                event_origin=interval_key)
    def on_start(self):
        super(DemoStreamAlertTransform,self).on_start()

        #-------------------------------------------------------------------------------------
        # Values that are passed in when the transform is launched
        #-------------------------------------------------------------------------------------
        self.instrument_variable_name = self.CFG.get_safe('process.variable_name', 'input_voltage')
        self.time_field_name = self.CFG.get_safe('process.time_field_name', 'preferred_timestamp')
        self.valid_values = self.CFG.get_safe('process.valid_values', [-200,200])
        self.timer_origin = self.CFG.get_safe('process.timer_origin', 'Interval Timer')
        self.timer_interval = self.CFG.get_safe('process.timer_interval', 6)

        # Check that valid_values is a list
        validate_is_instance(self.valid_values, list)

        # Start the timer
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=self)
        id = self._create_interval_timer_with_end_time(timer_interval=self.timer_interval, end_time=-1)
    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)
Example #8
class BootstrapQCPostProcessor(BootstrapPlugin):
    '''
    Sets up one QC Post Processing worker and registers a Scheduler
    Service interval timer that fires every run_interval hours (24 by default).
    '''
    def on_initial_bootstrap(self, process, config, **kwargs):

        if os.environ.get('PYCC_MODE'):
            # This environment is an ion integration test
            log.info('PYCC_MODE: skipping qc_post_processor launch')
            return
        if self.process_exists(process, 'qc_post_processor'):
            # Short circuit the bootstrap to make sure not more than one is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(
            process=process)
        self.run_interval = CFG.get_safe('service.qc_processing.run_interval',
                                         24)

        interval_key = uuid4().hex  # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(
            name='qc_post_processor',
            executable={
                'module': 'ion.processes.data.transforms.qc_post_processing',
                'class': 'QCPostProcessing'
            })
        process_definition_id = self.process_dispatcher.create_process_definition(
            process_definition)

        process_id = self.process_dispatcher.create_process(
            process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id,
                                                 process_id=process_id,
                                                 configuration=config)

        timer_id = self.scheduler_service.create_interval_timer(
            start_time=str(time.time()),
            end_time='-1',  #Run FOREVER
            interval=3600 * self.run_interval,
            event_origin=interval_key)

    def process_exists(self, process, name):
        proc_ids, meta = process.container.resource_registry.find_resources(
            restype=RT.Process, id_only=True)
        return any([name in p['name'] for p in meta if p['name']])
Example #9
    def on_start(self):
        #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

        self.pubsub_management = PubsubManagementServiceProcessClient(
            process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(
            process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process=self)

        self.stream_info = self.CFG.get_safe('process.publish_streams', {})
        self.stream_names = self.stream_info.keys()
        self.stream_ids = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods = self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent",
                                  callback=self.interval_timer_callback,
                                  origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(
                start_time="now",
                interval=self._str_to_secs(self.event_timer_interval),
                event_origin=event_origin,
                event_subtype="")

        super(VizTransformMatplotlibGraphs, self).on_start()
    def on_start(self):
        super(DemoStreamAlertTransform,self).on_start()

        #-------------------------------------------------------------------------------------
        # Values that are passed in when the transform is launched
        #-------------------------------------------------------------------------------------
        self.instrument_variable_name = self.CFG.get_safe('process.variable_name', 'input_voltage')
        self.time_field_name = self.CFG.get_safe('process.time_field_name', 'preferred_timestamp')
        self.valid_values = self.CFG.get_safe('process.valid_values', [-200,200])
        self.timer_origin = self.CFG.get_safe('process.timer_origin', 'Interval Timer')
        self.timer_interval = self.CFG.get_safe('process.timer_interval', 6)

        # Check that valid_values is a list
        validate_is_instance(self.valid_values, list)

        # Start the timer
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=self)
        id = self._create_interval_timer_with_end_time(timer_interval=self.timer_interval, end_time=-1)
class BootstrapQCPostProcessor(BootstrapPlugin):
    """
    Sets up one QC Post Processing worker and registers a Scheduler
    Service interval timer that fires every run_interval hours (24 by default).
    """

    def on_initial_bootstrap(self, process, config, **kwargs):

        if os.environ.get("PYCC_MODE"):
            # This environment is an ion integration test
            log.info("PYCC_MODE: skipping qc_post_processor launch")
            return
        if self.process_exists(process, "qc_post_processor"):
            # Short circuit the bootstrap to make sure not more than one is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)
        self.run_interval = CFG.get_safe("service.qc_processing.run_interval", 24)

        interval_key = uuid4().hex  # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(
            name="qc_post_processor",
            executable={"module": "ion.processes.data.transforms.qc_post_processing", "class": "QCPostProcessing"},
        )
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)

        timer_id = self.scheduler_service.create_interval_timer(
            start_time=str(time.time()),
            end_time="-1",  # Run FOREVER
            interval=3600 * self.run_interval,
            event_origin=interval_key,
        )

    def process_exists(self, process, name):
        proc_ids, meta = process.container.resource_registry.find_resources(restype=RT.Process, id_only=True)
        return any([name in p["name"] for p in meta if p["name"]])
class BootstrapQCPostProcessor(BootstrapPlugin):
    '''
    Sets up one QC Post Processing worker and registers a Scheduler
    Service interval timer that fires every run_interval hours (24 by default).
    '''

    def on_initial_bootstrap(self, process, config, **kwargs):
        # TODO: Temporary skip while refactoring QC work for M088
        return

        if os.environ.get('PYCC_MODE'):
            # This environment is an ion integration test
            log.info('PYCC_MODE: skipping qc_post_processor launch')
            return
        if self.process_exists(process, 'qc_post_processor'):
            # Short circuit the bootstrap to make sure not more than one is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)
        self.run_interval = CFG.get_safe('service.qc_processing.run_interval', 24)

        interval_key = uuid4().hex # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(name='qc_post_processor',
            executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)


        timer_id = self.scheduler_service.create_interval_timer(start_time=str(time.time()),
                end_time='-1', #Run FOREVER
                interval=3600*self.run_interval,
                event_origin=interval_key)

    def process_exists(self, process, name):
        proc_ids, meta = process.container.resource_registry.find_resources(restype=RT.Process, id_only=True)
        return any([name in p['name'] for p in meta if p['name']])
class VizTransformMatplotlibGraphs(TransformDataProcess, TransformEventListener):

    """
    Instantiates worker processes that subscribe to data streams and convert
    incoming data from CDM format into Matplotlib graphs.
    """
    output_bindings = ['graph_image_param_dict']


    def on_start(self):

        self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=self)

        self.stream_info  = self.CFG.get_safe('process.publish_streams',{})
        self.stream_names = self.stream_info.keys()
        self.stream_ids   = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods= self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        event_timer_interval = self.CFG.get_safe('event_timer_interval')
        if event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(start_time="now" , interval=event_timer_interval,
                event_origin=event_origin, event_subtype="")

        super(VizTransformMatplotlibGraphs,self).on_start()

    def recv_packet(self, packet, in_stream_route, in_stream_id):
        log.info('Received packet')
        outgoing = VizTransformMatplotlibGraphsAlgorithm.execute(packet, params=self.get_stream_definition())
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(outgoing)

    def get_stream_definition(self):
        stream_id = self.stream_ids[0]
        stream_def = self.pubsub_management.read_stream_definition(stream_id=stream_id)
        return stream_def._id

    def process_event(self, msg, headers):

        return

    def interval_timer_callback(self, *args, **kwargs):
        #Find out the input data product to this process

        # retrieve data for every case of the output graph
        return

    def on_quit(self):

        #Cancel the timer
        if hasattr(self, 'interval_timer_id'):
            self.ssclient.cancel_timer(self.interval_timer_id)

        super(VizTransformMatplotlibGraphs,self).on_quit()
class DemoStreamAlertTransform(TransformStreamListener, TransformEventListener, TransformEventPublisher):

    def __init__(self):
        super(DemoStreamAlertTransform,self).__init__()

        # the queue of granules that arrive in between two timer events
        self.granules = gevent.queue.Queue()
        self.instrument_variable_name = None
        self.timer_origin = None
        self.timer_interval = None
        self.count = 0
        self.timer_cleanup = (None, None)

    def on_start(self):
        super(DemoStreamAlertTransform,self).on_start()

        #-------------------------------------------------------------------------------------
        # Values that are passed in when the transform is launched
        #-------------------------------------------------------------------------------------
        self.instrument_variable_name = self.CFG.get_safe('process.variable_name', 'input_voltage')
        self.time_field_name = self.CFG.get_safe('process.time_field_name', 'preferred_timestamp')
        self.valid_values = self.CFG.get_safe('process.valid_values', [-200,200])
        self.timer_origin = self.CFG.get_safe('process.timer_origin', 'Interval Timer')
        self.timer_interval = self.CFG.get_safe('process.timer_interval', 6)

        # Check that valid_values is a list
        validate_is_instance(self.valid_values, list)

        # Start the timer
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=self)
        id = self._create_interval_timer_with_end_time(timer_interval=self.timer_interval, end_time=-1)

    def on_quit(self):
        super(DemoStreamAlertTransform,self).on_quit()

        self.ssclient, sid = self.timer_cleanup
        DemoStreamAlertTransform._cleanup_timer(self.ssclient, sid)

    @staticmethod
    def _cleanup_timer(scheduler, schedule_id):
        """
        Do a friendly cancel of the scheduled event.
        If it fails, it's ok.
        """
        try:
            scheduler.cancel_timer(schedule_id)
        except:
            log.debug("Couldn't cancel")

    def now_utc(self):
        return time.mktime(datetime.datetime.utcnow().timetuple())

    def _create_interval_timer_with_end_time(self,timer_interval= None, end_time = None ):
        '''
        A convenience method to set up an interval timer with an end time
        '''
        self.timer_received_time = 0
        self.timer_interval = timer_interval

        start_time = self.now_utc()
        if not end_time:
            end_time = start_time + 2 * timer_interval + 1

        log.debug("got the end time here!! %s" % end_time)

        # Set up the interval timer. The scheduler will publish events with origin set to "Interval Timer"
        sid = self.ssclient.create_interval_timer(start_time="now" ,
            interval=self.timer_interval,
            end_time=end_time,
            event_origin="Interval Timer",
            event_subtype="")

        self.timer_cleanup =  (self.ssclient, sid)

        return sid

    def recv_packet(self, msg, stream_route, stream_id):
        '''
        The callback method. For situations like bad or no data, publish an alert event.

        @param msg granule
        @param stream_route StreamRoute object
        @param stream_id str
        '''

        log.debug("DemoStreamAlertTransform received a packet!")

        #-------------------------------------------------------------------------------------
        # Set up the config to use to pass info to the transform algorithm
        #-------------------------------------------------------------------------------------
        config = DotDict()
        config.valid_values = self.valid_values
        config.variable_name = self.instrument_variable_name
        config.time_field_name = self.time_field_name

        #-------------------------------------------------------------------------------------
        # Store the granule received
        #-------------------------------------------------------------------------------------
        self.granules.put(msg)

        #-------------------------------------------------------------------------------------
        # Check for good and bad values in the granule
        #-------------------------------------------------------------------------------------
        bad_values, bad_value_times = AlertTransformAlgorithm.execute(msg, config = config)

        #-------------------------------------------------------------------------------------
        # If there are any bad values, publish an alert event for each of them, with information about their time stamp
        #-------------------------------------------------------------------------------------
        if bad_values:
            for bad_value, time_stamp in zip(bad_values, bad_value_times):
                # Create the event object
                event = DeviceStatusEvent(  origin = 'DemoStreamAlertTransform',
                    sub_type = self.instrument_variable_name,
                    value = bad_value,
                    time_stamp = time_stamp,
                    valid_values = self.valid_values,
                    state = DeviceStatusType.OUT_OF_RANGE,
                    description = "Event to deliver the status of instrument.")

                # Publish the event
                self.publisher._publish_event(  event_msg = event,
                    origin=event.origin,
                    event_type = event.type_)

    def process_event(self, msg, headers):
        """
        When timer events come, if no granule has arrived since the last timer event, publish an alarm
        """
        self.count += 1

        log.debug("Got a timer event::: count: %s" % self.count )

        if msg.origin == self.timer_origin:

            if self.granules.qsize() == 0:
                # Create the event object
                event = DeviceCommsEvent( origin = 'DemoStreamAlertTransform',
                    sub_type = self.instrument_variable_name,
                    state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
                    lapse_interval_seconds=self.timer_interval,
                    description = "Event to deliver the communications status of the instrument.")
                # Publish the event
                self.publisher._publish_event(  event_msg = event,
                    origin=event.origin,
                    event_type = event.type_)
            else:
                self.granules.queue.clear()
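
The transform above takes all of its knobs from process configuration. A minimal launch sketch, reusing the ProcessDefinition / ProcessDispatcher pattern from the bootstrap examples earlier on this page; the config keys match the CFG.get_safe() calls in on_start, but the definition name, module path, and import locations are assumptions.

from pyon.util.containers import DotDict           # assumed import path
from interface.objects import ProcessDefinition    # assumed import path


def launch_demo_alert_transform(process_dispatcher):
    """Hedged sketch: spawn DemoStreamAlertTransform with explicit configuration."""
    config = DotDict()
    config.process.variable_name = 'input_voltage'
    config.process.time_field_name = 'preferred_timestamp'
    config.process.valid_values = [-200, 200]
    config.process.timer_origin = 'Interval Timer'
    config.process.timer_interval = 6              # seconds between scheduler timer events

    process_definition = ProcessDefinition(
        name='demo_stream_alert_transform',        # hypothetical definition name
        executable={'module': 'ion.processes.data.transforms.event_alert_transform',  # assumed module path
                    'class': 'DemoStreamAlertTransform'})
    pd_id = process_dispatcher.create_process_definition(process_definition)
    pid = process_dispatcher.create_process(pd_id)
    process_dispatcher.schedule_process(pd_id, process_id=pid, configuration=config)
    return pid
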
Example #15
class VizTransformMatplotlibGraphs(TransformStreamPublisher,
                                   TransformEventListener,
                                   TransformStreamListener):
    """
    Instantiates worker processes that subscribe to data streams and convert
    incoming data from CDM format into Matplotlib graphs.
    """
    output_bindings = ['graph_image_param_dict']
    event_timer_interval = None

    def on_start(self):
        #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

        self.pubsub_management = PubsubManagementServiceProcessClient(
            process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(
            process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process=self)

        self.stream_info = self.CFG.get_safe('process.publish_streams', {})
        self.stream_names = self.stream_info.keys()
        self.stream_ids = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods = self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent",
                                  callback=self.interval_timer_callback,
                                  origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(
                start_time="now",
                interval=self._str_to_secs(self.event_timer_interval),
                event_origin=event_origin,
                event_subtype="")

        super(VizTransformMatplotlibGraphs, self).on_start()

    # when the transform is used as a data process
    def recv_packet(self, packet, in_stream_route, in_stream_id):
        #Check to see if the class instance was set up as a event triggered transform. If yes, skip the packet
        if self.event_timer_interval:
            return

        log.info('Received packet')
        mpl_data_granule = VizTransformMatplotlibGraphsAlgorithm.execute(
            packet, params=self.get_stream_definition())
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(mpl_data_granule)

    def get_stream_definition(self):
        stream_id = self.stream_ids[0]
        stream_def = self.pubsub_management.read_stream_definition(
            stream_id=stream_id)
        return stream_def._id

    def process_event(self, msg, headers):

        return

    def interval_timer_callback(self, *args, **kwargs):
        #Find out the input data product to this process
        in_dp_id = self.CFG.get_safe('in_dp_id')

        print " >>>>>>>>>>>>>> IN DP ID from cfg : ", in_dp_id

        # get the dataset_id associated with the data_product. Need it to do the data retrieval
        ds_ids, _ = self.rrclient.find_objects(in_dp_id, PRED.hasDataset,
                                               RT.Dataset, True)
        if ds_ids is None or not ds_ids:
            return None

        # retrieve data for the specified time interval. Set up the query from the passed-in config first
        query = {}

        param_list_str = self.CFG.get_safe('parameters')
        if param_list_str:
            query['parameters'] = param_list_str.split(', ')
            # append time if not present in list of parameters
            if not 'time' in query['parameters']:
                query['parameters'].append('time')

        query['start_time'] = query['end_time'] = ntplib.system_to_ntp_time(
            time.time())  # Now
        query['stride_time'] = 1
        if self.CFG.get_safe('graph_time_period'):
            query['start_time'] = query['end_time'] - self._str_to_secs(
                self.CFG.get_safe('graph_time_period'))

        #print " >>>>>>>>>>>>>> QUERY = ", query

        #retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0],{'start_time':start_time,'end_time':end_time})
        retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0],
                                                                query=query)

        # add extra parameters from the config to the query; they are not needed for data retrieval itself
        if self.CFG.get_safe('resolution'):
            query['resolution'] = self.CFG.get_safe('resolution')

        # send the granule through the Algorithm code to get the matplotlib graphs
        mpl_pdict_id = self.dsm_client.read_parameter_dictionary_by_name(
            'graph_image_param_dict', id_only=True)

        mpl_stream_def = self.pubsub_client.create_stream_definition(
            'mpl', parameter_dictionary_id=mpl_pdict_id)
        fileName = self.CFG.get_safe('graph_time_period')
        mpl_data_granule = VizTransformMatplotlibGraphsAlgorithm.execute(
            retrieved_granule,
            config=query,
            params=mpl_stream_def,
            fileName=fileName)

        if mpl_data_granule is None:
            return None

        # publish on all specified output streams
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(mpl_data_granule)

        return

    def _str_to_secs(self, time_period):
        # this method converts commonly used time periods to their equivalent in seconds
        # separate the alpha and numeric parts of the time period
        time_n = time_period.lower().rstrip('abcdefghijklmnopqrstuvwxyz ')
        time_a = time_period.lower().lstrip('0123456789. ')

        # determine whether the user specified secs, mins, hours, days, weeks, months, or years
        factor = None
        if time_a == 'sec' or time_a == "secs" or time_a == 'second' or time_a == "seconds":
            factor = 1
        if time_a == "min" or time_a == "mins" or time_a == "minute" or time_a == "minutes":
            factor = 60
        if time_a == "hr" or time_a == "hrs" or time_a == "hour" or time_a == "hours":
            factor = 60 * 60
        if time_a == "day" or time_a == "days":
            factor = 60 * 60 * 24
        if time_a == "wk" or time_a == "wks" or time_a == "week" or time_a == "weeks":
            factor = 60 * 60 * 24 * 7
        if time_a == "mon" or time_a == "mons" or time_a == "month" or time_a == "months":
            factor = 60 * 60 * 24 * 30
        if time_a == "yr" or time_a == "yrs" or time_a == "year" or time_a == "years":
            factor = 60 * 60 * 24 * 365

        time_period_secs = float(time_n) * factor
        return time_period_secs
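
    # A quick sanity check of _str_to_secs() above, following directly from the
    # factor table: '30 secs' -> 30.0, '30 mins' -> 1800.0, '2 hrs' -> 7200.0,
    # '1 day' -> 86400.0. An unrecognised suffix leaves factor as None, in which
    # case float(time_n) * factor raises a TypeError, so callers are expected to
    # use one of the suffixes listed above.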

    def on_quit(self):

        #Cancel the timer
        if hasattr(self, 'interval_timer_id'):
            self.ssclient.cancel_timer(self.interval_timer_id)

        super(VizTransformMatplotlibGraphs, self).on_quit()
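
The event-driven path of this transform is driven entirely by configuration. A hedged sketch of what that configuration might look like when the process is spawned; the keys mirror the CFG.get_safe() lookups above, but the concrete values and identifiers are illustrative placeholders.

from pyon.util.containers import DotDict        # assumed import path

mpl_config = DotDict()
mpl_config.process.publish_streams = {'graph_image_stream': 'stream_id_placeholder'}  # output name -> stream id
mpl_config.graph_gen_interval = '10 mins'        # parsed by _str_to_secs() -> 600 seconds between timer events
mpl_config.graph_time_period = '2 hrs'           # window of data retrieved on each timer callback
mpl_config.parameters = 'temperature, pressure'  # comma-separated; 'time' is appended automatically
mpl_config.resolution = '100'                    # passed through to the graphing algorithm, not to retrieval
mpl_config.in_dp_id = 'data_product_id_placeholder'  # data product whose dataset is retrieved
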
class TestSchedulerService(IonIntegrationTestCase):

    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

    def tearDown(self):
        pass

    def now_utc(self):
        return time.mktime(datetime.datetime.utcnow().timetuple())

    def test_create_interval_timer(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_233"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 10
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until two events are published
        gevent.sleep((self.interval_timer_interval * 2) + 1)

        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

        # Validate the number of timer counts
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))


    def test_system_restart(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_4444"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.on_restart_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 20
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until 1 event is published
        gevent.sleep((self.interval_timer_interval) + 1)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        # Validate the number of events generated
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))

        self.ssclient.on_system_restart()

        # after system restart, validate the timer is restored
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until another event is published
        start_time = datetime.datetime.utcnow()
        gevent.sleep((self.interval_timer_interval * 2) + 1)
        time_diff = (datetime.datetime.utcnow() - start_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        # Validate the number of events generated
        self.assertGreater(self.interval_timer_count, timer_counts)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

    def on_restart_callback(self, *args, **kwargs):
        self.interval_timer_count += 1
        log.debug("test_scheduler: on_restart_callback: time: " + str(self.now_utc()) + " count: " + str(self.interval_timer_count))

    def test_create_interval_timer_with_end_time(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # Validate no more events are published after end_time expires
        # Validate the timer was canceled after the end_time expires

        self.interval_timer_count_2 = 0
        self.interval_timer_sent_time_2 = 0
        self.interval_timer_received_time_2 = 0
        self.interval_timer_interval_2 = 3

        event_origin = "Interval_Timer_2"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback_with_end_time, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time_2 = start_time + 7
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval_2,
            end_time=self.interval_timer_end_time_2,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time_2 = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait until all events are published
        gevent.sleep((self.interval_timer_end_time_2 - start_time) + self.interval_timer_interval_2 + 1)

        # Validate the number of events generated
        self.assertEqual(self.interval_timer_count_2, 2, "Invalid number of timeouts generated. Number of event: %d Expected: 2 Timer id: %s " %(self.interval_timer_count_2, id))

        # Validate the timer was canceled after the end_time is expired
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

    def interval_timer_callback_with_end_time(self, *args, **kwargs):
        self.interval_timer_received_time_2 = datetime.datetime.utcnow()
        self.interval_timer_count_2 += 1
        time_diff = math.fabs( ((self.interval_timer_received_time_2 - self.interval_timer_sent_time_2).total_seconds())
                               - (self.interval_timer_interval_2 * self.interval_timer_count_2) )
        # Assert expire time is within +-10 seconds
        self.assertTrue(time_diff <= 10)
        log.debug("test_scheduler: interval_timer_callback_with_end_time: time:" + str(self.interval_timer_received_time_2) + " count:" + str(self.interval_timer_count_2))


    def interval_timer_callback(self, *args, **kwargs):
        self.interval_timer_received_time = datetime.datetime.utcnow()
        self.interval_timer_count += 1
        time_diff = math.fabs( ((self.interval_timer_received_time - self.interval_timer_sent_time).total_seconds())
                               - (self.interval_timer_interval * self.interval_timer_count) )
        # Assert expire time is within +-10 seconds
        self.assertTrue(time_diff <= 10)
        log.debug("test_scheduler: interval_timer_callback: time:" + str(self.interval_timer_received_time) + " count:" + str(self.interval_timer_count))

    def test_cancel_single_timer(self):
        # test creating a new timer that is one-time-only

        # create the timer resource
        # create the event listener
        # call scheduler to set the timer

        # create then cancel the timer, verify that event is not received

        # create the timer resource
        # create the event listener
        # call scheduler to set the timer
        # call scheduler to cancel the timer
        # wait until after expiry to verify that event is not sent
        self.single_timer_count = 0
        event_origin = "Time_of_Day"

        sub = EventSubscriber(event_type="TimerEvent", callback=self.single_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        now = datetime.datetime.utcnow() + timedelta(seconds=3)
        times_of_day =[{'hour': str(now.hour),'minute' : str(now.minute), 'second':str(now.second) }]
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day,  expires=self.now_utc()+3, event_origin=event_origin, event_subtype="test")
        self.assertEqual(type(id), str)
        self.ssclient.cancel_timer(id)
        gevent.sleep(3)

        # Validate the event is not generated
        self.assertEqual(self.single_timer_count, 0, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: 0 Timer id: %s " %(self.single_timer_count, id))

    def single_timer_callback (self, *args, **kwargs):
        self.single_timer_count += 1
        log.debug("test_scheduler: single_timer_call_back: time:" + str(self.now_utc()) + " count:" + str(self.single_timer_count))

    def test_create_forever_interval_timer(self):
        # Test creating interval timer that runs forever

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval Timer Forever"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        id = self.ssclient.create_interval_timer(start_time= self.now_utc(), interval=self.interval_timer_interval,
            end_time=-1,
            event_origin=event_origin, event_subtype=event_origin)
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait for 4 events to be published
        gevent.sleep((self.interval_timer_interval * 4) + 1)
        self.ssclient.cancel_timer(id)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)


        # Validate the timer id is invalid once it has been canceled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate events are not generated after canceling the timer
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))

    def test_timeoffday_timer(self):
        # test creating a new timer that is one-time-only
        # create the timer resource
        # get the current time, set the timer to several seconds from current time
        # create the event listener
        # call scheduler to set the timer
        # verify that event arrival is within one or two seconds of the current time

        event_origin = "Time Of Day2"
        self.expire_sec_1 = 4
        self.expire_sec_2 = 5
        self.tod_count = 0
        expire1 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_1)
        expire2 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_2)
        # Create two timers
        times_of_day =[{'hour': str(expire1.hour),'minute' : str(expire1.minute), 'second':str(expire1.second) },
                       {'hour': str(expire2.hour),'minute' : str(expire2.minute), 'second':str(expire2.second)}]

        sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # Expires in two days
        expires = time.mktime((datetime.datetime.utcnow() + timedelta(days=2)).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait until all events are generated
        gevent.sleep(9)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.expire_sec_1) + math.floor(time_diff/self.expire_sec_2)

        # After waiting, validate only 2 events are generated.
        self.assertEqual(self.tod_count, 2, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.tod_count, timer_counts, id))

        # Cancel the timer
        self.ssclient.cancel_timer(id)



    def tod_callback(self, *args, **kwargs):
        tod_receive_time = datetime.datetime.utcnow()
        self.tod_count += 1
        if self.tod_count == 1:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_1)
            self.assertTrue(time_diff <= 2)
        elif self.tod_count == 2:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_2)
            self.assertTrue(time_diff <= 2)
        log.debug("test_scheduler: tod_callback: time:" + str(tod_receive_time) + " count:" + str(self.tod_count))

    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_quit_stops_timers(self):

        ar = AsyncResult()
        def cb(*args, **kwargs):
            ar.set(args)

            self.interval_timer_count += 1

        event_origin = "test_quitter"
        sub = EventSubscriber(event_type="TimerEvent", callback=cb, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        tid = self.ssclient.create_interval_timer(start_time="now",
                                                  interval=1,
                                                  event_origin=event_origin)

        # wait until at least one scheduled message
        ar.get(timeout=5)

        # shut it down!
        p = self.container.proc_manager.procs_by_name['scheduler']
        self.container.terminate_process(p.id)

        # assert empty
        self.assertEquals(p.schedule_entries, {})
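
Stripped of the test harness, the scheduler interaction exercised above reduces to a handful of client calls. A minimal sketch, assuming the same SchedulerServiceProcessClient and EventSubscriber used throughout these examples are already constructed against a running container:

def start_interval_timer(ssclient, on_fire, origin="sketch_timer"):
    # listen for the scheduler's TimerEvent keyed by origin
    sub = EventSubscriber(event_type="TimerEvent", callback=on_fire, origin=origin)
    sub.start()

    # fire every 3 seconds, forever (end_time=-1), until cancelled
    timer_id = ssclient.create_interval_timer(start_time="now", interval=3,
                                              end_time=-1,
                                              event_origin=origin, event_subtype="")
    return timer_id, sub


def stop_interval_timer(ssclient, timer_id, sub):
    # cancelling a second time raises BadRequest, as the tests above assert
    ssclient.cancel_timer(timer_id)
    sub.stop()
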
Example #17
class DemoStreamAlertTransform(TransformStreamListener, TransformEventListener,
                               TransformEventPublisher):
    def __init__(self):
        super(DemoStreamAlertTransform, self).__init__()

        # the queue of granules that arrive in between two timer events
        self.granules = gevent.queue.Queue()
        self.instrument_variable_name = None
        self.timer_origin = None
        self.timer_interval = None
        self.timer_cleanup = (None, None)
        self.origin = ''

    def on_start(self):
        super(DemoStreamAlertTransform, self).on_start()

        #-------------------------------------------------------------------------------------
        # Values that are passed in when the transform is launched
        #-------------------------------------------------------------------------------------
        self.instrument_variable_name = self.CFG.get_safe(
            'process.variable_name', 'input_voltage')
        self.time_field_name = self.CFG.get_safe('process.time_field_name',
                                                 'preferred_timestamp')
        self.valid_values = self.CFG.get_safe('process.valid_values',
                                              [-200, 200])
        self.timer_origin = self.CFG.get_safe('process.timer_origin',
                                              'Interval Timer')
        self.timer_interval = self.CFG.get_safe('process.timer_interval', 6)

        # Check that valid_values is a list
        validate_is_instance(self.valid_values, list)

        # Start the timer
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node,
                                                      process=self)
        id = self._create_interval_timer_with_end_time(
            timer_interval=self.timer_interval, end_time=-1)

    def on_quit(self):
        super(DemoStreamAlertTransform, self).on_quit()

        self.ssclient, sid = self.timer_cleanup
        DemoStreamAlertTransform._cleanup_timer(self.ssclient, sid)

    @staticmethod
    def _cleanup_timer(scheduler, schedule_id):
        """
        Do a friendly cancel of the scheduled event.
        If it fails, it's ok.
        """
        try:
            scheduler.cancel_timer(schedule_id)
        except:
            log.debug("Couldn't cancel")

    def now_utc(self):
        return time.mktime(datetime.datetime.utcnow().timetuple())

    def _create_interval_timer_with_end_time(self,
                                             timer_interval=None,
                                             end_time=None):
        '''
        A convenience method to set up an interval timer with an end time
        '''
        self.timer_received_time = 0
        self.timer_interval = timer_interval

        start_time = self.now_utc()
        if not end_time:
            end_time = start_time + 2 * timer_interval + 1

        # Set up the interval timer. The scheduler will publish events with origin set to "Interval Timer"
        sid = self.ssclient.create_interval_timer(
            start_time="now",
            interval=self.timer_interval,
            end_time=end_time,
            event_origin="Interval Timer",
            event_subtype="")

        self.timer_cleanup = (self.ssclient, sid)

        return sid

    def recv_packet(self, msg, stream_route, stream_id):
        '''
        The callback method. For situations like bad or no data, publish an alert event.

        @param msg granule
        @param stream_route StreamRoute object
        @param stream_id str
        '''

        log.debug("DemoStreamAlertTransform received a packet!: %s" % msg)
        log.debug("type of packet received by transform: %s", type(msg))

        #-------------------------------------------------------------------------------------
        # Set up the config to use to pass info to the transform algorithm
        #-------------------------------------------------------------------------------------
        config = DotDict()
        config.valid_values = self.valid_values
        config.variable_name = self.instrument_variable_name
        config.time_field_name = self.time_field_name

        #-------------------------------------------------------------------------------------
        # Store the granule received
        #-------------------------------------------------------------------------------------
        self.granules.put(msg)

        #-------------------------------------------------------------------------------------
        # Check for good and bad values in the granule
        #-------------------------------------------------------------------------------------
        bad_values, bad_value_times, self.origin = AlertTransformAlgorithm.execute(
            msg, config=config)

        log.debug(
            "DemoStreamAlertTransform got the origin of the event as: %s" %
            self.origin)

        #-------------------------------------------------------------------------------------
        # If there are any bad values, publish an alert event for the granule
        #-------------------------------------------------------------------------------------
        if bad_values:
            # Publish the event
            self.publisher.publish_event(
                event_type='DeviceStatusEvent',
                origin=self.origin,
                origin_type='PlatformDevice',
                sub_type=self.instrument_variable_name,
                values=bad_values,
                time_stamps=bad_value_times,
                valid_values=self.valid_values,
                state=DeviceStatusType.OUT_OF_RANGE,
                description="Event to deliver the status of instrument.")

            log.debug("DemoStreamAlertTransform published a BAD DATA event")

    def process_event(self, msg, headers):
        """
        When timer events come, if no granule has arrived since the last timer event, publish an alarm
        """
        if msg.origin == self.timer_origin and self.origin:

            if self.granules.qsize() == 0:
                # Publish the event
                self.publisher.publish_event(
                    event_type='DeviceCommsEvent',
                    origin=self.origin,
                    origin_type='PlatformDevice',
                    sub_type=self.instrument_variable_name,
                    time_stamp=int(time.time() +
                                   2208988800),  # granules use NTP, not Unix, time (2208988800 s = 1900-to-1970 offset)
                    #                    time_stamp = get_ion_ts(),
                    state=DeviceCommsType.DATA_DELIVERY_INTERRUPTION,
                    lapse_interval_seconds=self.timer_interval,
                    description=
                    "Event to deliver the communications status of the instrument."
                )

                log.debug("DemoStreamAlertTransform published a NO DATA event")

            else:
                self.granules.queue.clear()
class VizTransformMatplotlibGraphs(TransformStreamPublisher, TransformEventListener, TransformStreamListener):

    """
    This class is used for instantiating worker processes that have subscriptions to data streams and convert
    incoming data from CDM format to Matplotlib graphs

    """
    output_bindings = ['graph_image_param_dict']
    event_timer_interval = None


    def on_start(self):
        #print ">>>>>>>>>>>>>>>>>>>>>> MPL CFG = ", self.CFG

        self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
        self.ssclient = SchedulerServiceProcessClient(process=self)
        self.rrclient = ResourceRegistryServiceProcessClient(process=self)
        self.data_retriever_client = DataRetrieverServiceProcessClient(process=self)
        self.dsm_client = DatasetManagementServiceProcessClient(process=self)
        self.pubsub_client = PubsubManagementServiceProcessClient(process = self)

        self.stream_info  = self.CFG.get_safe('process.publish_streams',{})
        self.stream_names = self.stream_info.keys()
        self.stream_ids   = self.stream_info.values()

        if not self.stream_names:
            raise BadRequest('MPL Transform has no output streams.')

        graph_time_periods= self.CFG.get_safe('graph_time_periods')

        # If this is meant to be an event driven process, schedule an event to be generated every few minutes/hours
        self.event_timer_interval = self.CFG.get_safe('graph_gen_interval')
        if self.event_timer_interval:
            event_origin = "Interval_Timer_Matplotlib"
            sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
            sub.start()

            self.interval_timer_id = self.ssclient.create_interval_timer(start_time="now" , interval=self._str_to_secs(self.event_timer_interval),
                event_origin=event_origin, event_subtype="")

        super(VizTransformMatplotlibGraphs,self).on_start()
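
    # A minimal configuration sketch for launching this transform in event-driven mode.
    # The keys are the ones read via CFG.get_safe() in this class; the concrete values
    # (stream id, data product id, intervals) are hypothetical placeholders.
    #
    #   config = DotDict()
    #   config.process.publish_streams = {'graph_image_param_dict': graph_stream_id}
    #   config.graph_gen_interval = '2 hrs'        # parsed by _str_to_secs() below
    #   config.graph_time_period = '24 hrs'        # window of data retrieved per run
    #   config.in_dp_id = data_product_id
    #   config.parameters = 'time, temp, salinity' # optional, comma+space separated
    #   config.resolution = '1 hr'                 # optional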

    # when the transform is used as a data process
    def recv_packet(self, packet, in_stream_route, in_stream_id):
        #Check to see if the class instance was set up as a event triggered transform. If yes, skip the packet
        if self.event_timer_interval:
            return

        log.info('Received packet')
        mpl_data_granule = VizTransformMatplotlibGraphsAlgorithm.execute(packet, params=self.get_stream_definition())
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(mpl_data_granule)

    def get_stream_definition(self):
        stream_id = self.stream_ids[0]
        stream_def = self.pubsub_management.read_stream_definition(stream_id=stream_id)
        return stream_def._id

    def process_event(self, msg, headers):

        return

    def interval_timer_callback(self, *args, **kwargs):
        #Find out the input data product to this process
        in_dp_id = self.CFG.get_safe('in_dp_id')

        print " >>>>>>>>>>>>>> IN DP ID from cfg : ", in_dp_id

        # get the dataset_id associated with the data_product. Need it to do the data retrieval
        ds_ids,_ = self.rrclient.find_objects(in_dp_id, PRED.hasDataset, RT.Dataset, True)
        if ds_ids is None or not ds_ids:
            return None

        # retrieve data for the specified time interval. Setup up query from pass config first
        query = {}

        param_list_str = self.CFG.get_safe('parameters')
        if param_list_str:
            query['parameters'] = param_list_str.split(', ')
            # append time if not present in list of parameters
            if not 'time' in query['parameters']:
                query['parameters'].append('time')


        query['start_time'] = query['end_time'] = ntplib.system_to_ntp_time(time.time()) # Now
        query['stride_time'] = 1
        if self.CFG.get_safe('graph_time_period'):
            query['start_time'] = query['end_time'] - self._str_to_secs(self.CFG.get_safe('graph_time_period'))

        #print " >>>>>>>>>>>>>> QUERY = ", query

        #retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0],{'start_time':start_time,'end_time':end_time})
        retrieved_granule = self.data_retriever_client.retrieve(ds_ids[0], query=query)

        # add extra parameters from the config to the query; these are not needed for data retrieval but are used by the graphing algorithm
        if self.CFG.get_safe('resolution'):
            query['resolution'] = self.CFG.get_safe('resolution')

        # send the granule through the Algorithm code to get the matplotlib graphs
        mpl_pdict_id = self.dsm_client.read_parameter_dictionary_by_name('graph_image_param_dict',id_only=True)

        mpl_stream_def = self.pubsub_client.create_stream_definition('mpl', parameter_dictionary_id=mpl_pdict_id)
        fileName = self.CFG.get_safe('graph_time_period')
        mpl_data_granule = VizTransformMatplotlibGraphsAlgorithm.execute(retrieved_granule, config=query, params=mpl_stream_def, fileName=fileName)

        if mpl_data_granule is None:
            return None

        # publish on all specified output streams
        for stream_name in self.stream_names:
            publisher = getattr(self, stream_name)
            publisher.publish(mpl_data_granule)

        return

    def _str_to_secs(self, time_period):
        # this method converts commonly used time period strings to their equivalent in seconds
        # separate the alphabetic and numeric parts of the time period
        time_n = time_period.lower().rstrip('abcdefghijklmnopqrstuvwxyz ')
        time_a = time_period.lower().lstrip('0123456789. ')

        # determine whether the user specified secs, mins, hours, days, weeks, months or years
        if time_a in ('sec', 'secs', 'second', 'seconds'):
            factor = 1
        elif time_a in ('min', 'mins', 'minute', 'minutes'):
            factor = 60
        elif time_a in ('hr', 'hrs', 'hour', 'hours'):
            factor = 60 * 60
        elif time_a in ('day', 'days'):
            factor = 60 * 60 * 24
        elif time_a in ('wk', 'wks', 'week', 'weeks'):
            factor = 60 * 60 * 24 * 7
        elif time_a in ('mon', 'mons', 'month', 'months'):
            factor = 60 * 60 * 24 * 30
        elif time_a in ('yr', 'yrs', 'year', 'years'):
            factor = 60 * 60 * 24 * 365
        else:
            # fail with a clear error instead of a TypeError from float * None
            raise BadRequest('Unrecognized time period: %s' % time_period)

        time_period_secs = float(time_n) * factor
        return time_period_secs
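
    # A quick usage sketch for _str_to_secs (return values follow from the factors above;
    # the instance name `transform` is illustrative only):
    #   transform._str_to_secs('30 mins')   # -> 1800.0
    #   transform._str_to_secs('2 hrs')     # -> 7200.0
    #   transform._str_to_secs('1 day')     # -> 86400.0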


    def on_quit(self):

        #Cancel the timer
        if hasattr(self, 'interval_timer_id'):
            self.ssclient.cancel_timer(self.interval_timer_id)

        super(VizTransformMatplotlibGraphs,self).on_quit()
class TestSchedulerService(IonIntegrationTestCase):

    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

    def tearDown(self):
        pass

    def now_utc(self):
        return time.mktime(datetime.datetime.utcnow().timetuple())

    def test_create_interval_timer(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_233"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 10
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until two events are published
        gevent.sleep((self.interval_timer_interval * 2) + 1)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)


        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

        # Validate only 2 events are published
        self.assertEqual(self.interval_timer_count, 2)

    def test_system_restart(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_4444"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.on_restart_callback, origin=event_origin)
        sub.start()

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 20
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until 1 event is published
        gevent.sleep((self.interval_timer_interval) + 1)
        # Validate 1 event is published
        self.assertEqual(self.interval_timer_count, 1)

        self.ssclient.on_system_restart()

        # after system restart, validate the timer is restored
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until another event is published
        gevent.sleep((self.interval_timer_interval * 2) + 1)

        # Validate 1 event is published
        self.assertGreater(self.interval_timer_count, 2)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

    def on_restart_callback(self, *args, **kwargs):
        self.interval_timer_count += 1

    def test_create_interval_timer_with_end_time(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # Validate no more events are published after end_time expires
        # Validate the timer was canceled after the end_time expires

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 2

        event_origin = "Interval Timer"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 5
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait until all events are published
        gevent.sleep((self.interval_timer_end_time - start_time) + self.interval_timer_interval + 1)

        # Validate only 2 events are published
        self.assertEqual(self.interval_timer_count, 2)

        # Validate the timer was canceled after the end_time is expired
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

    def interval_timer_callback(self, *args, **kwargs):
        self.interval_timer_received_time = datetime.datetime.utcnow()
        self.interval_timer_count += 1
        time_diff = math.fabs( ((self.interval_timer_received_time - self.interval_timer_sent_time).total_seconds())
                               - (self.interval_timer_interval * self.interval_timer_count) )
        # Assert expire time is within +-10 seconds
        self.assertTrue(time_diff <= 10)

    def test_cancel_single_timer(self):
        # test creating a one-time-only timer and cancelling it before it fires

        # create the timer resource
        # create the event listener
        # call scheduler to set the timer
        # call scheduler to cancel the timer
        # wait until after expiry to verify that the event is not sent
        self.single_timer_count = 0
        event_origin = "Time_of_Day"

        sub = EventSubscriber(event_type="ResourceEvent", callback=self.single_timer_call_back, origin=event_origin)
        sub.start()

        now = datetime.datetime.utcnow() + timedelta(seconds=3)
        times_of_day =[{'hour': str(now.hour),'minute' : str(now.minute), 'second':str(now.second) }]
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day,  expires=self.now_utc()+3, event_origin=event_origin, event_subtype="test")
        self.assertEqual(type(id), str)
        self.ssclient.cancel_timer(id)
        gevent.sleep(3)

        # Validate the event is not generated
        self.assertEqual(self.single_timer_count, 0)


    def single_timer_call_back (self, *args, **kwargs):
        self.single_timer_count += 1

    def test_create_forever_interval_timer(self):
        # Test creating interval timer that runs forever

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval Timer Forever"
        sub = EventSubscriber(event_type="ResourceEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()

        id = self.ssclient.create_interval_timer(start_time= self.now_utc(), interval=self.interval_timer_interval,
            end_time=-1,
            event_origin=event_origin, event_subtype=event_origin)
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait for 4 events to be published
        gevent.sleep((self.interval_timer_interval * 4) + 1)
        self.ssclient.cancel_timer(id)

        # Validate the timer id is invalid once it has been canceled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate events are not generated after canceling the timer
        self.assertEqual(self.interval_timer_count, 4)

    def test_timeoffday_timer(self):
        # test creating a time-of-day timer with two times of day
        # create the timer resource
        # get the current time, set the timer to several seconds from the current time
        # create the event listener
        # call scheduler to set the timer
        # verify that each event arrives within one to two seconds of its expected time

        event_origin = "Time Of Day2"
        self.expire_sec_1 = 4
        self.expire_sec_2 = 5
        self.tod_count = 0
        expire1 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_1)
        expire2 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_2)
        # Specify two times of day for one timer
        times_of_day =[{'hour': str(expire1.hour),'minute' : str(expire1.minute), 'second':str(expire1.second) },
                       {'hour': str(expire2.hour),'minute' : str(expire2.minute), 'second':str(expire2.second)}]

        sub = EventSubscriber(event_type="ResourceEvent", callback=self.tod_callback, origin=event_origin)
        sub.start()

        # Expires in two days
        expires = time.mktime((datetime.datetime.utcnow() + timedelta(days=2)).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
        self.assertEqual(type(id), str)

        # Wait until all events are generated
        gevent.sleep(9)

        # After waiting, validate that only 2 events were generated.
        self.assertEqual(self.tod_count, 2)

        # Cancel the timer
        self.ssclient.cancel_timer(id)


    def tod_callback(self, *args, **kwargs):
        tod_receive_time = datetime.datetime.utcnow()
        self.tod_count += 1
        if self.tod_count == 1:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_1)
            self.assertTrue(time_diff <= 2)
        elif self.tod_count == 2:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_2)
            self.assertTrue(time_diff <= 2)
Exemple #20
class TestSchedulerService(IonIntegrationTestCase):

    def setUp(self):
        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        process = FakeProcess()
        self.ssclient = SchedulerServiceProcessClient(node=self.container.node, process=process)
        self.rrclient = ResourceRegistryServiceProcessClient(node=self.container.node, process=process)

    def tearDown(self):
        pass

    def now_utc(self):
        return time.time()

    def test_create_interval_timer(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_233"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 10
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until two events are published
        gevent.sleep((self.interval_timer_interval * 2) + 1)

        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

        # Validate the number of timer counts
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))


    def test_system_restart(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # cancel the timer
        # wait until after next interval to verify that timer was correctly cancelled

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval_Timer_4444"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.on_restart_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time = start_time + 20
        id = self.ssclient.create_interval_timer(start_time="now", interval=self.interval_timer_interval,
            end_time=self.interval_timer_end_time,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Validate the timer is stored in RR
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until 1 event is published
        gevent.sleep((self.interval_timer_interval) + 1)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        # Validate the number of events generated
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))

        self.ssclient.on_system_restart()

        # after system restart, validate the timer is restored
        ss = self.rrclient.read(id)
        self.assertEqual(ss.entry.event_origin, event_origin)

        # Wait until another event is published
        start_time = datetime.datetime.utcnow()
        gevent.sleep((self.interval_timer_interval * 2) + 1)
        time_diff = (datetime.datetime.utcnow() - start_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)

        # Validate the number of events generated
        self.assertGreater(self.interval_timer_count, timer_counts)

        # Cancel the timer
        ss = self.ssclient.cancel_timer(id)

        # wait until after next interval to verify that timer was correctly cancelled
        gevent.sleep(self.interval_timer_interval)

        # Validate the timer correctly cancelled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate the timer is removed from the resource registry
        with self.assertRaises(NotFound):
            self.rrclient.read(id)

    def on_restart_callback(self, *args, **kwargs):
        self.interval_timer_count += 1
        log.debug("test_scheduler: on_restart_callback: time: " + str(self.now_utc()) + " count: " + str(self.interval_timer_count))

    def test_create_interval_timer_with_end_time(self):
        # create the interval timer resource
        # create the event listener
        # call scheduler to set the timer
        # receive a few intervals, validate that arrival time is as expected
        # Validate no more events are published after end_time expires
        # Validate the timer was canceled after the end_time expires

        self.interval_timer_count_2 = 0
        self.interval_timer_sent_time_2 = 0
        self.interval_timer_received_time_2 = 0
        self.interval_timer_interval_2 = 3

        event_origin = "Interval_Timer_2"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback_with_end_time, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        start_time = self.now_utc()
        self.interval_timer_end_time_2 = start_time + 7
        id = self.ssclient.create_interval_timer(start_time="now" , interval=self.interval_timer_interval_2,
            end_time=self.interval_timer_end_time_2,
            event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time_2 = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait until all events are published
        gevent.sleep((self.interval_timer_end_time_2 - start_time) + self.interval_timer_interval_2 + 1)

        # Validate the number of events generated
        self.assertEqual(self.interval_timer_count_2, 2, "Invalid number of timeouts generated. Number of event: %d Expected: 2 Timer id: %s " %(self.interval_timer_count_2, id))

        # Validate the timer was canceled after the end_time is expired
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

    def interval_timer_callback_with_end_time(self, *args, **kwargs):
        self.interval_timer_received_time_2 = datetime.datetime.utcnow()
        self.interval_timer_count_2 += 1
        time_diff = math.fabs( ((self.interval_timer_received_time_2 - self.interval_timer_sent_time_2).total_seconds())
                               - (self.interval_timer_interval_2 * self.interval_timer_count_2) )
        # Assert expire time is within +-10 seconds
        self.assertTrue(time_diff <= 10)
        log.debug("test_scheduler: interval_timer_callback_with_end_time: time:" + str(self.interval_timer_received_time_2) + " count:" + str(self.interval_timer_count_2))


    def interval_timer_callback(self, *args, **kwargs):
        self.interval_timer_received_time = datetime.datetime.utcnow()
        self.interval_timer_count += 1
        time_diff = math.fabs( ((self.interval_timer_received_time - self.interval_timer_sent_time).total_seconds())
                               - (self.interval_timer_interval * self.interval_timer_count) )
        # Assert expire time is within +-10 seconds
        self.assertTrue(time_diff <= 10)
        log.debug("test_scheduler: interval_timer_callback: time:" + str(self.interval_timer_received_time) + " count:" + str(self.interval_timer_count))

    def test_cancel_single_timer(self):
        # test creating a one-time-only timer and cancelling it before it fires

        # create the timer resource
        # create the event listener
        # call scheduler to set the timer
        # call scheduler to cancel the timer
        # wait until after expiry to verify that the event is not sent
        self.single_timer_count = 0
        event_origin = "Time_of_Day"

        sub = EventSubscriber(event_type="TimerEvent", callback=self.single_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        now = datetime.datetime.utcnow() + timedelta(seconds=3)
        times_of_day =[{'hour': str(now.hour),'minute' : str(now.minute), 'second':str(now.second) }]
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day,  expires=self.now_utc()+3, event_origin=event_origin, event_subtype="test")
        self.assertEqual(type(id), str)
        self.ssclient.cancel_timer(id)
        gevent.sleep(3)

        # Validate the event is not generated
        self.assertEqual(self.single_timer_count, 0, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: 0 Timer id: %s " %(self.single_timer_count, id))

    def single_timer_callback (self, *args, **kwargs):
        self.single_timer_count += 1
        log.debug("test_scheduler: single_timer_call_back: time:" + str(self.now_utc()) + " count:" + str(self.single_timer_count))

    def test_create_forever_interval_timer(self):
        # Test creating interval timer that runs forever

        self.interval_timer_count = 0
        self.interval_timer_sent_time = 0
        self.interval_timer_received_time = 0
        self.interval_timer_interval = 3

        event_origin = "Interval Timer Forever"
        sub = EventSubscriber(event_type="TimerEvent", callback=self.interval_timer_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        id = self.ssclient.create_interval_timer(start_time=str(self.now_utc()), interval=self.interval_timer_interval,
            end_time="-1",
            event_origin=event_origin, event_subtype=event_origin)
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait for 4 events to be published
        gevent.sleep((self.interval_timer_interval * 4) + 1)
        self.ssclient.cancel_timer(id)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.interval_timer_interval)


        # Validate the timer id is invalid once it has been canceled
        with self.assertRaises(BadRequest):
            self.ssclient.cancel_timer(id)

        # Validate events are not generated after canceling the timer
        self.assertEqual(self.interval_timer_count, timer_counts, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.interval_timer_count, timer_counts, id))

    def test_timeoffday_timer(self):
        # test creating a time-of-day timer with two times of day
        # create the timer resource
        # get the current time, set the timer to several seconds from the current time
        # create the event listener
        # call scheduler to set the timer
        # verify that each event arrives within one to two seconds of its expected time

        event_origin = "Time Of Day2"
        self.expire_sec_1 = 4
        self.expire_sec_2 = 5
        self.tod_count = 0
        expire1 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_1)
        expire2 = datetime.datetime.utcnow() + timedelta(seconds=self.expire_sec_2)
        # Specify two times of day for one timer
        times_of_day =[{'hour': str(expire1.hour),'minute' : str(expire1.minute), 'second':str(expire1.second) },
                       {'hour': str(expire2.hour),'minute' : str(expire2.minute), 'second':str(expire2.second)}]

        sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # Expires in two days
        expires = calendar.timegm((datetime.datetime.utcnow() + timedelta(days=2)).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait until all events are generated
        gevent.sleep(9)
        time_diff = (datetime.datetime.utcnow() - self.interval_timer_sent_time).seconds
        timer_counts =  math.floor(time_diff/self.expire_sec_1) + math.floor(time_diff/self.expire_sec_2)

        # After waiting, validate only 2 events are generated.
        self.assertEqual(self.tod_count, 2, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: %d Timer id: %s " %(self.tod_count, timer_counts, id))

        # Cancel the timer
        self.ssclient.cancel_timer(id)

    def tod_callback(self, *args, **kwargs):
        tod_receive_time = datetime.datetime.utcnow()
        self.tod_count += 1
        if self.tod_count == 1:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_1)
            self.assertTrue(time_diff <= 2)
        elif self.tod_count == 2:
            time_diff = math.fabs((tod_receive_time - self.tod_sent_time).total_seconds() - self.expire_sec_2)
            self.assertTrue(time_diff <= 2)
        log.debug("test_scheduler: tod_callback: time:" + str(tod_receive_time) + " count:" + str(self.tod_count))

    def test_timeoffday_timer_in_past_seconds(self):
        # test creating a time-of-day timer whose time of day is already in the past
        # create the timer resource
        # get the current time, set the timer to several seconds before the current time
        # create the event listener
        # call scheduler to set the timer
        # verify that no event is generated while waiting

        event_origin = "Time_Of_Day3"
        expire_sec = -4
        self.tod_count2 = 0
        now = datetime.datetime.utcnow()
        expire1 = now + timedelta(seconds=expire_sec)
        # Create one time of day a few seconds in the past
        times_of_day = [{'hour': str(expire1.hour), 'minute': str(expire1.minute), 'second': str(expire1.second)}]

        sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback2, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # Expires in 3 days
        expires = calendar.timegm((datetime.datetime.utcnow() + timedelta(days=3)).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")
        self.interval_timer_sent_time = datetime.datetime.utcnow()
        self.assertEqual(type(id), str)

        # Wait and see if any events are generated
        gevent.sleep(5)

        # After waiting, validate no event is generated
        self.assertEqual(self.tod_count2, 0, "Invalid number of timeouts generated. Number of timeout: %d Expected timeout: 0 Timer id: %s " %(self.tod_count2, id))

        # Cancel the timer
        self.ssclient.cancel_timer(id)

        # This is an example of the following case:
        # The current time is 8:00AM and the user sets up a timer for 6:00AM. Since it is already 8AM,
        #   the scheduler targets 6AM tomorrow, but the expiration is set to 5AM tomorrow (before the first event)
        event_origin = "Time_Of_Day4"
        expire_sec = -4
        self.tod_count2 = 0
        now = datetime.datetime.utcnow()
        expire1 = now + timedelta(seconds=expire_sec)
        times_of_day = [{'hour': str(expire1.hour), 'minute': str(expire1.minute), 'second': str(expire1.second)}]

        sub = EventSubscriber(event_type="TimerEvent", callback=self.tod_callback2, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        # Expires before the first event
        time_delta = timedelta(days=1) + timedelta(seconds=-(abs(expire_sec*2)))   # Notice the minus sign. It expires before the first event
        expires = calendar.timegm((now + time_delta).timetuple())
        self.tod_sent_time = datetime.datetime.utcnow()
        with self.assertRaises(BadRequest):
            id = self.ssclient.create_time_of_day_timer(times_of_day=times_of_day, expires=expires, event_origin=event_origin, event_subtype="")

    def tod_callback2(self, *args, **kwargs):
        tod_receive_time = datetime.datetime.utcnow()
        self.tod_count2 += 1
        log.debug("test_scheduler: tod_callback2: time:")

    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False), 'Skip test while in CEI LAUNCH mode')
    def test_quit_stops_timers(self):

        ar = AsyncResult()
        def cb(*args, **kwargs):
            ar.set(args)

            self.interval_timer_count += 1

        event_origin = "test_quitter"
        sub = EventSubscriber(event_type="TimerEvent", callback=cb, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        tid = self.ssclient.create_interval_timer(start_time="now",
                                                  end_time="-1",
                                                  interval=1,
                                                  event_origin=event_origin)

        # wait until at least one scheduled message
        ar.get(timeout=5)

        # shut it down!
        p = self.container.proc_manager.procs_by_name['scheduler']
        self.container.terminate_process(p.id)

        # assert empty
        self.assertEquals(p.schedule_entries, {})