Example #1
    def test_polled_time_no_interval_not_started(self):
        """
        Try to set up some jobs with the scheduler before the scheduler has been started.
        Then start the scheduler and verify that the job is set up properly.
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)

        self._scheduler = PolledScheduler()
        self.assertFalse(self._scheduler.running)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name,
                                             min_interval)
        self.assertIsNotNone(job)
        self.assertEqual(len(self._scheduler.get_jobs()), 0)
        self.assertEqual(len(self._scheduler._pending_jobs), 1)

        self._scheduler.start()

        log.debug("JOBS: %s" % self._scheduler.get_jobs())
        self.assertEqual(len(self._scheduler.get_jobs()), 1)

        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(2)
        self.assertTrue(self._scheduler.run_polled_job(test_name))
Example #2
    def test_polled_time(self):
        """
        Test a polled job with an interval.  Also test some exceptions
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name,
                                             min_interval, max_interval)
        self.assertEqual(len(self._scheduler.get_jobs()), 1)
        self.assert_event_triggered(now + max_interval)

        # after a triggered event the min time should be extended.
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(1)
        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assert_event_triggered(now + min_interval + max_interval)

        # after a polled event the wait time should also be extended
        self.assert_event_triggered(now + min_interval + max_interval +
                                    max_interval)

        # Test exceptions. Job name doesn't exist
        with self.assertRaises(LookupError):
            self._scheduler.run_polled_job('foo')

        # Verify that an exception is raised if we try to add a job with the same name
        with self.assertRaises(ValueError):
            job = self._scheduler.add_polled_job(self._callback, test_name,
                                                 min_interval, max_interval)
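
The assertions above capture the polled-job contract: run_polled_job returns True only when at least min_interval has elapsed since the last run, and max_interval, when supplied, fires the callback on its own after that much idle time. Below is a minimal standalone sketch of that pattern, assuming the same PolledScheduler API exercised by these tests (its import path is not shown in the snippets); the job name and callback are illustrative.

import time

scheduler = PolledScheduler()
scheduler.start()

def status_callback():
    print("polled job fired")

# The job runs only when polled, and no more often than once per second.
scheduler.add_polled_job(status_callback, 'status_poll',
                         PolledScheduler.interval(seconds=1))

print(scheduler.run_polled_job('status_poll'))  # True: first poll runs immediately
print(scheduler.run_polled_job('status_poll'))  # False: min_interval has not elapsed
time.sleep(2)
print(scheduler.run_polled_job('status_poll'))  # True: min_interval has elapsed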
Example #3
    def test_polled_time(self):
        """
        Test a polled job with an interval.  Also test some exceptions
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name, min_interval, max_interval)
        self.assertEqual(len(self._scheduler.get_jobs()), 1)
        self.assert_event_triggered(now+max_interval)

        # after a triggered event the min time should be extended.
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(1)
        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assert_event_triggered(now + min_interval + max_interval)

        # after a polled event the wait time should also be extended
        self.assert_event_triggered(now + min_interval + max_interval + max_interval)

        # Test exceptions. Job name doesn't exist
        with self.assertRaises(LookupError):
            self._scheduler.run_polled_job('foo')

        # Verify that an exception is raised if we try to add a job with the same name
        with self.assertRaises(ValueError):
            job = self._scheduler.add_polled_job(self._callback, test_name, min_interval, max_interval)
Example #4
    def test_polled_job(self):
        """
        Test features of the specialized job class that we overloaded.
        """
        now = datetime.datetime.now()
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)
        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        job = PolledIntervalJob(trigger, self._callback, [], {}, 1, 1, name='test_job')
        self.assertIsNotNone(job)
        log.debug("H: %s" % repr(job))
        next_time = job.compute_next_run_time(now)
        self.assert_datetime_close(next_time, now + max_interval)
        self.assertEqual(job.name, 'test_job')

        self.assertTrue(job.ready_to_run())
        next_time = job.compute_next_run_time(now)
        self.assertFalse(job.ready_to_run())
        self.assert_datetime_close(next_time, now + max_interval)

        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(job.ready_to_run())

        next_time = job.compute_next_run_time(now)
        self.assertFalse(job.ready_to_run())
        self.assert_datetime_close(next_time, now + max_interval)
Example #5
    def test_polled_time_no_interval_not_started(self):
        """
        Try to set up some jobs with the scheduler before the scheduler has been started.
        Then start the scheduler and verify that the job is set up properly.
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)

        self._scheduler = PolledScheduler()
        self.assertFalse(self._scheduler.running)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name, min_interval)
        self.assertIsNotNone(job)
        self.assertEqual(len(self._scheduler.get_jobs()), 0)
        self.assertEqual(len(self._scheduler._pending_jobs), 1)

        self._scheduler.start()

        log.debug("JOBS: %s" % self._scheduler.get_jobs())
        self.assertEqual(len(self._scheduler.get_jobs()), 1)

        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(2)
        self.assertTrue(self._scheduler.run_polled_job(test_name))
Example #6
    def test_polled_job(self):
        """
        Test features of the specialized job class that we overloaded.
        """
        now = datetime.datetime.now()
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)
        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        job = PolledIntervalJob(trigger,
                                self._callback, [], {},
                                1,
                                1,
                                name='test_job')
        self.assertIsNotNone(job)
        log.debug("H: %s" % repr(job))
        next_time = job.compute_next_run_time(now)
        self.assert_datetime_close(next_time, now + max_interval)
        self.assertEqual(job.name, 'test_job')

        self.assertTrue(job.ready_to_run())
        next_time = job.compute_next_run_time(now)
        self.assertFalse(job.ready_to_run())
        self.assert_datetime_close(next_time, now + max_interval)

        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(job.ready_to_run())

        next_time = job.compute_next_run_time(now)
        self.assertFalse(job.ready_to_run())
        self.assert_datetime_close(next_time, now + max_interval)
Example #7
    def setUp(self):
        """
        Set up the test case
        """
        self._scheduler = PolledScheduler()
        self._scheduler.start()
        self._triggered = []

        self.assertTrue(self._scheduler.daemonic)
Example #8
    def __init__(self, config=None):
        """
        config structure:
        {
            test_name: {
                trigger: {}
                callback: some_function
            }
        }
        @param config: job configuration structure.
        """
        self._scheduler = PolledScheduler()
        if config:
            self.add_config(config)
Example #9
    def _build_scheduler(self):
        """
        Build a scheduler for periodic status updates
        """
        self._scheduler = PolledScheduler()
        self._scheduler.start()

        def event_callback(event):
            log.info("driver job triggered, raise event: %s" % event)
            self._fsm.on_event(event)

        # Dynamically create the method and add it
        method = partial(event_callback, RSNPlatformDriverEvent.GET_ENG_DATA)

        self._job = self._scheduler.add_interval_job(
            method, seconds=self.oms_sample_rate)
Example #10
    def test_trigger_string(self):
        """
        test the str and repr methods
        """
        now = datetime.datetime.now()
        trigger = PolledIntervalTrigger(
            PolledScheduler.interval(seconds=1),
            PolledScheduler.interval(seconds=3),
            now)

        self.assertEqual(str(trigger), "min_interval[0:00:01] max_interval[0:00:03]")
        self.assertEqual(repr(trigger), "<PolledIntervalTrigger (min_interval=datetime.timedelta(0, 1), max_interval=datetime.timedelta(0, 3))>")

        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1), None, now)
        self.assertEqual(str(trigger), "min_interval[0:00:01] max_interval[None]")
        self.assertEqual(repr(trigger), "<PolledIntervalTrigger (min_interval=datetime.timedelta(0, 1), max_interval=None)>")
Example #11
    def setUp(self):
        """
        Set up the test case
        """
        self._scheduler = PolledScheduler()
        self._scheduler.start()
        self._triggered = []

        self.assertTrue(self._scheduler.daemonic)
Example #12
    def test_elapse_time(self):
        """
        Test with elapsed time.  Not an exhaustive test because the interval
        trigger is implemented in the library, not in our code.
        """
        now = datetime.datetime.now()
        interval = PolledScheduler.interval(seconds=3)

        job = self._scheduler.add_interval_job(self._callback, seconds=3)
        self.assert_event_triggered(now + interval)
        self.assert_event_triggered(now + interval + interval)
        self.assert_event_triggered(now + interval + interval + interval)
Example #13
    def __init__(self, config=None):
        """
        config structure:
        {
            test_name: {
                trigger: {}
                callback: some_function
            }
        }
        @param config: job configuration structure.
        """
        self._scheduler = PolledScheduler()
        if config:
            self.add_config(config)
Example #14
    def test_trigger_string(self):
        """
        test the str and repr methods
        """
        now = datetime.datetime.now()
        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1),
                                        PolledScheduler.interval(seconds=3),
                                        now)

        self.assertEqual(str(trigger),
                         "min_interval[0:00:01] max_interval[0:00:03]")
        self.assertEqual(
            repr(trigger),
            "<PolledIntervalTrigger (min_interval=datetime.timedelta(0, 1), max_interval=datetime.timedelta(0, 3))>"
        )

        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1),
                                        None, now)
        self.assertEqual(str(trigger),
                         "min_interval[0:00:01] max_interval[None]")
        self.assertEqual(
            repr(trigger),
            "<PolledIntervalTrigger (min_interval=datetime.timedelta(0, 1), max_interval=None)>"
        )
Example #15
    def _build_scheduler(self):
        """
        Build a scheduler for periodic status updates
        """
        self._scheduler = PolledScheduler()
        self._scheduler.start()

        def event_callback(event):
            log.debug("driver job triggered, raise event: %s" % event)
            self._fsm.on_event(event)

        # Dynamically create the method and add it
        method = partial(event_callback, RSNPlatformDriverEvent.GET_ENG_DATA)

        self._job = self._scheduler.add_interval_job(method, seconds=self.oms_sample_rate)
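
The functools.partial call in _build_scheduler simply pre-binds the event argument so the scheduler can invoke a zero-argument callable. A self-contained illustration using only the standard library; the event string stands in for RSNPlatformDriverEvent.GET_ENG_DATA.

from functools import partial

def event_callback(event):
    print("driver job triggered, raise event: %s" % event)

# Pre-bind the event so the scheduler can later call method() with no arguments.
method = partial(event_callback, "GET_ENG_DATA")
method()  # prints: driver job triggered, raise event: GET_ENG_DATA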
Example #16
    def test_elapse_time(self):
        """
        Test with elapsed time.  Not an exhaustive test because the interval
        trigger is implemented in the library, not in our code.
        """
        now = datetime.datetime.now()
        interval = PolledScheduler.interval(seconds=3)

        job = self._scheduler.add_interval_job(self._callback, seconds=3)
        self.assert_event_triggered(now + interval)
        self.assert_event_triggered(now + interval + interval)
        self.assert_event_triggered(now + interval + interval + interval)

        # Now shutdown the scheduler and verify we aren't firing events
        self._scheduler.shutdown()
        self._triggered = []
        self.assert_event_not_triggered()
Example #17
    def test_polled_time_no_interval(self):
        """
        Test the scheduler with a polled job with no interval
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name, min_interval)

        self.assertEqual(len(self._scheduler.get_jobs()), 1)

        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(2)
        self.assertTrue(self._scheduler.run_polled_job(test_name))
Example #18
    def test_elapse_time(self):
        """
        Test with elapsed time.  Not an exhaustive test because the interval
        trigger is implemented in the library, not in our code.
        """
        now = datetime.datetime.now()
        interval = PolledScheduler.interval(seconds=3)

        job = self._scheduler.add_interval_job(self._callback, seconds=3)
        self.assert_event_triggered(now + interval)
        self.assert_event_triggered(now + interval + interval)
        self.assert_event_triggered(now + interval + interval + interval)

        # Now shutdown the scheduler and verify we aren't firing events
        self._scheduler.shutdown()
        self._triggered = []
        self.assert_event_not_triggered()
Example #19
    def test_polled_time_no_interval(self):
        """
        Test the scheduler with a polled job with no interval
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name,
                                             min_interval)

        self.assertEqual(len(self._scheduler.get_jobs()), 1)

        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(2)
        self.assertTrue(self._scheduler.run_polled_job(test_name))
Example #20
class DriverScheduler(object):
    """
    Class to facilitate event scheduling in drivers.
    """

    def __init__(self, config=None):
        """
        config structure:
        {
            test_name: {
                trigger: {}
                callback: some_function
            }
        }
        @param config: job configuration structure.
        """
        self._scheduler = PolledScheduler()
        if config:
            self.add_config(config)

    def shutdown(self):
        self._scheduler.shutdown()

    def run_job(self, name):
        """
        Try to run a polled job with the passed in name.  If it
        runs then return true, otherwise false.
        @param name: name of the job
        @raise LookupError if we fail to find the job
        """
        return self._scheduler.run_polled_job(name)

    def add_config(self, config):
        """
        Add new jobs to the scheduler using the passed in config
        config structure:
        {
            test_name: {
                trigger: {}
                callback: some_function
            }
        }
        @param config: job configuration structure.
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("scheduler config not a dict")

        if len(config.keys()) == 0:
            raise SchedulerException("scheduler config empty")

        for (name, job_config) in config.items():
            try:
                self._add_job(name, job_config)
            except (ValueError, TypeError) as e:
                raise SchedulerException("failed to schedule job: %s" % e)

        if not self._scheduler.running:
            self._scheduler.start()

    def remove_job(self, callback):
        self._scheduler.unschedule_func(callback)
    
    def _add_job(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        log.debug(" Config name: %s value: %s" % (name, config))

        if config is None:
            raise SchedulerException("job config empty")

        if not isinstance(config, dict):
            raise SchedulerException("job config not a dict")

        trigger = self._get_trigger_from_config(config)

        trigger_type = trigger.get(DriverSchedulerConfigKey.TRIGGER_TYPE)
        if trigger_type is None:
            raise SchedulerException("trigger type missing")

        if trigger_type == TriggerType.ABSOLUTE:
            self._add_job_absolute(name, config)
        elif trigger_type == TriggerType.CRON:
            self._add_job_cron(name, config)
        elif trigger_type == TriggerType.INTERVAL:
            self._add_job_interval(name, config)
        elif trigger_type == TriggerType.POLLED_INTERVAL:
            self._add_job_polled_interval(name, config)
        else:
            raise SchedulerException("unknown trigger type '%s'" % trigger_type)

    def _get_trigger_from_config(self, config):
        """
        get and validate the trigger dictionary from the config object.
        @param config: configuration object to inspect
        @return: dictionary from the config for the trigger config
        """
        trigger = config.get(DriverSchedulerConfigKey.TRIGGER)
        if trigger is None:
            raise SchedulerException("trigger definition missing")
        if not isinstance(trigger, dict):
            raise SchedulerException("trigger definition not a dict")

        return trigger

    def _get_callback_from_config(self, config):
        """
        get and verify the callback parameter from a job config.
        @param config: configuration object to inspect
        @return: callback method from the config for the trigger config
        """
        callback = config.get(DriverSchedulerConfigKey.CALLBACK)
        if callback is None:
            raise SchedulerException("callback definition missing")
        if not callable(callback):
            raise SchedulerException("callback incorrect type: '%s'" % type(callback))

        return callback

    def _add_job_absolute(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("config not a dict")

        callback = self._get_callback_from_config(config)
        trigger = self._get_trigger_from_config(config)

        dt = trigger.get(DriverSchedulerConfigKey.DATE)
        if dt is None:
            raise SchedulerException("trigger missing parameter: %s" % DriverSchedulerConfigKey.DATE)

        self._scheduler.add_date_job(callback, dt)

    def _add_job_cron(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("config not a dict")

        callback = self._get_callback_from_config(config)
        trigger = self._get_trigger_from_config(config)

        year = trigger.get(DriverSchedulerConfigKey.YEAR)
        month = trigger.get(DriverSchedulerConfigKey.MONTH)
        day = trigger.get(DriverSchedulerConfigKey.DAY)
        week = trigger.get(DriverSchedulerConfigKey.WEEK)
        day_of_week = trigger.get(DriverSchedulerConfigKey.DAY_OF_WEEK)
        hour = trigger.get(DriverSchedulerConfigKey.HOUR)
        minute = trigger.get(DriverSchedulerConfigKey.MINUTE)
        second = trigger.get(DriverSchedulerConfigKey.SECOND)

        if (year is None and month is None and day is None and week is None and
                day_of_week is None and hour is None and minute is None and
                second is None):
            raise SchedulerException("at least one cron parameter required!")

        self._scheduler.add_cron_job(callback, year=year, month=month, day=day, week=week,
                                     day_of_week=day_of_week, hour=hour, minute=minute, second=second)

    def _add_job_interval(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("config not a dict")

        callback = self._get_callback_from_config(config)
        trigger = self._get_trigger_from_config(config)

        weeks = trigger.get(DriverSchedulerConfigKey.WEEKS, 0)
        days = trigger.get(DriverSchedulerConfigKey.DAYS, 0)
        hours = trigger.get(DriverSchedulerConfigKey.HOURS, 0)
        minutes = trigger.get(DriverSchedulerConfigKey.MINUTES, 0)
        seconds = trigger.get(DriverSchedulerConfigKey.SECONDS, 0)

        if not (weeks or days or hours or minutes or seconds):
            raise SchedulerException("at least one interval parameter required!")

        self._scheduler.add_interval_job(callback, weeks=weeks, days=days, hours=hours,
                                                   minutes=minutes, seconds=seconds)

    def _add_job_polled_interval(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("config not a dict")

        callback = self._get_callback_from_config(config)
        trigger = self._get_trigger_from_config(config)

        min_interval = trigger.get(DriverSchedulerConfigKey.MINIMAL_INTERVAL)
        max_interval = trigger.get(DriverSchedulerConfigKey.MAXIMUM_INTERVAL)

        if min_interval is None:
            raise SchedulerException("%s missing from trigger configuration" %
                                     DriverSchedulerConfigKey.MINIMAL_INTERVAL)
        if not isinstance(min_interval, dict):
            raise SchedulerException("%s trigger configuration not a dict" %
                                     DriverSchedulerConfigKey.MINIMAL_INTERVAL)

        min_weeks = min_interval.get(DriverSchedulerConfigKey.WEEKS, 0)
        min_days = min_interval.get(DriverSchedulerConfigKey.DAYS, 0)
        min_hours = min_interval.get(DriverSchedulerConfigKey.HOURS, 0)
        min_minutes = min_interval.get(DriverSchedulerConfigKey.MINUTES, 0)
        min_seconds = min_interval.get(DriverSchedulerConfigKey.SECONDS, 0)

        if not (min_weeks or min_days or min_hours or min_minutes or min_seconds):
            raise SchedulerException("at least one interval parameter required!")

        min_interval_obj = self._scheduler.interval(min_weeks, min_days, min_hours, min_minutes, min_seconds)

        max_interval_obj = None
        if max_interval is not None:
            if not isinstance(max_interval, dict):
                raise SchedulerException("%s trigger configuration not a dict" %
                                         DriverSchedulerConfigKey.MAXIMUM_INTERVAL)

            max_weeks = max_interval.get(DriverSchedulerConfigKey.WEEKS, 0)
            max_days = max_interval.get(DriverSchedulerConfigKey.DAYS, 0)
            max_hours = max_interval.get(DriverSchedulerConfigKey.HOURS, 0)
            max_minutes = max_interval.get(DriverSchedulerConfigKey.MINUTES, 0)
            max_seconds = max_interval.get(DriverSchedulerConfigKey.SECONDS, 0)

            if max_weeks or max_days or max_hours or max_minutes or max_seconds:
                max_interval_obj = self._scheduler.interval(max_weeks, max_days, max_hours, max_minutes, max_seconds)

        self._scheduler.add_polled_job(callback, name, min_interval_obj, max_interval_obj)
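
The trigger configurations accepted by add_config are easiest to read as a concrete dictionary. The sketch below assumes the DriverSchedulerConfigKey and TriggerType constants referenced above are imported alongside DriverScheduler; the job names and callback are hypothetical.

def sample_callback():
    print("scheduled job fired")

config = {
    'interval_job': {  # hypothetical job name
        DriverSchedulerConfigKey.TRIGGER: {
            DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
            DriverSchedulerConfigKey.SECONDS: 10,
        },
        DriverSchedulerConfigKey.CALLBACK: sample_callback,
    },
    'polled_job': {  # hypothetical job name
        DriverSchedulerConfigKey.TRIGGER: {
            DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.POLLED_INTERVAL,
            DriverSchedulerConfigKey.MINIMAL_INTERVAL: {
                DriverSchedulerConfigKey.SECONDS: 5,
            },
            DriverSchedulerConfigKey.MAXIMUM_INTERVAL: {
                DriverSchedulerConfigKey.SECONDS: 30,
            },
        },
        DriverSchedulerConfigKey.CALLBACK: sample_callback,
    },
}

scheduler = DriverScheduler(config)
scheduler.run_job('polled_job')  # polled jobs are nudged by name; returns True if it ran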
Example #21
class RSNPlatformDriver(PlatformDriver):
    """
    The main RSN OMS platform driver class.
    """
    def __init__(self, event_callback):
        """
        Creates an RSNPlatformDriver instance.
        @param event_callback  Listener of events generated by this driver
        """
        PlatformDriver.__init__(self, event_callback)

        # CIOMSClient instance created by connect() and destroyed by disconnect():
        self._rsn_oms = None

        # URL for the event listener registration/unregistration (based on
        # web server launched by ServiceGatewayService, since that's the
        # service in charge of receiving/relaying the OMS events).
        # NOTE: (as proposed long ago) this kind of functionality should
        # actually be provided by some component more in charge of the RSN
        # platform network as a whole -- as opposed to platform-specific.
        self.listener_url = None

        # scheduler config is a bit redundant now, but if we ever want to
        # re-initialize a scheduler we will need it.
        self._scheduler = None

    def _filter_capabilities(self, events):
        """
        """
        events_out = [x for x in events if RSNPlatformDriverCapability.has(x)]
        return events_out

    def validate_driver_configuration(self, driver_config):
        """
        Driver config must include 'oms_uri' entry.
        """
        if 'oms_uri' not in driver_config:
            log.error("'oms_uri' not present in driver_config = %r",
                      driver_config)
            raise PlatformDriverException(
                msg="driver_config does not indicate 'oms_uri'")

    def _configure(self, driver_config):
        """
        Nothing special done here, only calls super.configure(driver_config)

        @param driver_config with required 'oms_uri' entry.
        """

        log.error("%r: _configure...", self._platform_id)

        PlatformDriver._configure(self, driver_config)

        self.nodeCfg = NodeConfiguration()

        self._platform_id = driver_config['node_id']
        self.nodeCfg.openNode(
            self._platform_id,
            driver_config['driver_config_file']['node_cfg_file'])

        self.nms_source = self.nodeCfg.node_meta_data['nms_source']

        self.oms_sample_rate = self.nodeCfg.node_meta_data['oms_sample_rate']

        self.nodeCfg.Print()

        self._construct_resource_schema()

        self._lastRcvSampleTime = {}

    def _build_scheduler(self):
        """
        Build a scheduler for periodic status updates
        """
        self._scheduler = PolledScheduler()
        self._scheduler.start()

        def event_callback(event):
            log.info("driver job triggered, raise event: %s" % event)
            self._fsm.on_event(event)

        # Dynamically create the method and add it
        method = partial(event_callback, RSNPlatformDriverEvent.GET_ENG_DATA)

        self._job = self._scheduler.add_interval_job(
            method, seconds=self.oms_sample_rate)

    def _delete_scheduler(self):
        """
        Remove the autosample schedule.
        """
        try:
            self._scheduler.unschedule_job(self._job)
        except KeyError:
            log.info('Failed to remove scheduled job for ACQUIRE_SAMPLE')

        self._scheduler.shutdown()

    def _construct_resource_schema(self):
        """
        """
        parameters = deepcopy(self._param_dict)

        for k, v in parameters.iteritems():
            read_write = v.get('read_write', None)
            if read_write == 'write':
                v['visibility'] = 'READ_WRITE'
            else:
                v['visibility'] = 'READ_ONLY'

        commands = {
            RSNPlatformDriverEvent.TURN_ON_PORT: {
                "display_name": "Port Power On",
                "description": "Activate port power.",
                "args": [],
                "kwargs": {
                    'port_id': {
                        "required": True,
                        "type": "string",
                    }
                }
            },
            RSNPlatformDriverEvent.TURN_OFF_PORT: {
                "display_name": "Port Power Off",
                "description": "Deactivate port power.",
                "args": [],
                "kwargs": {
                    'port_id': {
                        "required": True,
                        "type": "string",
                    }
                }
            }
        }

        self._resource_schema['parameters'] = parameters
        self._resource_schema['commands'] = commands

    def _ping(self):
        """
        Verifies communication with external platform returning "PONG" if
        this verification completes OK.

        @retval "PONG" iff all OK.
        @raise PlatformConnectionException Cannot ping external platform or
               got unexpected response.
        """
        log.debug("%r: pinging OMS...", self._platform_id)
        self._verify_rsn_oms('_ping')

        try:
            retval = self._rsn_oms.hello.ping()
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot ping: %s" % str(e))

        if retval is None or retval.upper() != "PONG":
            raise PlatformConnectionException(
                msg="Unexpected ping response: %r" % retval)

        log.debug("%r: ping completed: response: %s", self._platform_id,
                  retval)

        return "PONG"

    def _connect(self, recursion=None):
        """
        Creates an CIOMSClient instance, does a ping to verify connection,
        and starts event dispatch.
        """
        log.info("%r: _connect...", self._platform_id)

        # create CIOMSClient:
        oms_uri = self._driver_config['oms_uri']
        log.debug("%r: creating CIOMSClient instance with oms_uri=%r",
                  self._platform_id, oms_uri)
        self._rsn_oms = CIOMSClientFactory.create_instance(oms_uri)
        log.debug("%r: CIOMSClient instance created: %s", self._platform_id,
                  self._rsn_oms)

        # ping to verify connection:
        self._ping()
        self._build_scheduler()  # then start calling it every X seconds

    def _disconnect(self, recursion=None):
        log.info("%r: _disconnect...", self._platform_id)

        CIOMSClientFactory.destroy_instance(self._rsn_oms)
        self._rsn_oms = None
        log.debug("%r: CIOMSClient instance destroyed", self._platform_id)

        self._delete_scheduler()
        self._scheduler = None

    def get_metadata(self):
        """
        """
        return self.nodeCfg.node_meta_data

    def get_eng_data(self):
        log.debug("%r: get_eng_data...", self._platform_id)

        ntp_time = ntplib.system_to_ntp_time(time.time())
        max_time = ntp_time - self.oms_sample_rate * 10

        for key, stream in self.nodeCfg.node_streams.iteritems():
            log.debug("%r Stream(%s)", self._platform_id, key)
            if key not in self._lastRcvSampleTime:
                self._lastRcvSampleTime[key] = max_time

            # prevent the max lookback time getting too big if we stop
            # getting data for some reason
            if self._lastRcvSampleTime[key] < max_time:
                self._lastRcvSampleTime[key] = max_time

            attrs = []
            for instance in stream:
                # add a little bit of time to the last received so we don't get one we already have again
                attrs = [(k, self._lastRcvSampleTime[key] + 0.1)
                         for k in stream[instance]]

                if attrs:
                    # go get the data from the OMS
                    return_dict = self.get_attribute_values_from_oms(attrs)

                    # get the list of all unique timestamps
                    ts_list = self.get_all_returned_timestamps(return_dict)

                    # for each timestamp create a particle and emit it
                    for ts in sorted(ts_list):
                        # go get the list at this timestamp
                        onetimestamp_attrs = self.get_single_timestamp_list(
                            stream, ts, return_dict)
                        # scale the attrs and convert the names to ion
                        ion_onetimestamp_attrs = self.convert_attrs_to_ion(
                            stream, onetimestamp_attrs)
                        pad_particle = PlatformParticle(
                            ion_onetimestamp_attrs,
                            preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)

                        pad_particle.set_internal_timestamp(timestamp=ts)
                        pad_particle._data_particle_type = key  # stream name
                        json_message = pad_particle.generate()

                        event = {
                            'type': DriverAsyncEvent.SAMPLE,
                            'value': json_message,
                            'time': time.time(),
                            'instance': '%s-%s' % (
                                self.nodeCfg.node_meta_data['reference_designator'],
                                instance),
                        }

                        self._send_event(event)
                        self._lastRcvSampleTime[key] = ts

                        self._send_event(event)
                        self._lastRcvSampleTime[key] = ts

    def get_attribute_values(self, attrs):
        """Simple wrapper method for compatibility.
        """
        return self.get_attribute_values_from_oms(attrs)

    def get_attribute_values_from_oms(self, attrs):
        """
        """
        def _verify_returned_dict(attr_dict):
            try:
                for key in attr_dict.iterkeys():
                    value_list = attr_dict[key]
                    if value_list == 'INVALID_ATTRIBUTE_ID':
                        continue

                    if not isinstance(value_list, list):
                        raise PlatformException(
                            msg="Error in getting values for attribute %s.  %s"
                            % (key, value_list))

                    if (value_list and
                            value_list[0][0] == "ERROR_DATA_REQUEST_TOO_FAR_IN_PAST"):
                        raise PlatformException(
                            msg="Time requested for %s too far in the past" % key)
            except AttributeError:
                raise PlatformException(
                    msg="Error returned in requesting attributes: %s" %
                    attr_dict)

        if not isinstance(attrs, (list, tuple)):
            raise PlatformException(
                msg='get_attribute_values: attrs argument must be a '
                    'list [(attrName, from_time), ...]. Given: %s' % attrs)

        self._verify_rsn_oms('get_attribute_values_from_oms')

        log.debug("get_attribute_values: attrs=%s", self._platform_id)
        log.debug("get_attribute_values: attrs=%s", attrs)

        try:
            response = self._rsn_oms.attr.get_platform_attribute_values(
                self._platform_id, attrs)
        except Exception as e:
            raise PlatformConnectionException(
                msg="get_attribute_values_from_oms Cannot "
                    "get_platform_attribute_values: %s" % str(e))

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_returned_dict(dic_plat)

        # reported timestamps are already in NTP. Just return the dict:
        return dic_plat

    def get_all_returned_timestamps(self, attrs):
        ts_list = []

        # go through all of the returned values and get the unique timestamps. Each
        # particle will have data for a unique timestamp
        for attr_id, attr_vals in attrs.iteritems():
            if not isinstance(attr_vals, list):
                # in case we get an INVALID_ATTRIBUTE_ID
                log.debug("Invalid attr_vals %s attrs=%s", attr_id, attr_vals)
            else:
                for v, ts in attr_vals:
                    if ts not in ts_list:
                        ts_list.append(ts)

        return ts_list

    def get_single_timestamp_list(self, stream, ts_in, attrs):
        # create a list of sample data from just the single timestamp
        new_attrs = []  # key value list for this timestamp

        for key in stream:  # assuming we will put all values in stream even if we didn't get a sample this time
            if key in attrs:
                for v, ts in attrs[key]:
                    if ts == ts_in:
                        new_attrs.append((key, v))
                        continue

        return new_attrs

    def convert_attrs_to_ion(self, stream, attrs):
        """
        """
        attrs_return = []

        # convert back to ION parameter name and scale from OMS to ION
        for key, v in attrs:
            scale_factor = stream[key]['scale_factor']
            if v is None:
                attrs_return.append((stream[key]['ion_parameter_name'], v))
            else:
                attrs_return.append(
                    (stream[key]['ion_parameter_name'], v * scale_factor))

        return attrs_return

    def _verify_platform_id_in_response(self, response):
        """
        Verifies the presence of my platform_id in the response.

        @param response Dictionary returned by _rsn_oms

        @retval response[self._platform_id]
        """
        if self._platform_id not in response:
            msg = "unexpected: response does not contain entry for %r" % self._platform_id
            log.error(msg)
            raise PlatformException(msg=msg)

        if response[self._platform_id] == InvalidResponse.PLATFORM_ID:
            msg = "response reports invalid platform_id for %r" % self._platform_id
            log.error(msg)
            raise PlatformException(msg=msg)
        else:
            return response[self._platform_id]

    def set_overcurrent_limit(self, port_id, milliamps, microseconds, src):
        def _verify_response(rsp):
            try:
                message = rsp[port_id]

                if not message.startswith('OK'):
                    raise PlatformException(
                        msg="Error in setting overcurrent for port %s: %s" %
                        (port_id, message))
            except KeyError:
                raise PlatformException(msg="Error in response: %s" % rsp)

        self._verify_rsn_oms('set_overcurrent_limit')
        oms_port_cntl_id = self._verify_and_return_oms_port(
            port_id, 'set_overcurrent_limit')

        try:
            response = self._rsn_oms.port.set_over_current(
                self._platform_id, oms_port_cntl_id, int(milliamps),
                int(microseconds), src)
        except Exception as e:
            raise PlatformConnectionException(
                msg="Cannot set_overcurrent_limit: %s" % str(e))

        response = self._convert_port_id_from_oms_to_ci(
            port_id, oms_port_cntl_id, response)
        log.debug("set_overcurrent_limit = %s", response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def turn_on_port(self, port_id, src):
        def _verify_response(rsp):
            try:
                message = rsp[port_id]

                if not message.startswith('OK'):
                    raise PlatformException(
                        msg="Error in turning on port %s: %s" %
                        (port_id, message))
            except KeyError:
                raise PlatformException(
                    msg="Error in turn on port response: %s" % rsp)

        self._verify_rsn_oms('turn_on_port')
        oms_port_cntl_id = self._verify_and_return_oms_port(
            port_id, 'turn_on_port')

        log.debug("%r: turning on port: port_id=%s oms port_id = %s",
                  self._platform_id, port_id, oms_port_cntl_id)

        try:
            response = self._rsn_oms.port.turn_on_platform_port(
                self._platform_id, oms_port_cntl_id, src)
        except Exception as e:
            raise PlatformConnectionException(
                msg="Cannot turn_on_platform_port: %s" % str(e))

        response = self._convert_port_id_from_oms_to_ci(
            port_id, oms_port_cntl_id, response)
        log.debug("%r: turn_on_platform_port response: %s", self._platform_id,
                  response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def turn_off_port(self, port_id, src):
        def _verify_response(rsp):
            try:
                message = rsp[port_id]

                if not message.startswith('OK'):
                    raise PlatformException(
                        msg="Error in turning off port %s: %s" %
                        (port_id, message))
            except KeyError:
                raise PlatformException(
                    msg="Error in turn off port response: %s" % rsp)

        self._verify_rsn_oms('turn_off_port')
        oms_port_cntl_id = self._verify_and_return_oms_port(
            port_id, 'turn_off_port')

        log.debug("%r: turning off port: port_id=%s oms port_id = %s",
                  self._platform_id, port_id, oms_port_cntl_id)

        try:
            response = self._rsn_oms.port.turn_off_platform_port(
                self._platform_id, oms_port_cntl_id, src)
        except Exception as e:
            raise PlatformConnectionException(
                msg="Cannot turn_off_platform_port: %s" % str(e))

        response = self._convert_port_id_from_oms_to_ci(
            port_id, oms_port_cntl_id, response)
        log.debug("%r: turn_off_platform_port response: %s", self._platform_id,
                  response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def start_profiler_mission(self, mission_name, src):
        def _verify_response(rsp):
            try:
                message = rsp[mission_name]

                if not message.startswith('OK'):
                    raise PlatformException(
                        msg="Error in starting mission %s: %s" %
                        (mission_name, message))
            except KeyError:
                raise PlatformException(
                    msg="Error in starting mission response: %s" % rsp)

        self._verify_rsn_oms('start_profiler_mission')

        try:
            response = self._rsn_oms.profiler.start_mission(
                self._platform_id, mission_name, src)
        except Exception as e:
            raise PlatformConnectionException(
                msg="Cannot start_profiler_mission: %s" % str(e))

        log.debug("%r: start_profiler_mission response: %s", self._platform_id,
                  response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def stop_profiler_mission(self, flag, src):
        def _verify_response(rsp):
            if not rsp.startswith('OK'):
                raise PlatformException(msg="Error in stopping profiler: %s" %
                                        rsp)

        self._verify_rsn_oms('stop_profiler_mission')

        try:
            response = self._rsn_oms.profiler.stop_mission(
                self._platform_id, flag, src)
        except Exception as e:
            raise PlatformConnectionException(
                msg="Cannot stop_profiler_mission: %s" % str(e))

        log.debug("%r: stop_profiler_mission response: %s", self._platform_id,
                  response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def get_mission_status(self, *args, **kwargs):
        self._verify_rsn_oms('get_mission_status')

        try:
            response = self._rsn_oms.profiler.get_mission_status(
                self._platform_id)
        except Exception as e:
            raise PlatformConnectionException(
                msg="Cannot get_mission_status: %s" % str(e))

        log.debug("%r: get_mission_status response: %s", self._platform_id,
                  response)

        dic_plat = self._verify_platform_id_in_response(response)

        return dic_plat  # note: return the dic for the platform

    def get_available_missions(self, *args, **kwargs):
        self._verify_rsn_oms('get_available_missions')

        try:
            response = self._rsn_oms.profiler.get_available_missions(
                self._platform_id)
        except Exception as e:
            raise PlatformConnectionException(
                msg="Cannot get_available_missions: %s" % str(e))

        log.debug("%r: get_available_missions response: %s", self._platform_id,
                  response)

        dic_plat = self._verify_platform_id_in_response(response)

        return dic_plat  # note: return the dic for the platform

    def _verify_rsn_oms(self, method_name):
        if self._rsn_oms is None:
            raise PlatformConnectionException(
                "Cannot %s: _rsn_oms object required (created via connect() call)"
                % method_name)

    def _verify_and_return_oms_port(self, port_id, method_name):
        if port_id not in self.nodeCfg.node_port_info:
            raise PlatformConnectionException("Cannot %s: Invalid Port ID" %
                                              method_name)

        return self.nodeCfg.node_port_info[port_id]['port_oms_port_cntl_id']

    def _convert_port_id_from_oms_to_ci(self, port_id, oms_port_cntl_id,
                                        response):
        """
        Converts the OMS port id into the original one provided.
        """
        if response[self._platform_id].get(oms_port_cntl_id, None):
            return {
                self._platform_id: {
                    port_id:
                    response[self._platform_id].get(oms_port_cntl_id, None)
                }
            }

        return response

    ###############################################
    # External event handling:

    def _register_event_listener(self, url):
        """
        Registers given url for all event types.
        """
        self._verify_rsn_oms('_register_event_listener')

        try:
            already_registered = (
                self._rsn_oms.event.get_registered_event_listeners())
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot get registered event listeners: %s" %
                (self._platform_id, e))

        if url in already_registered:
            log.debug("listener %r was already registered", url)
            return

        try:
            result = self._rsn_oms.event.register_event_listener(url)
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot register_event_listener: %s" %
                (self._platform_id, e))

        log.debug("%r: register_event_listener(%r) => %s", self._platform_id,
                  url, result)

    def _unregister_event_listener(self, url):
        """
        Unregisters given url for all event types.
        """
        self._verify_rsn_oms('_unregister_event_listener')

        try:
            result = self._rsn_oms.event.unregister_event_listener(url)
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot unregister_event_listener: %s" %
                (self._platform_id, e))

        log.debug("%r: unregister_event_listener(%r) => %s", self._platform_id,
                  url, result)

    ##############################################################
    # GET
    ##############################################################

    def get(self, *args, **kwargs):

        if 'attrs' in kwargs:
            attrs = kwargs['attrs']
            result = self.get_attribute_values(attrs)
            return result

        if 'metadata' in kwargs:
            result = self.get_metadata()
            return result

        return super(RSNPlatformDriver, self).get(*args, **kwargs)

    ##############################################################
    # EXECUTE
    ##############################################################

    def execute(self, cmd, *args, **kwargs):
        """
        Executes the given command.

        @param cmd   command

        @return  result of the execution
        """
        if cmd == RSNPlatformDriverEvent.TURN_ON_PORT:
            result = self.turn_on_port(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.TURN_OFF_PORT:
            result = self.turn_off_port(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.SET_PORT_OVER_CURRENT_LIMITS:
            result = self.set_overcurrent_limit(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.START_PROFILER_MISSION:
            result = self.start_profiler_mission(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.STOP_PROFILER_MISSION:
            result = self.stop_profiler_mission(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.GET_MISSION_STATUS:
            result = self.get_mission_status(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.GET_AVAILABLE_MISSIONS:
            result = self.get_available_missions(*args, **kwargs)

        else:
            result = super(RSNPlatformDriver, self).execute(cmd, args, kwargs)

        return result

    def _handler_connected_start_profiler_mission(self, *args, **kwargs):
        """
        """
        profile_mission_name = kwargs.get('profile_mission_name')
        if profile_mission_name is None:
            raise InstrumentException(
                'start_profiler_mission: missing profile_mission_name argument'
            )

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException(
                'start_profiler_mission: missing src argument')

        try:
            result = self.start_profiler_mission(profile_mission_name, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(
                RSNPlatformDriverEvent.START_PROFILER_MISSION, args, kwargs, e)

    def _handler_connected_stop_profiler_mission(self, *args, **kwargs):
        """
        """
        flag = kwargs.get('flag', None)
        if flag is None:
            raise InstrumentException(
                '_handler_connected_stop_profiler_mission: missing flag argument'
            )

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException(
                'stop_profiler_mission: missing src argument')

        try:
            result = self.stop_profiler_mission(flag, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(
                RSNPlatformDriverEvent.STOP_PROFILER_MISSION, args, kwargs, e)

    def _handler_connected_get_mission_status(self, *args, **kwargs):
        """
        """
        try:
            result = self.get_mission_status()
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(
                RSNPlatformDriverEvent.GET_MISSION_STATUS, args, kwargs, e)

    def _handler_connected_get_available_missions(self, *args, **kwargs):
        """
        """
        try:
            result = self.get_available_missions()
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(
                RSNPlatformDriverEvent.GET_AVAILABLE_MISSIONS, args, kwargs, e)

    def _handler_connected_get_eng_data(self, *args, **kwargs):
        """
        """

        try:
            result = self.get_eng_data()
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.GET_ENG_DATA,
                                         args, kwargs, e)

    def _handler_connected_set_port_over_current_limits(self, *args, **kwargs):
        """
        """
        port_id = kwargs.get('port_id', None)
        if port_id is None:
            raise InstrumentException(
                'set_port_over_current_limits: missing port_id argument')

        milliamps = kwargs.get('milliamps', None)
        if milliamps is None:
            raise InstrumentException(
                'set_port_over_current_limits: missing milliamps argument')

        microseconds = kwargs.get('microseconds', None)
        if microseconds is None:
            raise InstrumentException(
                'set_port_over_current_limits: missing microseconds argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException(
                'set_port_over_current_limits: missing src argument')

        try:
            result = self.set_overcurrent_limit(port_id, milliamps,
                                                microseconds, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(
                RSNPlatformDriverEvent.SET_PORT_OVER_CURRENT_LIMITS, args,
                kwargs, e)

    def _handler_connected_turn_on_port(self, *args, **kwargs):
        """
        """
        port_id = kwargs.get('port_id', None)
        if port_id is None:
            raise InstrumentException('turn_on_port: missing port_id argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('turn_on_port: missing src argument')

        try:
            result = self.turn_on_port(port_id, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.TURN_ON_PORT,
                                         args, kwargs, e)

    def _handler_connected_turn_off_port(self, *args, **kwargs):
        """
        """
        port_id = kwargs.get('port_id', None)
        if port_id is None:
            raise InstrumentException(
                'turn_off_port: missing port_id argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('turn_off_port: missing src argument')

        try:
            result = self.turn_off_port(port_id, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.TURN_OFF_PORT,
                                         args, kwargs, e)

    ##############################################################
    # RSN Platform driver FSM setup
    ##############################################################

    def _construct_fsm(self,
                       states=RSNPlatformDriverState,
                       events=RSNPlatformDriverEvent,
                       enter_event=RSNPlatformDriverEvent.ENTER,
                       exit_event=RSNPlatformDriverEvent.EXIT):
        """
        """
        super(RSNPlatformDriver, self)._construct_fsm(states, events,
                                                      enter_event, exit_event)

        # CONNECTED state event handlers we add in this class:
        self._fsm.add_handler(PlatformDriverState.CONNECTED,
                              RSNPlatformDriverEvent.TURN_ON_PORT,
                              self._handler_connected_turn_on_port)
        self._fsm.add_handler(
            PlatformDriverState.CONNECTED,
            RSNPlatformDriverEvent.SET_PORT_OVER_CURRENT_LIMITS,
            self._handler_connected_set_port_over_current_limits)
        self._fsm.add_handler(PlatformDriverState.CONNECTED,
                              RSNPlatformDriverEvent.TURN_OFF_PORT,
                              self._handler_connected_turn_off_port)
        self._fsm.add_handler(PlatformDriverState.CONNECTED,
                              RSNPlatformDriverEvent.START_PROFILER_MISSION,
                              self._handler_connected_start_profiler_mission)
        self._fsm.add_handler(PlatformDriverState.CONNECTED,
                              RSNPlatformDriverEvent.STOP_PROFILER_MISSION,
                              self._handler_connected_stop_profiler_mission)
        self._fsm.add_handler(PlatformDriverState.CONNECTED,
                              RSNPlatformDriverEvent.GET_MISSION_STATUS,
                              self._handler_connected_get_mission_status)
        self._fsm.add_handler(PlatformDriverState.CONNECTED,
                              RSNPlatformDriverEvent.GET_AVAILABLE_MISSIONS,
                              self._handler_connected_get_available_missions)
        self._fsm.add_handler(PlatformDriverState.CONNECTED,
                              RSNPlatformDriverEvent.GET_ENG_DATA,
                              self._handler_connected_get_eng_data)
        self._fsm.add_handler(PlatformDriverState.CONNECTED,
                              ScheduledJob.ACQUIRE_SAMPLE,
                              self._handler_connected_get_eng_data)
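
For orientation, here is a minimal, hypothetical sketch of how one of the CONNECTED-state commands registered above reaches its handler; the driver instance and the port_id/src values are placeholders, and it assumes the FSM forwards keyword arguments through to the handler.

# Hypothetical dispatch of a CONNECTED-state command (placeholder values):
# the FSM routes TURN_ON_PORT to _handler_connected_turn_on_port, which reads
# 'port_id' and 'src' from kwargs and delegates to turn_on_port().
driver._fsm.on_event(RSNPlatformDriverEvent.TURN_ON_PORT,
                     port_id='J05-IP1', src='operator initiated')
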
Example #22
0
class DriverScheduler(object):
    """
    Class to facilitate scheduling of event jobs in drivers.
    """
    def __init__(self, config=None):
        """
        config structure:
        {
            test_name: {
                trigger: {}
                callback: some_function
            }
        }
        @param config: job configuration structure.
        """
        self._scheduler = PolledScheduler()
        if config:
            self.add_config(config)

    def shutdown(self):
        self._scheduler.shutdown()

    def run_job(self, name):
        """
        Try to run a polled job with the passed in name.  If it
        runs then return true, otherwise false.
        @param name: name of the job
        @raise LookupError if we fail to find the job
        """
        return self._scheduler.run_polled_job(name)

    def add_config(self, config):
        """
        Add new jobs to the scheduler using the passed in config
        config structure:
        {
            test_name: {
                trigger: {}
                callback: some_function
            }
        }
        @param config: job configuration structure.
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("scheduler config not a dict")

        if len(config.keys()) == 0:
            raise SchedulerException("scheduler config empty")

        for (name, job_config) in config.items():
            try:
                self._add_job(name, job_config)
            except (ValueError, TypeError) as e:
                raise SchedulerException("failed to schedule job: %s" % e)

        if not self._scheduler.running:
            self._scheduler.start()

    def remove_job(self, callback):
        self._scheduler.unschedule_func(callback)

    def _add_job(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        log.debug(" Config name: %s value: %s" % (name, config))

        if (config == None):
            raise SchedulerException("job config empty")

        if (not isinstance(config, dict)):
            raise SchedulerException("job config not a dict")

        trigger = self._get_trigger_from_config(config)

        trigger_type = trigger.get(DriverSchedulerConfigKey.TRIGGER_TYPE)
        if (trigger_type == None):
            raise SchedulerException("trigger type missing")

        if (trigger_type == TriggerType.ABSOLUTE):
            self._add_job_absolute(name, config)
        elif (trigger_type == TriggerType.CRON):
            self._add_job_cron(name, config)
        elif (trigger_type == TriggerType.INTERVAL):
            self._add_job_interval(name, config)
        elif (trigger_type == TriggerType.POLLED_INTERVAL):
            self._add_job_polled_interval(name, config)
        else:
            raise SchedulerException("unknown trigger type '%s'" %
                                     trigger_type)

    def _get_trigger_from_config(self, config):
        """
        get and validate the trigger dictionary from the config object.
        @param config: configuration object to inspect
        @return: dictionary from the config for the trigger config
        """
        trigger = config.get(DriverSchedulerConfigKey.TRIGGER)
        if trigger is None:
            raise SchedulerException("trigger definition missing")
        if not isinstance(trigger, dict):
            raise SchedulerException("trigger definition not a dict")

        return trigger

    def _get_callback_from_config(self, config):
        """
        get and verify the callback parameter from a job config.
        @param config: configuration object to inspect
        @return: callback method from the config for the trigger config
        """
        callback = config.get(DriverSchedulerConfigKey.CALLBACK)
        if callback is None:
            raise SchedulerException("callback definition missing")
        if not callable(callback):
            raise SchedulerException("callback incorrect type: '%s'" %
                                     type(callback))

        return callback

    def _add_job_absolute(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("config not a dict")

        callback = self._get_callback_from_config(config)
        trigger = self._get_trigger_from_config(config)

        dt = trigger.get(DriverSchedulerConfigKey.DATE)
        if dt is None:
            raise SchedulerException("trigger missing parameter: %s" %
                                     DriverSchedulerConfigKey.DATE)

        self._scheduler.add_date_job(callback, dt)

    def _add_job_cron(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("config not a dict")

        callback = self._get_callback_from_config(config)
        trigger = self._get_trigger_from_config(config)

        year = trigger.get(DriverSchedulerConfigKey.YEAR)
        month = trigger.get(DriverSchedulerConfigKey.MONTH)
        day = trigger.get(DriverSchedulerConfigKey.DAY)
        week = trigger.get(DriverSchedulerConfigKey.WEEK)
        day_of_week = trigger.get(DriverSchedulerConfigKey.DAY_OF_WEEK)
        hour = trigger.get(DriverSchedulerConfigKey.HOUR)
        minute = trigger.get(DriverSchedulerConfigKey.MINUTE)
        second = trigger.get(DriverSchedulerConfigKey.SECOND)

        if (year is None and month is None and day is None and week is None
                and day_of_week is None and hour is None and minute is None
                and second is None):
            raise SchedulerException("at least one cron parameter required!")

        self._scheduler.add_cron_job(callback,
                                     year=year,
                                     month=month,
                                     day=day,
                                     week=week,
                                     day_of_week=day_of_week,
                                     hour=hour,
                                     minute=minute,
                                     second=second)

    def _add_job_interval(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("config not a dict")

        callback = self._get_callback_from_config(config)
        trigger = self._get_trigger_from_config(config)

        weeks = trigger.get(DriverSchedulerConfigKey.WEEKS, 0)
        days = trigger.get(DriverSchedulerConfigKey.DAYS, 0)
        hours = trigger.get(DriverSchedulerConfigKey.HOURS, 0)
        minutes = trigger.get(DriverSchedulerConfigKey.MINUTES, 0)
        seconds = trigger.get(DriverSchedulerConfigKey.SECONDS, 0)

        if not (weeks or days or hours or minutes or seconds):
            raise SchedulerException("at least one interval parameter required!")

        self._scheduler.add_interval_job(callback,
                                         weeks=weeks,
                                         days=days,
                                         hours=hours,
                                         minutes=minutes,
                                         seconds=seconds)

    def _add_job_polled_interval(self, name, config):
        """
        Add a new job to the scheduler based on the trigger configuration
        @param name: name of the job
        @param config: job configuration
        @raise SchedulerException if we fail to add the job
        """
        if not isinstance(config, dict):
            raise SchedulerException("config not a dict")

        callback = self._get_callback_from_config(config)
        trigger = self._get_trigger_from_config(config)

        min_interval = trigger.get(DriverSchedulerConfigKey.MINIMAL_INTERVAL)
        max_interval = trigger.get(DriverSchedulerConfigKey.MAXIMUM_INTERVAL)

        if min_interval is None:
            raise SchedulerException("%s missing from trigger configuration" %
                                     DriverSchedulerConfigKey.MINIMAL_INTERVAL)
        if not isinstance(min_interval, dict):
            raise SchedulerException("%s trigger configuration not a dict" %
                                     DriverSchedulerConfigKey.MINIMAL_INTERVAL)

        min_weeks = min_interval.get(DriverSchedulerConfigKey.WEEKS, 0)
        min_days = min_interval.get(DriverSchedulerConfigKey.DAYS, 0)
        min_hours = min_interval.get(DriverSchedulerConfigKey.HOURS, 0)
        min_minutes = min_interval.get(DriverSchedulerConfigKey.MINUTES, 0)
        min_seconds = min_interval.get(DriverSchedulerConfigKey.SECONDS, 0)

        if not (min_weeks or min_days or min_hours or min_minutes
                or min_seconds):
            raise SchedulerException("at least one interval parameter required!")

        min_interval_obj = self._scheduler.interval(min_weeks, min_days,
                                                    min_hours, min_minutes,
                                                    min_seconds)

        max_interval_obj = None
        if max_interval is not None:
            if not isinstance(max_interval, dict):
                raise SchedulerException(
                    "%s trigger configuration not a dict" %
                    DriverSchedulerConfigKey.MAXIMUM_INTERVAL)

            max_weeks = max_interval.get(DriverSchedulerConfigKey.WEEKS, 0)
            max_days = max_interval.get(DriverSchedulerConfigKey.DAYS, 0)
            max_hours = max_interval.get(DriverSchedulerConfigKey.HOURS, 0)
            max_minutes = max_interval.get(DriverSchedulerConfigKey.MINUTES, 0)
            max_seconds = max_interval.get(DriverSchedulerConfigKey.SECONDS, 0)

            if (max_weeks or max_days or max_hours or max_minutes
                    or max_seconds):
                max_interval_obj = self._scheduler.interval(
                    max_weeks, max_days, max_hours, max_minutes, max_seconds)

        self._scheduler.add_polled_job(callback, name, min_interval_obj,
                                       max_interval_obj)
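
To make the config structure documented in add_config() concrete, here is a small usage sketch (not part of the original listing). It assumes DriverScheduler, DriverSchedulerConfigKey and TriggerType are in scope (they are all referenced by the class above); the job names, callbacks and intervals are arbitrary.

# Hypothetical DriverScheduler usage; names and intervals are illustrative only.
def take_sample():
    print("interval job fired")

def poll_status():
    print("polled job fired")

config = {
    'acquire_sample': {  # plain interval job
        DriverSchedulerConfigKey.TRIGGER: {
            DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
            DriverSchedulerConfigKey.SECONDS: 1,
        },
        DriverSchedulerConfigKey.CALLBACK: take_sample,
    },
    'poll_status': {  # polled-interval job with a min/max window
        DriverSchedulerConfigKey.TRIGGER: {
            DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.POLLED_INTERVAL,
            DriverSchedulerConfigKey.MINIMAL_INTERVAL: {DriverSchedulerConfigKey.SECONDS: 1},
            DriverSchedulerConfigKey.MAXIMUM_INTERVAL: {DriverSchedulerConfigKey.SECONDS: 5},
        },
        DriverSchedulerConfigKey.CALLBACK: poll_status,
    },
}

scheduler = DriverScheduler(config)  # add_config() runs and the scheduler is started
scheduler.run_job('poll_status')     # True if the polled job ran now, False if the minimum interval has not elapsed
scheduler.shutdown()
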
Example #23
0
class RSNPlatformDriver(PlatformDriver):
    """
    The main RSN OMS platform driver class.
    """
    def __init__(self, event_callback):
        """
        Creates an RSNPlatformDriver instance.

        @param pnode           Root PlatformNode defining the platform network
                               rooted at this platform.
        @param event_callback  Listener of events generated by this driver
        """
        PlatformDriver.__init__(self, event_callback)

        # CIOMSClient instance created by connect() and destroyed by disconnect():
        self._rsn_oms = None

        # URL for the event listener registration/unregistration (based on
        # web server launched by ServiceGatewayService, since that's the
        # service in charge of receiving/relaying the OMS events).
        # NOTE: (as proposed long ago), this kind of functionality should
        # actually be provided by some component more in charge of the RSN
        # platform network as a whole -- as opposed to platform-specific).
        self.listener_url = None

        # scheduler config is a bit redundant now, but if we ever want to
        # re-initialize a scheduler we will need it.
        self._scheduler = None

    def _filter_capabilities(self, events):
        """
        """
        events_out = [x for x in events if RSNPlatformDriverCapability.has(x)]
        return events_out

    def validate_driver_configuration(self, driver_config):
        """
        Driver config must include 'oms_uri' entry.
        """
        if 'oms_uri' not in driver_config:
            log.error("'oms_uri' not present in driver_config = %s", driver_config)
            raise PlatformDriverException(msg="driver_config does not indicate 'oms_uri'")


    def _configure(self, driver_config):
        """
        Nothing special done here, only calls super.configure(driver_config)

        @param driver_config with required 'oms_uri' entry.
        """
        PlatformDriver._configure(self, driver_config)

        self.nodeCfg = NodeConfiguration()

        self._platform_id = driver_config['node_id']

        self.nodeCfg.OpenNode(self._platform_id,
                              driver_config['driver_config_file']['node_cfg_file'])

        if 'nms_source' in self.nodeCfg.node_meta_data:
            self.nms_source = self.nodeCfg.node_meta_data['nms_source']
        else:
            self.nms_source = 1

        if 'oms_sample_rate' in self.nodeCfg.node_meta_data:
            self.oms_sample_rate = self.nodeCfg.node_meta_data['oms_sample_rate']
        else:
            self.oms_sample_rate = 60

        self.nodeCfg.Print()

        self._construct_resource_schema()
        
    def _build_scheduler(self):
        """
        Build a scheduler for periodic status updates
        """
        self._scheduler = PolledScheduler()
        self._scheduler.start()

        def event_callback(self, event):
            log.info("driver job triggered, raise event: %s" % event)
            self._fsm.on_event(event)

        # Dynamically create the method and add it
        method = partial(event_callback, self, RSNPlatformDriverEvent.GET_ENG_DATA)

        self._job = self._scheduler.add_interval_job(method, seconds=self.oms_sample_rate)

    def _delete_scheduler(self):
        """
        Remove the autosample schedule.
        """
        try:
            self._scheduler.unschedule_job(self._job)
            self._scheduler.shutdown()
        except KeyError:
            log.info('Failed to remove scheduled job for ACQUIRE_SAMPLE')
    
    def _construct_resource_schema(self):
        """
        """
        parameters = deepcopy(self._param_dict)

        for k,v in parameters.iteritems():
            read_write = v.get('read_write', None)
            if read_write == 'write':
                v['visibility'] = 'READ_WRITE'
            else:
                v['visibility'] = 'READ_ONLY'

        commands = {}
        commands[RSNPlatformDriverEvent.TURN_ON_PORT] = \
            {
                "display_name" : "Port Power On",
                "description" : "Activate port power.",
                "args" : [],
                "kwargs" : {
                       'port_id' : {
                            "required" : True,
                            "type" : "int",
                        }
                }

            }
        commands[RSNPlatformDriverEvent.TURN_OFF_PORT] = \
            {
                "display_name" : "Port Power Off",
                "description" : "Deactivate port power.",
                "args" : [],
                "kwargs" : {
                       'port_id' : {
                            "required" : True,
                            "type" : "int",
                        }
                }
            }
 
        self._resource_schema['parameters'] = parameters
        self._resource_schema['commands'] = commands

    def _ping(self):
        """
        Verifies communication with external platform returning "PONG" if
        this verification completes OK.

        @retval "PONG" iff all OK.
        @raise PlatformConnectionException Cannot ping external platform or
               got unexpected response.
        """
        log.debug("%r: pinging OMS...", self._platform_id)
        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot ping: _rsn_oms object required (created via connect() call)")

        try:
            retval = self._rsn_oms.hello.ping()
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot ping: %s" % str(e))

        if retval is None or retval.upper() != "PONG":
            raise PlatformConnectionException(msg="Unexpected ping response: %r" % retval)

        log.debug("%r: ping completed: response: %s", self._platform_id, retval)

        return "PONG"

    def callback_for_alert(self, event, *args, **kwargs):
        log.debug("caught an OMSDeviceStatusEvent: %s", event)       
        
#        self._notify_driver_event(OMSEventDriverEvent(event['description']))
     
        log.info('Platform agent %r published OMSDeviceStatusEvent : %s, time: %s',
                 self._platform_id, event, time.time())


    
    def _connect(self, recursion=None):
        """
        Creates an CIOMSClient instance, does a ping to verify connection,
        and starts event dispatch.
        """
        # create CIOMSClient:
        oms_uri = self._driver_config['oms_uri']
        log.debug("%r: creating CIOMSClient instance with oms_uri=%r",
                  self._platform_id, oms_uri)
        self._rsn_oms = CIOMSClientFactory.create_instance(oms_uri)
        log.debug("%r: CIOMSClient instance created: %s",
                  self._platform_id, self._rsn_oms)

        # ping to verify connection:
        self._ping()

        # start event dispatch:
        self._start_event_dispatch()
        
        
        self._build_scheduler()

        # TODO - commented out
        # self.event_subscriber = EventSubscriber(event_type='OMSDeviceStatusEvent',
        #     callback=self.callback_for_alert)
        #
        # self.event_subscriber.start()

 

    def _disconnect(self, recursion=None):
        """
        Stops event dispatch and destroys the CIOMSClient instance.
        """
        self._stop_event_dispatch()
 #       self.event_subscriber.stop()
#        self.event_subscriber=None
  

        CIOMSClientFactory.destroy_instance(self._rsn_oms)
        self._rsn_oms = None
        log.debug("%r: CIOMSClient instance destroyed", self._platform_id)
        
        self._delete_scheduler()
        self._scheduler = None
        

    def get_metadata(self):
        """
        """
      
        return self.nodeCfg.meta_data

   
    def get_eng_data(self):
        if self.nms_source == 1:
            self.get_nms_eng_data()
        else:
            self.get_node_eng_data()
        
        
    def get_node_eng_data(self):

        log.debug("%r: get_eng_data...", self._platform_id)

        ntp_time = ntplib.system_to_ntp_time(time.time())

        for streamKey, stream in sorted(self.nodeCfg.node_streams.iteritems()):
            log.debug("%r Stream(%s)", self._platform_id, streamKey)
            attrs = list()
            for streamAttrKey, streamAttr in sorted(stream.iteritems()):

                # first time this is called, set this to a reasonable value
                if 'lastRcvSampleTime' not in streamAttr:
                    streamAttr['lastRcvSampleTime'] = ntp_time - streamAttr['monitor_cycle_seconds'] * 2

                lastRcvSampleTime = streamAttr['lastRcvSampleTime']

                # if we think that the OMS will have data for us, add it to the list
                if (lastRcvSampleTime + streamAttr['monitor_cycle_seconds']) < ntp_time:
                    # make sure we don't reach too far back by accident or we will
                    # clog up the OMS DB search
                    if (ntp_time - lastRcvSampleTime) > (streamAttr['monitor_cycle_seconds'] * 10):
                        lastRcvSampleTime = ntp_time - (streamAttr['monitor_cycle_seconds'] * 10)

                    # add a little bit of time to the last received so we don't get one we already have again
                    attrs.append((streamAttrKey, lastRcvSampleTime + 0.1))

            if len(attrs) > 0:
                returnDictTemp = self.get_attribute_values_from_oms(attrs)  # go get the data from the OMS

                returnDict = self.round_timestamps(returnDictTemp)

                ts_list = self.get_all_returned_timestamps(returnDict)  # get the list of all unique returned timestamps

                for ts in ts_list:  # for each timestamp create a particle and emit it
                    oneTimestampAttrs = self.get_single_timestamp_list(stream, ts, returnDict)  # go get the list at this timestamp
                    ionOneTimestampAttrs = self.convertAttrsToIon(stream, oneTimestampAttrs)  # scale the attrs and convert the names to ion

                    pad_particle = Platform_Particle(ionOneTimestampAttrs, port_timestamp=ts)  # need to review what port timestamp meaning is..

                    pad_particle.set_internal_timestamp(timestamp=ts)

                    pad_particle._data_particle_type = streamKey  # stream name

                    json_message = pad_particle.generate()  # this calls parse_values above to go from raw to values dict

                    event = {
                        'type': DriverAsyncEvent.SAMPLE,
                        'value': json_message,
                        'time': time.time()
                    }

                    self._send_event(event)

        return 1
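
    # A worked example (invented numbers) of the lookback clamp above: with
    # monitor_cycle_seconds = 10 and a lastRcvSampleTime that is 500 seconds old,
    # ntp_time - lastRcvSampleTime = 500 > 10 * 10, so the request time is clamped
    # to ntp_time - 100; the attribute is then requested from (ntp_time - 100) + 0.1
    # so that a sample we already received is not requested again.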

    def get_nms_eng_data(self):

        log.debug("%r: get_nms_eng_data...", self._platform_id)

        ntp_time = ntplib.system_to_ntp_time(time.time())

        attrs = list()

        for streamKey, stream in sorted(self.nodeCfg.node_streams.iteritems()):

            for streamAttrKey, streamAttr in sorted(stream.iteritems()):

                # first time this is called, set this to a reasonable value
                if 'lastRcvSampleTime' not in streamAttr:
                    streamAttr['lastRcvSampleTime'] = ntp_time - streamAttr['monitor_cycle_seconds'] * 2

                lastRcvSampleTime = streamAttr['lastRcvSampleTime']

                # if we think that the OMS will have data for us, add it to the list
                if (lastRcvSampleTime + streamAttr['monitor_cycle_seconds']) < ntp_time:
                    # make sure we don't reach too far back by accident or we will
                    # clog up the OMS DB search
                    if (ntp_time - lastRcvSampleTime) > (streamAttr['monitor_cycle_seconds'] * 10):
                        lastRcvSampleTime = ntp_time - (streamAttr['monitor_cycle_seconds'] * 10)

                    # add a little bit of time to the last received so we don't get one we already have again
                    attrs.append((streamAttrKey, lastRcvSampleTime + 0.1))

        if len(attrs) > 0:

            returnDict = self.get_attribute_values_from_oms(attrs)  # go get the data from the OMS

            for attr_id, attr_vals in returnDict.iteritems():  # go through the returned list of attributes

                for streamKey, stream in sorted(self.nodeCfg.node_streams.iteritems()):  # go through all the streams for this platform
                    if attr_id in stream:  # see if this attribute is in this stream
                        for v, ts in attr_vals:
                            stream[attr_id]['lastRcvSampleTime'] = ts
                            ionAttrs = self.convertAttrsToIon(stream, [(attr_id, v)])  # scale the attrs and convert the names to ion

                            pad_particle = Platform_Particle(ionAttrs, port_timestamp=ts)

                            pad_particle.set_internal_timestamp(timestamp=ts)

                            pad_particle._data_particle_type = streamKey  # stream name

                            json_message = pad_particle.generate()  # this calls parse_values above to go from raw to values dict

                            event = {
                                'type': DriverAsyncEvent.SAMPLE,
                                'value': json_message,
                                'time': time.time()
                            }

                            self._send_event(event)

        return 1



    def get_attribute_values_from_oms(self,attrs):
        """
        """
        if not isinstance(attrs, (list, tuple)):
            raise PlatformException('get_attribute_values: attrs argument must be a '
                                    'list [(attrName, from_time), ...]. Given: %s' % attrs)

        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot get_platform_attribute_values: _rsn_oms object required (created via connect() call)")
        
        log.debug("get_attribute_values: attrs=%s", self._platform_id)
        log.debug("get_attribute_values: attrs=%s", attrs)

        try:
            retval = self._rsn_oms.attr.get_platform_attribute_values(self._platform_id,
                                                                      attrs)
        except Exception as e:
            raise PlatformConnectionException(msg="get_attribute_values_from_oms Cannot get_platform_attribute_values: %s" % str(e))

        if self._platform_id not in retval:
            raise PlatformException("Unexpected: response get_attribute_values_from_oms does not include "
                                    "requested platform '%s'" % self._platform_id)

        attr_values = retval[self._platform_id]

        if isinstance(attr_values, str):
            raise PlatformException("Unexpected: response get_attribute_values_from_oms "
                                    "'%s'" % attr_values)

        # reported timestamps are already in NTP. Just return the dict:
        return attr_values
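
    # A hypothetical illustration (invented attribute names, values and NTP times)
    # of the shapes handled above: the request is a list of (attrName, from_time)
    # pairs, e.g.
    #     attrs = [('sec_node_port_J5_current', 3619296000.1)]
    # and the per-platform response maps each attribute to a list of
    # (value, ntp_timestamp) pairs, e.g.
    #     {'sec_node_port_J5_current': [(0.42, 3619296010.0), (0.43, 3619296070.0)]}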
    
    def get_all_returned_timestamps(self, attrs):

        ts_list = list()

        # go through all of the returned values and get the unique timestamps. Each
        # particle will have data for a unique timestamp
        for attr_id, attr_vals in attrs.iteritems():

            for v, ts in attr_vals:
                if ts not in ts_list:
                    ts_list.append(ts)

        return ts_list
    
    
    def get_single_timestamp_list(self, stream, ts_in, attrs):

        # create a list of sample data from just the single timestamp

        newAttrList = []  # key value list for this timestamp

        # assuming we will put all values in stream even if we didn't get a sample this time
        for key in stream:
            found_ts_match = 0
            if key in attrs:
                for v, ts in attrs[key]:
                    if ts == ts_in:
                        if found_ts_match == 0:
                            newAttrList.append((key, v))
                            found_ts_match = 1
                            if ts_in > stream[key]['lastRcvSampleTime']:
                                stream[key]['lastRcvSampleTime'] = ts_in
#            if found_ts_match == 0:
#                newAttrList.append((key, 'none'))  # What is the correct zero fill approach?

        return newAttrList
    
    
    
    def round_timestamps(self, attrs):
        """
        Round all returned timestamps to the nearest whole second.
        """
        new_attrs = {}

        for attr_id, attr_vals in attrs.iteritems():

            new_list = list()

            for v, ts in attr_vals:
                new_ts = round(ts, 0)
                new_list.append((v, new_ts))

            new_attrs[attr_id] = new_list

        return new_attrs

        

    
    def convertAttrsToIon(self, stream, attrs):
        """
        Convert attribute names back to ION parameter names and scale the
        values from OMS units to ION units.
        """
        attrs_return = []

        # convert back to ION parameter name and scale from OMS to ION
        for key, v in attrs:
            scaleFactor = stream[key]['scale_factor']
            if v == 'none':
                attrs_return.append((stream[key]['ion_parameter_name'], 'none'))
            else:
                attrs_return.append((stream[key]['ion_parameter_name'], v * scaleFactor))

        return attrs_return


    def _verify_platform_id_in_response(self, response):
        """
        Verifies the presence of my platform_id in the response.

        @param response Dictionary returned by _rsn_oms

        @retval response[self._platform_id]
        """
        if self._platform_id not in response:
            msg = "unexpected: response does not contain entry for %r" % self._platform_id
            log.error(msg)
            raise PlatformException(msg=msg)

        if response[self._platform_id] == InvalidResponse.PLATFORM_ID:
            msg = "response reports invalid platform_id for %r" % self._platform_id
            log.error(msg)
            raise PlatformException(msg=msg)
        else:
            return response[self._platform_id]

   

    def set_overcurrent_limit(self, port_id, milliamps, microseconds, src):
        """
        """
        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot set_overcurrent_limit: _rsn_oms object required (created via connect() call)")

        if port_id not in self.nodeCfg.node_port_info:
            raise PlatformConnectionException("Cannot set_overcurrent_limit: Invalid Port ID")

        oms_port_cntl_id = self.nodeCfg.node_port_info[port_id]['port_oms_port_cntl_id']

        # ok, now make the request to RSN OMS:
        try:
            response = self._rsn_oms.port.set_over_current(self._platform_id, oms_port_cntl_id,
                                                           int(milliamps), int(microseconds), src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot set_overcurrent_limit: %s" % str(e))

        log.debug("set_overcurrent_limit = %s", response)

        dic_plat = self._verify_platform_id_in_response(response)
        self._verify_port_id_in_response(oms_port_cntl_id, dic_plat)

        return dic_plat  # note: return the dic for the platform



  
    
    def turn_on_port(self, port_id, src):

        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot turn_on_port: _rsn_oms object required (created via connect() call)")

        if port_id not in self.nodeCfg.node_port_info:
            raise PlatformConnectionException("Cannot turn_on_port: Invalid Port ID")

        oms_port_cntl_id = self.nodeCfg.node_port_info[port_id]['port_oms_port_cntl_id']

        log.debug("%r: turning on port: port_id=%s oms port_id = %s",
                  self._platform_id, port_id, oms_port_cntl_id)

        try:
            response = self._rsn_oms.port.turn_on_platform_port(self._platform_id,
                                                                oms_port_cntl_id, src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot turn_on_platform_port: %s" % str(e))

        log.debug("%r: turn_on_platform_port response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)

        return dic_plat  # note: return the dic for the platform

    def turn_off_port(self, port_id, src):

        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot turn_off_port: _rsn_oms object required (created via connect() call)")

        if port_id not in self.nodeCfg.node_port_info:
            raise PlatformConnectionException("Cannot turn_off_port: Invalid Port ID")

        oms_port_cntl_id = self.nodeCfg.node_port_info[port_id]['port_oms_port_cntl_id']

        log.debug("%r: turning off port: port_id=%s oms port_id = %s",
                  self._platform_id, port_id, oms_port_cntl_id)

        try:
            response = self._rsn_oms.port.turn_off_platform_port(self._platform_id,
                                                                 oms_port_cntl_id, src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot turn_off_platform_port: %s" % str(e))

        log.debug("%r: turn_off_platform_port response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)

        return dic_plat  # note: return the dic for the platform
    
    def start_profiler_mission(self, mission_name,src):
        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot start_profiler_mission: _rsn_oms object required (created via connect() call)")

        try:
            response = self._rsn_oms.profiler.start_mission(self._platform_id,
                                                                mission_name,src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot start_profiler_mission: %s" % str(e))

        log.debug("%r: start_profiler_mission response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)
        # TODO commented
        #self._verify_port_id_in_response(port_id, dic_plat)

        return dic_plat  # note: return the dic for the platform

    def stop_profiler_mission(self,flag,src):
        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot stop_profiler_mission: _rsn_oms object required (created via connect() call)")

        try:
            response = self._rsn_oms.profiler.stop_mission(self._platform_id,flag,src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot stop_profiler_mission: %s" % str(e))

        log.debug("%r: stop_profiler_mission response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)
        # TODO commented
        #self._verify_port_id_in_response(port_id, dic_plat)

        return dic_plat  # note: return the dic for the platform

    def get_mission_status(self):
        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot get_mission_status: _rsn_oms object required (created via connect() call)")

        try:
            response = self._rsn_oms.profiler.get_mission_status(self._platform_id)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot get_mission_status: %s" % str(e))

        log.debug("%r: get_mission_status response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)
        # TODO commented
        #self._verify_port_id_in_response(port_id, dic_plat)

        return dic_plat  # note: return the dic for the platform
    
    def get_available_missions(self):
        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot get_available_missions: _rsn_oms object required (created via connect() call)")

        try:
            response = self._rsn_oms.profiler.get_available_missions(self._platform_id)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot get_available_missions: %s" % str(e))

        log.debug("%r: get_available_missions response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)
        # TODO commented
        #self._verify_port_id_in_response(port_id, dic_plat)

        return dic_plat  # note: return the dic for the platform



    ###############################################
    # External event handling:

    def _register_event_listener(self, url):
        """
        Registers given url for all event types.
        """
        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot _register_event_listener: _rsn_oms object required (created via connect() call)")

        try:
            already_registered = self._rsn_oms.event.get_registered_event_listeners()
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot get registered event listeners: %s" % (self._platform_id, e))

        if url in already_registered:
            log.debug("listener %r was already registered", url)
            return

        try:
            result = self._rsn_oms.event.register_event_listener(url)
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot register_event_listener: %s" % (self._platform_id, e))

        log.debug("%r: register_event_listener(%r) => %s", self._platform_id, url, result)

    def _unregister_event_listener(self, url):
        """
        Unregisters given url for all event types.
        """
        if self._rsn_oms is None:
            raise PlatformConnectionException("Cannot _unregister_event_listener: _rsn_oms object required (created via connect() call)")

        try:
            result = self._rsn_oms.event.unregister_event_listener(url)
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot unregister_event_listener: %s" % (self._platform_id, e))

        log.debug("%r: unregister_event_listener(%r) => %s", self._platform_id, url, result)

    def _start_event_dispatch(self):
        """
        Registers the event listener by using a URL that is composed from
        CFG.server.oms.host, CFG.server.oms.port, and CFG.server.oms.path.

        NOTE: the same listener URL will be registered by multiple RSN platform
        drivers. See other related notes in this file.

        @see https://jira.oceanobservatories.org/tasks/browse/OOIION-1287
        @see https://jira.oceanobservatories.org/tasks/browse/OOIION-968
        """

        # gateway host and port to compose URL:
        # TODO commented
        # host = CFG.get_safe('server.oms.host', "localhost")
        # port = CFG.get_safe('server.oms.port', "5000")
        # path = CFG.get_safe('server.oms.path', "/ion-service/oms_event")

        #the above are defined in pyon.cfg
        #we will override local host for debugging inside the VM
        host = "10.208.79.19"
        # TODO commented
        # self.listener_url = "http://%s:%s%s" % (host, port, path)
        # self._register_event_listener(self.listener_url)

        return "OK"

    def _stop_event_dispatch(self):
        """
        Stops the dispatch of events received from the platform network.

        NOTE: Nothing is actually done here: since the same listener URL
        is registered by multiple RSN platform drivers, we avoid unregistering
        it here because it might affect other drivers still depending on the
        events being notified.

        @see https://jira.oceanobservatories.org/tasks/browse/OOIION-968
        """

        log.debug("%r: Not unregistering listener URL to avoid affecting "
                  "other RSN platform drivers", self._platform_id)

        # unregister listener:
        #self._unregister_event_listener(self.listener_url)
        # NOTE: NO, DON'T unregister: other drivers might still be depending
        # on the listener being registered.

        return "OK"

   


    ##############################################################
    # GET
    ##############################################################

    def get(self, *args, **kwargs):

        if 'attrs' in kwargs:
            attrs = kwargs['attrs']
            result = self.get_attribute_values(attrs)
            return result


        if 'metadata' in kwargs:
            result = self.get_metadata()
            return result

        return super(RSNPlatformDriver, self).get(*args, **kwargs)
    
    
    
    

    ##############################################################
    # EXECUTE
    ##############################################################

    def execute(self, cmd, *args, **kwargs):
        """
        Executes the given command.

        @param cmd   command

        @return  result of the execution
        """


        if cmd == RSNPlatformDriverEvent.TURN_ON_PORT:
            result = self.turn_on_port(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.TURN_OFF_PORT:
            result = self.turn_off_port(*args, **kwargs)
            
        elif cmd == RSNPlatformDriverEvent.SET_PORT_OVER_CURRENT_LIMITS:
            result = self.set_overcurrent_limit(*args, **kwargs)
          
        elif cmd == RSNPlatformDriverEvent.START_PROFILER_MISSION:
            result = self.start_profiler_mission(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.STOP_PROFILER_MISSION:
            result = self.stop_profiler_mission(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.GET_MISSION_STATUS:
            result = self.get_mission_status(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.GET_AVAILABLE_MISSIONS:
            result = self.get_available_missions(*args, **kwargs)

        else:
            result = super(RSNPlatformDriver, self).execute(cmd, args, kwargs)

        return result

    def _get_ports(self):
        log.debug("%r: _get_ports: %s", self._platform_id, self.nodeCfg.node_port_list)
        return self.nodeCfg.node_port_list

    
    
    def _handler_connected_start_profiler_mission(self, *args, **kwargs):
        """
        """
        
#        profile_mission_name = kwargs.get('profile_mission_name', None)
        profile_mission_name = kwargs.get('profile_mission_name', 'Test_Profile_Mission_Name')
        if profile_mission_name is None:
            raise InstrumentException('start_profiler_mission: missing profile_mission_name argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('start_profiler_mission: missing src argument')

        try:
            result = self.start_profiler_mission(profile_mission_name, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.START_PROFILER_MISSION,
                                         args, kwargs, e)
            
            
    def _handler_connected_stop_profiler_mission(self, *args, **kwargs):
        """
        """
        
        flag = kwargs.get('flag', None)
        if flag is None:
            raise InstrumentException('stop_profiler_mission: missing flag argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('stop_profiler_mission: missing src argument')

        try:
            result = self.stop_profiler_mission(flag, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.STOP_PROFILER_MISSION,
                                         args, kwargs, e)   
            
    def _handler_connected_get_mission_status(self, *args, **kwargs):
        """
        """
        try:
            result = self.get_mission_status()
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.GET_MISSION_STATUS,
                                         args, kwargs, e)  
             
    def _handler_connected_get_available_missions(self, *args, **kwargs):
        """
        """
        try:
            result = self.get_available_missions()
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.GET_AVAILABLE_MISSIONS,
                                         args, kwargs, e)   

            
            
    def _handler_connected_get_eng_data(self, *args, **kwargs):
        """
        """

        try:
            result = self.get_eng_data()
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.GET_ENG_DATA,
                                         args, kwargs, e)
            
    
    
    def _handler_connected_set_port_over_current_limits(self, *args, **kwargs):
        """
        """
        port_id = kwargs.get('port_id', None)
        if port_id is None:
            raise InstrumentException('set_port_over_current_limits: missing port_id argument')

        milliamps = kwargs.get('milliamps', None)
        if milliamps is None:
            raise InstrumentException('set_port_over_current_limits: missing milliamps argument')

        microseconds = kwargs.get('microseconds', None)
        if microseconds is None:
            raise InstrumentException('set_port_over_current_limits: missing microseconds argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('set_port_over_current_limits: missing src argument')

        try:
            result = self.set_overcurrent_limit(port_id, milliamps, microseconds, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.SET_PORT_OVER_CURRENT_LIMITS,
                                         args, kwargs, e)
    

    def _handler_connected_turn_on_port(self, *args, **kwargs):
        """
        """
        port_id = kwargs.get('port_id', None)
        if port_id is None:
            raise InstrumentException('turn_on_port: missing port_id argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('turn_on_port: missing src argument')

        try:
            result = self.turn_on_port(port_id, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.TURN_ON_PORT,
                                         args, kwargs, e)

    def _handler_connected_turn_off_port(self, *args, **kwargs):
        """
        """
        port_id = kwargs.get('port_id', None)
        if port_id is None:
            raise InstrumentException('turn_off_port: missing port_id argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('turn_off_port: missing src argument')

        try:
            result = self.turn_off_port(port_id, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.TURN_OFF_PORT,
                                         args, kwargs, e)

    
    ##############################################################
    # RSN Platform driver FSM setup
    ##############################################################

    def _construct_fsm(self,
                       states=RSNPlatformDriverState,
                       events=RSNPlatformDriverEvent,
                       enter_event=RSNPlatformDriverEvent.ENTER,
                       exit_event=RSNPlatformDriverEvent.EXIT):
        """
        """
        super(RSNPlatformDriver, self)._construct_fsm(states, events,
                                                      enter_event, exit_event)

        # CONNECTED state event handlers we add in this class:
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.TURN_ON_PORT, self._handler_connected_turn_on_port)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.SET_PORT_OVER_CURRENT_LIMITS, self._handler_connected_set_port_over_current_limits)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.TURN_OFF_PORT, self._handler_connected_turn_off_port)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.START_PROFILER_MISSION, self._handler_connected_start_profiler_mission)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.STOP_PROFILER_MISSION, self._handler_connected_stop_profiler_mission)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.GET_MISSION_STATUS, self._handler_connected_get_mission_status)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.GET_AVAILABLE_MISSIONS, self._handler_connected_get_available_missions)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.GET_ENG_DATA, self._handler_connected_get_eng_data)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, ScheduledJob.ACQUIRE_SAMPLE, self._handler_connected_get_eng_data)
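
As with the FSM handlers, the execute() dispatcher defined earlier in this class can be exercised directly. The sketch below is hypothetical (the driver instance and the port_id/src values are placeholders) and only illustrates which command constants map to which methods.

# Hypothetical calls through execute(); argument values are placeholders.
driver.execute(RSNPlatformDriverEvent.TURN_ON_PORT, 'J05-IP1', 'operator')  # -> turn_on_port(port_id, src)
driver.execute(RSNPlatformDriverEvent.GET_MISSION_STATUS)                   # -> get_mission_status()
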
Example #24
0
class RSNPlatformDriver(PlatformDriver):
    """
    The main RSN OMS platform driver class.
    """

    def __init__(self, event_callback, refdes=None):
        """
        Creates an RSNPlatformDriver instance.
        @param event_callback  Listener of events generated by this driver
        """
        PlatformDriver.__init__(self, event_callback)

        # CIOMSClient instance created by connect() and destroyed by disconnect():
        self._rsn_oms = None

        # URL for the event listener registration/unregistration (based on
        # web server launched by ServiceGatewayService, since that's the
        # service in charge of receiving/relaying the OMS events).
        # NOTE: (as proposed long ago), this kind of functionality should
        # actually be provided by some component more in charge of the RSN
        # platform netwokr as a whole -- as opposed to platform-specific).
        self.listener_url = None

        # scheduler config is a bit redundant now, but if we ever want to
        # re-initialize a scheduler we will need it.
        self._scheduler = None
        self._last_sample_time = {}

    def _filter_capabilities(self, events):
        """
        """
        events_out = [x for x in events if RSNPlatformDriverCapability.has(x)]
        return events_out

    def validate_driver_configuration(self, driver_config):
        """
        Driver config must include 'oms_uri' entry.
        """
        if 'oms_uri' not in driver_config:
            log.error("'oms_uri' not present in driver_config = %r", driver_config)
            raise PlatformDriverException(msg="driver_config does not indicate 'oms_uri'")

    def _configure(self, driver_config):
        """
        Nothing special done here, only calls super.configure(driver_config)

        @param driver_config with required 'oms_uri' entry.
        """
        PlatformDriver._configure(self, driver_config)

        self.nodeCfg = NodeConfiguration()

        self._platform_id = driver_config['node_id']
        self.nodeCfg.openNode(self._platform_id, driver_config['driver_config_file']['node_cfg_file'])

        self.nms_source = self.nodeCfg.node_meta_data['nms_source']

        self.oms_sample_rate = self.nodeCfg.node_meta_data['oms_sample_rate']

        self.nodeCfg.Print()

        self._construct_resource_schema()

    def _build_scheduler(self):
        """
        Build a scheduler for periodic status updates
        """
        self._scheduler = PolledScheduler()
        self._scheduler.start()

        def event_callback(event):
            log.debug("driver job triggered, raise event: %s" % event)
            self._fsm.on_event(event)

        # Dynamically create the method and add it
        method = partial(event_callback, RSNPlatformDriverEvent.GET_ENG_DATA)

        self._job = self._scheduler.add_interval_job(method, seconds=self.oms_sample_rate)

    def _delete_scheduler(self):
        """
        Remove the autosample schedule.
        """
        try:
            self._scheduler.unschedule_job(self._job)
        except KeyError:
            log.debug('Failed to remove scheduled job for ACQUIRE_SAMPLE')

        self._scheduler.shutdown()

    def _construct_resource_schema(self):
        """
        """
        parameters = deepcopy(self._param_dict)

        for k, v in parameters.iteritems():
            read_write = v.get('read_write', None)
            if read_write == 'write':
                v['visibility'] = 'READ_WRITE'
            else:
                v['visibility'] = 'READ_ONLY'

        commands = {RSNPlatformDriverEvent.TURN_ON_PORT: {
            "display_name": "Port Power On",
            "description": "Activate port power.",
            "args": [],
            "kwargs": {
                'port_id': {
                    "required": True,
                    "type": "string",
                }
            }

        }, RSNPlatformDriverEvent.TURN_OFF_PORT: {
            "display_name": "Port Power Off",
            "description": "Deactivate port power.",
            "args": [],
            "kwargs": {
                'port_id': {
                    "required": True,
                    "type": "string",
                }
            }
        }}

        self._resource_schema['parameters'] = parameters
        self._resource_schema['commands'] = commands

    def _ping(self):
        """
        Verifies communication with external platform returning "PONG" if
        this verification completes OK.

        @retval "PONG" iff all OK.
        @raise PlatformConnectionException Cannot ping external platform or
               got unexpected response.
        """
        log.debug("%r: pinging OMS...", self._platform_id)
        self._verify_rsn_oms('_ping')

        try:
            retval = self._rsn_oms.hello.ping()
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot ping: %s" % str(e))

        if retval is None or retval.upper() != "PONG":
            raise PlatformConnectionException(msg="Unexpected ping response: %r" % retval)

        log.debug("%r: ping completed: response: %s", self._platform_id, retval)

        return "PONG"

    def _connect(self, recursion=None):
        """
        Creates a CIOMSClient instance, does a ping to verify connection,
        and starts event dispatch.
        """
        # create CIOMSClient:
        oms_uri = self._driver_config['oms_uri']
        log.debug("%r: creating CIOMSClient instance with oms_uri=%r",
                  self._platform_id, oms_uri)
        self._rsn_oms = CIOMSClientFactory.create_instance(oms_uri)
        log.debug("%r: CIOMSClient instance created: %s",
                  self._platform_id, self._rsn_oms)

        # ping to verify connection:
        self._ping()
        self._build_scheduler()  # then start calling it every X seconds

    def _disconnect(self, recursion=None):
        CIOMSClientFactory.destroy_instance(self._rsn_oms)
        self._rsn_oms = None
        log.debug("%r: CIOMSClient instance destroyed", self._platform_id)

        self._delete_scheduler()
        self._scheduler = None

    def get_metadata(self):
        """
        """
        return self.nodeCfg.node_meta_data

    def get_eng_data(self):
        ntp_time = ntplib.system_to_ntp_time(time.time())
        max_time = ntp_time - self.oms_sample_rate * 10

        for key, stream in self.nodeCfg.node_streams.iteritems():
            log.debug("%r Stream(%s)", self._platform_id, key)
            # prevent the max lookback time from getting too big if we stop receiving data for some reason
            self._last_sample_time[key] = max(self._last_sample_time.get(key, max_time), max_time)

            for instance in stream:
                self.get_instance_particles(key, instance, stream[instance])

    def group_by_timestamp(self, attr_dict):
        return_dict = {}
        # go through all of the returned values and get the unique timestamps. Each
        # particle will have data for a unique timestamp
        for attr_id, attr_vals in attr_dict.iteritems():
            for value, timestamp in attr_vals:
                return_dict.setdefault(timestamp, []).append((attr_id, value))

        return return_dict
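    # Rough illustration of group_by_timestamp (hypothetical values):
    #   input:  {'attr_a': [(1.5, 100.0), (1.6, 101.0)], 'attr_b': [(7.0, 100.0)]}
    #   output: {100.0: [('attr_a', 1.5), ('attr_b', 7.0)], 101.0: [('attr_a', 1.6)]}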

    def get_instance_particles(self, stream_name, instance, stream_def):
        # offset the last received timestamp slightly so we don't request a sample we already have
        attrs = [(k, self._last_sample_time[stream_name] + 0.1) for k in stream_def]

        if not attrs:
            return

        attr_dict = self.get_attribute_values_from_oms(attrs)  # go get the data from the OMS
        ts_attr_dict = self.group_by_timestamp(attr_dict)

        if not ts_attr_dict:
            return

        self._last_sample_time[stream_name] = max(ts_attr_dict.keys())

        for timestamp in ts_attr_dict:
            attrs = ts_attr_dict[timestamp]
            attrs = self.convert_attrs_to_ion(stream_def, attrs)
            particle = PlatformParticle(attrs, preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
            particle.set_internal_timestamp(timestamp)
            particle._data_particle_type = stream_name

            event = {
                'type': DriverAsyncEvent.SAMPLE,
                'value': particle.generate(),
                'time': time.time(),
                'instance': '%s-%s' % (self.nodeCfg.node_meta_data['reference_designator'], instance),
            }

            self._send_event(event)

    def get_attribute_values(self, attrs):
        """Simple wrapper method for compatibility.
        """
        return self.get_attribute_values_from_oms(attrs)

    def get_attribute_values_from_oms(self, attrs):
        """
        Fetch values from the OMS
        """
        if not isinstance(attrs, (list, tuple)):
            msg = 'get_attribute_values: attrs argument must be a list [(attrName, from_time), ...]. Given: %s' % attrs
            raise PlatformException(msg)

        self._verify_rsn_oms('get_attribute_values_from_oms')
        response = None

        try:
            response = self._rsn_oms.attr.get_platform_attribute_values(self._platform_id, attrs)
            response = self._verify_platform_id_in_response(response)
            return_dict = {}
            for key in response:
                value_list = response[key]
                if value_list == 'INVALID_ATTRIBUTE_ID':
                    continue

                if not isinstance(value_list, list):
                    raise PlatformException(msg="Error in getting values for attribute %s.  %s" % (key, value_list))
                if value_list and value_list[0][0] == "ERROR_DATA_REQUEST_TOO_FAR_IN_PAST":
                        raise PlatformException(msg="Time requested for %s too far in the past" % key)
                return_dict[key] = value_list
            return return_dict

        # AttributeError must precede the generic Exception handler to be reachable
        except AttributeError:
            msg = "Error returned in requesting attributes: %s" % response
            raise PlatformException(msg)
        except Exception as e:
            msg = "get_attribute_values_from_oms Cannot get_platform_attribute_values: %s" % e
            raise PlatformConnectionException(msg)

    def convert_attrs_to_ion(self, stream, attrs):
        attrs_return = []

        # convert back to ION parameter name and scale from OMS to ION
        for key, v in attrs:
            scale_factor = stream[key]['scale_factor']
            v = v * scale_factor if v else v
            attrs_return.append((stream[key]['ion_parameter_name'], v))

        return attrs_return
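    # Rough illustration of convert_attrs_to_ion (hypothetical stream definition):
    #   stream = {'temp_raw': {'scale_factor': 0.001, 'ion_parameter_name': 'temperature'}}
    #   attrs  = [('temp_raw', 12345)]  ->  [('temperature', 12.345)]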

    def _verify_platform_id_in_response(self, response):
        """
        Verifies the presence of my platform_id in the response.

        @param response Dictionary returned by _rsn_oms

        @retval response[self._platform_id]
        """
        if self._platform_id not in response:
            msg = "unexpected: response does not contain entry for %r" % self._platform_id
            log.error(msg)
            raise PlatformException(msg=msg)

        if response[self._platform_id] == InvalidResponse.PLATFORM_ID:
            msg = "response reports invalid platform_id for %r" % self._platform_id
            log.error(msg)
            raise PlatformException(msg=msg)
        else:
            return response[self._platform_id]

    def set_overcurrent_limit(self, port_id, milliamps, microseconds, src):
        def _verify_response(rsp):
            try:
                message = rsp[port_id]

                if not message.startswith('OK'):
                    raise PlatformException(msg="Error in setting overcurrent for port %s: %s" % (port_id, message))
            except KeyError:
                raise PlatformException(msg="Error in response: %s" % rsp)

        self._verify_rsn_oms('set_overcurrent_limit')
        oms_port_cntl_id = self._verify_and_return_oms_port(port_id, 'set_overcurrent_limit')

        try:
            response = self._rsn_oms.port.set_over_current(self._platform_id, oms_port_cntl_id, int(milliamps),
                                                           int(microseconds), src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot set_overcurrent_limit: %s" % str(e))

        response = self._convert_port_id_from_oms_to_ci(port_id, oms_port_cntl_id, response)
        log.debug("set_overcurrent_limit = %s", response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def turn_on_port(self, port_id, src):
        def _verify_response(rsp):
            try:
                message = rsp[port_id]

                if not message.startswith('OK'):
                    raise PlatformException(msg="Error in turning on port %s: %s" % (port_id, message))
            except KeyError:
                raise PlatformException(msg="Error in turn on port response: %s" % rsp)

        self._verify_rsn_oms('turn_on_port')
        oms_port_cntl_id = self._verify_and_return_oms_port(port_id, 'turn_on_port')

        log.debug("%r: turning on port: port_id=%s oms port_id = %s",
                  self._platform_id, port_id, oms_port_cntl_id)

        try:
            response = self._rsn_oms.port.turn_on_platform_port(self._platform_id,
                                                                oms_port_cntl_id, src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot turn_on_platform_port: %s" % str(e))

        response = self._convert_port_id_from_oms_to_ci(port_id, oms_port_cntl_id, response)
        log.debug("%r: turn_on_platform_port response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def turn_off_port(self, port_id, src):
        def _verify_response(rsp):
            try:
                message = rsp[port_id]

                if not message.startswith('OK'):
                    raise PlatformException(msg="Error in turning off port %s: %s" % (port_id, message))
            except KeyError:
                raise PlatformException(msg="Error in turn off port response: %s" % rsp)

        self._verify_rsn_oms('turn_off_port')
        oms_port_cntl_id = self._verify_and_return_oms_port(port_id, 'turn_off_port')

        log.debug("%r: turning off port: port_id=%s oms port_id = %s",
                  self._platform_id, port_id, oms_port_cntl_id)

        try:
            response = self._rsn_oms.port.turn_off_platform_port(self._platform_id,
                                                                 oms_port_cntl_id, src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot turn_off_platform_port: %s" % str(e))

        response = self._convert_port_id_from_oms_to_ci(port_id, oms_port_cntl_id, response)
        log.debug("%r: turn_off_platform_port response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def start_profiler_mission(self, mission_name, src):
        def _verify_response(rsp):
            try:
                message = rsp[mission_name]

                if not message.startswith('OK'):
                    raise PlatformException(msg="Error in starting mission %s: %s" % (mission_name, message))
            except KeyError:
                raise PlatformException(msg="Error in starting mission response: %s" % rsp)

        self._verify_rsn_oms('start_profiler_mission')

        try:
            response = self._rsn_oms.profiler.start_mission(self._platform_id,
                                                            mission_name, src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot start_profiler_mission: %s" % str(e))

        log.debug("%r: start_profiler_mission response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def stop_profiler_mission(self, flag, src):
        def _verify_response(rsp):
            if not rsp.startswith('OK'):
                raise PlatformException(msg="Error in stopping profiler: %s" % rsp)

        self._verify_rsn_oms('stop_profiler_mission')

        try:
            response = self._rsn_oms.profiler.stop_mission(self._platform_id, flag, src)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot stop_profiler_mission: %s" % str(e))

        log.debug("%r: stop_profiler_mission response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)
        _verify_response(dic_plat)

        return dic_plat  # note: return the dic for the platform

    def get_mission_status(self, *args, **kwargs):
        self._verify_rsn_oms('get_mission_status')

        try:
            response = self._rsn_oms.profiler.get_mission_status(self._platform_id)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot get_mission_status: %s" % str(e))

        log.debug("%r: get_mission_status response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)

        return dic_plat  # note: return the dic for the platform

    def get_available_missions(self, *args, **kwargs):
        self._verify_rsn_oms('get_available_missions')

        try:
            response = self._rsn_oms.profiler.get_available_missions(self._platform_id)
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot get_available_missions: %s" % str(e))

        log.debug("%r: get_available_missions response: %s",
                  self._platform_id, response)

        dic_plat = self._verify_platform_id_in_response(response)

        return dic_plat  # note: return the dic for the platform

    def _verify_rsn_oms(self, method_name):
        if self._rsn_oms is None:
            raise PlatformConnectionException(
                "Cannot %s: _rsn_oms object required (created via connect() call)" % method_name)

    def _verify_and_return_oms_port(self, port_id, method_name):
        if port_id not in self.nodeCfg.node_port_info:
            raise PlatformConnectionException("Cannot %s: Invalid Port ID" % method_name)

        return self.nodeCfg.node_port_info[port_id]['port_oms_port_cntl_id']

    def _convert_port_id_from_oms_to_ci(self, port_id, oms_port_cntl_id, response):
        """
        Converts the OMS port control id in the response back to the original port_id provided by the caller.
        """
        if response[self._platform_id].get(oms_port_cntl_id, None):
            return {self._platform_id: {port_id: response[self._platform_id].get(oms_port_cntl_id, None)}}

        return response
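    # Rough illustration (hypothetical ids): with port_id='1' mapped to
    # oms_port_cntl_id='J5', a response of {<platform_id>: {'J5': 'OK'}} is
    # rewritten as {<platform_id>: {'1': 'OK'}}; otherwise the response is
    # returned unchanged.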

    ###############################################
    # External event handling:

    def _register_event_listener(self, url):
        """
        Registers given url for all event types.
        """
        self._verify_rsn_oms('_register_event_listener')

        try:
            already_registered = self._rsn_oms.event.get_registered_event_listeners()
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot get registered event listeners: %s" % (self._platform_id, e))

        if url in already_registered:
            log.debug("listener %r was already registered", url)
            return

        try:
            result = self._rsn_oms.event.register_event_listener(url)
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot register_event_listener: %s" % (self._platform_id, e))

        log.debug("%r: register_event_listener(%r) => %s", self._platform_id, url, result)

    def _unregister_event_listener(self, url):
        """
        Unregisters given url for all event types.
        """
        self._verify_rsn_oms('_unregister_event_listener')

        try:
            result = self._rsn_oms.event.unregister_event_listener(url)
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot unregister_event_listener: %s" % (self._platform_id, e))

        log.debug("%r: unregister_event_listener(%r) => %s", self._platform_id, url, result)

    ##############################################################
    # GET
    ##############################################################

    def get(self, *args, **kwargs):

        if 'attrs' in kwargs:
            attrs = kwargs['attrs']
            result = self.get_attribute_values(attrs)
            return result

        if 'metadata' in kwargs:
            result = self.get_metadata()
            return result

        return super(RSNPlatformDriver, self).get(*args, **kwargs)

    ##############################################################
    # EXECUTE
    ##############################################################

    def execute(self, cmd, *args, **kwargs):
        """
        Executes the given command.

        @param cmd   command

        @return  result of the execution
        """
        if cmd == RSNPlatformDriverEvent.TURN_ON_PORT:
            result = self.turn_on_port(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.TURN_OFF_PORT:
            result = self.turn_off_port(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.SET_PORT_OVER_CURRENT_LIMITS:
            result = self.set_overcurrent_limit(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.START_PROFILER_MISSION:
            result = self.start_profiler_mission(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.STOP_PROFILER_MISSION:
            result = self.stop_profiler_mission(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.GET_MISSION_STATUS:
            result = self.get_mission_status(*args, **kwargs)

        elif cmd == RSNPlatformDriverEvent.GET_AVAILABLE_MISSIONS:
            result = self.get_available_missions(*args, **kwargs)

        else:
            result = super(RSNPlatformDriver, self).execute(cmd, *args, **kwargs)

        return result

    def _handler_connected_start_profiler_mission(self, *args, **kwargs):
        """
        """
        profile_mission_name = kwargs.get('profile_mission_name')
        if profile_mission_name is None:
            raise InstrumentException('start_profiler_mission: missing profile_mission_name argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('start_profiler_mission: missing src argument')

        try:
            result = self.start_profiler_mission(profile_mission_name, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.START_PROFILER_MISSION,
                                         args, kwargs, e)

    def _handler_connected_stop_profiler_mission(self, *args, **kwargs):
        """
        """
        flag = kwargs.get('flag', None)
        if flag is None:
            raise InstrumentException('_handler_connected_stop_profiler_mission: missing flag argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('stop_profiler_mission: missing src argument')

        try:
            result = self.stop_profiler_mission(flag, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.STOP_PROFILER_MISSION,
                                         args, kwargs, e)

    def _handler_connected_get_mission_status(self, *args, **kwargs):
        """
        """
        try:
            result = self.get_mission_status()
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.GET_MISSION_STATUS,
                                         args, kwargs, e)

    def _handler_connected_get_available_missions(self, *args, **kwargs):
        """
        """
        try:
            result = self.get_available_missions()
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.GET_AVAILABLE_MISSIONS,
                                         args, kwargs, e)

    def _handler_connected_get_eng_data(self, *args, **kwargs):
        """
        """

        try:
            self.get_eng_data()
            return None, None

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.GET_ENG_DATA,
                                         args, kwargs, e)

    def _handler_connected_set_port_over_current_limits(self, *args, **kwargs):
        """
        """
        port_id = kwargs.get('port_id', None)
        if port_id is None:
            raise InstrumentException('set_port_over_current_limits: missing port_id argument')

        milliamps = kwargs.get('milliamps', None)
        if milliamps is None:
            raise InstrumentException('set_port_over_current_limits: missing milliamps argument')

        microseconds = kwargs.get('microseconds', None)
        if microseconds is None:
            raise InstrumentException('set_port_over_current_limits: missing microseconds argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('set_port_over_current_limits: missing src argument')

        try:
            result = self.set_overcurrent_limit(port_id, milliamps, microseconds, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.SET_PORT_OVER_CURRENT_LIMITS,
                                         args, kwargs, e)

    def _handler_connected_turn_on_port(self, *args, **kwargs):
        """
        """
        port_id = kwargs.get('port_id', None)
        if port_id is None:
            raise InstrumentException('turn_on_port: missing port_id argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('turn_on_port: missing src argument')

        try:
            result = self.turn_on_port(port_id, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.TURN_ON_PORT,
                                         args, kwargs, e)

    def _handler_connected_turn_off_port(self, *args, **kwargs):
        """
        """
        port_id = kwargs.get('port_id', None)
        if port_id is None:
            raise InstrumentException('turn_off_port: missing port_id argument')

        src = kwargs.get('src', None)
        if src is None:
            raise InstrumentException('turn_off_port: missing src argument')

        try:
            result = self.turn_off_port(port_id, src)
            return None, result

        except PlatformConnectionException as e:
            return self._connection_lost(RSNPlatformDriverEvent.TURN_OFF_PORT,
                                         args, kwargs, e)

    ##############################################################
    # RSN Platform driver FSM setup
    ##############################################################

    def _construct_fsm(self,
                       states=RSNPlatformDriverState,
                       events=RSNPlatformDriverEvent,
                       enter_event=RSNPlatformDriverEvent.ENTER,
                       exit_event=RSNPlatformDriverEvent.EXIT):
        """
        """
        super(RSNPlatformDriver, self)._construct_fsm(states, events,
                                                      enter_event, exit_event)

        # CONNECTED state event handlers we add in this class:
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.TURN_ON_PORT,
                              self._handler_connected_turn_on_port)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.SET_PORT_OVER_CURRENT_LIMITS,
                              self._handler_connected_set_port_over_current_limits)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.TURN_OFF_PORT,
                              self._handler_connected_turn_off_port)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.START_PROFILER_MISSION,
                              self._handler_connected_start_profiler_mission)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.STOP_PROFILER_MISSION,
                              self._handler_connected_stop_profiler_mission)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.GET_MISSION_STATUS,
                              self._handler_connected_get_mission_status)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.GET_AVAILABLE_MISSIONS,
                              self._handler_connected_get_available_missions)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, RSNPlatformDriverEvent.GET_ENG_DATA,
                              self._handler_connected_get_eng_data)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, ScheduledJob.ACQUIRE_SAMPLE,
                              self._handler_connected_get_eng_data)
Пример #25
0
class TestScheduler(MiUnitTest):
    """
    Test the scheduler
    """    
    def setUp(self):
        """
        Setup the test case
        """
        self._scheduler = PolledScheduler()
        self._scheduler.start()
        self._triggered = []

        self.assertTrue(self._scheduler.daemonic)

    def tearDown(self):
        self._scheduler.shutdown()

    def _callback(self):
        """
        event callback for event processing
        """
        log.debug("Event triggered.")
        self._triggered.append(datetime.datetime.now())

    def assert_datetime_close(self, ldate, rdate, delta_seconds=0.1):
        """
        compare two date time objects to see if they are equal within delta_seconds
        param: ldate left hand date
        param: rdate right hand date
        param: delta_seconds tolerance
        """
        delta = ldate - rdate
        seconds = timedelta_seconds(delta)
        self.assertLessEqual(abs(seconds), delta_seconds)
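    # e.g. assert_datetime_close(t1, t2, 0.5) passes when |t1 - t2| <= 0.5 seconds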

    def assert_event_triggered(self, expected_arrival=None, poll_time=0.5, timeout=10):
        """
        Verify a timer was triggered within the timeout, and if
        expected_arrival is set, check that the arrival time matches it too.
        @param expected_arrival datetime object with time we expect the triggered event to fire
        @param poll_time time to sleep between arrival queue checks, also sets the precision of
                         expected_arrival
        @param timeout seconds to wait for an event
        """
        endtime = datetime.datetime.now() + datetime.timedelta(0, timeout)

        while len(self._triggered) == 0 and datetime.datetime.now() < endtime:
            log.trace("Wait for event.")
            time.sleep(poll_time)

        log.debug("Out of test loop")
        self.assertGreater(len(self._triggered), 0)
        arrival_time = self._triggered.pop()
        self.assertIsNotNone(arrival_time)
        if expected_arrival is not None:
            self.assert_datetime_close(arrival_time, expected_arrival, poll_time)

    def assert_event_not_triggered(self, poll_time = 0.5, timeout = 10):
        """
        Verify a timer was not triggered within the timeout
        @param poll_time time to sleep between arrival queue checks
        @param timeout seconds to wait for an event
        """
        endtime = datetime.datetime.now() + datetime.timedelta(0, timeout)

        while len(self._triggered) == 0 and datetime.datetime.now() < endtime:
            log.trace("Wait for event.")
            time.sleep(poll_time)

        log.debug("Out of test loop")
        self.assertEqual(len(self._triggered), 0)

    def test_absolute_time(self):
        """
        Test with absolute time.  Not an exhaustive test because it's implemented in the library
        not our code.
        """
        dt = datetime.datetime.now() + datetime.timedelta(0, 1)
        job = self._scheduler.add_date_job(self._callback, dt)
        self.assert_event_triggered(dt)

    def test_elapse_time(self):
        """
        Test with elapsed time.  Not an exhaustive test because it's implemented in the library
        not our code.
        """
        now = datetime.datetime.now()
        interval = PolledScheduler.interval(seconds=3)

        job = self._scheduler.add_interval_job(self._callback, seconds=3)
        self.assert_event_triggered(now + interval)
        self.assert_event_triggered(now + interval + interval)
        self.assert_event_triggered(now + interval + interval + interval)

        # Now shutdown the scheduler and verify we aren't firing events
        self._scheduler.shutdown()
        self._triggered = []
        self.assert_event_not_triggered()

    def test_cron_syntax(self):
        """
        Test with cron syntax.  Not an exhaustive test because it's implemented in the library
        not our code.
        """
        job = self._scheduler.add_cron_job(self._callback, second='*/3')
        self.assert_event_triggered()
        self.assert_event_triggered()

    @unittest.skip("TODO, fix this test.  Failing on buildbot not in dev")
    def test_polled_time(self):
        """
        Test a polled job with an interval.  Also test some exceptions
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name, min_interval, max_interval)
        self.assertEqual(len(self._scheduler.get_jobs()), 1)
        self.assert_event_triggered(now+max_interval)

        # after a triggered event the min time should be extended.
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(1)
        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assert_event_triggered(now + min_interval + max_interval)

        # after a polled event the wait time should also be extended
        self.assert_event_triggered(now + min_interval + max_interval + max_interval)

        # Test exceptions. Job name doesn't exist
        with self.assertRaises(LookupError):
            self._scheduler.run_polled_job('foo')

        # Verify that an exception is raised if we try to add a job with the same name
        with self.assertRaises(ValueError):
            job = self._scheduler.add_polled_job(self._callback, test_name, min_interval, max_interval)

    def test_polled_time_no_interval(self):
        """
        Test the scheduler with a polled job with no interval
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name, min_interval)

        self.assertEqual(len(self._scheduler.get_jobs()), 1)

        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(2)
        self.assertTrue(self._scheduler.run_polled_job(test_name))

    def test_polled_time_no_interval_not_started(self):
        """
        Try to set up some jobs with the scheduler before the scheduler has been started.
        Then start the scheduler and verify the job is set up properly.
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)

        self._scheduler = PolledScheduler()
        self.assertFalse(self._scheduler.running)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name, min_interval)
        self.assertIsNotNone(job)
        self.assertEqual(len(self._scheduler.get_jobs()), 0)
        self.assertEqual(len(self._scheduler._pending_jobs), 1)

        self._scheduler.start()

        log.debug("JOBS: %s" % self._scheduler.get_jobs())
        self.assertEqual(len(self._scheduler.get_jobs()), 1)

        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(2)
        self.assertTrue(self._scheduler.run_polled_job(test_name))


####################################################################################################
#  Test our new polled trigger
####################################################################################################
    def test_polled_interval_trigger(self):
        """
        test the trigger mechanism.
        """
        ###
        # Test all constructors and exceptions
        ###
        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1))
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertIsNone(trigger.max_interval)
        self.assertIsNone(trigger.max_interval_length)
        self.assertIsInstance(trigger.next_min_date, datetime.datetime)
        self.assertIsNone(trigger.next_max_date)

        trigger = PolledIntervalTrigger(
            PolledScheduler.interval(seconds=1),
            PolledScheduler.interval(seconds=3)
        )
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertEqual(trigger.max_interval_length, 3)

        trigger = PolledIntervalTrigger(
            PolledScheduler.interval(seconds=1),
            PolledScheduler.interval(seconds=3),
            datetime.datetime.now()
        )
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertEqual(trigger.max_interval_length, 3)

        # Test Type Error Exception
        with self.assertRaises(TypeError):
            trigger = PolledIntervalTrigger('boom')

        with self.assertRaises(TypeError):
            trigger = PolledIntervalTrigger(
                PolledScheduler.interval(seconds=3),
                'boom'
            )

        # Test Value Error Exception
        with self.assertRaises(ValueError):
            trigger = PolledIntervalTrigger(
                PolledScheduler.interval(seconds=3),
                PolledScheduler.interval(seconds=1)
            )

        ###
        # Verify min and max dates are incremented correctly.
        ###
        now = datetime.datetime.now()
        log.debug("Now: %s" % now)
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)

        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        # Initialized correctly?
        self.assert_datetime_close(trigger.next_min_date, now)
        self.assert_datetime_close(trigger.next_max_date, now + max_interval)
        self.assert_datetime_close(trigger.get_next_fire_time(now), now + max_interval)

        # First call should be successful, but second should not.
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        self.assert_datetime_close(trigger.next_min_date, now + min_interval)
        self.assert_datetime_close(trigger.next_max_date, now + max_interval)
        self.assert_datetime_close(trigger.get_next_fire_time(now), now + max_interval)

        # Wait for the minimum interval and it should succeed again!
        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        ###
        # Now do the same sequence, but with no max_interval
        ###
        now = datetime.datetime.now()
        log.debug("Now: %s" % now)
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = None

        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        # Initialized correctly?
        self.assert_datetime_close(trigger.next_min_date, now)
        self.assertIsNone(trigger.next_max_date)
        self.assertIsNone(trigger.get_next_fire_time(now))

        # First call should be successful, but second should not.
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        self.assert_datetime_close(trigger.next_min_date, now + min_interval)
        self.assertIsNone(trigger.next_max_date)
        self.assertIsNone(trigger.get_next_fire_time(now))

        # Wait for the minimum interval and it should succeed again!
        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

    def test_trigger_string(self):
        """
        test the str and repr methods
        """
        now = datetime.datetime.now()
        trigger = PolledIntervalTrigger(
            PolledScheduler.interval(seconds=1),
            PolledScheduler.interval(seconds=3),
            now)

        self.assertEqual(str(trigger), "min_interval[0:00:01] max_interval[0:00:03]")
        self.assertEqual(repr(trigger), "<PolledIntervalTrigger (min_interval=datetime.timedelta(0, 1), max_interval=datetime.timedelta(0, 3))>")

        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1), None, now)
        self.assertEqual(str(trigger), "min_interval[0:00:01] max_interval[None]")
        self.assertEqual(repr(trigger), "<PolledIntervalTrigger (min_interval=datetime.timedelta(0, 1), max_interval=None)>")

####################################################################################################
#  Test our new polled job
####################################################################################################
    def test_polled_job(self):
        """
        Test features of the specialized job class that we overloaded.
        """
        now = datetime.datetime.now()
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)
        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        job = PolledIntervalJob(trigger, self._callback, [], {}, 1, 1, name='test_job')
        self.assertIsNotNone(job)
        log.debug("H: %s" % repr(job))
        next_time = job.compute_next_run_time(now)
        self.assert_datetime_close(next_time, now + max_interval)
        self.assertEqual(job.name, 'test_job')

        self.assertTrue(job.ready_to_run())
        next_time = job.compute_next_run_time(now)
        self.assertFalse(job.ready_to_run())
        self.assert_datetime_close(next_time, now + max_interval)

        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(job.ready_to_run())

        next_time = job.compute_next_run_time(now)
        self.assertFalse(job.ready_to_run())
        self.assert_datetime_close(next_time, now + max_interval)
Пример #26
0
    def test_polled_interval_trigger(self):
        """
        test the trigger mechanism.
        """
        ###
        # Test all constructors and exceptions
        ###
        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1))
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertIsNone(trigger.max_interval)
        self.assertIsNone(trigger.max_interval_length)
        self.assertIsInstance(trigger.next_min_date, datetime.datetime)
        self.assertIsNone(trigger.next_max_date)

        trigger = PolledIntervalTrigger(
            PolledScheduler.interval(seconds=1),
            PolledScheduler.interval(seconds=3)
        )
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertEqual(trigger.max_interval_length, 3)

        trigger = PolledIntervalTrigger(
            PolledScheduler.interval(seconds=1),
            PolledScheduler.interval(seconds=3),
            datetime.datetime.now()
        )
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertEqual(trigger.max_interval_length, 3)

        # Test Type Error Exception
        with self.assertRaises(TypeError):
            trigger = PolledIntervalTrigger('boom')

        with self.assertRaises(TypeError):
            trigger = PolledIntervalTrigger(
                PolledScheduler.interval(seconds=3),
                'boom'
            )

        # Test Value Error Exception
        with self.assertRaises(ValueError):
            trigger = PolledIntervalTrigger(
                PolledScheduler.interval(seconds=3),
                PolledScheduler.interval(seconds=1)
            )

        ###
        # Verify min and max dates are incremented correctly.
        ###
        now = datetime.datetime.now()
        log.debug("Now: %s" % now)
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)

        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        # Initialized correctly?
        self.assert_datetime_close(trigger.next_min_date, now)
        self.assert_datetime_close(trigger.next_max_date, now + max_interval)
        self.assert_datetime_close(trigger.get_next_fire_time(now), now + max_interval)

        # First call should be successful, but second should not.
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        self.assert_datetime_close(trigger.next_min_date, now + min_interval)
        self.assert_datetime_close(trigger.next_max_date, now + max_interval)
        self.assert_datetime_close(trigger.get_next_fire_time(now), now + max_interval)

        # Wait for the minimum interval and it should succeed again!
        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        ###
        # Now do the same sequence, but with no max_interval
        ###
        now = datetime.datetime.now()
        log.debug("Now: %s" % now)
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = None

        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        # Initialized correctly?
        self.assert_datetime_close(trigger.next_min_date, now)
        self.assertIsNone(trigger.next_max_date)
        self.assertIsNone(trigger.get_next_fire_time(now))

        # First call should be successful, but second should not.
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        self.assert_datetime_close(trigger.next_min_date, now + min_interval)
        self.assertIsNone(trigger.next_max_date)
        self.assertIsNone(trigger.get_next_fire_time(now))

        # Wait for the minimum interval and it should succeed again!
        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())
Пример #27
0
    def test_polled_interval_trigger(self):
        """
        test the trigger mechanism.
        """
        ###
        # Test all constructors and exceptions
        ###
        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1))
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertIsNone(trigger.max_interval)
        self.assertIsNone(trigger.max_interval_length)
        self.assertIsInstance(trigger.next_min_date, datetime.datetime)
        self.assertIsNone(trigger.next_max_date)

        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1),
                                        PolledScheduler.interval(seconds=3))
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertEqual(trigger.max_interval_length, 3)

        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1),
                                        PolledScheduler.interval(seconds=3),
                                        datetime.datetime.now())
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertEqual(trigger.max_interval_length, 3)

        # Test Type Error Exception
        with self.assertRaises(TypeError):
            trigger = PolledIntervalTrigger('boom')

        with self.assertRaises(TypeError):
            trigger = PolledIntervalTrigger(
                PolledScheduler.interval(seconds=3), 'boom')

        # Test Value Error Exception
        with self.assertRaises(ValueError):
            trigger = PolledIntervalTrigger(
                PolledScheduler.interval(seconds=3),
                PolledScheduler.interval(seconds=1))

        ###
        # Verify min and max dates are incremented correctly.
        ###
        now = datetime.datetime.now()
        log.debug("Now: %s" % now)
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)

        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        # Initialized correctly?
        self.assert_datetime_close(trigger.next_min_date, now)
        self.assert_datetime_close(trigger.next_max_date, now + max_interval)
        self.assert_datetime_close(trigger.get_next_fire_time(now),
                                   now + max_interval)

        # First call should be successful, but second should not.
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        self.assert_datetime_close(trigger.next_min_date, now + min_interval)
        self.assert_datetime_close(trigger.next_max_date, now + max_interval)
        self.assert_datetime_close(trigger.get_next_fire_time(now),
                                   now + max_interval)

        # Wait for the minimum interval and it should succeed again!
        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        ###
        # Now do the same sequence, but with no max_interval
        ###
        now = datetime.datetime.now()
        log.debug("Now: %s" % now)
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = None

        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        # Initialized correctly?
        self.assert_datetime_close(trigger.next_min_date, now)
        self.assertIsNone(trigger.next_max_date)
        self.assertIsNone(trigger.get_next_fire_time(now))

        # First call should be successful, but second should not.
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        self.assert_datetime_close(trigger.next_min_date, now + min_interval)
        self.assertIsNone(trigger.next_max_date)
        self.assertIsNone(trigger.get_next_fire_time(now))

        # Wait for the minimum interval and it should succeed again!
        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())
Пример #28
0
class TestScheduler(MiUnitTest):
    """
    Test the scheduler
    """
    def setUp(self):
        """
        Setup the test case
        """
        self._scheduler = PolledScheduler()
        self._scheduler.start()
        self._triggered = []

        self.assertTrue(self._scheduler.daemonic)

    def tearDown(self):
        self._scheduler.shutdown()

    def _callback(self):
        """
        event callback for event processing
        """
        log.debug("Event triggered.")
        self._triggered.append(datetime.datetime.now())

    def assert_datetime_close(self, ldate, rdate, delta_seconds=0.1):
        """
        compare two date time objects to see if they are equal within delta_seconds
        param: ldate left hand date
        param: rdate right hand date
        param: delta_seconds tolerance
        """
        delta = ldate - rdate
        seconds = timedelta_seconds(delta)
        self.assertLessEqual(abs(seconds), delta_seconds)

    def assert_event_triggered(self,
                               expected_arrival=None,
                               poll_time=0.5,
                               timeout=10):
        """
        Verify a timer was triggered within the timeout, and if
        expected_arrival is set, check that the arrival time matches it too.
        @param expected_arrival datetime object with time we expect the triggered event to fire
        @param poll_time time to sleep between arrival queue checks, also sets the precision of
                         expected_arrival
        @param timeout seconds to wait for an event
        """
        endtime = datetime.datetime.now() + datetime.timedelta(0, timeout)

        while (len(self._triggered) == 0
               and datetime.datetime.now() < endtime):
            log.trace("Wait for event.")
            time.sleep(poll_time)

        log.debug("Out of test loop")
        self.assertGreater(len(self._triggered), 0)
        arrival_time = self._triggered.pop()
        self.assertIsNotNone(arrival_time)
        if expected_arrival is not None:
            self.assert_datetime_close(arrival_time, expected_arrival,
                                       poll_time)

    def assert_event_not_triggered(self, poll_time=0.5, timeout=10):
        """
        Verify a timer was not triggered within the timeout
        @param poll_time time to sleep between arrival queue checks
        @param timeout seconds to wait for an event
        """
        endtime = datetime.datetime.now() + datetime.timedelta(0, timeout)

        while (len(self._triggered) == 0
               and datetime.datetime.now() < endtime):
            log.trace("Wait for event.")
            time.sleep(poll_time)

        log.debug("Out of test loop")
        self.assertEqual(len(self._triggered), 0)

    def test_absolute_time(self):
        """
        Test with absolute time.  Not an exhaustive test because it's implemented in the library
        not our code.
        """
        dt = datetime.datetime.now() + datetime.timedelta(0, 1)
        job = self._scheduler.add_date_job(self._callback, dt)
        self.assert_event_triggered(dt)

    def test_elapse_time(self):
        """
        Test with elapsed time.  Not an exhaustive test because it's implemented in the library
        not our code.
        """
        now = datetime.datetime.now()
        interval = PolledScheduler.interval(seconds=3)

        job = self._scheduler.add_interval_job(self._callback, seconds=3)
        self.assert_event_triggered(now + interval)
        self.assert_event_triggered(now + interval + interval)
        self.assert_event_triggered(now + interval + interval + interval)

        # Now shutdown the scheduler and verify we aren't firing events
        self._scheduler.shutdown()
        self._triggered = []
        self.assert_event_not_triggered()

    def test_cron_syntax(self):
        """
        Test with cron syntax.  Not an exhaustive test because it's implemented in the library
        not our code.
        """
        job = self._scheduler.add_cron_job(self._callback, second='*/3')
        self.assert_event_triggered()
        self.assert_event_triggered()

    @unittest.skip("TODO, fix this test.  Failing on buildbot not in dev")
    def test_polled_time(self):
        """
        Test a polled job with an interval.  Also test some exceptions
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name,
                                             min_interval, max_interval)
        self.assertEqual(len(self._scheduler.get_jobs()), 1)
        self.assert_event_triggered(now + max_interval)

        # after a triggered event the min time should be extended.
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(1)
        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assert_event_triggered(now + min_interval + max_interval)

        # after a polled event the wait time should also be extended
        self.assert_event_triggered(now + min_interval + max_interval +
                                    max_interval)

        # Test exceptions. Job name doesn't exist
        with self.assertRaises(LookupError):
            self._scheduler.run_polled_job('foo')

        # Verify that an exception is raised if we try to add a job with the same name
        with self.assertRaises(ValueError):
            job = self._scheduler.add_polled_job(self._callback, test_name,
                                                 min_interval, max_interval)

    @unittest.skip("TODO, fix this test.  Failing on buildbot not in dev")
    def test_polled_time_no_interval(self):
        """
        Test the scheduler with a polled job with no interval
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name,
                                             min_interval)

        self.assertEqual(len(self._scheduler.get_jobs()), 1)

        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(2)
        self.assertTrue(self._scheduler.run_polled_job(test_name))

    def test_polled_time_no_interval_not_started(self):
        """
        Try to set up some jobs with the scheduler before the scheduler has been started.
        Then start the scheduler and verify the job is set up properly.
        """
        now = datetime.datetime.now()
        test_name = 'test_job'
        min_interval = PolledScheduler.interval(seconds=1)

        self._scheduler = PolledScheduler()
        self.assertFalse(self._scheduler.running)

        # Verify that triggered events work.
        job = self._scheduler.add_polled_job(self._callback, test_name,
                                             min_interval)
        self.assertIsNotNone(job)
        self.assertEqual(len(self._scheduler.get_jobs()), 0)
        self.assertEqual(len(self._scheduler._pending_jobs), 1)

        self._scheduler.start()

        log.debug("JOBS: %s" % self._scheduler.get_jobs())
        self.assertEqual(len(self._scheduler.get_jobs()), 1)

        self.assertTrue(self._scheduler.run_polled_job(test_name))
        self.assertFalse(self._scheduler.run_polled_job(test_name))
        time.sleep(2)
        self.assertTrue(self._scheduler.run_polled_job(test_name))

####################################################################################################
#  Test our new polled trigger
####################################################################################################

    def test_polled_interval_trigger(self):
        """
        test the trigger mechanism.
        """
        ###
        # Test all constructors and exceptions
        ###
        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1))
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertIsNone(trigger.max_interval)
        self.assertIsNone(trigger.max_interval_length)
        self.assertIsInstance(trigger.next_min_date, datetime.datetime)
        self.assertIsNone(trigger.next_max_date)

        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1),
                                        PolledScheduler.interval(seconds=3))
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertEqual(trigger.max_interval_length, 3)

        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1),
                                        PolledScheduler.interval(seconds=3),
                                        datetime.datetime.now())
        self.assertEqual(trigger.min_interval_length, 1)
        self.assertEqual(trigger.max_interval_length, 3)

        # Test Type Error Exception
        with self.assertRaises(TypeError):
            trigger = PolledIntervalTrigger('boom')

        with self.assertRaises(TypeError):
            trigger = PolledIntervalTrigger(
                PolledScheduler.interval(seconds=3), 'boom')

        # Test Value Error Exception
        with self.assertRaises(ValueError):
            trigger = PolledIntervalTrigger(
                PolledScheduler.interval(seconds=3),
                PolledScheduler.interval(seconds=1))

        ###
        # Verify min and max dates are incremented correctly.
        ###
        now = datetime.datetime.now()
        log.debug("Now: %s" % now)
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)

        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        # Initialized correctly?
        self.assert_datetime_close(trigger.next_min_date, now)
        self.assert_datetime_close(trigger.next_max_date, now + max_interval)
        self.assert_datetime_close(trigger.get_next_fire_time(now),
                                   now + max_interval)

        # First call should be successful, but second should not.
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        self.assert_datetime_close(trigger.next_min_date, now + min_interval)
        self.assert_datetime_close(trigger.next_max_date, now + max_interval)
        self.assert_datetime_close(trigger.get_next_fire_time(now),
                                   now + max_interval)

        # Wait for the minimum interval to elapse; the trigger should fire again.
        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        ###
        # Now do the same sequence, but with no max_interval
        ###
        now = datetime.datetime.now()
        log.debug("Now: %s" % now)
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = None

        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        # Initialized correctly?
        self.assert_datetime_close(trigger.next_min_date, now)
        self.assertIsNone(trigger.next_max_date)
        self.assertIsNone(trigger.get_next_fire_time(now))

        # First call should be successful, but second should not.
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())

        self.assert_datetime_close(trigger.next_min_date, now + min_interval)
        self.assertIsNone(trigger.next_max_date)
        self.assertIsNone(trigger.get_next_fire_time(now))

        # Wait for the minimum interval to elapse; the trigger should fire again.
        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(trigger.pull_trigger())
        self.assertFalse(trigger.pull_trigger())
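
    # A simplified reference model of the gating rule the asserts above encode
    # (an assumption for illustration, not the production PolledIntervalTrigger):
    # pull_trigger() succeeds only once next_min_date has been reached, and
    # automatic firing via get_next_fire_time() only applies when a
    # max_interval was supplied.
    class _TriggerModel(object):
        def __init__(self, min_interval, max_interval=None, start_date=None):
            start = start_date or datetime.datetime.now()
            self.min_interval = min_interval
            self.max_interval = max_interval
            self.next_min_date = start
            self.next_max_date = (start + max_interval) if max_interval else None

        def pull_trigger(self):
            """Fire if min_interval has elapsed; push both gates forward."""
            now = datetime.datetime.now()
            if now < self.next_min_date:
                return False
            self.next_min_date = now + self.min_interval
            if self.max_interval is not None:
                self.next_max_date = now + self.max_interval
            return True

        def get_next_fire_time(self, start_date):
            """Automatic firing only happens when a max_interval exists."""
            return self.next_max_date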

    def test_trigger_string(self):
        """
        test the str and repr methods
        """
        now = datetime.datetime.now()
        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1),
                                        PolledScheduler.interval(seconds=3),
                                        now)

        self.assertEqual(str(trigger),
                         "min_interval[0:00:01] max_interval[0:00:03]")
        self.assertEqual(
            repr(trigger),
            "<PolledIntervalTrigger (min_interval=datetime.timedelta(0, 1), max_interval=datetime.timedelta(0, 3))>"
        )

        trigger = PolledIntervalTrigger(PolledScheduler.interval(seconds=1),
                                        None, now)
        self.assertEqual(str(trigger),
                         "min_interval[0:00:01] max_interval[None]")
        self.assertEqual(
            repr(trigger),
            "<PolledIntervalTrigger (min_interval=datetime.timedelta(0, 1), max_interval=None)>"
        )

####################################################################################################
#  Test our new polled job
####################################################################################################

    def test_polled_job(self):
        """
        Test features of the specialized job class that we overloaded.
        """
        now = datetime.datetime.now()
        min_interval = PolledScheduler.interval(seconds=1)
        max_interval = PolledScheduler.interval(seconds=3)
        trigger = PolledIntervalTrigger(min_interval, max_interval, now)

        # Positional arguments after the callback are args, kwargs and two
        # scheduler bookkeeping values (most likely the misfire grace time and
        # coalesce flag from the underlying Job signature).
        job = PolledIntervalJob(trigger, self._callback, [], {}, 1, 1,
                                name='test_job')
        self.assertIsNotNone(job)
        log.debug("H: %s" % repr(job))
        next_time = job.compute_next_run_time(now)
        self.assert_datetime_close(next_time, now + max_interval)
        self.assertEqual(job.name, 'test_job')

        self.assertTrue(job.ready_to_run())
        next_time = job.compute_next_run_time(now)
        self.assertFalse(job.ready_to_run())
        self.assert_datetime_close(next_time, now + max_interval)

        time.sleep(2)
        now = datetime.datetime.now()
        self.assertTrue(job.ready_to_run())

        next_time = job.compute_next_run_time(now)
        self.assertFalse(job.ready_to_run())
        self.assert_datetime_close(next_time, now + max_interval)
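
    # A sketch (hypothetical, based only on the calls exercised above) of how a
    # scheduler loop could drive a PolledIntervalJob: check ready_to_run()
    # before dispatching, then recompute the next automatic run time.  Actual
    # callback dispatch is omitted here.
    def _example_job_dispatch(self, job):
        """Hypothetical helper: advance a job's schedule if its gate is open."""
        now = datetime.datetime.now()
        if job.ready_to_run():
            next_time = job.compute_next_run_time(now)
            log.debug("Job %s gate open; next automatic run at %s"
                      % (job.name, next_time))
            return next_time
        return None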