Exemple #1
0
class IntegrationTestBase(object):
    """Shared fixture for job-store integration tests.

    Subclasses supply make_jobstore(); setup() attaches that store to a
    freshly started Scheduler under the alias 'persistent'.
    """

    def setup(self):
        self.jobstore = self.make_jobstore()
        self.scheduler = Scheduler()
        self.scheduler.add_jobstore(self.jobstore, 'persistent')
        self.scheduler.start()

    def test_overlapping_runs(self):
        # "increment" must fire only once: the first run is still in
        # progress when the next scheduled time arrives.
        counter = [0]
        self.scheduler.add_interval_job(
            increment, jobstore='persistent', seconds=1, args=[counter, 2])
        sleep(2.5)
        eq_(counter, [1])

    def test_max_instances(self):
        # With max_instances=2 and a 0.3 s interval some runs get missed;
        # the listener records executed and missed events alike.
        counter = [0]
        captured = []
        self.scheduler.add_listener(
            captured.append, EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        self.scheduler.add_interval_job(
            increment, jobstore='persistent', seconds=0.3,
            max_instances=2, max_runs=4, args=[counter, 1])
        sleep(2.4)
        eq_(counter, [2])
        eq_(len(captured), 4)
        eq_(captured[0].code, EVENT_JOB_MISSED)
        eq_(captured[1].code, EVENT_JOB_MISSED)
        eq_(captured[2].code, EVENT_JOB_EXECUTED)
        eq_(captured[3].code, EVENT_JOB_EXECUTED)
class IntegrationTestBase(object):
    # NOTE(review): this re-defines IntegrationTestBase from earlier in this
    # file (same behaviour, different formatting); in a single module the
    # later definition shadows the earlier one.
    """Base class for integration tests against a persistent job store.

    Subclasses provide make_jobstore(); setup() attaches that store to a
    freshly started Scheduler under the alias 'persistent'.
    """

    def setup(self):
        self.jobstore = self.make_jobstore()
        self.scheduler = Scheduler()
        self.scheduler.add_jobstore(self.jobstore, 'persistent')
        self.scheduler.start()

    def test_overlapping_runs(self):
        # Makes sure that "increment" is only run once, since it will still be
        # running when the next appointed time hits.

        vals = [0]
        self.scheduler.add_interval_job(increment, jobstore='persistent', seconds=1, args=[vals, 2])
        sleep(2.5)
        eq_(vals, [1])

    def test_max_instances(self):
        # With max_instances=2 and a 0.3 s interval, the first runs are
        # expected to be reported as missed before executions catch up.
        vals = [0]
        events = []
        self.scheduler.add_listener(events.append, EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        self.scheduler.add_interval_job(increment, jobstore='persistent', seconds=0.3, max_instances=2, max_runs=4,
                                        args=[vals, 1])
        sleep(2.4)
        eq_(vals, [2])
        eq_(len(events), 4)
        eq_(events[0].code, EVENT_JOB_MISSED)
        eq_(events[1].code, EVENT_JOB_MISSED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(events[3].code, EVENT_JOB_EXECUTED)
Exemple #3
0
def main():
    """Set up BLI and spot monitors on a shared non-daemonic scheduler."""

    log.info("BLI Monitor starting...")

    #check_pid()

    monitor_bli = BLIMonitor()
    monitor_spot = SpotMonitor()

    sched = Scheduler(daemonic=False)

    #sched.add_listener(err_listener, events.EVENT_ALL)

    # Poll both monitors every three seconds.
    sched.add_interval_job(lambda: monitor_bli.check(), seconds=3)
    sched.add_interval_job(lambda: monitor_spot.check(), seconds=3)

    sched.add_listener(
        err_listener,
        events.EVENT_JOB_ERROR | events.EVENT_JOB_EXECUTED
        | events.EVENT_JOB_MISSED)

    sched.start()

    log.info("started")

    # Superseded polling loop, retained for reference:
    #     while 1:
    #         time.sleep(2)
    #         monitor.check()
Exemple #4
0
def main():
    """Entry point: start interval monitoring of the BLI and spot markets."""
    log.info("BLI Monitor starting...")

    #check_pid()

    bli_monitor = BLIMonitor()
    spot_monitor = SpotMonitor()

    # Non-daemonic scheduler keeps the process alive after main() returns.
    scheduler = Scheduler(daemonic=False)

    #sched.add_listener(err_listener, events.EVENT_ALL)

    scheduler.add_interval_job(lambda: bli_monitor.check(), seconds=3)
    scheduler.add_interval_job(lambda: spot_monitor.check(), seconds=3)

    mask = (events.EVENT_JOB_ERROR | events.EVENT_JOB_EXECUTED
            | events.EVENT_JOB_MISSED)
    scheduler.add_listener(err_listener, mask)

    scheduler.start()

    log.info("started")

    # Dead code kept from an earlier busy-wait implementation:
    #     while 1:
    #         time.sleep(2)
    #         monitor.check()
    pass
Exemple #5
0
 def start_scheduler(self):
     """Create the module-global scheduler, start it and attach
     self.my_listener to job-executed / job-error events."""
     global schedulerObj
     schedulerObj = Scheduler()
     schedulerObj.start()
     schedulerObj.add_listener(self.my_listener,
                               EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
     # Python 2 print statement; runtime output, left unchanged.
     print 'Scheduler is started.'
def init_sched():
    """Create the global single-threaded scheduler, watch for missed jobs
    and schedule update_jobs to run every minute (after one immediate run)."""
    global g_sched
    global g_old_appdata
    g_old_appdata = []

    pool_config = {
        'apscheduler.threadpool.core_threads': 1,
        'apscheduler.threadpool.max_threads': 1,
    }
    g_sched = Scheduler(gconfig=pool_config)
    g_sched.add_listener(listener_missed_job, apscheduler.events.EVENT_JOB_MISSED)

    g_sched.start()

    # Run once right away, then every minute via the cron trigger.
    update_jobs()
    g_sched.add_cron_job(
        update_jobs, name='update_jobs',
        minute='*', hour='*', day='*', month='*', day_of_week='*', year='*')
Exemple #7
0
def createTask( listTask ):
    """Create and start one file-extraction scheduler task per entry.

    Each entry of ``listTask`` is a dict describing a collection job.
    Depending on its settings the job is scheduled on a dedicated Scheduler
    as a one-shot date job (onetime == 'Y'), a repeating interval job
    (schedtime given) or a cron job (schedcron given).  The scheduler is
    stored in the global ``dicTask``; the one-time flag and end time go
    into the global ``dicJob``.
    """
    for dic in listTask:
        pfID = str(dic["pfid"])
        groupID = str(dic["groupid"])
        configPath = dic["configpath"]
        logSource = dic["logsource"]
        Protocol = dic["protocol"]
        Port = dic["port"]
        userName = dic["username"]
        userPass = dic["userpass"]
        fPath = dic["fpath"]
        Files = dic["files"]
        oneTime = dic["onetime"]
        schedStart = dic["schedstart"]
        schedEnd = dic["schedend"]
        schedTime = dic["schedtime"]
        schedCron = dic["schedcron"]

        argus = [int(pfID), int(groupID), configPath, logSource, Protocol,
                 Port, userName, userPass, fPath, Files]

        sched = Scheduler()
        if oneTime.upper() == 'Y':  # run exactly once
            if schedStart is None:
                # No explicit start time: fire two seconds from now.
                schedStart = datetime.datetime.now() + datetime.timedelta(seconds=2)
            sched.add_date_job(taskFunc, schedStart, name='Job' + pfID, args=argus)
        elif schedTime is not None:
            (sWeeks, sDays, sHours, sMinutes, sSeconds) = scheduletime.fmtSchedTime(schedTime)
            if schedStart is None:
                # With start_date=None an interval job first runs only after a
                # full interval; back-date the start so the first run happens
                # about two seconds from now.
                schedStart = (datetime.datetime.now() + datetime.timedelta(seconds=2)
                              - datetime.timedelta(seconds=sSeconds, minutes=sMinutes,
                                                   hours=sHours, days=sDays, weeks=sWeeks))
            sched.add_interval_job(taskFunc, weeks=sWeeks, days=sDays, hours=sHours,
                                   minutes=sMinutes, seconds=sSeconds,
                                   start_date=schedStart, name='Job' + pfID, args=argus)
        elif schedCron is not None:
            (cSecond, cMinute, cHour, cDay, cDayofWeek, cWeek, cMonth, cYear) = \
                scheduletime.frmSchedCron(schedCron)
            sched.add_cron_job(taskFunc, year=cYear, month=cMonth, week=cWeek,
                               day_of_week=cDayofWeek, day=cDay, hour=cHour,
                               minute=cMinute, second=cSecond, start_date=schedStart,
                               name='Job' + pfID, args=argus)
        sched.add_listener(taskListener, events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)

        # Remember the one-time flag and the task's end time.
        dicJob['T' + pfID] = oneTime.upper()
        dicJob['Job' + pfID] = schedEnd

        dicTask['Task' + pfID] = sched
        dicTask['Task' + pfID].start()
def createTask(listTask):
    """Create and start one scheduled search task per entry in ``listTask``.

    Each entry is a dict describing a saved search with optional alerting.
    The task is scheduled on its own Scheduler either as an interval job
    (schedtime given) or a cron job (schedcron given).  The scheduler goes
    into the global ``dicTask`` and the task's end time into ``dicJob``.
    """
    for dic in listTask:
        taskID = str(dic["schedid"])
        searchCond = dic["searchcond"].replace(DELIMITER, " ")
        searchStart = dic["searchstart"]
        searchEnd = dic["searchend"]
        schedStart = dic["schedstart"]
        schedEnd = dic["schedend"]
        schedTime = dic["schedtime"]
        schedCron = dic["schedcron"]
        warnOrNot = dic["warnornot"]
        warnCondOp = dic["warncondop"]
        warnCondVal = dic["warncondval"]
        warnLevel = dic["warnlevel"]
        saveResult = dic["saveresult"]

        argus = [
            int(taskID), searchCond, searchStart, searchEnd, warnOrNot,
            warnCondOp, warnCondVal, warnLevel, saveResult
        ]

        sched = Scheduler()
        if schedTime is not None:
            (sWeeks, sDays, sHours, sMinutes,
             sSeconds) = scheduletime.fmtSchedTime(schedTime)
            if schedStart is None:
                # With start_date=None an interval job first runs only after a
                # full interval; back-date the start so the first run happens
                # about two seconds from now.
                schedStart = (datetime.datetime.now() + datetime.timedelta(seconds=2)
                              - datetime.timedelta(seconds=sSeconds, minutes=sMinutes,
                                                   hours=sHours, days=sDays, weeks=sWeeks))
            sched.add_interval_job(taskFunc, weeks=sWeeks, days=sDays, hours=sHours,
                                   minutes=sMinutes, seconds=sSeconds,
                                   start_date=schedStart, name='Job' + taskID, args=argus)
        elif schedCron is not None:
            (cSecond, cMinute, cHour, cDay, cDayofWeek, cWeek, cMonth,
             cYear) = scheduletime.frmSchedCron(schedCron)
            sched.add_cron_job(taskFunc, year=cYear, month=cMonth, week=cWeek,
                               day_of_week=cDayofWeek, day=cDay, hour=cHour,
                               minute=cMinute, second=cSecond, start_date=schedStart,
                               name='Job' + taskID, args=argus)
        sched.add_listener(taskListener,
                           events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)

        # Remember the task's end time.
        dicJob['Job' + taskID] = schedEnd

        dicTask['Task' + taskID] = sched
        dicTask['Task' + taskID].start()
Exemple #9
0
class SchedulerService(object):
    """Runs a task on an interval that is recomputed after every run.

    After each execution (or error) the previous job is unscheduled and a
    new interval job is created with whatever interval the task reports.
    """

    def __init__(self, config, task):
        self.config = config
        self.task = task
        self.task_thread = Scheduler()
        self.job = None

    def run(self):
        """Validate the task, hook the rescheduling listener and start."""
        self.task.validate()
        for event_code in (EVENT_JOB_EXECUTED, EVENT_JOB_ERROR):
            self.task_thread.add_listener(self.reconfigure_interval, event_code)
        self.task_thread.start()
        # Kick off the first scheduling pass (no prior job to remove).
        self.reconfigure_interval(None)

    def reconfigure_interval(self, event):
        """Replace the current job with one using the task's new interval."""
        if event:
            self.task_thread.unschedule_job(event.job)
        new_interval = self.task.get_new_interval()
        log.debug("=== interval for job:'" + str(self.task) + "' set to :'" +
                  str(new_interval) + "'===")
        self.job = self.task_thread.add_interval_job(self.task.do,
                                                     seconds=new_interval)
Exemple #10
0
def init_sched():
    """Build the global scheduler with a single-thread pool, register the
    missed-job listener and run update_jobs once now and then every minute."""
    global g_sched
    global g_old_appdata
    g_old_appdata = []

    threadpool_opts = {'apscheduler.threadpool.core_threads': 1,
                       'apscheduler.threadpool.max_threads': 1}
    g_sched = Scheduler(gconfig=threadpool_opts)
    g_sched.add_listener(listener_missed_job,
                         apscheduler.events.EVENT_JOB_MISSED)

    g_sched.start()

    # One immediate run, then a per-minute cron schedule.
    update_jobs()
    g_sched.add_cron_job(update_jobs, name='update_jobs', minute='*', hour='*',
                         day='*', month='*', day_of_week='*', year='*')
Exemple #11
0
class schedulerDaemon(object):
    """Daemon object wrapping an APScheduler Scheduler, exposed over Pyro.

    Holds the running scheduler plus the list of current emulation objects
    and provides job listing / deletion.  NOTE: this is Python 2 code
    (print statements, ``except Exception, e`` syntax).
    """

    def __init__(self):

        #starting scheduler
        self.sched = Scheduler()
        self.sched.start()
        self.sched.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)
        self.recoverySchedulerDaemon()
        self.newEmulationList =[]
        #Logger.init()

    def listJobs(self):
        """Return the scheduled jobs as a list of strings (str(job))."""
        schedFileLogger.debug("-> listJobs(self)")

        schJobsFormat=self.sched.get_jobs()

        #!have to convert list of jobs from scheduler into list of strings to send over the Pyro 4.20 which has new "serpent serializer"
        strJobsList=[]

        if schJobsFormat:

            for job in self.sched.get_jobs():
                strJobsList.append(str(job))

            schedFileLogger.debug("sending list of jobs")
            # Example payload: a list of strings like
            # "<Job (name=1-1-MEM_EMU-logger interval-3sec., trigger=<SimpleTrigger (run_date=datetime.datetime(2014, 10, 10, 10, 10, 10))>)>"

            return strJobsList
        else:
            schedFileLogger.debug("No jobs to send")
            return []



    def stopSchedulerDaemon(self):
        """Log and terminate the daemon process."""
        schedFileLogger.debug("-> stopSchedulerDaemon(self)")
        schedFileLogger.info("stopping Daemon")
        # NOTE(review): exits with status 1 even though this is a normal stop,
        # and the following sys.exit(0) is unreachable.  A single sys.exit(0)
        # was probably intended — confirm before changing.
        sys.exit(1)
        sys.exit(0)

    def hello(self):
        """Liveness check ("ping"); always answers "Pong!"."""
        schedFileLogger.debug("-> hello(self)")
        greeting = "Pong!"
        schedFileLogger.debug(greeting)
        return greeting

    def deleteJobs(self,emulationID,distribitionName):
        """Unschedule jobs matching the distribution name, or every job (and
        reset the emulation list) when emulationID == "all"."""
        schedFileLogger.debug("-> deleteJobs(self,emulationID,distribitionName)")
        #stringify
        emulationID =str(emulationID)
        distribitionName=str(distribitionName)

        schedFileLogger.debug("Looking for job name:"+emulationID+"-"+distribitionName)

        if emulationID=="all":
            schedFileLogger.info("Jobs deleted:")
            #setting emulation objects list to initial state
            self.newEmulationList =[]
            for job in self.sched.get_jobs():
                self.sched.unschedule_job(job)
                msg = {"Action":"Job Deleted","jobName":job.name}
                schedFileLogger.info(msg)
                #schedFileLogger.info(str(job.name))

        else:
            for job in self.sched.get_jobs():
                if distribitionName in job.name :
                    self.sched.unschedule_job(job)

                    msg = {"Action":"Job Deleted","jobName":job.name}
                    schedFileLogger.info(msg)

                    #schedFileLogger.info( "Job: "+job.name+" Deleted")


                else:
                    schedFileLogger.info( "These jobs remain: "+job.name)

    #Adding current emulation object for further availability
    def setEmuObject(self,newEmulation):
        """Store newEmulation, replacing any entry with the same emulationID.

        Returns True on success, False if anything raised (the exception is
        printed, not re-raised).
        """
        try:
            #prevent duplicate entries of emulations to be created
#            print "\nlen emuObject: ", str(len(self.newEmulationList))
            n=0
            if len(self.newEmulationList) >= 1:
                # NOTE(review): pops from the list while iterating it and keeps
                # incrementing n afterwards; this is only safe if at most one
                # entry can match (which the duplicate-prevention implies) —
                # confirm before relying on multi-match behaviour.
                for emus in self.newEmulationList:
                    #print "emu compare:",emus.emulationID,newEmulation.emulationID
                    if emus.emulationID==newEmulation.emulationID:
                        #print "FOUND IT"
                        self.newEmulationList.pop(n)
                    n+=1
                self.newEmulationList.append(newEmulation)
            else:
                self.newEmulationList.append(newEmulation)

            return True
        except Exception,e:
            print e
            return False
class TestOfflineScheduler(object):
    """Behaviour of a Scheduler before start() / after shutdown()."""

    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        # Registering two job stores under one alias must fail.
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")

    def test_add_tentative_job(self):
        # While offline, adding a job yields a Job object but schedules
        # nothing yet.
        tentative = self.scheduler.add_date_job(lambda: None,
                                                datetime(2200, 7, 24),
                                                jobstore="dummy")
        assert isinstance(tentative, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_add_job_by_reference(self):
        # A "module:attribute" string resolves to the actual callable.
        ref_job = self.scheduler.add_date_job("copy:copy", datetime(2200, 7, 24))
        eq_(ref_job.func, copy)
        eq_(ref_job.func_ref, "copy:copy")

    def test_configure_jobstore(self):
        options = {"apscheduler.jobstore.ramstore.class":
                   "apscheduler.jobstores.ram_store:RAMJobStore"}
        self.scheduler.configure(options)
        self.scheduler.remove_jobstore("ramstore")

    def test_shutdown_offline(self):
        # Shutting down a never-started scheduler must not raise.
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        # Options lacking the "apscheduler." prefix are ignored, so the
        # defaults (grace time 1, daemonic True) remain in effect.
        options = {"misfire_grace_time": "2", "daemonic": "false"}
        self.scheduler.configure(options)
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        options = {"apscheduler.misfire_grace_time": 2,
                   "apscheduler.daemonic": False}
        self.scheduler.configure(options)
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        received = []
        self.scheduler.add_listener(received.append)

        start_event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(start_event)
        eq_(len(received), 1)
        eq_(received[0], start_event)

        stop_event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(stop_event)
        eq_(len(received), 2)
        eq_(received[1], stop_event)

        # Once removed, the listener no longer fires.
        self.scheduler.remove_listener(received.append)
        self.scheduler._notify_listeners(stop_event)
        eq_(len(received), 2)

    def test_pending_jobs(self):
        # Pending jobs enter the job list only when the scheduler starts
        # (and not before!).
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])

        self.scheduler.start()
        eq_(len(self.scheduler.get_jobs()), 1)
class TestJobExecution(object):
    """Tests that drive Scheduler._process_jobs() directly, without threads.

    The scheduler is built with FakeThreadPool and a fake thread so job
    execution is deterministic inside the test process; FakeDateTime lets
    individual tests control the clock.  (The fakes and `original_now` are
    defined elsewhere in this file — confirm their exact semantics there.)
    """

    def setup(self):
        # FakeThreadPool presumably executes jobs inline — see its definition.
        self.scheduler = Scheduler(threadpool=FakeThreadPool())
        self.scheduler.add_jobstore(RAMJobStore(), "default")

        # Make the scheduler think it's running
        self.scheduler._thread = FakeThread()

        # Capture ERROR-level scheduler log output for assertions.
        self.logstream = StringIO()
        self.loghandler = StreamHandler(self.logstream)
        self.loghandler.setLevel(ERROR)
        scheduler.logger.addHandler(self.loghandler)

    def teardown(self):
        scheduler.logger.removeHandler(self.loghandler)
        # Undo FakeDateTime monkeypatching if a test applied it.
        if scheduler.datetime == FakeDateTime:
            scheduler.datetime = datetime
        FakeDateTime._now = original_now

    def test_job_name(self):
        def my_job():
            pass

        job = self.scheduler.add_interval_job(my_job, start_date=datetime(2010, 5, 19))
        eq_(
            repr(job),
            "<Job (name=my_job, trigger=<IntervalTrigger (interval=datetime.timedelta(0, 1), "
            "start_date=datetime.datetime(2010, 5, 19, 0, 0))>)>",
        )

    def test_schedule_object(self):
        # Tests that any callable object is accepted (and not just functions)
        class A:
            def __init__(self):
                self.val = 0

            def __call__(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_schedule_method(self):
        # Tests that bound methods can be scheduled (at least with RAMJobStore)
        class A:
            def __init__(self):
                self.val = 0

            def method(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a.method, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_unschedule_job(self):
        # After unschedule_job the job must not run again.
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_cron_job(increment)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)
        self.scheduler.unschedule_job(job)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)

    def test_unschedule_func(self):
        # unschedule_func removes every job targeting that function (job1 and
        # job3 here) and leaves the rest alone.
        def increment():
            vals[0] += 1

        def increment2():
            vals[0] += 1

        vals = [0]
        job1 = self.scheduler.add_cron_job(increment)
        job2 = self.scheduler.add_cron_job(increment2)
        job3 = self.scheduler.add_cron_job(increment)
        eq_(self.scheduler.get_jobs(), [job1, job2, job3])

        self.scheduler.unschedule_func(increment)
        eq_(self.scheduler.get_jobs(), [job2])

    @raises(KeyError)
    def test_unschedule_func_notfound(self):
        self.scheduler.unschedule_func(copy)

    def test_job_finished(self):
        # A job that reaches max_runs is removed from the job list.
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_interval_job(increment, max_runs=1)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [1])
        assert job not in self.scheduler.get_jobs()

    def test_job_exception(self):
        # An exception raised by the job ends up in the ERROR log.
        def failure():
            raise DummyException

        job = self.scheduler.add_date_job(failure, datetime(9999, 9, 9))
        self.scheduler._process_jobs(job.next_run_time)
        assert "DummyException" in self.logstream.getvalue()

    def test_misfire_grace_time(self):
        # The scheduler-wide default applies unless the job overrides it.
        self.scheduler.misfire_grace_time = 3
        job = self.scheduler.add_interval_job(lambda: None, seconds=1)
        eq_(job.misfire_grace_time, 3)

        job = self.scheduler.add_interval_job(lambda: None, seconds=1, misfire_grace_time=2)
        eq_(job.misfire_grace_time, 2)

    def test_coalesce_on(self):
        # Makes sure that the job is only executed once when it is scheduled
        # to be executed twice in a row
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append, EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(
            increment, seconds=1, start_date=FakeDateTime.now(), coalesce=True, misfire_grace_time=2
        )

        # Turn the clock 14 seconds forward
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 1)
        eq_(len(events), 1)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(vals, [1])

    def test_coalesce_off(self):
        # Makes sure that every scheduled run for the job is executed even
        # when they are in the past (but still within misfire_grace_time)
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append, EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(
            increment, seconds=1, start_date=FakeDateTime.now(), coalesce=False, misfire_grace_time=2
        )

        # Turn the clock 2 seconds forward
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 3)
        eq_(len(events), 3)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(events[1].code, EVENT_JOB_EXECUTED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(vals, [3])

    def test_interval(self):
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_interval_job(increment, seconds=1, args=[2])
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [4, 2])

    def test_interval_schedule(self):
        # The decorator form attaches the created job as `increment.job`.
        @self.scheduler.interval_schedule(seconds=1)
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [2])

    def test_cron(self):
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_cron_job(increment, args=[3])
        start = job.next_run_time
        self.scheduler._process_jobs(start)
        eq_(vals, [3, 1])
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [6, 2])
        self.scheduler._process_jobs(start + timedelta(seconds=2))
        eq_(vals, [9, 3])

    def test_cron_schedule_1(self):
        @self.scheduler.cron_schedule()
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals[0], 2)

    def test_cron_schedule_2(self):
        @self.scheduler.cron_schedule(minute="*")
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        next_run = start + timedelta(seconds=60)
        eq_(increment.job.get_run_times(next_run), [start, next_run])
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(next_run)
        eq_(vals[0], 2)

    def test_date(self):
        # kwargs given at scheduling time are forwarded to the job function.
        def append_val(value):
            vals.append(value)

        vals = []
        date = datetime.now() + timedelta(seconds=1)
        self.scheduler.add_date_job(append_val, date, kwargs={"value": "test"})
        self.scheduler._process_jobs(date)
        eq_(vals, ["test"])

    def test_print_jobs(self):
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = "Jobstore default:%s" "    No scheduled jobs%s" % (os.linesep, os.linesep)
        eq_(out.getvalue(), expected)

        self.scheduler.add_date_job(copy, datetime(2200, 5, 19))
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = (
            "Jobstore default:%s    "
            "copy (trigger: date[2200-05-19 00:00:00], "
            "next run at: 2200-05-19 00:00:00)%s" % (os.linesep, os.linesep)
        )
        eq_(out.getvalue(), expected)

    def test_jobstore(self):
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")
        job = self.scheduler.add_date_job(lambda: None, datetime(2200, 7, 24), jobstore="dummy")
        eq_(self.scheduler.get_jobs(), [job])
        self.scheduler.remove_jobstore("dummy")
        eq_(self.scheduler.get_jobs(), [])

    @raises(KeyError)
    def test_remove_nonexistent_jobstore(self):
        self.scheduler.remove_jobstore("dummy2")

    def test_job_next_run_time(self):
        # Tests against bug #5
        def increment():
            vars[0] += 1

        vars = [0]
        scheduler.datetime = FakeDateTime
        job = self.scheduler.add_interval_job(increment, seconds=1, misfire_grace_time=3, start_date=FakeDateTime.now())
        start = job.next_run_time

        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        # Re-processing the same instant must not run the job again.
        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vars, [2])
class MonitorScheduler:
    """Polls for monitor events every 3 seconds via a cron job and starts /
    stops MonitorTaskProcess instances accordingly.  Python 2 code (print
    statements)."""

    #logger = LoggerUtil.getLogger('MonitorScheduler')

    def __init__(self,events):
        # daemonic=False keeps the process alive while the scheduler runs.
        self.scheduler = Scheduler(daemonic = False)
        self.scheduler.add_cron_job(self.monitorListener, second='*/3')
        self.events = events
        self.flag = True
        self.runflag = True
        if self.events !=None:
            self.flag = True
        self.monitor_tasks = {}

    def monitorListener(self):
         # First tick: persist the initial events and start their tasks.
         # Later ticks: fetch fresh events, drop ones already configured,
         # persist the full set and start the remainder.
         if self.flag == True:
             print "first monitorListener:"
             self.flag = False
             ConfigUtil().setEvents(self.events)
             self.monitor_tasks_start(self.events)
         else:
             events = CDSUtil.getEvents(CDSUtil)
             eventInfos = self.event_filter(events)
             if eventInfos == None or len(eventInfos) == 0:
                 return
             ConfigUtil().setEvents(events)
             self.monitor_tasks_start(eventInfos)

    def start(self):
        print('MonitorScheduler start ...')
        self.scheduler.start()

    def stop(self):
         # NOTE(review): `_stopped` is a private member of Scheduler; calling
         # it is almost certainly wrong — Scheduler.shutdown() looks intended.
         # Confirm against the APScheduler version in use.
         self.scheduler._stopped()

    # Filtering duplicate events
    def event_filter(self,events):
        # Returns only the events not already present in the config, or None
        # when there is nothing to filter.
        eventInfos = []
        if events == None or len(events) == 0:
            return None
        for event in events:
             if ConfigUtil().is_config_has_event(event) == False:
                 eventInfos.append(event)
        return eventInfos

    def monitor_task_start(self,monitorTask):
        monitorTask.start()

    def monitor_tasks_start(self,events):
         # NOTE(review): `events == None and len(events) == 0` still evaluates
         # len(events) when events is None and raises TypeError; `or` was
         # surely intended (compare event_filter above).
         if events == None and len(events) == 0:
             return
         for event in events:
             monitorTask = self.monitor_tasks.get(str(event.monitorId)+'_'+str(event.dbConfig.db_info_id))
             print event.monitorId
             if event.eventType == None:
                 break
             if event.eventType == 'MONITOR_START':
                 # Restart semantics: stop any existing task for this key,
                 # then start a fresh MonitorTaskProcess.
                 if monitorTask:
                     monitorTask.stop()
                 print 'save start ...'
                 monitorTask = MonitorTaskProcess(event)
                 self.monitor_tasks[str(event.monitorId)+'_'+str(event.dbConfig.db_info_id)] = monitorTask
                 print(self.monitor_tasks)

                 try:
                    monitorTask.start()
                    print 'save end ...'
                 except Exception as e:
                     print(e)
             else:
                 # Any other event type stops and removes the running task.
                 if monitorTask:
                     monitorTask.stop()
                     monitorTask.terminate()
                     self.monitor_tasks[str(event.monitorId)+'_'+str(event.dbConfig.db_info_id)] = None


    def err_listener(self,cls,event):
        # NOTE(review): shadowed by the second err_listener below, so this
        # handler is unreachable by name; its (cls, event) signature also does
        # not match a scheduler listener callback, and cls.logger is not
        # defined anywhere visible in this file.
        if event.exception:
            cls.logger.exception('%s error.', str(event.job))
        else:
            cls.logger.info('%s miss', str(event.job))

    def err_listener(self):
        # NOTE(review): registers *itself* (a zero-argument method) as the
        # listener, and `self.apscheduler` is never assigned — this raises
        # AttributeError when called.  The module-level `apscheduler` and the
        # first handler above were probably intended; confirm before fixing.
        self.scheduler.add_listener(self.err_listener, self.apscheduler.events.EVENT_JOB_ERROR | self.apscheduler.events.EVENT_JOB_MISSED)
Exemple #15
0
    sys.exit(0)

sched = Scheduler()
# Shut down cleanly on Ctrl-C (SIGINT) or a TERM signal; shutdown_handler is
# defined earlier in this file.
signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)

def event_handler(event):
    """Log the outcome of a finished scheduler job (success or failure)."""
    if not event.exception:
        logger.info("SUCCESS: %s" % event.job)
        return
    logger.error("SCHEDULER FAIL:\n%s\n%s\n%s" % (
        event.job, event.exception, traceback.format_exc()))

# Report every executed, failed or missed job through event_handler.
sched.add_listener(event_handler,
    events.EVENT_JOB_EXECUTED |
    events.EVENT_JOB_ERROR |
    events.EVENT_JOB_MISSED)


# ----------------------------------------------------------------------------
# Tasks

#sched.add_cron_job(send_digest, hour=11)

# ----------------------------------------------------------------------------

if __name__ == '__main__':
    sched.start()
    sched.print_jobs()
    # The scheduler runs in its own thread; keep the main thread alive.
    while True:
        time.sleep(10)
Exemple #16
0
class TestJobExecution(object):
    """Tests for job scheduling/execution on a Scheduler driven manually
    via _process_jobs, using test doubles (FakeThreadPool, FakeThread,
    FakeDateTime, DummyException) and nose-style helpers (eq_, raises)
    defined elsewhere in this module.
    """
    def setup(self):
        """Build a scheduler with fake pool/thread and capture its ERROR log."""
        self.scheduler = Scheduler(threadpool=FakeThreadPool())
        self.scheduler.add_jobstore(RAMJobStore(), 'default')

        # Make the scheduler think it's running
        self.scheduler._thread = FakeThread()

        # Capture ERROR output from the apscheduler.scheduler module logger.
        self.logstream = StringIO()
        self.loghandler = StreamHandler(self.logstream)
        self.loghandler.setLevel(ERROR)
        scheduler.logger.addHandler(self.loghandler)

    def teardown(self):
        """Remove the log handler and undo any datetime monkeypatching."""
        scheduler.logger.removeHandler(self.loghandler)
        if scheduler.datetime == FakeDateTime:
            scheduler.datetime = datetime
        FakeDateTime._now = original_now

    @raises(TypeError)
    def test_noncallable(self):
        """Scheduling a non-callable target must raise TypeError."""
        date = datetime.now() + timedelta(days=1)
        self.scheduler.add_date_job('wontwork', date)

    def test_job_name(self):
        """The job repr reflects the target function's name and its trigger."""
        def my_job():
            pass

        job = self.scheduler.add_interval_job(my_job,
                                              start_date=datetime(2010, 5, 19))
        eq_(
            repr(job), '<Job (name=my_job, '
            'trigger=<IntervalTrigger (interval=datetime.timedelta(0, 1), '
            'start_date=datetime.datetime(2010, 5, 19, 0, 0))>)>')

    def test_schedule_object(self):
        # Tests that any callable object is accepted (and not just functions)
        class A:
            def __init__(self):
                self.val = 0

            def __call__(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_schedule_method(self):
        # Tests that bound methods can be scheduled (at least with RAMJobStore)
        class A:
            def __init__(self):
                self.val = 0

            def method(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a.method, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_unschedule_job(self):
        """unschedule_job stops further runs of that specific job."""
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_cron_job(increment)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)
        self.scheduler.unschedule_job(job)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)

    def test_unschedule_func(self):
        """unschedule_func removes every job scheduled for that callable."""
        def increment():
            vals[0] += 1

        def increment2():
            vals[0] += 1

        vals = [0]
        job1 = self.scheduler.add_cron_job(increment)
        job2 = self.scheduler.add_cron_job(increment2)
        job3 = self.scheduler.add_cron_job(increment)
        eq_(self.scheduler.get_jobs(), [job1, job2, job3])

        self.scheduler.unschedule_func(increment)
        eq_(self.scheduler.get_jobs(), [job2])

    @raises(KeyError)
    def test_unschedule_func_notfound(self):
        """Unscheduling a callable that has no jobs raises KeyError."""
        self.scheduler.unschedule_func(copy)

    def test_job_finished(self):
        """A job is removed once it reaches its max_runs count."""
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_interval_job(increment, max_runs=1)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [1])
        assert job not in self.scheduler.get_jobs()

    def test_job_exception(self):
        """An exception raised inside a job ends up in the scheduler's log."""
        def failure():
            raise DummyException

        job = self.scheduler.add_date_job(failure, datetime(9999, 9, 9))
        self.scheduler._process_jobs(job.next_run_time)
        assert 'DummyException' in self.logstream.getvalue()

    def test_misfire_grace_time(self):
        """Jobs inherit the scheduler-wide grace time unless they override it."""
        self.scheduler.misfire_grace_time = 3
        job = self.scheduler.add_interval_job(lambda: None, seconds=1)
        eq_(job.misfire_grace_time, 3)

        job = self.scheduler.add_interval_job(lambda: None,
                                              seconds=1,
                                              misfire_grace_time=2)
        eq_(job.misfire_grace_time, 2)

    def test_coalesce_on(self):
        # Makes sure that the job is only executed once when it is scheduled
        # to be executed twice in a row
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(increment,
                                              seconds=1,
                                              start_date=FakeDateTime.now(),
                                              coalesce=True,
                                              misfire_grace_time=2)

        # Turn the clock 2 seconds forward
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 1)
        eq_(len(events), 1)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(vals, [1])

    def test_coalesce_off(self):
        # Makes sure that every scheduled run for the job is executed even
        # when they are in the past (but still within misfire_grace_time)
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(increment,
                                              seconds=1,
                                              start_date=FakeDateTime.now(),
                                              coalesce=False,
                                              misfire_grace_time=2)

        # Turn the clock 2 seconds forward
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 3)
        eq_(len(events), 3)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(events[1].code, EVENT_JOB_EXECUTED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(vals, [3])

    def test_interval(self):
        """Interval jobs pass their positional args through on every run."""
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_interval_job(increment, seconds=1, args=[2])
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [4, 2])

    def test_interval_schedule(self):
        """The interval_schedule decorator attaches the job to the function."""
        @self.scheduler.interval_schedule(seconds=1)
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [2])

    def test_cron(self):
        """Default cron jobs fire each second and receive their args."""
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_cron_job(increment, args=[3])
        start = job.next_run_time
        self.scheduler._process_jobs(start)
        eq_(vals, [3, 1])
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [6, 2])
        self.scheduler._process_jobs(start + timedelta(seconds=2))
        eq_(vals, [9, 3])

    def test_cron_schedule_1(self):
        """cron_schedule with no fields fires on every second."""
        @self.scheduler.cron_schedule()
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals[0], 2)

    def test_cron_schedule_2(self):
        """cron_schedule(minute='*') fires once per minute."""
        @self.scheduler.cron_schedule(minute='*')
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        next_run = start + timedelta(seconds=60)
        eq_(increment.job.get_run_times(next_run), [start, next_run])
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(next_run)
        eq_(vals[0], 2)

    def test_date(self):
        """One-shot date jobs receive their kwargs."""
        def append_val(value):
            vals.append(value)

        vals = []
        date = datetime.now() + timedelta(seconds=1)
        self.scheduler.add_date_job(append_val, date, kwargs={'value': 'test'})
        self.scheduler._process_jobs(date)
        eq_(vals, ['test'])

    def test_print_jobs(self):
        """print_jobs lists each jobstore and its pending jobs."""
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = 'Jobstore default:%s'\
                   '    No scheduled jobs%s' % (os.linesep, os.linesep)
        eq_(out.getvalue(), expected)

        self.scheduler.add_date_job(copy, datetime(2200, 5, 19))
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = 'Jobstore default:%s    '\
            'copy (trigger: date[2200-05-19 00:00:00], '\
            'next run at: 2200-05-19 00:00:00)%s' % (os.linesep, os.linesep)
        eq_(out.getvalue(), expected)

    def test_jobstore(self):
        """Jobs disappear when their jobstore is removed."""
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
        job = self.scheduler.add_date_job(lambda: None,
                                          datetime(2200, 7, 24),
                                          jobstore='dummy')
        eq_(self.scheduler.get_jobs(), [job])
        self.scheduler.remove_jobstore('dummy')
        eq_(self.scheduler.get_jobs(), [])

    @raises(KeyError)
    def test_remove_nonexistent_jobstore(self):
        """Removing an unknown jobstore raises KeyError."""
        self.scheduler.remove_jobstore('dummy2')

    def test_job_next_run_time(self):
        # Tests against bug #5
        # NOTE: the local name 'vars' shadows the builtin; harmless here.
        def increment():
            vars[0] += 1

        vars = [0]
        scheduler.datetime = FakeDateTime
        job = self.scheduler.add_interval_job(increment,
                                              seconds=1,
                                              misfire_grace_time=3,
                                              start_date=FakeDateTime.now())
        start = job.next_run_time

        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        # Same timestamp again: within grace time but already run -> no rerun.
        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vars, [2])
Exemple #17
0
from yunbk.backend.local import LocalBackend

# Debug-level logger writing to stderr.
logger = logging.getLogger('default')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)

# daemonic=False keeps the process alive while jobs are scheduled.
sched = Scheduler(daemonic=False)


def err_listener(ev):
    """Report a crashed job (with traceback) or a missed run."""
    job_desc = str(ev.job)
    if not ev.exception:
        logger.info('%s miss', job_desc)
    else:
        # exc_info=True attaches the active exception's traceback.
        logger.fatal('%s error.', job_desc, exc_info=True)


@sched.cron_schedule(second='1')
def job():
    """Cron job (second == 1): write a marker file and run a backup.

    Runs inside a YunBK context using a LocalBackend rooted at
    /data/release/backup/.
    """
    logger.debug(datetime.datetime.now())
    backend = LocalBackend('/data/release/backup/')
    with YunBK('ybk', [backend]) as ybk:
        # Fix: use a context manager so the file handle is closed even if
        # write() raises (the original leaked the handle on error).
        with open('t2.txt', 'w') as f:
            f.write('ok')
        ybk.backup()


if __name__ == '__main__':
    # Register the error/miss listener before starting the scheduler.
    sched.add_listener(err_listener, EVENT_JOB_ERROR | EVENT_JOB_MISSED)
    sched.start()
Exemple #18
0
# Debug-level logger writing to stderr.
logger = logging.getLogger('default')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)

# daemonic=False keeps the process alive while jobs are scheduled.
sched = Scheduler(daemonic=False)


def err_listener(ev):
    """Listener for job events: fatal log on error, info log on a miss."""
    if ev.exception:
        logger.fatal('%s error.', str(ev.job), exc_info=True)
        return
    logger.info('%s miss', str(ev.job))


@sched.cron_schedule(second='1')
def job():
    """Cron job (second == 1): write a marker file and run a backup.

    Runs inside a YunBK context using a LocalBackend rooted at
    /data/release/backup/.
    """
    logger.debug(datetime.datetime.now())
    backend = LocalBackend('/data/release/backup/')
    with YunBK('ybk', [backend]) as ybk:
        # Fix: use a context manager so the file handle is closed even if
        # write() raises (the original leaked the handle on error).
        with open('t2.txt', 'w') as f:
            f.write('ok')
        ybk.backup()


if __name__ == '__main__':
    # Register the error/miss listener before starting the scheduler.
    sched.add_listener(err_listener, EVENT_JOB_ERROR | EVENT_JOB_MISSED)
    sched.start()

Exemple #19
0
def schedule():
    '''

    **Purpose:**

    This function is an in-process task scheduler that lets you schedule
    functions (or any other python callables) to be executed at times of your
    choosing.

    It replaces the reliance on externally run cron scripts for long-running
    applications such as XFERO.

    **Features:**

    * No (hard) external dependencies, except for setuptools/distribute
    * Cron-like scheduling

    **Cron-style Scheduling**

    You can specify a variety of different expressions on each field, and when
    determining the next execution time, it finds the earliest possible time
    that satisfies the conditions in every  field. This behavior resembles the
    Cron utility found in most UNIX-like operating systems.

    You can also specify the starting date for the cron-style schedule through
    the start_date parameter, which can be given as a date or datetime object
    or text.

    Unlike with crontab expressions, you can omit fields that you don't need.
    Fields greater than the least significant explicitly defined field default
    to * while lesser fields default to their minimum values except for week
    and day_of_week which default to *.

    For example, if you specify only day=1, minute=20, then the job will execute
    on the first day of every month on every year at 20 minutes of every hour.

    +------------------------+-------------------------------------------------+
    | Available Fields       | Description                                     |
    +========================+=================================================+
    | year                   | 4-digit year number                             |
    +------------------------+-------------------------------------------------+
    | month                  | month number (1-12)                             |
    +------------------------+-------------------------------------------------+
    | day                    | day of the month (1-31)                         |
    +------------------------+-------------------------------------------------+
    | week                   | ISO week number (1-53)                          |
    +------------------------+-------------------------------------------------+
    | day_of_week            | number or name of weekday (0-6 or mon-sun)      |
    +------------------------+-------------------------------------------------+
    | hour                   | hour (0-23)                                     |
    +------------------------+-------------------------------------------------+
    | minute                 | minute (0-59)                                   |
    +------------------------+-------------------------------------------------+
    | second                 | second (0-59)                                   |
    +------------------------+-------------------------------------------------+

    The following table lists all the available expressions applicable in cron-
    style schedules.

    +-----------------+------+-------------------------------------------------+
    | Expression types|Field | Description                                     |
    +=================+======+=================================================+
    | \\*              | any  | Fire on every value                            |
    +-----------------+------+-------------------------------------------------+
    | \\*/a            | any  | Fire every a values, starting from the minimum |
    +-----------------+------+-------------------------------------------------+
    | a-b             | any  | Fire on any value within the a-b range          |
    +-----------------+------+-------------------------------------------------+
    | a-b/c           | any  | Fire every c values within the a-b range        |
    +-----------------+------+-------------------------------------------------+
    | xth y           | day  | Fire on the x -th occurrence of weekday y within|
    |                 |      | the month                                       |
    +-----------------+------+-------------------------------------------------+
    | last x          | day  | Fire on the last occurrence of weekday x within |
    |                 |      | the month                                       |
    +-----------------+------+-------------------------------------------------+
    | last            | day  | Fire on the last day within the month           |
    +-----------------+------+-------------------------------------------------+
    | x,y,z           | any  | Fire on any matching expression; can combine any|
    |                 |      | number of any of the above expressions          |
    +-----------------+------+-------------------------------------------------+

    *Example Uses*

    Scheduled pull transfers from partner site.
    Scheduled outbound transfer (Part of a transfer workflow)
    Scheduled Housekeeping

    :returns: retval: Details of return

    **Unit Test Module:** None

    **Process Flow**

    .. figure::  ../process_flow/scheduler.png
       :align:   center

       Process Flow: Scheduler

    *External dependencies*

    os (xfero.scheduler)
    time (xfero.scheduler)
    xfero
      db
        manage_control (xfero.scheduler)
        manage_schedule (xfero.scheduler)
      get_conf (xfero.scheduler)
      hk
        housekeeping (xfero.scheduler)
      monitor (xfero.scheduler)
      stats
        xfero_stats (xfero.scheduler)

    +------------+-------------+-----------------------------------------------+
    | Date       | Author      | Change Details                                |
    +============+=============+===============================================+
    | 02/07/2013 | Chris Falck | Created                                       |
    +------------+-------------+-----------------------------------------------+
    | 27/09/2014 | Chris Falck | Added ability to call xfero_stats             |
    +------------+-------------+-----------------------------------------------+
    | 27/10/2014 | Chris Falck | modified call to get_conf                     |
    +------------+-------------+-----------------------------------------------+

    '''
    # Load the global XFERO configuration tuple; any failure here is fatal.
    try:
        (xfero_logger,
         xfero_database,
         outbound_directory,
         transient_directory,
         error_directory,
         xfero_pid) = get_conf.get.xfero_config()
    except Exception as err:
        print('Cannot get XFERO Config: %s' % err)
        raise err

    logging.config.fileConfig(xfero_logger)

    # create logger
    logger = logging.getLogger('scheduler')

    logger.info('Running XFERO Scheduler...')

    # Check status of XFERO_Control.control_status = 'STOPPED'. If it is 'RUNNING'
    # Advise that it is already running

    try:
        rows = db_control.read_XFERO_Control('1')
    except Exception as err:
        logger.error(
            'Unable to read XFERO_Control from DB: Error %s',
            (err),
            exc_info=True)
        sys.exit(err)

    # Control row layout: (id, status, ...).
    control_id = rows[0]
    control_status = rows[1]
    # print('control_id = %s' % control_id)
    # print('control_status = %s' % control_status)

    # Advise that it is closing down as request they wait
    if control_status == 'STARTED':
        logger.warning('The XFERO Scheduler is already running! Exiting')
        print('Scheduler is running... exiting')
        sys.exit('Scheduler is running')

    # If it is 'STOPPING'
    # Advise that it is closing down as request they wait
    if control_status == 'STOPPING':
        logger.warning(
            'The XFERO Scheduler is currently stopping... Please wait! Exiting')
        print('Scheduler is stopping... wait')
        # Mark the stop as completed so the next start attempt can proceed.
        control_id = '1'
        control_status = 'STOPPED'
        try:
            rows = db_control.update_XFERO_Control(control_id, control_status)
        except Exception as err:
            logger.error(
                'Unable to update XFERO_Control from DB: Error %s',
                (err),
                exc_info=True)
            sys.exit(err)
    # If it is 'STOPPED'
    # Advise it is about to startup
    if control_status == 'STOPPED':
        logger.warning('The XFERO Scheduler is starting!')
        print('Starting scheduler')

    # Get rows from table
    logger.info('Retrieving Scheduled Tasks...')
    try:
        rows = db_schedule.get_activated_XFERO_Scheduled_Task()
    except Exception as err:
        logger.info(
            'Unable to get active Scheduled Task from DB: Error %s',
            (err),
            exc_info=True)
        sys.exit(err)

    counter = 1
    jobs = []

    sched = Scheduler(coalesce=True, daemonic=False)
    # NOTE(review): add_listener expects an event mask as its second argument
    # (e.g. EVENT_JOB_ERROR | EVENT_JOB_MISSED); sched.shutdown is a method,
    # and 'listener' is not defined in this function -- presumably a
    # module-level callback. Confirm both against the module imports.
    sched.add_listener(listener, sched.shutdown )

    # scheduled_task_id, scheduled_task_name, scheduled_task_function,
    # scheduled_task_year, scheduled_task_month, scheduled_task_day,
    # scheduled_task_week, scheduled_task_day_of_week,  scheduled_task_hour,
    # scheduled_task_minute, scheduled_task_second, scheduled_task_args,
    # scheduled_task_active FROM XFERO_Scheduled_Task WHERE
    # scheduled_task_active=?', ('1'))

    for task in rows:
        print('In for loop task')
        scheduled_task_id = task['scheduled_task_id']
        scheduled_task_name = task['scheduled_task_name']
        scheduled_task_function = task['scheduled_task_function']
        scheduled_task_year = task['scheduled_task_year']
        scheduled_task_month = task['scheduled_task_month']
        scheduled_task_day = task['scheduled_task_day']
        scheduled_task_week = task['scheduled_task_week']
        scheduled_task_day_of_week = task['scheduled_task_day_of_week']
        scheduled_task_hour = task['scheduled_task_hour']
        scheduled_task_minute = task['scheduled_task_minute']
        scheduled_task_second = task['scheduled_task_second']
        scheduled_task_args = task['scheduled_task_args']
        scheduled_task_active = task['scheduled_task_active']

        # job1 = sched.add_cron_job (job_function, day_of_week = 'mon-fri',
        # hour = '*', minute = '0-59 ', second ='*/20 ', args = ['hello'],
        # name="Hiya")
        f_module, f_func = scheduled_task_function.split('.')
        # print(f_module + '.' + f_func)

        # Map the stored module name onto the importable package path.
        # NOTE(review): 'func' stays unbound if f_module matches none of the
        # three branches, making the print below raise NameError -- confirm
        # the DB only ever stores these three module names.
        if f_module == 'monitor':
            func = getattr(monitor, f_func)
        elif f_module == 'housekeeping':
            f_module = 'hk.housekeeping'
            func = getattr(housekeeping, f_func)
        elif f_module == 'xfero_stats':
            f_module = 'stats.xfero_stats'
            func = getattr(xfero_stats, f_func)

        print('Func = %s' % func)

        # The DB stores the literal string 'NULL' for unset fields; translate
        # to None so apscheduler applies its defaults.
        if scheduled_task_year == 'NULL':
            scheduled_task_year = None
        if scheduled_task_month == 'NULL':
            scheduled_task_month = None
        if scheduled_task_day == 'NULL':
            scheduled_task_day = None
        if scheduled_task_week == 'NULL':
            scheduled_task_week = None
        if scheduled_task_day_of_week == 'NULL':
            scheduled_task_day_of_week = None
        if scheduled_task_hour == 'NULL':
            scheduled_task_hour = None
        if scheduled_task_minute == 'NULL':
            scheduled_task_minute = None
        if scheduled_task_second == 'NULL':
            scheduled_task_second = None

        # '' iterates as zero args, so this behaves like an empty list.
        if scheduled_task_args == 'NULL':
            list_args = ''
        else:
            list_args = scheduled_task_args.split(',')

        job = 'Job_' + str(counter)
        jobs.append(job)

        # NOTE using __import__ returns the top-level name of the package
        # Using sys.modules allows us to make the function call

        __import__(f_module)
        mod = sys.modules[f_module]

        job = sched.add_cron_job(
            getattr(mod, f_func),
            year=scheduled_task_year,
            month=scheduled_task_month,
            day=scheduled_task_day,
            week=scheduled_task_week,
            day_of_week=scheduled_task_day_of_week,
            hour=scheduled_task_hour,
            minute=scheduled_task_minute,
            second=scheduled_task_second,
            args=list_args,
            name=scheduled_task_name)

        counter += 1
    sched.start()

    # Set XFERO_Control.control_status = 'STARTED'
    try:
        rows = db_control.update_XFERO_Control('1', 'STARTED')
    except Exception:
        logger.error('Unable to update Control Status from DB')

    # IN A LOOP
    # Now that scheduled tasks are running, we need to watch for requests to
    # shutdown.
    # Retrieve status from XFERO_Control where control_status = 'STOPPING'

    while True:
        print('Going to sleepies!!!')
        sleep(30)

        for job in jobs:
            logger.info('Running Job: %s', job)

        logger.info('Checking Control Status')
        try:
            rows = db_control.read_XFERO_Control('1')
        except Exception:
            logger.error('Unable to retrieve Control Status from DB')

        control_id = rows[0]
        control_status = rows[1]
        logger.info('Status = %s', control_status)
        # If XFERO_Control.control_status = 'STOPPING'
        if control_status == 'STOPPING':
            logger.info('Scheduler is shutting down')
            sched.shutdown(0)

            # Set XFERO_Control.control_status = 'STOPPED'
            try:
                rows = db_control.update_XFERO_Control('1', 'STOPPED')
            except Exception:
                logger.error('Unable to retrieve Control Status from DB')
            break
Exemple #20
0
import json
import re

logging.basicConfig()
session = DB_Session()
# DHT11 sensor instance; 4 is presumably the GPIO pin number -- confirm wiring.
dht11 = DHT11(4)
sched = Scheduler()

def err_listener(ev):
    """Report failed or missed jobs through the 'schedErrJob' logger."""
    err_logger = logging.getLogger('schedErrJob')
    if not ev.exception:
        err_logger.info('%s miss', str(ev.job))
    else:
        # exception() logs at ERROR level and appends the active traceback.
        err_logger.exception('%s error.', str(ev.job))

# Log job errors and missed runs through err_listener.
sched.add_listener(err_listener, apscheduler.events.EVENT_JOB_ERROR | apscheduler.events.EVENT_JOB_MISSED)

# Comparison operators; two-character forms come first so '>=' matches
# before '>'. Presumably used to split rule expressions -- see opCompare below.
regExp = "(>=|<=|>|<|!=|==|=)"

def opCompare(op,par1,par2):
	if op == "<=":
		return par1 <= par2
	elif op == ">=":
		return par1 >= par2
	elif op == "<":
		return par1 < par2
	elif op == ">":
		return par1 > par2
	elif op == "!=":
		return par1 != par2
	elif op == "==" or op == "=":
Exemple #21
0
        logging.info("Site %s is online." % siteURL)

    def visitAndVerifySites(self):
        """Visit every configured site, verify it is online, and screenshot it.

        The browser is always shut down afterwards, even if a check fails.
        """
        sites = shepherd_util.unpackSites()
        try:
            for entry in sites:
                self.verifySiteIsOnline(entry)
                self.takeScreenshot(entry)
        finally:
            logging.info("Closing all browser windows and shutting down.")
            self.driver.quit()

    def eventListener(self, event):
        """Scheduler listener: log a warning for any job that raised."""
        exc = event.exception
        if not exc:
            return
        logging.warning("Encountered exception during event processing: %s" % exc)


if __name__ == "__main__":
    shepherd_util.setupLogging(LOG_FILE)
    logging.info("Initializing scheduler.")
    sched = Scheduler(daemon=True)
    sched.start()
    shepherd = Shepherd()
    logging.info("Adding shepherd function to scheduler to run once per hour.")
    sched.add_listener(shepherd.eventListener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    sched.add_interval_job(shepherd.visitAndVerifySites, minutes=SCREENSHOT_INTERVAL_MINS)
    while True:
        pass
Exemple #22
0
from helpers.loggers import get_logger
from manager import syndb
from manager.products import dailyupdate 


def err_listener(ev):
    """Log failed jobs with job/exception/traceback details, others plainly."""
    err_logger = get_logger('schedErrJob')
    if not ev.exception:
        err_logger.info('%s', str(ev.job))
    else:
        err_logger.exception('%s %s %s error.',
                             str(ev.job), str(ev.exception), str(ev.traceback))


# Start the scheduler
sched = Scheduler(daemonic = False)
# NOTE(review): no event mask is passed, so err_listener fires for every
# scheduler event, not only errors -- confirm that is intended.
sched.add_listener(err_listener)
# Crawl Scheduler Taobao

# Different minutes per category, presumably to stagger the crawls.
sched.add_cron_job(pickitem.startupdate, minute=1, args=['nvzhuang'])
sched.add_cron_job(pickitem.startupdate, minute=5, args=['nvxie'])
sched.add_cron_job(pickitem.startupdate, minute=25, args=['danjianbao'])

"""
sched.add_cron_job(startupdate, minute=10, args=['wenxiong'])
sched.add_cron_job(startupdate, minute=15, args=['shuiyi'])
sched.add_cron_job(startupdate, minute=20, args=['sushen'])
sched.add_cron_job(startupdate, minute=30, args=['shoutibao'])
sched.add_cron_job(startupdate, minute=35, args=['xiekuabao'])
sched.add_cron_job(startupdate, minute=40, args=['qianbao'])
sched.add_cron_job(startupdate, minute=45, args=['shounabao'])
sched.add_cron_job(startupdate, minute=50, args=['tongzhuang'])
Exemple #23
0
from logger import Log

logger = Log().getLog()

sched = Scheduler()
sched.daemonic = False  # non-daemon threads: keep the process alive


def job_events_listener(jobEvent):
    """Listen for job events: log normal completions, alert on errors/misses.

    For EVENT_JOB_EXECUTED the run is logged at info level; any other event
    (error or missed run) is logged with its exception and an alert message
    is assembled for the (currently disabled) monitor hook.
    """
    if jobEvent.code == apscheduler.events.EVENT_JOB_EXECUTED:
        # Job ran normally.
        logger.info("scheduled|%s|trigger=%s|scheduled_time=%s" % (jobEvent.job.name, jobEvent.job.trigger, jobEvent.scheduled_run_time))

    else:
        # Error or missed run.
        logger.exception((jobEvent.code, jobEvent.exception, jobEvent.job, jobEvent.scheduled_run_time))
        except_msg = "miss execute" if not jobEvent.exception else str(jobEvent.exception.args)
        alert_msg = "%s,%s,%s,%s" % (
            jobEvent.code, jobEvent.job.name, except_msg, jobEvent.scheduled_run_time.strftime("%H:%M:%S"))
        # Alerting hook (disabled)
        #monitor.add_alert_msg(jobEvent.job.name, alert_msg)
        # Fix: the format string has two placeholders but the original passed
        # only alert_msg, raising "not enough arguments for format string".
        logger.info("send alert over, key=%s|msg=%s" % (jobEvent.job.name, alert_msg))


# Monitor execution results: errors, missed runs and successful executions.
sched.add_listener(job_events_listener,
                apscheduler.events.EVENT_JOB_ERROR | apscheduler.events.EVENT_JOB_MISSED | apscheduler.events.EVENT_JOB_EXECUTED)
Exemple #24
0
            logger.info("Event #" + str(event.job.name) +
                        " completed successfully")
        elif (event.retval == False):
            set_status(event.job.name, 4)
            logger.error("Event #" + str(event.job.name) + " had an error")
        if event.exception:
            print event.exception
            logger.fatal("Event #" + str(event.job.name) + ' - job crashed :(')


#start the scheduler
sched = Scheduler()
# Persist jobs across restarts in a shelve-backed store.
sched.add_jobstore(WriteBackShelveJobStore('jobstore.db'), 'shelve')
sched.start()

sched.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

# Run one pass immediately at startup.
process_events()

# Process events list every 10 seconds
sched.add_interval_job(process_events, seconds=10)

# Remove completed events from db every minute
sched.add_interval_job(remove_events, minutes=1)

# Python 2 print statement -- this snippet targets Python 2.
print "Dispatcher started..."

try:
    while True:
        time.sleep(1)
finally:

def listen(event):
    """Error listener: log the exception, its line number and its traceback."""
    if not event.exception:
        return
    if options.verbose:
        print("Listen Error: %s" % event.exception)
        print("".join(traceback.format_tb(event.traceback)))
    log("%s at line %d" % (event.exception, event.traceback.tb_lineno))
    # Prefix each traceback line so it stands out in the log.
    tb_lines = ("".join(traceback.format_tb(event.traceback))).splitlines(True)
    tb_lines = [">>> %s" % line for line in tb_lines]
    log("\n%s" % "".join(tb_lines))
# NOTE(review): apscheduler invokes listeners with exactly one event
# argument; the author reports "function takes exactly 1 argument (0 given)"
# from this registration -- check how 'listen' is invoked elsewhere.
sched.add_listener(listen, EVENT_JOB_ERROR)


def log_STAMPS():
    """Log the STAMPS table with timestamps rendered in local time (Python 2)."""
    formatted = [(name, time.strftime("%x %X %Z", time.localtime(stamp)))
                 for name, stamp in STAMPS.iteritems()]
    log("STAMPS=%s" % dict(formatted))

# Snapshot of STAMPS with human-readable local timestamps
# (Python 2: dict.iteritems / list-returning map).
STAMPS_list = map(lambda item: (item[0],
                                time.strftime("%x %X %Z",
                                              time.localtime(item[1]))),
                  STAMPS.iteritems())
STAMPS_str = str(dict(STAMPS_list))
class HouseControl(object):
    """Central house controller.

    Builds the heating status bean, the scheduler with its base service
    jobs, and the bus worker thread, then loads the user-defined heating
    schedule from the configuration file.
    """

    # Set by the __initialize* helpers during __init__.
    __scheduler = None
    __heatingStatusBean = None

    # Class-level job queue and the worker thread that drains it.
    busJobsQueue = Queue.Queue()
    busWorkerThread = BusWorker(busJobsQueue)

    def __init__(self):
        self.logger = logging.getLogger(APPLICATION_LOGGER_NAME)
        self.logger.info("HouseControl starting...")

        configurationReader = ConfigurationReader(self.logger, os.getcwd() + FILEPATH_CONFIGURATION)

        # Initialize HeatingStatusBean
        self.__initalizeHeatingStatusBean(configurationReader)

        # Initialize Scheduler
        self.__initializeScheduler(configurationReader)

        # Initialize BusQueueWorker (daemon: it dies with the main process)
        self.busWorkerThread.setDaemon(True)
        self.busWorkerThread.start()

        self.logger.info("HouseControl started.")


    def __initalizeHeatingStatusBean(self, configurationReader):
        """Create and configure the HeatingStatusBean and attach the
        services that react to heating status changes.

        NOTE(review): method name has a typo ("initalize"); kept unchanged
        so existing callers keep working.
        """
        # HeatingStatusBean
        self.__heatingStatusBean = HeatingStatusBean.HeatingStatusBean()

        # Configure Bean
        self.updateHeatingStatusBeanConfiguration(configurationReader)

        # Add ChangeListener
        self.__heatingStatusBean.addChangeListener(HeatingControlService.HeatingControlService(self))
        self.__heatingStatusBean.addChangeListener(HeatingSwitchService.HeatingSwitchService(self))
        ##self.__heatingStatusBean.addChangeListener(HeatingMonitorService.HeatingMonitorService(self))
        self.logger.info("HeatingStatusBean configured.")


    def __initializeScheduler(self, configurationReader):
        """Create a standalone scheduler, register the base service jobs,
        start it, then load the user-defined schedule."""
        # Scheduler
        self.__scheduler = Scheduler()
        self.__scheduler.configure(standalone=True)
        self.__scheduler.add_listener(schedulerListener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

        # SchedulerTasks:
        # TemperaturFeedService, TemperatureLogService, MixerControlService
        self.__loadBaseSchedulerTasks()

        self.__scheduler.start()

        # User-defined switching times (from the configuration file)
        self.loadUserSchedulerTasks(configurationReader)

        self.logger.info("Scheduler started.")


    def getHeatingStatusBean(self):
        """Return the shared HeatingStatusBean instance."""
        return self.__heatingStatusBean

    def getScheduler(self):
        """Return the shared scheduler instance."""
        return self.__scheduler


    def __loadBaseSchedulerTasks(self):
        """Register the three recurring service jobs (temperature feed,
        temperature log, mixer control) at their configured intervals."""
        temperatureFeedService = TemperatureFeedService.TemperatureFeedService(self)
        temperatureLogService = TemperatureLogService.TemperatureLogService(self)
        mixerControlService = MixerControlService.MixerControlService(self)

        # TemperaturFeedService
        job = self.__scheduler.add_interval_job(temperatureFeedService.run, seconds=INTERVALL_UPDATE_TEMPERATURE)
        job.name = SCHEDULE_SERVICE_TEMPERATURE_UPDATER
        self.logger.info("Scheduler-Job [" + job.name + "] loaded.")

        # TemperatureLogService
        job = self.__scheduler.add_interval_job(temperatureLogService.run, seconds=INTERVALL_LOG_TEMPERATURE)
        job.name = SCHEDULE_SERVICE_TEMPERATURE_LOGGER
        self.logger.info("Scheduler-Job [" + job.name + "] loaded.")

        # MixerControlService
        job = self.__scheduler.add_interval_job(mixerControlService.run, seconds=INTERVALL_UPDATE_MIXER)
        job.name = SCHEDULE_SERVICE_TEMPERATURE_MIXERCONTROL
        self.logger.info("Scheduler-Job [" + job.name + "] loaded.")


    def updateHeatingStatusBeanConfiguration(self, configurationReader):
        """Copy the configured target temperatures (upper-floor flow,
        ground-floor flow, hot water) into the bean."""
        temperatures = configurationReader.temperatures
        self.__heatingStatusBean.setUpperFloorFlowTargetTemperature(float(temperatures.get('ogv')))
        self.__heatingStatusBean.setGroundFloorFlowTargetTemperature(float(temperatures.get('egv')))
        self.__heatingStatusBean.setWaterTargetTemperature(float(temperatures.get('hotwater')))


    def reloadUserSchedulerTasks(self):
        """Re-read the configuration, refresh the bean's target
        temperatures, and replace all user-defined jobs."""
        self.removeUserSchedulerTasks()

        configurationReader = ConfigurationReader(self.logger, os.getcwd() + FILEPATH_CONFIGURATION)
        self.updateHeatingStatusBeanConfiguration(configurationReader)

        self.loadUserSchedulerTasks(configurationReader)


    def removeUserSchedulerTasks(self):
        """Unschedule every job whose name carries the user-action prefix."""
        prefixLen = len(SERVICE_HEATING_ACTION_PREFIX)
        jobList = self.__scheduler.get_jobs()
        for job in jobList:
            jobName = job.name
            if(jobName[:prefixLen] == SERVICE_HEATING_ACTION_PREFIX):
                self.logger.info("Scheduler-Job [" + job.name + "] removed.")
                self.__scheduler.unschedule_job(job)


    def loadUserSchedulerTasks(self, configurationReader):
        """Create cron jobs for the user-defined heating tasks, then log
        every job that carries the user-action prefix."""
        # Template with every cron field unset; each task overrides a copy.
        baseCronSched = {'year':None, 'month':None, 'day':None, 'week':None, 'day_of_week':None, 'hour':None, 'minute':None, 'second':None, 'start_date':None}
        for task in configurationReader.heatingTasks:

            schedType = task.get('schedule').get('type')
            if(schedType == 'cron'):
                cronSched = baseCronSched.copy()
                cronSched.update(task.get('schedule'))
                cronSched.pop('type')  # 'type' is not a cron field
                if(task.get('type') == 'changeHeatingStatus'):
                    taskFunction = self.__heatingStatusBean.setHeatingStatusMap
                    job = self.__scheduler.add_cron_job(taskFunction,
                                                        year=cronSched['year'], month=cronSched['month'], day=cronSched['day'],
                                                        week=cronSched['week'], day_of_week=cronSched['day_of_week'],
                                                        hour=cronSched['hour'], minute=cronSched['minute'], second=cronSched['second'],
                                                        start_date=cronSched['start_date'],
                                                        args=[task.get('status')])
                    n = SERVICE_HEATING_ACTION_PREFIX + str(task.get('name'))
                    job.name = n

        prefixLen = len(SERVICE_HEATING_ACTION_PREFIX)
        jobList = self.__scheduler.get_jobs()
        for job in jobList:
            jobName = job.name
            if(jobName[:prefixLen] == SERVICE_HEATING_ACTION_PREFIX):
                self.logger.info("Scheduler-Job [" + jobName + "] loaded.")
Exemple #27
0
class SchedulerContainer( DaemonContainer ):
    """DaemonContainer that owns an apscheduler Scheduler and proxies its API.

    Lifecycle is driven by the DaemonContainer hooks (on_start/on_stop);
    every other method is a thin delegation to the wrapped scheduler.
    """

    def __init__(self, environment):
        # BUG FIX: super() must name this class, not Scheduler — the
        # original `super(Scheduler, self)` raised TypeError on construction.
        super(SchedulerContainer, self).__init__(environment)
        gconfig = environment.get("gconfig", {})
        options = environment.get("options", {})
        self.scheduler = Scheduler(gconfig, **options)

    def on_start(self):
        """Start the wrapped scheduler."""
        self.scheduler.start()

    def on_stop(self):
        # BUG FIX: apscheduler's Scheduler exposes shutdown(), not stop();
        # the original call raised AttributeError when stopping.
        self.scheduler.shutdown()

    def unschedule_func(self, func):
        """Remove every job that would run the given callable."""
        self.scheduler.unschedule_func(func)

    def unschedule_job(self, job):
        """Remove one specific job."""
        self.scheduler.unschedule_job(job)

    def add_interval_job(self, func, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None, args=None, kwargs=None, **options):
        """Schedule func to run periodically; returns the created Job."""
        return self.scheduler.add_interval_job(func=func, weeks=weeks, days=days,
                                               hours=hours, minutes=minutes, seconds=seconds,
                                               start_date=start_date, args=args,
                                               kwargs=kwargs, **options)

    def add_cron_job(self, func, year=None, month=None, day=None, week=None, day_of_week=None, hour=None, minute=None, second=None, start_date=None, args=None, kwargs=None, **options):
        """Schedule func on a cron-style timetable; returns the created Job."""
        return self.scheduler.add_cron_job(func=func, year=year, month=month, day=day,
                                           week=week, day_of_week=day_of_week,
                                           hour=hour, minute=minute, second=second,
                                           start_date=start_date, args=args,
                                           kwargs=kwargs, **options)

    def add_date_job(self, func, date, args=None, kwargs=None, **options):
        """Schedule func to run once at `date`; returns the created Job."""
        return self.scheduler.add_date_job(func=func, date=date, args=args,
                                           kwargs=kwargs, **options)

    def get_jobs(self):
        """Return the list of currently scheduled jobs."""
        return self.scheduler.get_jobs()

    def add_job(self, trigger, func, args, kwargs, jobstore='default', **options):
        """Add a job with an explicit trigger into the given jobstore."""
        return self.scheduler.add_job(trigger=trigger, func=func, args=args,
                                      kwargs=kwargs, jobstore=jobstore, **options)

    def add_listener(self, callback, mask):
        """Register callback for scheduler events matching mask."""
        self.scheduler.add_listener(callback, mask)

    def remove_listener(self, callback):
        """Unregister a previously added listener."""
        self.scheduler.remove_listener(callback)
Exemple #28
0
    core_threads=app.config.get('CORE_THREADS', 25),
    max_threads=app.config.get('MAX_THREADS', 30),
    keepalive=0
)
# Standalone scheduler on the shared thread pool; coalesce=True plus the
# misfire grace window control how late/missed runs are handled.
scheduler = Scheduler(
    standalone=True,
    threadpool=pool,
    coalesce=True,
    misfire_grace_time=app.config.get('MISFIRE_GRACE_TIME', 30)
)


def exception_listener(event):
    """Persist uncaught scheduler job errors via store_exception."""
    store_exception("scheduler-change-reporter-uncaught", None, event.exception)

scheduler.add_listener(exception_listener, events.EVENT_JOB_ERROR)


def setup_scheduler():
    """Sets up the APScheduler"""
    log = logging.getLogger('apscheduler')

    try:
        accounts = Account.query.filter(Account.third_party == False).filter(Account.active == True).all()  # noqa
        accounts = [account.name for account in accounts]
        for account in accounts:
            app.logger.debug("Scheduler adding account {}".format(account))
            rep = Reporter(account=account)
            delay = app.config.get('REPORTER_START_DELAY', 10)

            for period in rep.get_intervals(account):
Exemple #29
0
class TestOfflineScheduler(object):
    """Tests exercising a Scheduler before/without starting it."""

    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        # Registering two jobstores under the same alias must raise KeyError.
        for _ in range(2):
            self.scheduler.add_jobstore(RAMJobStore(), 'dummy')

    def test_add_tentative_job(self):
        # A job added while offline is tentative: a Job is returned but it
        # is not listed yet.
        job = self.scheduler.add_date_job(
            lambda: None, datetime(2200, 7, 24), jobstore='dummy')
        assert isinstance(job, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_configure_jobstore(self):
        # A jobstore declared via configure() can be removed by its alias.
        jobstore_opts = {'apscheduler.jobstore.ramstore.class':
                         'apscheduler.jobstores.ram_store:RAMJobStore'}
        self.scheduler.configure(jobstore_opts)
        self.scheduler.remove_jobstore('ramstore')

    def test_shutdown_offline(self):
        # Shutting down a never-started scheduler must not raise.
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        # Options lacking the 'apscheduler.' prefix are ignored, so the
        # defaults (grace time 1, daemonic True) stay in force.
        self.scheduler.configure({'misfire_grace_time': '2',
                                  'daemonic': 'false'})
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        # Properly prefixed options are applied.
        self.scheduler.configure({'apscheduler.misfire_grace_time': 2,
                                  'apscheduler.daemonic': False})
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        seen = []
        self.scheduler.add_listener(seen.append)

        start_event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(start_event)
        eq_(len(seen), 1)
        eq_(seen[0], start_event)

        stop_event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(stop_event)
        eq_(len(seen), 2)
        eq_(seen[1], stop_event)

        # After removal, further notifications are no longer delivered.
        self.scheduler.remove_listener(seen.append)
        self.scheduler._notify_listeners(stop_event)
        eq_(len(seen), 2)

    def test_pending_jobs(self):
        # Pending jobs only appear in get_jobs() once the scheduler starts.
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])

        self.scheduler.start()
        eq_(len(self.scheduler.get_jobs()), 1)
Exemple #30
0
# Thread pool sized from app config (defaults: 25 core / 30 max workers).
pool = ThreadPool(core_threads=app.config.get('CORE_THREADS', 25),
                  max_threads=app.config.get('MAX_THREADS', 30),
                  keepalive=0)
# Standalone scheduler on that pool; coalesce=True plus the misfire grace
# window control how late/missed runs are handled.
scheduler = Scheduler(standalone=True,
                      threadpool=pool,
                      coalesce=True,
                      misfire_grace_time=app.config.get(
                          'MISFIRE_GRACE_TIME', 30))


def exception_listener(event):
    """Persist uncaught scheduler job errors via store_exception."""
    store_exception(
        "scheduler-change-reporter-uncaught", None, event.exception)


scheduler.add_listener(exception_listener, events.EVENT_JOB_ERROR)


def setup_scheduler():
    """Sets up the APScheduler"""
    log = logging.getLogger('apscheduler')

    try:
        accounts = Account.query.filter(Account.third_party == False).filter(
            Account.active == True).all()  # noqa
        accounts = [account.name for account in accounts]
        for account in accounts:
            app.logger.debug("Scheduler adding account {}".format(account))
            rep = Reporter(account=account)
            delay = app.config.get('REPORTER_START_DELAY', 10)
Exemple #31
0
		me = '*****@*****.**'
		msg = MIMEText(context)
		msg['subject'] = sub
		msg['From'] = '*****@*****.**'
		msg['To'] = COMMASPACE.join(mailto_list)
		send_smtp = smtplib.SMTP(mail_host)
		send_smtp.sendmail(me, mail_list, msg.as_string())
		send_smtp.close()

	def job():
		"""Fetch today's free books and mail the list when new ones appear."""
		context = 'Today free books are'
		mailto_list = ['*****@*****.**']
		bookList = get_book([])
		context, isSendMail = record_book(bookList, context, False)
		if isSendMail:
			send_mail(mailto_list, 'Free Book is Update', context)

	def job_listener(event):
		logging.basicConfig()
		if event.exception:
			print 'job failed'
		else:
			print 'job succeed'

# BUG FIX: the original used assignment (`=`) instead of equality (`==`),
# which is a SyntaxError.
if __name__ == '__main__':
	# Run the book-check job every 30 minutes in a non-daemonic scheduler.
	sched = Scheduler(daemonic=False)
	sched.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
	sched.add_interval_job(job, minutes=30)
	sched.start()
class TrainScheduler(object):
	"""Schedules station queries for passenger/freight/auto trains and
	writes each train to the output file at its (re-checked) time.

	NOTE(review): __init__ never returns — it enters an endless sleep loop
	and only exits the process on KeyboardInterrupt.
	"""

	def __init__(self):
		logging.basicConfig(level=logging.DEBUG, filename="debug.log", format='%(asctime)s %(levelname)-8s %(message)s', datefmt="%d.%m.%Y %H:%M:%S")

		self.scheduler = Scheduler()
		# Deduplicate whenever a job lands in the jobstore.
		self.scheduler.add_listener(self.checkForDuplicates, apscheduler.events.EVENT_JOBSTORE_JOB_ADDED)
		self.scheduler.start()

		# Only seed the schedule when the jobstore is empty (fresh start).
		if len(self.scheduler.get_jobs()) == 0:
			self.createInitSchedule()

		self.log("Initial tasks completed. Waiting for next event..")

		while True:
			try:
				time.sleep(10)
				#self.scheduler.print_jobs()

			except KeyboardInterrupt:
				self.log("Shutting down..")
				self.scheduler.shutdown()
				quit()


	def createInitSchedule(self):
		"""Run each query once now, then install the recurring cron jobs."""
		self.log("Perform initial query for passenger trains..")
		self.processPassenger()
		self.log("Perform initial query for freight trains..")
		self.processFreight()
		self.log("Perform initial query for auto trains..")
		self.processAutotrain()

		self.log("Creating initial train schedule..")

		# request passenger trains every hour
		self.scheduler.add_cron_job(self.processPassenger, hour="*/1", minute="0", day="*", month="*", year="*")
		# request freight trains every day
		self.scheduler.add_cron_job(self.processFreight, hour="0", minute="2", day="*", month="*", year="*")
		# request auto trains every month
		self.scheduler.add_cron_job(self.processAutotrain, hour="0", minute="5", day="1", month="*", year="*")


	def processPassenger(self):
		"""Schedule each upcoming passenger train to be re-checked shortly
		before its time, falling back to direct display for past dates."""
		# return trains for station in question
		tReq = passenger.PassengerTrainRequest(PASSENGER_STATION_ID)

		for train in tReq.getTrainList():
			# Prefer the live (actual) time over the timetable time.
			trainTime = train.actualTime if (train.actualTime) else train.scheduledTime
			trainTimeCheck = trainTime - datetime.timedelta(minutes=CHECKBEFORE)

			# add_date_job raises ValueError for dates already in the past,
			# hence the nested fallbacks.
			try:
				self.scheduler.add_date_job(self.checkIfOnTime, trainTimeCheck, args=[train], name=train.name)
				self.log("Schedule passenger train '%s' to be checked on %s." % (train.name, trainTimeCheck))

			except ValueError:
				try:
					self.scheduler.add_date_job(self.output, trainTime, args=[train], name=train.name)
					self.log("Schedule passenger train '%s' to be displayed on %s." % (train.name, trainTime))

				except ValueError:
					self.log("Passenger train '%s' (%s) already passed by." % (train.name, trainTime))

	def checkIfOnTime(self, remTrain):
		"""Re-query the station and schedule the display of remTrain at its
		possibly updated time."""
		# return trains for station in question
		tReq = passenger.PassengerTrainRequest(PASSENGER_STATION_ID)

		for train in tReq.getTrainList():
			if remTrain.name == train.name:
				trainTime = train.actualTime if (train.actualTime) else train.scheduledTime
				try:
					self.scheduler.add_date_job(self.output, trainTime, args=[train], name=train.name)
					self.log("Schedule passenger train '%s' to be displayed on %s." % (train.name, trainTime))

				except ValueError:
					self.log("Passenger train '%s' (%s) already passed by." % (train.name, trainTime))
				break


	def processFreight(self):
		"""Schedule upcoming freight trains for display at their arrival."""
		# return trains for station in question
		freightTrains = freight.FreightTrainRequest(FREIGHT_STATION_ID)

		for train in freightTrains.getTrainList():
			# FIXME: only arrival atm
			if train.arrival > datetime.datetime.now():
				self.log("Schedule freight train '%s' to be displayed on %s." % (train.name, train.arrival))
				self.scheduler.add_date_job(self.output, train.arrival, args=[train], name=train.name)
			else:
				self.log("Freight train '%s' (%s) already passed." % (train.name, train.arrival))


	def processAutotrain(self):
		"""Schedule upcoming auto trains for display at their arrival."""
		# return trains for station in question
		freightTrains = autotrain.AutoTrainRequest(AUTO_TRAIN_STATION_NAME)

		for train in freightTrains.getTrainList():
			if train.arrival > datetime.datetime.now():
				self.log("Schedule auto train '%s' to be displayed on %s." % (train.name, train.arrival))
				self.scheduler.add_date_job(self.output, train.arrival, args=[train], name=train.name)
			else:
				self.log("Auto train '%s' (%s) already passed." % (train.name, train.arrival))

	def checkForDuplicates(self, event):
		"""Jobstore listener: unschedule the newly added job when an
		identical one (same name and next run time) already exists."""
		jobs = self.scheduler.get_jobs()

		if jobs:
			# events with the same name (train name) and the next "next run time" are duplicates
			dups = [job for job in jobs if job.name == event.job.name and job.next_run_time == event.job.next_run_time]
			if len(dups) > 1:
				self.log("Unscheduling %s." % event.job)
				self.scheduler.unschedule_job(event.job)


	def output(self, train):
		"""Append the train to the output file and log it."""
		self.log("OUTPUT: %s" % train)
		f = open(OUTPUT_FILE, "a")
		f.write("%s\n" % train)
		f.close()


	def log(self, message):
		"""Write an info-level line to the module log."""
		logging.info("* %s" % message)
Exemple #33
0
sched = Scheduler()
# Route both Ctrl-C and termination through the shared shutdown handler.
signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)


def event_handler(event):
    """Log a job failure (with traceback) or a brief success line."""
    if not event.exception:
        logger.info("SUCCESS: %s" % event.job)
        return
    logger.error("SCHEDULER FAIL:\n%s\n%s\n%s" %
                 (event.job, event.exception, traceback.format_exc()))


# Observe successes, errors and missed runs alike.
sched.add_listener(
    event_handler, events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR
    | events.EVENT_JOB_MISSED)

# ----------------------------------------------------------------------------
# Tasks

#sched.add_cron_job(send_digest, hour=11)

# ----------------------------------------------------------------------------

if __name__ == '__main__':
    sched.start()
    sched.print_jobs()
    # Keep the process alive for the scheduler's worker threads.
    while True:
        time.sleep(10)
        # NOTE(review): presumably releases the request-scoped DB
        # session each cycle — confirm against the db helper.
        db.remove()
class MonitorScheduler:

    #logger = LoggerUtil.getLogger('MonitorScheduler')

    def __init__(self,events):
        """Remember the initial event list and build a non-daemonic scheduler.

        self.flag is True only when an initial event list was supplied;
        monitorListener uses it to decide whether to poll CDS instead.
        """
        self.scheduler = Scheduler(daemonic = False)
        self.events = events
        self.flag = False
        self.runflag = True
        if self.events !=None:
            self.flag = True

    def monitorListener(self):
         print self.events
         if self.flag == True:
            self.monitor_task_start(self.events)
            flag = False
         else:
             events = self.event_filter(CDSUtil.getEvents(CDSUtil))
             self.monitor_task_start(events)

    def start(self):
        """Poll monitorListener once a minute until stop() clears runflag."""
        print('MonitorScheduler start ...')
        while self.runflag:
            try:
                self.monitorListener()
            except Exception as e:
                # Keep the polling loop alive on any listener failure.
                print(e)
            time.sleep(60)
        # Earlier interval-job approach, kept for reference:
        #self.scheduler.add_interval_job('monitorListener',seconds=5)
        #self.scheduler.start()
    def stop(self):
         """Ask the polling loop in start() to exit after its current cycle."""
         self.runflag = False


    # Filtering duplicate events
    def event_filter(self,events):
        """Keep only events whose id differs from the one stored for their
        monitor, persist the kept list via ConfigUtil, and return it."""
        if events == None:
            return None
        fresh = []
        for ev in events:
            if ev.getEventId() != ConfigUtil().getEventId(ev.getMonitorId()):
                fresh.append(ev)
        ConfigUtil().setEvents(fresh)
        return fresh

    def monitor_task_start(self,events):
        """Spawn a MonitorTask for every event; tolerate a None list."""
        if events == None:
            return
        for ev in events:
            MonitorTask(ev).start()

    def err_listener(self,cls,event):
        """Log job errors and misses.

        NOTE(review): this definition is shadowed by a later zero-arg
        err_listener in the same class, and the (self, cls, event)
        signature does not match the single-argument listener protocol
        used elsewhere in this file — confirm which one is intended.
        """
        if event.exception:
            cls.logger.exception('%s error.', str(event.job))
        else:
            cls.logger.info('%s miss', str(event.job))

    def err_listener(self):
        self.scheduler.add_listener(self.err_listener, self.apscheduler.events.EVENT_JOB_ERROR | self.apscheduler.events.EVENT_JOB_MISSED)