def init_scheduler(self):
    schedule_store = RAMJobStore()
    # Run the simulation step every 2 seconds
    # (positional arguments are weeks, days, hours, minutes, seconds).
    job_second = self.scheduler.add_interval_job(self.do_step, 0, 0, 0, 0, 2)
    schedule_store.add_job(job_second)
    self.scheduler.add_jobstore(schedule_store, 'Simulator scheduler', quiet=False)
def init_scheduler(self):
    schedule_store = RAMJobStore()
    # Write data every 15 seconds.
    job_second = self.scheduler.add_interval_job(self.do_step, 0, 0, 0, 0, 15)
    schedule_store.add_job(job_second)
    self.scheduler.add_jobstore(schedule_store, "Simulator scheduler", quiet=False)
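
# A minimal, self-contained sketch (assuming APScheduler 2.x) showing that the
# positional arguments used above map onto the keyword form
# add_interval_job(func, weeks=0, days=0, hours=0, minutes=0, seconds=0).
# The scheduler instance and do_step function below are illustrative only.
from apscheduler.scheduler import Scheduler

def do_step():
    print('step')

sched = Scheduler()
sched.start()
# Equivalent to add_interval_job(do_step, 0, 0, 0, 0, 15) in the snippet above.
sched.add_interval_job(do_step, seconds=15)
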
def start(self): """ Starts the scheduler in a new thread. In threaded mode (the default), this method will return immediately after starting the scheduler thread. In standalone mode, this method will block until there are no more scheduled jobs. """ if self.running: raise SchedulerAlreadyRunningError # Create a RAMJobStore as the default if there is no default job store if not 'default' in self._jobstores: self.add_jobstore(RAMJobStore(), 'default', True) # Schedule all pending jobs for job, jobstore in self._pending_jobs: self._real_add_job(job, jobstore, False) del self._pending_jobs[:] self._stopped = False if self.standalone: self._main_loop() else: self._thread = Thread(target=self._main_loop, name='APScheduler') self._thread.setDaemon(self.daemonic) self._thread.start()
def startConsumer(self, daemon=True, queues=[]):
    '''
    Enables this scheduler to "consume" (i.e. execute) jobs.

    Currently, this method must be called only **ONCE** for any given
    queue, as it will launch a thread that actually executes the jobs.
    The caller is in charge of ensuring this concurrency limitation; it is
    therefore recommended that `pscheduler` be used with a process manager
    (such as `daemontools <http://cr.yp.to/daemontools.html>`_) in
    conjunction with the configuration `scheduler.combined` set to
    `false`.

    :param daemon: Specifies whether or not the thread created for the
        consumer will be set to a daemon thread, i.e. if True (the
        default), it will stop as soon as the main and all non-daemon
        threads have stopped.
    '''
    self.broker.startConsumer(queues)
    log.info('starting APS job execution daemon')
    self.aps = apscheduler.scheduler.Scheduler()
    self.aps.add_jobstore(RAMJobStore(), self.ramstore)
    apsconf = addPrefix(self.confdict, 'apscheduler.')
    apsconf['apscheduler.standalone'] = 'false'
    self.aps.configure(apsconf, daemonic=daemon)
    self.aps.add_interval_job(housekeeping, args=(self.id,),
                              seconds=self.conf.housekeeping,
                              jobstore=self.ramstore)
    self.aps.add_listener(self._apsEvent)
    self.aps.start()
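
# A minimal sketch (assuming APScheduler 2.x) of the configuration style used
# above: configure() accepts a flat dict whose keys carry an 'apscheduler.'
# prefix, plus keyword overrides. The option values and the 'transient'
# jobstore alias below are illustrative only.
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.ram_store import RAMJobStore

config = {
    'apscheduler.standalone': 'false',        # run in a background thread
    'apscheduler.misfire_grace_time': '5',
}
aps = Scheduler()
aps.add_jobstore(RAMJobStore(), 'transient')
aps.configure(config, daemonic=True)
aps.start()
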
def test_jobstore(self):
    self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
    job = self.scheduler.add_date_job(lambda: None, datetime(2200, 7, 24),
                                      jobstore='dummy')
    eq_(self.scheduler.get_jobs(), [job])
    self.scheduler.remove_jobstore('dummy')
    eq_(self.scheduler.get_jobs(), [])
def setup(self):
    self.scheduler = Scheduler(threadpool=FakeThreadPool())
    self.scheduler.add_jobstore(RAMJobStore(), 'default')

    # Make the scheduler think it's running
    self.scheduler._thread = FakeThread()

    self.logstream = StringIO()
    self.loghandler = StreamHandler(self.logstream)
    self.loghandler.setLevel(ERROR)
    scheduler.logger.addHandler(self.loghandler)
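
# A minimal sketch (assuming APScheduler 2.x) of the log-capture idea used in
# the fixture above: attach a StreamHandler backed by an in-memory buffer so
# tests can assert on errors the scheduler logs. The module-level logger is the
# one referenced above via scheduler.logger.
from io import StringIO
from logging import StreamHandler, ERROR
import apscheduler.scheduler as scheduler

logstream = StringIO()
loghandler = StreamHandler(logstream)
loghandler.setLevel(ERROR)
scheduler.logger.addHandler(loghandler)
# ...after exercising the scheduler, inspect logstream.getvalue() for errors.
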
def start(self): """ Starts the scheduler in a new thread. """ if self.running: raise SchedulerAlreadyRunningError # Create a RAMJobStore as the default if there is no default job store if not 'default' in self._jobstores: self.add_jobstore(RAMJobStore(), 'default', True) # Schedule all pending jobs for job, jobstore in self._pending_jobs: self._real_add_job(job, jobstore, False) del self._pending_jobs[:] self._stopped = False self._thread = Thread(target=self._main_loop, name='APScheduler') self._thread.setDaemon(self.daemonic) self._thread.start()
def test_jobstore_twice(self):
    self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
    self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
def setup_class(cls):
    cls.jobstore = RAMJobStore()