def configure(self, **options):
    """Initialise the scheduler's collaborators.

    Creates three independent Redis connections (one each for the job
    store, the job stats tracker and the job sync queue), the thread
    pool, and default scheduling policy attributes.

    Raises:
        Exception: re-raised after logging when Redis is unreachable.
    """
    try:
        # Three separate clients so the job store, stats and sync queue
        # never share a connection.
        redis = StrictRedis(**settings.REDISES["default"])
        redis2 = StrictRedis(**settings.REDISES["default"])
        redis3 = StrictRedis(**settings.REDISES["default"])
    except Exception:
        # Log with traceback, then propagate: the scheduler cannot work
        # without Redis. (Was a bare `except:`; narrowed so SystemExit /
        # KeyboardInterrupt still propagate untouched.)
        logging.exception("cannot connect to redis")
        raise
    self._timezone = gettz("Asia/Chongqing")
    self._threadpool = ThreadPool()
    self._jobstore = JobStore(redis, self._timezone)
    self._jobstats = JobStats(redis2)
    self._jobsync = JobSync(redis3)
    # Scheduling policy defaults; individual jobs may override.
    self.misfire_grace_time = 1
    self.coalesce = True
    self.daemonic = True
def setUp(self):
    """Create a Redis client and a JobSync helper for each test."""
    connection = StrictRedis(**settings.REDISES['default'])
    self.redis = connection
    self.job_changes_pool_key = 'queue_job_changes'
    self.job_sync = JobSync(connection)
class TaskTest(TestCase):
    """Exercise Task persistence and its synchronisation with the
    redis-backed job-change queue."""

    def setUp(self):
        """Connect to redis and wrap the change queue in a JobSync."""
        self.redis = StrictRedis(**settings.REDISES['default'])
        self.job_changes_pool_key = 'queue_job_changes'
        self.job_sync = JobSync(self.redis)

    def tearDown(self):
        """Remove all tasks and every redis key the tests may touch."""
        Task.objects.all().delete()
        self.redis.delete(self.job_changes_pool_key)
        self.redis.delete('job_stats.1')

    def generateTask(self):
        """Build (but do not save) a canonical active interval task."""
        fixture = Task()
        fixture.name = 'Task1'
        fixture.run_time = '{"days":0, "hours":0, "minutes":10, "seconds":0}'
        fixture.run_entry = 'http://baidu.com'
        fixture.run_method = 'post'
        fixture.active = 1
        return fixture

    def testAddJob(self):
        """Saving a task queues an 'add' change and yields a runnable job."""
        fixture = self.generateTask()
        fixture.run_time = '{"days":0, "hours":0, "minutes":10, "seconds":0}'
        fixture.save()
        self.assertTrue(fixture.id > 0)
        generated = fixture.getJob()
        current = Task.objects.now()
        self.assertTrue(generated.compute_next_run_time(current) is not None)
        self.assertEqual(600, generated.trigger.interval_length)
        self.assertEqual(fixture.run_entry, generated.func)
        self.assertEqual(1, self.redis.llen(self.job_changes_pool_key))

    def testGetJob(self):
        """getJob() mirrors the task's id, name, entry and trigger."""
        fixture = self.generateTask()
        fixture.save()
        generated = fixture.getJob()
        self.assertEqual(fixture.id, generated.id)
        self.assertEqual(fixture.name, generated.name)
        self.assertEqual(fixture.run_entry, generated.func)
        self.assertEqual(str(fixture.getTrigger()), str(generated.trigger))

    def testDelete(self):
        """Deleting a saved task queues a 'remove' change."""
        fixture = self.generateTask()
        fixture.active = 1
        fixture.save()
        current = Task.objects.now()
        popped_id, popped_type, _ = self.job_sync.pop(current)
        self.assertEqual(fixture.id, popped_id)
        self.assertEqual('add', popped_type)
        remembered_id = fixture.id
        fixture.delete()
        popped_id, popped_type, _ = self.job_sync.pop(current)
        self.assertEqual(remembered_id, popped_id)
        self.assertEqual('remove', popped_type)

    def testUpdate(self):
        """Re-saving a changed task queues an 'update' change."""
        fixture = self.generateTask()
        fixture.save()
        current = Task.objects.now()
        # Drain the initial 'add' notification produced by the first save.
        self.job_sync.pop(current)
        fixture.run_time = '2013-09-10 00:00:00'
        fixture.type = 'date'
        fixture.save()
        popped_id, popped_type, _ = self.job_sync.pop(current)
        self.assertEqual(fixture.id, popped_id)
        self.assertEqual('update', popped_type)

    def testEnable(self):
        """enable() on an inactive task queues an 'add' change."""
        fixture = self.generateTask()
        fixture.active = 0
        fixture.save()
        self.assertEqual(0, self.job_sync.count())
        fixture.disable()
        self.assertEqual(0, self.job_sync.count())
        fixture.enable()
        current = Task.objects.now()
        popped_id, popped_type, _ = self.job_sync.pop(current)
        self.assertEqual(fixture.id, popped_id)
        self.assertEqual('add', popped_type)

    def testDisable(self):
        """disable() on an active task queues a 'remove' change."""
        fixture = self.generateTask()
        fixture.active = 1
        fixture.save()
        current = Task.objects.now()
        popped_id, popped_type, _ = self.job_sync.pop(current)
        self.assertEqual(fixture.id, popped_id)
        self.assertEqual('add', popped_type)
        # Enabling an already-active task must not queue anything.
        fixture.enable()
        self.assertEqual(0, self.job_sync.count())
        fixture.disable()
        popped_id, popped_type, _ = self.job_sync.pop(current)
        self.assertEqual(fixture.id, popped_id)
        self.assertEqual('remove', popped_type)

    def testRun(self):
        """run() increments the task's 'total' stats counter."""
        fixture = self.generateTask()
        fixture.save()
        self.redis.delete('job_stats.%d' % fixture.id)
        self.assertEqual(0, fixture.stats['total'])
        fixture.run()
        self.assertEqual(1, fixture.stats['total'])
class JobSyncTest(TestCase):
    """Tests for the JobSync redis-backed change queue."""

    def setUp(self):
        """Connect to redis and point a JobSync at the change queue."""
        self.redis = StrictRedis(**settings.REDISES['default'])
        self.sync = JobSync(self.redis)
        self.key = 'queue_job_changes'

    def tearDown(self):
        # Drop the queue so individual tests stay independent.
        self.redis.delete(self.key)

    def test_notifier(self):
        """A raw lpush onto the queue flips has_notifier() to true."""
        self.assertFalse(self.sync.has_notifier())
        self.redis.lpush(self.key, '1||add||None')
        self.assertTrue(self.sync.has_notifier())

    def test_push(self):
        """push() flips has_notifier() to true."""
        self.assertFalse(self.sync.has_notifier())
        self.sync.push(1, 'add')
        self.assertTrue(self.sync.has_notifier())

    def test_pop(self):
        """pop() yields id 0 on an empty queue and drains pushed items."""
        self.assertFalse(self.sync.has_notifier())
        zone = tz.gettz('Asia/Chongqing')
        moment = datetime.now(zone)
        popped = self.sync.pop(moment)
        self.assertEqual(0, popped[0])
        self.sync.push(1, 'add')
        self.assertTrue(self.sync.has_notifier())
        popped = self.sync.pop(moment)
        self.assertFalse(self.sync.has_notifier())
        self.assertEqual(1, popped[0])
        self.assertEqual('add', popped[1])

    def test_handle(self):
        # Intentionally empty in the original.
        pass
class Scheduler(object):
    """Redis-backed job scheduler.

    A main-loop thread fires due jobs on a thread pool, while a second
    thread polls a JobSync queue and applies job changes
    (add/update/remove) to the job store. Per-job outcomes are recorded
    through JobStats and broadcast to registered listeners.
    """

    def __init__(self, **options):
        self._wakeup = Event()
        self._jobstore = None
        self._threadpool = None
        self._jobstats = None
        self._jobsync = None
        self._jobsync_lock = Lock()
        self._jobstats_lock = Lock()
        self._jobstore_lock = Lock()
        self._listeners = []
        self._listeners_lock = Lock()
        self.configure(**options)

    def configure(self, **options):
        """Create Redis connections and the scheduler's collaborators.

        Raises:
            Exception: re-raised after logging when Redis is unreachable.
        """
        try:
            # Three separate clients so the job store, stats and sync
            # queue never share a connection.
            redis = StrictRedis(**settings.REDISES["default"])
            redis2 = StrictRedis(**settings.REDISES["default"])
            redis3 = StrictRedis(**settings.REDISES["default"])
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit /
            # KeyboardInterrupt propagate untouched. Still re-raised —
            # the scheduler cannot work without Redis.
            logging.exception("cannot connect to redis")
            raise
        self._timezone = gettz("Asia/Chongqing")
        self._threadpool = ThreadPool()
        self._jobstore = JobStore(redis, self._timezone)
        self._jobstats = JobStats(redis2)
        self._jobsync = JobSync(redis3)
        # Scheduling policy defaults; individual jobs may override.
        self.misfire_grace_time = 1
        self.coalesce = True
        self.daemonic = True

    def start(self):
        """Load persisted jobs and start the main-loop and sync threads."""
        logger.info("Scheduler is starting...")
        with self._jobstore_lock:
            self._jobstore.load_jobs(datetime.now(self._timezone))
        self._stopped = False
        self._thread = Thread(target=self._main_loop, name="APScheduler")
        self._thread.setDaemon(True)
        logger.info("start main loop thread")
        self._thread.start()
        self._sync_thread = Thread(target=self._sync_jobs, name="JobsSync")
        self._sync_thread.setDaemon(True)
        logger.info("start job sync thread")
        self._sync_thread.start()

    def shutdown(self, wait=True, shutdown_threadpool=True, close_jobstore=True):
        """Stop the scheduler and join its worker threads.

        Args:
            wait: passed through to the thread pool's shutdown.
            shutdown_threadpool: accepted for API compatibility; the
                thread pool is always shut down regardless.
            close_jobstore: when true, also close the job store.
        """
        if not self.running:
            return
        logger.info("Scheduler is stopping")
        self._stopped = True
        self._wakeup.set()  # unblock the main loop immediately
        self._threadpool.shutdown(wait)
        if self._sync_thread:
            self._sync_thread.join()
        if self._thread:
            self._thread.join()
        if close_jobstore:
            self._jobstore.close()
        logger.info("Scheduler is stoped")

    @property
    def running(self):
        """True while the main-loop thread is alive and not stopped."""
        return not self._stopped and self._thread and self._thread.isAlive()

    def _update_job(self, job, change_type):
        """Apply one change ('add'/'remove'/'update') to the job store."""
        with self._jobstore_lock:
            if change_type == "add":
                self._jobstore.add_job(job)
            elif change_type == "remove":
                self._jobstore.remove_job(job)
            elif change_type == "update":
                self._jobstore.update_job(job)
            else:
                # The original built this message and silently discarded
                # it; surface the bad change_type instead.
                logger.warning("invalid change_type %s", change_type)

    def _sync_jobs(self):
        """Poll the sync queue every 3s, draining all pending changes."""
        while not self._stopped:
            if self._jobsync.has_notifier():
                while True:
                    now = datetime.now(self._timezone)
                    job_id, change_type, job = self._jobsync.pop(now)
                    if job_id == 0:
                        # Sentinel: queue is drained.
                        break
                    logger.debug("pop %d|%s" % (job_id, change_type))
                    self._update_job(job, change_type)
                # Jobs changed — wake the main loop to reschedule.
                self._wakeup.set()
            time.sleep(3)

    def _main_loop(self):
        """Process due jobs until stopped, sleeping until the next
        run time (or until explicitly woken)."""
        logger.info("Scheduler is started")
        self._fire(SchedulerEvent(EVENT_SCHEDULER_START))
        self._wakeup.clear()
        while not self._stopped:
            now = datetime.now(self._timezone)
            next_wakeup_time = self._process_jobs(now)
            if next_wakeup_time is not None:
                wait_seconds = time_difference(next_wakeup_time, now)
                logger.debug("Next wakeup is due at %s (in %f seconds)", next_wakeup_time, wait_seconds)
                self._wakeup.wait(wait_seconds)
            else:
                logger.debug("No jobs; waiting until a job is added")
                self._wakeup.wait()
            self._wakeup.clear()
        logger.info("Scheduler has been shut down")
        self._fire(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))

    def _run_job(self, job, run_times):
        """Execute one job for each scheduled run time.

        Runs on a thread-pool worker. Misses are recorded when the run
        time has passed the misfire grace period or the job is already
        at its max concurrent instances; successes/failures update the
        stats and fire the corresponding events.
        """
        for run_time in run_times:
            difference = datetime.now(self._timezone) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                # Notify listeners about a missed run.
                event = JobEvent(EVENT_JOB_MISSED, job, run_time)
                self._fire(event)
                with self._jobstats_lock:
                    self._jobstats.miss(job.id)
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
            else:
                try:
                    job.add_instance()
                except MaxInstancesReachedError:
                    event = JobEvent(EVENT_JOB_MISSED, job, run_time)
                    self._fire(event)
                    with self._jobstats_lock:
                        self._jobstats.miss(job.id)
                    logger.warning(
                        'Execution of job "%s" skipped: maximum number of running instances reached (%d)',
                        job,
                        job.max_instances,
                    )
                    break
                logger.info('1Running job "%s" (scheduled at %s)', job, run_time)
                try:
                    retval = job.run()
                except Exception:
                    # BUG FIX: was sys.exec_info(), which does not exist
                    # and raised AttributeError, masking the job's own
                    # exception. Also narrowed from a bare `except:`.
                    exc, tb = sys.exc_info()[1:]
                    with self._jobstats_lock:
                        self._jobstats.fail(job.id)
                    event = JobEvent(EVENT_JOB_ERROR, job, run_time, exception=exc, traceback=tb)
                    self._fire(event)
                    logger.exception('Job "%s" raised an exception', job)
                else:
                    with self._jobstats_lock:
                        self._jobstats.succ(job.id)
                    event = JobEvent(EVENT_JOB_EXECUTED, job, run_time, retval=retval)
                    self._fire(event)
                    logger.info('Job "%s" executed successfully', job)
                logger.info('2runned job "%s" (scheduled at %s)', job, run_time)
                job.remove_instance()
                # With coalescing, one execution covers all pending runs.
                if job.coalesce:
                    break

    def _process_jobs(self, now):
        """Submit due jobs to the thread pool and reschedule them.

        Returns:
            The earliest next run time among all jobs, or None when
            there are no scheduled jobs.
        """
        logger.debug("processing jobs")
        next_wakeup_time = None
        with self._jobstore_lock:
            # Snapshot so the sync thread can mutate the store meanwhile.
            jobs = tuple(self._jobstore.jobs)
        for job in jobs:
            run_times = job.get_run_times(now)
            if run_times:
                self._threadpool.submit(self._run_job, job, run_times)
                # +1us so the run we just submitted is not recomputed.
                if job.compute_next_run_time(now + timedelta(microseconds=1)):
                    with self._jobstore_lock:
                        self._jobstore.update_job(job)
                else:
                    # No future runs: drop the job.
                    with self._jobstore_lock:
                        self._jobstore.remove_job(job)
            if not next_wakeup_time:
                next_wakeup_time = job.next_run_time
            elif job.next_run_time:
                next_wakeup_time = min(next_wakeup_time, job.next_run_time)
        logger.debug("processing jobs end")
        return next_wakeup_time

    def add_listener(self, callback, mask=EVENT_ALL):
        """Register an event listener for the events selected by mask."""
        with self._listeners_lock:
            self._listeners.append((callback, mask))

    def remove_listener(self, callback):
        """Remove a previously registered event listener."""
        with self._listeners_lock:
            for i, (cb, _) in enumerate(self._listeners):
                if callback == cb:
                    del self._listeners[i]

    def _fire(self, event):
        """Dispatch an event to every listener whose mask matches."""
        with self._listeners_lock:
            # Snapshot so callbacks may add/remove listeners safely.
            listeners = tuple(self._listeners)
        for cb, mask in listeners:
            if event.code & mask:
                try:
                    cb(event)
                except Exception:
                    # A faulty listener must not break dispatching.
                    logger.exception("Error notifying listener")