def scheduleAgent(self):
    """Bootstrap this agent's scheduler on the first call; on later calls,
    apply the control action requested in ``config['schedulerStatus']``.

    First call: creates a ``BlockingScheduler``, registers ``self.execute``
    as an interval job (``runSchedule`` is in minutes) and blocks on
    ``start()``. Subsequent calls operate on the stored scheduler/job.
    """
    if not hasattr(self, 'scheduler'):
        # First invocation: create and start the (blocking) scheduler.
        agent_scheduler = BlockingScheduler()
        self.scheduler = agent_scheduler
        self.scheduledJob = agent_scheduler.add_job(
            self.execute, 'interval', seconds=60 * self.runSchedule)
        try:
            agent_scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            # Report the interruption as a system failure before exiting.
            self.publishHealthData(
                self.generateHealthData(systemFailure=True))
        return

    # Subsequent invocations: dispatch on the requested status.
    agent_scheduler = self.scheduler
    status = self.config.get('schedulerStatus', None)

    if status == 'UPDATE_SCHEDULE':
        # Re-arm the existing job with the (possibly changed) interval.
        self.scheduledJob.reschedule('interval',
                                     seconds=60 * self.runSchedule)
        return

    # Remaining actions take no arguments, so a lookup table suffices.
    actions = {
        'STOP': agent_scheduler.shutdown,
        'PAUSE': agent_scheduler.pause,
        'RESUME': agent_scheduler.resume,
        'RESTART': agent_scheduler.start,
    }
    action = actions.get(status)
    if action is not None:
        action()
    # Unknown/absent status: no action, matching previous behaviour.
def start_job():  # Start running the task (demo of the APScheduler job/scheduler API)
    scheduler = BlockingScheduler()
    # Trigger types: ``date``, ``interval`` or ``cron``
    job = scheduler.add_job(test_job, 'interval', seconds=3)
    job.remove()
    # NOTE(review): pausing/resuming a job that was just removed raises
    # JobLookupError — confirm this call order is intentional demo text.
    job.pause()
    job.resume()
    scheduler.start()
    # NOTE(review): BlockingScheduler.start() blocks; the calls below only run
    # after the scheduler has stopped — confirm this is the intended demo flow.
    scheduler.pause()
    scheduler.resume()
    scheduler.shutdown(wait=True)
class APSPlanner(BasePlanner):
    """
    Planner implementing scheduling using the |APS|_. Scheduling sets the :any:`APS Job <apscheduler.job.Job>` as links' job.

    .. |APS| replace:: Advanced Python Scheduler
    .. _APS: https://apscheduler.readthedocs.io/en/stable/index.html
    .. _configuring-scheduler: https://apscheduler.readthedocs.io/en/stable/userguide.html#configuring-the-scheduler
    """

    def __init__(self,
                 links: Union[Link, List[Link]] = None,
                 threads: int = 30,
                 executors_override: dict = None,
                 job_defaults_override: dict = None,
                 catch_exceptions: bool = False):
        """
        :type links: :any:`Link` or list[:any:`Link`]
        :param links: Links that should be added and scheduled. |default| :code:`None`

        :type threads: int
        :param threads: Number of threads available for job execution. Each link will be run on a separate thread job. |default| :code:`30`

        :type executors_override: dict
        :param executors_override: Overrides for executors option of `APS configuration <configuring-scheduler_>`__ |default| :code:`None`

        :type job_defaults_override: dict
        :param job_defaults_override: Overrides for job_defaults option of `APS configuration <configuring-scheduler_>`__ |default| :code:`None`

        :type catch_exceptions: bool
        :param catch_exceptions: Whether exceptions should be caught or halt the planner. |default| :code:`False`
        """
        self._threads = threads
        self._catch_exceptions = catch_exceptions
        # Use empty overrides when none are given so the ** merges are safe.
        if executors_override is None:
            executors_override = {}
        if job_defaults_override is None:
            job_defaults_override = {}
        executors = {
            'default': ThreadPoolExecutor(threads),
            **executors_override
        }
        job_defaults = {
            'coalesce': False,
            'max_instances': threads,
            **job_defaults_override
        }
        self._scheduler = BlockingScheduler(executors=executors,
                                            job_defaults=job_defaults,
                                            timezone='UTC')
        # self._scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults, timezone=utc)
        # Route APS job-error events into _on_exception.
        self._scheduler.add_listener(self._on_exception, EVENT_JOB_ERROR)
        super().__init__(links)

    def _on_exception(self, event):
        # Listener for EVENT_JOB_ERROR: re-raise the job's exception with the
        # offending job appended to the message, log it, and optionally shut
        # the planner down when exceptions are not being caught.
        if event.code is EVENT_JOB_ERROR:
            try:
                # It would be amazing if we could print the entire Link, but APS serialises Link.transfer to a string and that's all we have from Job's perspective.
                extra_info = f'\n\nRaised when executing {self._scheduler.get_job(event.job_id)}'
                exception_message = str(event.exception) + f'{extra_info}'
                traceback = event.exception.__traceback__
                # print(type(event.exception))
                # type(event.exception)('asdf')
                try:
                    raise type(event.exception)(
                        exception_message).with_traceback(traceback)
                except TypeError as type_exception:
                    # Some custom exceptions won't let you use the common constructor and will throw an error on initialisation. We catch these and just throw a generic RuntimeError.
                    raise Exception(exception_message).with_traceback(
                        traceback) from None
            except Exception as e:
                _LOGGER.exception(e)
            # NOTE(review): placed so it runs for every job error (not only
            # when the re-raise machinery fails) — the original indentation is
            # ambiguous in this view; confirm against upstream.
            if not self._catch_exceptions and self.running:
                self.shutdown(False)

    def _schedule(self, link: Link):
        """
        Schedule a link. Sets :any:`APS Job <apscheduler.job.Job>` as this link's job.

        :type link: :any:`Link`
        :param link: Link to be scheduled
        """
        # The link's interval (a timedelta) drives the APS IntervalTrigger.
        job = self._scheduler.add_job(
            link.transfer,
            trigger=IntervalTrigger(seconds=link.interval.total_seconds()))
        link.set_job(job)

    def _unschedule(self, link: Link):
        """
        Unschedule a link.

        :type link: :any:`Link`
        :param link: Link to be unscheduled
        """
        # Links that were never scheduled carry no job; nothing to remove.
        if link.job is not None:
            link.job.remove()
            link.set_job(None)

    def start(self):
        """
        Start this planner. Calls :any:`APS Scheduler.start() <apscheduler.schedulers.base.BaseScheduler.start>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.
        """
        super().start()

    def _start_planner(self):
        # Template-method hook invoked by BasePlanner.start().
        self._scheduler.start()

    def pause(self):
        """
        Pause this planner. Calls :any:`APScheduler.pause() <apscheduler.schedulers.base.BaseScheduler.pause>`
        """
        _LOGGER.info('Pausing %s' % str(self))
        self._scheduler.pause()

    def resume(self):
        """
        Resume this planner. Calls :any:`APScheduler.resume() <apscheduler.schedulers.base.BaseScheduler.resume>`
        """
        _LOGGER.info('Resuming %s' % str(self))
        self._scheduler.resume()

    def shutdown(self, wait: bool = True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished. |default| :code:`True`
        """
        super().shutdown(wait)

    def _shutdown_planner(self, wait: bool = True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished. |default| :code:`True`
        """
        self._scheduler.shutdown(wait=wait)

    @property
    def running(self):
        """
        Whether this planner is currently running. Changed by calls to :any:`start` and :any:`shutdown`.

        :return: State of this planner
        :rtype: bool
        """
        # Delegates to the APS scheduler's own state flag.
        return self._scheduler.state == STATE_RUNNING

    def __repr__(self):
        return 'APSPlanner(threads:%s)' % (self._threads)
class MonitorModel(object):
    # Wraps a BlockingScheduler with two cron jobs, a job-event listener and
    # file-based logging.

    def __init__(self, level=logging.INFO):
        self.scheduler = BlockingScheduler()
        self.logger = MonitorModel.initlogger(level=level)

    def listerner(self, event):
        '''
        When a job raises an exception, APScheduler silently swallows it and
        gives no indication — that is not good practice; we must be made aware
        of any fault in the program. APScheduler allows registering a listener
        that can monitor events such as a job raising an exception or a job
        missing its scheduled run time.
        '''
        if event.exception:
            self.logger.error('任务出错了!!!!!!')
            self.logger.error('暂停')
            self.scheduler.pause()
            # NOTE(review): the log below announces a process restart, but no
            # restart happens between pause() and resume() — confirm whether a
            # restart step is missing here.
            self.logger.error('重启进程')
            self.logger.error('继续')
            self.scheduler.resume()
        else:
            self.logger.info('任务照常运行...')

    def run(self):
        # Reminder at 22:00 on Sunday (day_of_week=6).
        condition_cron = CronTrigger(day_of_week=6, hour=22, minute=0)
        self.scheduler.add_job(main, condition_cron)
        # Penalty on Monday morning. NOTE(review): original comment said
        # 9:00, but the trigger fires at 9:30 — confirm which is intended.
        subscribe_cron = CronTrigger(day_of_week=0, hour=9, minute=30)
        self.scheduler.add_job(main, subscribe_cron)
        # Listen for both successful and failed job runs.
        self.scheduler.add_listener(self.listerner,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.scheduler._logger = logging
        self.scheduler.start()

    @staticmethod
    def initlogger(level):
        '''
        Initialise the logging configuration.
        '''
        # Step 1: create a logger.
        logging.basicConfig()
        logger = logging.getLogger()
        logger.setLevel(level)  # master switch for the log level
        # Step 2: create a handler that writes log records to a file.
        rq = time.strftime('%Y%m%d%H%M', time.localtime(
            time.time())) + 'SchedulerLogs'  # timestamped log-file name stem
        logfile = f'{rq}.log'
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(level)  # switch for the level written to the file
        # Step 3: define the handler's output format.
        formatter = logging.Formatter(
            "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
        )
        fh.setFormatter(formatter)
        # Step 4: attach the handler to the logger.
        logger.addHandler(fh)
        return logger
def my_job():
    # Demo job: print the current UNIX timestamp.
    print(time.time())


# NOTE(review): `scheduler` is not defined in this view — presumably created
# earlier in the file; confirm before running this snippet.
scheduler.start()
'''
'''
# remove job
job = scheduler.add_job(my_job, 'interval', seconds=1)
job.remove()
scheduler.add_job(my_job, 'interval', seconds=2, id='my_job_id')
scheduler.remove_job('my_job_id')
'''
'''
# pause/resume job
scheduler.pause()
scheduler.pause_job()
scheduler.resume()
scheduler.resume_job()
'''
'''
# get job list
scheduler.add_job(my_job, 'interval', seconds=2, id='123')
print(scheduler.get_job(job_id='123'))
print(scheduler.get_jobs())
'''
class ApsPlanner(BasePlanner):
    """
    Planner implementing scheduling using the |APS|_. Scheduling sets the :any:`APS Job <apscheduler.job.Job>` as links' job.

    .. |APS| replace:: Advanced Python Scheduler
    .. _APS: https://apscheduler.readthedocs.io/en/stable/index.html
    .. _configuring-scheduler: https://apscheduler.readthedocs.io/en/stable/userguide.html#configuring-the-scheduler
    """

    def __init__(self,
                 links: Union[Link, List[Link]] = None,
                 threads: int = 30,
                 executors_override: dict = None,
                 job_defaults_override: dict = None,
                 ignore_exceptions: bool = False,
                 catch_exceptions: bool = None,
                 immediate_transfer: bool = True):
        """
        :type links: :any:`Link` or list[:any:`Link`]
        :param links: Links that should be added and scheduled. |default| :code:`None`

        :type threads: int
        :param threads: Number of threads available for job execution. Each link will be run on a separate thread job. |default| :code:`30`

        :type executors_override: dict
        :param executors_override: Overrides for executors option of `APS configuration <configuring-scheduler_>`__ |default| :code:`None`

        :type job_defaults_override: dict
        :param job_defaults_override: Overrides for job_defaults option of `APS configuration <configuring-scheduler_>`__ |default| :code:`None`

        :type ignore_exceptions: bool
        :param ignore_exceptions: Whether exceptions should be ignored or halt the planner. |default| :code:`False`

        :type catch_exceptions: bool
        :param catch_exceptions: Deprecated alias of ``ignore_exceptions`` (renamed in 0.2.0). |default| :code:`None`

        :type immediate_transfer: :class:`bool`
        :param immediate_transfer: Whether planner should execute one transfer immediately upon starting. |default| :code:`True`
        """
        self._threads = threads
        # Use empty overrides when none are given so the ** merges are safe.
        if executors_override is None:
            executors_override = {}
        if job_defaults_override is None:
            job_defaults_override = {}
        executors = {
            'default': ThreadPoolExecutor(threads),
            **executors_override
        }
        job_defaults = {
            'coalesce': False,
            'max_instances': threads,
            **job_defaults_override
        }
        self._scheduler = BlockingScheduler(executors=executors,
                                            job_defaults=job_defaults,
                                            timezone='UTC')
        # self._scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults, timezone=utc)
        self._scheduler.add_listener(self._exception_listener, EVENT_JOB_ERROR)
        # Maps APS job id -> Link, so error events can be traced back to the
        # link that raised them.
        self.links_by_jobid = {}
        super().__init__(links=links,
                         ignore_exceptions=ignore_exceptions,
                         immediate_transfer=immediate_transfer)
        if catch_exceptions is not None:  # pragma: no cover
            # Honour the deprecated alias but warn callers to migrate.
            self._ignore_exceptions = catch_exceptions
            warnings.warn(
                '\'catch_exceptions\' was renamed to \'ignore_exceptions\' in version 0.2.0 and will be permanently changed in version 1.0.0',
                DeprecationWarning)

    def _exception_listener(self, event):
        # Forward APS job errors to the base-class exception handler together
        # with the link that owns the failing job.
        # Fixed: compare event codes with `==` — `is` relies on int identity,
        # which CPython only guarantees for small integers.
        if event.code == EVENT_JOB_ERROR:
            self._on_exception(event.exception,
                               self.links_by_jobid[event.job_id])

    def _schedule(self, link: Link):
        """
        Schedule a link. Sets :any:`APS Job <apscheduler.job.Job>` as this link's job.

        :type link: :any:`Link`
        :param link: Link to be scheduled
        """
        # The link's interval (a timedelta) drives the APS IntervalTrigger.
        job = self._scheduler.add_job(
            link.transfer,
            trigger=IntervalTrigger(seconds=link.interval.total_seconds()))
        link.set_job(job)
        self.links_by_jobid[job.id] = link

    def _unschedule(self, link: Link):
        """
        Unschedule a link.

        :type link: :any:`Link`
        :param link: Link to be unscheduled
        """
        # Links that were never scheduled carry no job; nothing to remove.
        if link.job is not None:
            link.job.remove()
            self.links_by_jobid.pop(link.job.id, None)
            link.set_job(None)

    def start(self):
        """
        Start this planner. Calls :any:`APS Scheduler.start() <apscheduler.schedulers.base.BaseScheduler.start>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.
        """
        super().start()

    def _start_planner(self):
        # Template-method hook invoked by BasePlanner.start().
        self._scheduler.start()

    def pause(self):
        """
        Pause this planner. Calls :any:`APScheduler.pause() <apscheduler.schedulers.base.BaseScheduler.pause>`
        """
        _LOGGER.info('Pausing %s' % str(self))
        self._scheduler.pause()

    def resume(self):
        """
        Resume this planner. Calls :any:`APScheduler.resume() <apscheduler.schedulers.base.BaseScheduler.resume>`
        """
        _LOGGER.info('Resuming %s' % str(self))
        self._scheduler.resume()

    def shutdown(self, wait: bool = True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished. |default| :code:`True`
        """
        super().shutdown(wait)

    def _shutdown_planner(self, wait: bool = True):
        """
        Shutdown this planner. Calls :any:`APScheduler.shutdown() <apscheduler.schedulers.base.BaseScheduler.shutdown>`

        :type wait: bool
        :param wait: Whether to wait until all currently executing jobs have finished. |default| :code:`True`
        """
        self._scheduler.shutdown(wait=wait)

    def purge(self):
        """
        Unschedule and clear all links. It can be used while planner is running.

        APS automatically removes jobs, so we only clear the links.
        """
        for link in self.links:
            # Fixed: skip links without a job (never scheduled or already
            # unscheduled) — dereferencing link.job.id would raise
            # AttributeError; _unschedule guards the same way.
            if link.job is None:
                continue
            self.links_by_jobid.pop(link.job.id, None)
            try:
                link.job.remove()
            except JobLookupError:
                pass  # APS already removed jobs if shutdown was called before purge, otherwise let's do it ourselves
            link.set_job(None)

        self._links = []

    @property
    def running(self):
        """
        Whether this planner is currently running. Changed by calls to :any:`start` and :any:`shutdown`.

        :return: State of this planner
        :rtype: bool
        """
        # Delegates to the APS scheduler's own state flag.
        return self._scheduler.state == STATE_RUNNING

    def __repr__(self):
        return 'ApsPlanner(threads:%s)' % (self._threads)