def __init__(self, min_interval, max_interval=None, start_date=None):
    if not isinstance(min_interval, timedelta):
        raise TypeError('min_interval must be a timedelta')
    if max_interval and not isinstance(max_interval, timedelta):
        raise TypeError('max_interval must be a timedelta')
    if start_date:
        start_date = convert_to_datetime(start_date)

    self.min_interval = min_interval
    self.min_interval_length = timedelta_seconds(self.min_interval)
    if self.min_interval_length == 0:
        self.min_interval = timedelta(seconds=1)
        self.min_interval_length = 1

    self.max_interval = max_interval
    self.max_interval_length = None
    if max_interval:
        self.max_interval_length = timedelta_seconds(self.max_interval)
        if self.max_interval_length == 0:
            self.max_interval = timedelta(seconds=1)
            self.max_interval_length = 1

    if (self.max_interval and
            self.min_interval_length > self.max_interval_length):
        raise ValueError('min_interval must not be greater than max_interval')

    self.next_max_date = None
    if start_date is None:
        self.next_min_date = datetime.now()
        if self.max_interval:
            self.next_max_date = datetime.now() + self.max_interval
    else:
        self.next_min_date = convert_to_datetime(start_date)
        if self.max_interval:
            self.next_max_date = self.next_min_date + self.max_interval
def _process_jobs(self):
    """Goodbye you apscheduler"""
    logger.debug('Looking for jobs to run')
    try:
        now = timezone.now()
        grace_time = timedelta(seconds=self.misfire_grace_time)
        with self._jobstores_lock:
            with transaction.atomic():
                # Use select_for_update inside a transaction to lock the rows.
                # Materialize the queryset with list() before modifying the rows;
                # mutating them while iterating a lazy queryset would re-evaluate
                # the query and loop forever.
                remind_list = list(Remind.objects.select_for_update().filter(
                    done=False, notify_time__range=(now - grace_time, now)).all())
                for rem in remind_list:
                    rem.notify_users()
                    rem.done = True
                    rem.save()

        next_remind = Remind.objects.filter(
            done=False, notify_time__gt=now - grace_time).order_by('notify_time').first()

        wait_seconds = None
        if next_remind:
            wait_seconds = max(timedelta_seconds(next_remind.notify_time - timezone.now()), 0)
            logger.debug('Next wake up is due at %s (in %f seconds)',
                         next_remind.notify_time.isoformat(), wait_seconds)
        else:
            logger.debug('No jobs, waiting until a job is added')

        return wait_seconds
    # This is a vital thread, DO NOT die
    except Exception:
        logger.exception('Error running scheduler job')
def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0,
             start_date=None, end_date=None, timezone=None):
    self.interval = timedelta(weeks=weeks, days=days, hours=hours,
                              minutes=minutes, seconds=seconds)
    self.interval_length = timedelta_seconds(self.interval)
    if self.interval_length == 0:
        self.interval = timedelta(seconds=1)
        self.interval_length = 1

    if timezone:
        self.timezone = astimezone(timezone)
    elif start_date and start_date.tzinfo:
        self.timezone = start_date.tzinfo
    elif end_date and end_date.tzinfo:
        self.timezone = end_date.tzinfo
    else:
        self.timezone = get_localzone()

    start_date = start_date or (datetime.now(self.timezone) + self.interval)
    self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
    self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')
def get_next_fire_time(self, start_date):
    if start_date < self.start_date:
        return self.start_date

    timediff_seconds = timedelta_seconds(start_date - self.start_date)
    next_interval_num = int(ceil(timediff_seconds / self.interval_length))
    return self.start_date + self.interval * next_interval_num
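# Worked example for the calculation above (a hedged sketch, not part of the original
# trigger): with a 30-second interval whose start_date is 12:00:00, asking for the next
# fire time from 12:01:10 means 70 seconds have elapsed, ceil(70 / 30) == 3, so the
# trigger fires next at 12:01:30. All values below are hypothetical, for illustration.
from datetime import datetime, timedelta
from math import ceil

interval = timedelta(seconds=30)
trigger_start = datetime(2024, 1, 1, 12, 0, 0)
asked_from = datetime(2024, 1, 1, 12, 1, 10)

elapsed = (asked_from - trigger_start).total_seconds()
next_fire = trigger_start + interval * int(ceil(elapsed / interval.total_seconds()))
assert next_fire == datetime(2024, 1, 1, 12, 1, 30)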
def __init__(
    self,
    client: AsyncClient,
    store,
    room_id: str,
    reminder_text: str,
    start_time: Optional[datetime] = None,
    timezone: Optional[str] = None,
    recurse_timedelta: Optional[timedelta] = None,
    cron_tab: Optional[str] = None,
    target_user: Optional[str] = None,
    alarm: bool = False,
):
    self.client = client
    self.store = store
    self.room_id = room_id
    self.timezone = timezone
    self.start_time = start_time
    self.reminder_text = reminder_text
    self.cron_tab = cron_tab
    self.recurse_timedelta = recurse_timedelta
    self.target_user = target_user
    self.alarm = alarm

    # Schedule the reminder

    # Determine how the reminder is triggered
    if cron_tab:
        # Set up a cron trigger
        trigger = CronTrigger.from_crontab(cron_tab, timezone=timezone)
    elif recurse_timedelta:
        # Use an interval trigger (runs multiple times)

        # If the start_time of this reminder was in daylight savings for this timezone,
        # and we are no longer in daylight savings, alter the start_time by the
        # appropriate offset.
        # TODO: Ideally this would be done dynamically instead of on reminder construction
        tz = pytz.timezone(timezone)
        start_time = start_time.astimezone(tz)
        now = tz.localize(datetime.now())
        if start_time.dst() != now.dst():
            start_time += start_time.dst()

        trigger = IntervalTrigger(
            # timedelta.seconds does NOT give you the timedelta converted to seconds;
            # use a method from apscheduler instead
            seconds=int(timedelta_seconds(recurse_timedelta)),
            start_date=start_time,
        )
    else:
        # Use a date trigger (runs only once)
        trigger = DateTrigger(run_date=start_time, timezone=timezone)

    # Note down the job for later manipulation
    self.job = SCHEDULER.add_job(self._fire, trigger=trigger)

    self.alarm_job = None
def _process_jobs(self):
    """
    Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
    to wait for the next round.
    """
    self._logger.debug('Looking for jobs to run')
    now = datetime.now(self.timezone)
    next_wakeup_time = None

    with self._jobstores_lock:
        for jobstore_alias, jobstore in six.iteritems(self._jobstores):
            for job in jobstore.get_due_jobs(now):
                # Look up the job's executor
                try:
                    executor = self._lookup_executor(job.executor)
                except:
                    self._logger.error(
                        'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                        'job store', job.executor, job)
                    self.remove_job(job.id, jobstore_alias)
                    continue

                run_times = job._get_run_times(now)
                run_times = run_times[-1:] if run_times and job.coalesce else run_times
                if run_times:
                    try:
                        executor.submit_job(job, run_times)
                    except MaxInstancesReachedError:
                        self._logger.warning(
                            'Execution of job "%s" skipped: maximum number of running '
                            'instances reached (%d)', job, job.max_instances)
                    except:
                        self._logger.exception('Error submitting job "%s" to executor "%s"',
                                               job, job.executor)

                    # Update the job if it has a next execution time.
                    # Otherwise remove it from the job store.
                    job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                    if job_next_run:
                        job._modify(next_run_time=job_next_run)
                        jobstore.update_job(job)
                    else:
                        self.remove_job(job.id, jobstore_alias)

            # Set a new next wakeup time if there isn't one yet or
            # the jobstore has an even earlier one
            jobstore_next_run_time = jobstore.get_next_run_time()
            if jobstore_next_run_time and (next_wakeup_time is None or
                                           jobstore_next_run_time < next_wakeup_time):
                next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

    # Determine the delay until this method should be called again
    if next_wakeup_time is not None:
        wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0)
        self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                           next_wakeup_time, wait_seconds)
    else:
        wait_seconds = None
        self._logger.debug('No jobs; waiting until a job is added')

    return wait_seconds
def assert_datetime_close(self, ldate, rdate, delta_seconds=0.1):
    """
    Compare two datetime objects to see if they are equal within delta_seconds.

    :param ldate: left-hand date
    :param rdate: right-hand date
    :param delta_seconds: tolerance in seconds
    """
    delta = ldate - rdate
    seconds = timedelta_seconds(delta)
    self.assertLessEqual(abs(seconds), delta_seconds)
def get_next_fire_time(self, start_date):
    if start_date < self.start_date:
        return self.start_date

    timediff_seconds = timedelta_seconds(start_date - self.start_date)
    next_interval_num = int(ceil(timediff_seconds / self.interval_length))
    if self.randomize is None:
        return self.start_date + self.interval * next_interval_num
    else:
        return (self.start_date + self.interval * next_interval_num +
                timedelta(seconds=self.randomize()))
def __init__(self, messagelist, hours=0, minutes=0, timezone=None):
    self.messagelist = messagelist
    self.interval = timedelta(hours=hours, minutes=minutes)
    self.interval_length = timedelta_seconds(self.interval)
    if self.interval_length == 0:
        # Fall back to a one-hour interval; keep interval_length consistent with it
        self.interval = timedelta(minutes=60)
        self.interval_length = timedelta_seconds(self.interval)

    if timezone:
        self.timezone = astimezone(timezone)
    else:
        self.timezone = get_localzone()
def get_next_fire_time(self, previous_fire_time, now):
    if previous_fire_time:
        next_fire_time = previous_fire_time + self.interval
    elif self.start_date > now:
        next_fire_time = self.start_date
    else:
        timediff_seconds = timedelta_seconds(now - self.start_date)
        next_interval_num = int(ceil(timediff_seconds / self.interval_length))
        next_fire_time = self.start_date + self.interval * next_interval_num

    if not self.end_date or next_fire_time <= self.end_date:
        return self.timezone.normalize(next_fire_time)
def __init__(self, interval, start_date=None):
    if not isinstance(interval, timedelta):
        raise TypeError('interval must be a timedelta')

    self.interval = interval
    self.interval_length = timedelta_seconds(self.interval)
    if self.interval_length == 0:
        self.interval = timedelta(seconds=1)
        self.interval_length = 1

    if start_date is None:
        self.start_date = datetime.now() + self.interval
    else:
        self.start_date = convert_to_datetime(start_date)
def __setstate__(self, state):
    # This is for compatibility with APScheduler 3.0.x
    if isinstance(state, tuple):
        state = state[1]

    if state.get('version', 1) > 1:
        raise ValueError(
            'Got serialized data for version %s of %s, but only version 1 can be handled' %
            (state['version'], self.__class__.__name__))

    self.timezone = state['timezone']
    self.start_date = state['start_date']
    self.end_date = state['end_date']
    self.interval = state['interval']
    self.interval_length = timedelta_seconds(self.interval)
def __init__(
    self,
    client: AsyncClient,
    store,
    room_id: str,
    reminder_text: str,
    start_time: Optional[datetime] = None,
    timezone: Optional[str] = None,
    recurse_timedelta: Optional[timedelta] = None,
    cron_tab: Optional[str] = None,
    target_user: Optional[str] = None,
    alarm: bool = False,
):
    self.client = client
    self.store = store
    self.room_id = room_id
    self.timezone = timezone
    self.start_time = start_time
    self.reminder_text = reminder_text
    self.cron_tab = cron_tab
    self.recurse_timedelta = recurse_timedelta
    self.target_user = target_user
    self.alarm = alarm

    # Schedule the reminder

    # Determine how the reminder is triggered
    if cron_tab:
        # Set up a cron trigger
        trigger = CronTrigger.from_crontab(cron_tab, timezone=timezone)
    elif recurse_timedelta:
        # Use an interval trigger (runs multiple times)
        trigger = IntervalTrigger(
            # timedelta.seconds does NOT give you the timedelta converted to seconds;
            # use a method from apscheduler instead
            seconds=int(timedelta_seconds(recurse_timedelta)),
            start_date=start_time,
            timezone=timezone,
        )
    else:
        # Use a date trigger (runs only once)
        trigger = DateTrigger(run_date=start_time, timezone=timezone)

    # Note down the job for later manipulation
    self.job = SCHEDULER.add_job(self._fire, trigger=trigger)

    self.alarm_job = None
def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0,
             start_date=None, end_date=None, timezone=None):
    self.interval = timedelta(weeks=weeks, days=days, hours=hours,
                              minutes=minutes, seconds=seconds)
    self.interval_length = timedelta_seconds(self.interval)
    if self.interval_length == 0:
        self.interval = timedelta(seconds=1)
        self.interval_length = 1

    if timezone:
        self.timezone = astimezone(timezone)
    elif start_date and start_date.tzinfo:
        self.timezone = start_date.tzinfo
    elif end_date and end_date.tzinfo:
        self.timezone = end_date.tzinfo
    else:
        self.timezone = get_localzone()

    start_date = start_date or (datetime.now(self.timezone) + self.interval)
    self.start_date = convert_to_ware_datetime(start_date, self.timezone, 'start_date')
    self.end_date = convert_to_ware_datetime(end_date, self.timezone, 'end_date')
def store_reminder(self, reminder: Reminder):
    """Store a new reminder in the database"""
    # timedelta.seconds does NOT give you the timedelta converted to seconds;
    # use a method from apscheduler instead
    if reminder.recurse_timedelta:
        delta_seconds = int(timedelta_seconds(reminder.recurse_timedelta))
    else:
        delta_seconds = None

    if reminder.start_time:
        # Remove the timezone from start_time. We only want to store the timezone str
        # in the database
        reminder.start_time = reminder.start_time.replace(tzinfo=None)

    self._execute(
        """
        INSERT INTO reminder (
            text,
            start_time,
            timezone,
            recurse_timedelta_s,
            cron_tab,
            room_id,
            target_user,
            alarm
        ) VALUES (
            ?, ?, ?, ?, ?, ?, ?, ?
        )
        """,
        (
            reminder.reminder_text,
            reminder.start_time.isoformat() if reminder.start_time else None,
            reminder.timezone,
            delta_seconds,
            reminder.cron_tab,
            reminder.room_id,
            reminder.target_user,
            reminder.alarm,
        ),
    )
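# Round-trip sketch (hedged; uses only names shown above plus apscheduler's public
# util helper): the integer persisted in recurse_timedelta_s can be turned back into
# a timedelta when the reminder is later loaded from the database.
from datetime import timedelta
from apscheduler.util import timedelta_seconds

delta = timedelta(days=1, hours=2)
stored = int(timedelta_seconds(delta))   # 93600, the value store_reminder persists
restored = timedelta(seconds=stored)     # reconstructs the original interval
assert restored == delta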
async def _fire(self):
    """Called when a reminder fires"""
    logger.debug("Reminder in room %s fired: %s", self.room_id, self.reminder_text)

    # Build the reminder message
    target = self.target_user if self.target_user else "@room"
    message = f"{target} {self.reminder_text}"

    # If this reminder has an alarm attached...
    if self.alarm:
        # Inform the user that an alarm will go off
        message += (
            f"\n\n(This reminder has an alarm. You will be reminded again in 5m. "
            f"Use the `{CONFIG.command_prefix}silence` command to stop)."
        )

        # Check that an alarm is not already ongoing from a previous run
        if not (self.room_id, self.reminder_text.upper()) in ALARMS:
            # Start alarming
            self.alarm_job = SCHEDULER.add_job(
                self._fire_alarm,
                trigger=IntervalTrigger(
                    # timedelta.seconds does NOT give you the timedelta converted to
                    # seconds. Use a method from apscheduler instead
                    seconds=int(timedelta_seconds(ALARM_TIMEDELTA)),
                ),
            )
            ALARMS[(self.room_id, self.reminder_text.upper())] = self.alarm_job

    # Send the message to the room
    await send_text_to_room(self.client, self.room_id, message, notice=False)

    # If this was a one-time reminder, cancel it and remove it from the reminders dict
    if not self.recurse_timedelta and not self.cron_tab:
        # We set cancel_alarm to False here, otherwise the associated alarms wouldn't even fire
        self.cancel(cancel_alarm=False)
def _process_jobs(self):
    """Goodbye you apscheduler"""
    logger.debug('Looking for jobs to run')
    try:
        now = timezone.now()
        grace_time = timedelta(seconds=self.misfire_grace_time)
        with self._jobstores_lock:
            with transaction.atomic():
                # Use select_for_update inside a transaction to lock the rows.
                # Materialize the queryset with list() before modifying the rows;
                # mutating them while iterating a lazy queryset would re-evaluate
                # the query and loop forever.
                remind_list = list(
                    Remind.objects.select_for_update().filter(
                        done=False, notify_time__range=(now - grace_time, now)).all())
                for rem in remind_list:
                    try:
                        rem.notify_users()
                    finally:
                        rem.done = True
                        rem.save()

        next_remind = Remind.objects.filter(
            done=False, notify_time__gt=now - grace_time).order_by('notify_time').first()

        wait_seconds = None
        if next_remind:
            wait_seconds = max(
                timedelta_seconds(next_remind.notify_time - timezone.now()), 0)
            logger.debug(
                'Next wake up is due at %s (in %f seconds)',
                next_remind.notify_time.isoformat(), wait_seconds)
        else:
            logger.debug('No jobs, waiting until a job is added')

        return wait_seconds
    # This is a vital thread, DO NOT die
    except Exception:
        logger.exception('Error running scheduler job')
def __init__(self, defaults, weeks=0, days=0, hours=0, minutes=0, seconds=0,
             start_date=None, timezone=None):
    """
    Triggers on specified intervals.

    :param weeks: number of weeks to wait
    :param days: number of days to wait
    :param hours: number of hours to wait
    :param minutes: number of minutes to wait
    :param seconds: number of seconds to wait
    :param start_date: when to first execute the job and start the counter
        (default is after the given interval)
    :param timezone: time zone for ``start_date``
    :type timezone: str or an instance of a :cls:`~datetime.tzinfo` subclass
    """
    timezone = timezone or defaults['timezone']
    self.interval = timedelta(weeks=weeks, days=days, hours=hours,
                              minutes=minutes, seconds=seconds)
    self.interval_length = timedelta_seconds(self.interval)
    if self.interval_length == 0:
        self.interval = timedelta(seconds=1)
        self.interval_length = 1

    if start_date is None:
        self.start_date = datetime.now(timezone) + self.interval
    else:
        self.start_date = convert_to_datetime(start_date, timezone, 'start_date')
def _process_jobs(self):
    """
    Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
    to wait for the next round.

    If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least
    ``jobstore_retry_interval`` seconds.
    """
    f = open("scheduler.lock", "wb")
    wait_seconds = None
    try:
        fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception as exc:
        f.close()
    else:
        if self.state == STATE_PAUSED:
            self._logger.debug('Scheduler is paused -- not processing jobs')
            return None

        self._logger.debug('Looking for jobs to run and os.pid is {%s}' % os.getpid())
        now = datetime.now(self.timezone)
        next_wakeup_time = None
        events = []
        uuids = str(uuid.uuid1())
        config = configs()
        is_single = config.get('is_single')
        if is_single and is_single != '1':
            if config.get('redis_pwd'):
                pool = redis.ConnectionPool(host=config.get('redis_host'),
                                            port=config.get('redis_port') or 6379,
                                            db=config.get('redis_db') or 0,
                                            password=config.get('redis_pwd'))
            else:
                pool = redis.ConnectionPool(host=config.get('redis_host'),
                                            port=config.get('redis_port') or 6379,
                                            db=config.get('redis_db') or 0)
            r = redis.Redis(connection_pool=pool)
            _result = r.get("SCHEDU_FLAG")
            if _result:
                if _result.decode() == uuids[0:2]:
                    return
            r.setnx("SCHEDU_FLAG", uuids[0:2])
            r.expire("SCHEDU_FLAG", 60 * 10)

        with self._jobstores_lock:
            for jobstore_alias, jobstore in six.iteritems(self._jobstores):
                try:
                    due_jobs = jobstore.get_due_jobs(now)
                    self._logger.info("due_jobs:%s os.pid: %s\n" % (len(due_jobs), os.getpid()))
                except Exception as e:
                    # Schedule a wakeup at least in jobstore_retry_interval seconds
                    self._logger.warning('Error getting due jobs from job store %r: %s',
                                         jobstore_alias, e)
                    retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval)
                    if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
                        next_wakeup_time = retry_wakeup_time
                    continue

                for job in due_jobs:
                    # Look up the job's executor
                    try:
                        executor = self._lookup_executor(job.executor)
                    except BaseException:
                        self._logger.error(
                            'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                            'job store', job.executor, job)
                        self.update_cron_info(job.id)
                        self.remove_job(job.id, jobstore_alias)
                        continue

                    run_times = job._get_run_times(now)
                    run_times = run_times[-1:] if run_times and job.coalesce else run_times
                    if run_times:
                        try:
                            executor.submit_job(job, run_times)
                        except MaxInstancesReachedError:
                            self._logger.warning(
                                'Execution of job "%s" skipped: maximum number of running '
                                'instances reached (%d)', job, job.max_instances)
                            event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                                       jobstore_alias, run_times)
                            events.append(event)
                        except BaseException:
                            self._logger.exception('Error submitting job "%s" to executor "%s"',
                                                   job, job.executor)
                        else:
                            event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id,
                                                       jobstore_alias, run_times)
                            events.append(event)

                        # Update the job if it has a next execution time.
                        # Otherwise remove it from the job store.
                        job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                        if job_next_run:
                            job._modify(next_run_time=job_next_run)
                            jobstore.update_job(job)
                        else:
                            try:
                                self.update_cron_info(job.id)
                                self.remove_job(job.id, jobstore_alias)
                            except Exception:
                                self._logger.error('Error removing job "%s" from executor "%s"',
                                                   job, job.executor)

                # Set a new next wakeup time if there isn't one yet or
                # the jobstore has an even earlier one
                jobstore_next_run_time = jobstore.get_next_run_time()
                if jobstore_next_run_time and (next_wakeup_time is None or
                                               jobstore_next_run_time < next_wakeup_time):
                    next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

        # Dispatch collected events
        for event in events:
            self._dispatch_event(event)

        # Determine the delay until this method should be called again
        if self.state == STATE_PAUSED:
            self._logger.debug('Scheduler is paused; waiting until resume() is called')
        elif next_wakeup_time is None:
            self._logger.debug('No jobs; waiting until a job is added')
        else:
            wait_seconds = min(max(timedelta_seconds(next_wakeup_time - now), 0), TIMEOUT_MAX)
            self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                               next_wakeup_time, wait_seconds)

        if is_single and is_single != '1':
            r.delete("SCHEDU_FLAG")
        fcntl.flock(f, fcntl.LOCK_UN)
        f.close()

    return wait_seconds
def test_timedelta_seconds():
    delta = timedelta(minutes=2, seconds=30)
    seconds = timedelta_seconds(delta)
    assert seconds == 150
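# For reference, a sketch of the conversion this test exercises (hedged: written from
# the documented behaviour, not copied from the library source). Unlike the bare
# timedelta.seconds attribute, which ignores the days component, timedelta_seconds
# returns the full length of the delta in seconds.
from datetime import timedelta

def timedelta_seconds_sketch(delta):
    return delta.days * 24 * 60 * 60 + delta.seconds + delta.microseconds / 1000000.0

assert timedelta_seconds_sketch(timedelta(minutes=2, seconds=30)) == 150
assert timedelta(days=1, seconds=30).seconds == 30  # .seconds alone drops the whole day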
def _process_jobs(self):
    """
    Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
    to wait for the next round.

    If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least
    ``jobstore_retry_interval`` seconds.
    """
    if self.state == STATE_PAUSED:
        self._logger.debug('Scheduler is paused -- not processing jobs')
        return None

    self._logger.debug('Looking for jobs to run')
    now = datetime.now(self.timezone)
    next_wakeup_time = None
    events = []

    with self._jobstores_lock:
        for jobstore_alias, jobstore in six.iteritems(self._jobstores):  # iterate over every job store
            try:
                due_jobs = jobstore.get_due_jobs(now)  # jobs in this store that are currently due
            except Exception as e:
                # Schedule a wakeup at least in jobstore_retry_interval seconds
                self._logger.warning('Error getting due jobs from job store %r: %s',
                                     jobstore_alias, e)
                retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval)
                if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
                    next_wakeup_time = retry_wakeup_time
                continue

            for job in due_jobs:
                # Look up the job's executor
                try:
                    executor = self._lookup_executor(job.executor)  # the executor assigned to this job
                except:
                    self._logger.error(
                        'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                        'job store', job.executor, job)
                    self.remove_job(job.id, jobstore_alias)
                    continue

                run_times = job._get_run_times(now)  # how many runs are due, measured against now
                run_times = run_times[-1:] if run_times and job.coalesce else run_times
                if run_times:
                    try:
                        executor.submit_job(job, run_times)  # hand the job to its executor
                    except MaxInstancesReachedError:
                        self._logger.warning(
                            'Execution of job "%s" skipped: maximum number of running '
                            'instances reached (%d)', job, job.max_instances)
                        event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                                   jobstore_alias, run_times)
                        events.append(event)
                    except:
                        self._logger.exception('Error submitting job "%s" to executor "%s"',
                                               job, job.executor)
                    else:
                        event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias,
                                                   run_times)
                        events.append(event)

                    # Update the job if it has a next execution time.
                    # Otherwise remove it from the job store.
                    job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)  # per-job next run time
                    if job_next_run:
                        job._modify(next_run_time=job_next_run)
                        jobstore.update_job(job)
                    else:
                        self.remove_job(job.id, jobstore_alias)

            # Set a new next wakeup time if there isn't one yet or
            # the jobstore has an even earlier one
            jobstore_next_run_time = jobstore.get_next_run_time()  # each job store also reports its next run time
            if jobstore_next_run_time and (next_wakeup_time is None or
                                           jobstore_next_run_time < next_wakeup_time):
                next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

    # Dispatch collected events
    for event in events:
        self._dispatch_event(event)

    # Determine the delay until this method should be called again
    if self.state == STATE_PAUSED:
        wait_seconds = None
        self._logger.debug('Scheduler is paused; waiting until resume() is called')
    elif next_wakeup_time is None:
        wait_seconds = None
        self._logger.debug('No jobs; waiting until a job is added')
    else:
        wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0)
        self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                           next_wakeup_time, wait_seconds)

    return wait_seconds
def _process_jobs(self):
    """
    Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
    to wait for the next round.
    """
    if self.state == STATE_PAUSED:
        self._logger.debug('Scheduler is paused -- not processing jobs')
        return None

    self._logger.debug('Looking for jobs to run')
    now = datetime.now(self.timezone)
    next_wakeup_time = None
    events = []

    with self._jobstores_lock:
        for jobstore_alias, jobstore in six.iteritems(self._jobstores):
            for job in jobstore.get_due_jobs(now):
                # Look up the job's executor
                try:
                    executor = self._lookup_executor(job.executor)
                except:
                    self._logger.error(
                        'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                        'job store', job.executor, job)
                    self.remove_job(job.id, jobstore_alias)
                    continue

                past_run_times = job._get_run_times(now)
                if job.coalesce:
                    for past_run_time in past_run_times[:-1]:
                        job_submission_id = self._add_job_submission(job)
                        # Insert missed job submissions for every job but the most recent...
                        self._update_job_submission(job_submission_id, jobstore_alias,
                                                    state='missed', submitted_at=past_run_time)
                    # When coalescing, we collapse jobs list to JUST the most recent!
                    past_run_times = past_run_times[-1:] if past_run_times else past_run_times

                for past_run_time in past_run_times:
                    difference = now - past_run_time
                    self._logger.warning('Run time of job "%s" was missed by %s',
                                         job, difference)
                    if job.misfire_grace_time is not None:
                        grace_time = timedelta(seconds=job.misfire_grace_time)
                        if difference > grace_time:
                            job_submission_id = self._add_job_submission(job)
                            self._update_job_submission(job_submission_id, jobstore_alias,
                                                        state='missed')
                            events.append(JobSubmissionEvent(EVENT_JOB_MISSED, job.id,
                                                             jobstore_alias, past_run_time))
                            continue
                    try:
                        executor.submit_job(job, past_run_time)
                    except MaxInstancesReachedError:
                        self._logger.warning(
                            'Execution of job "%s" skipped: maximum number of running '
                            'instances reached (%d)', job, job.max_instances)
                        event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                                   jobstore_alias, past_run_time)
                        events.append(event)
                    except:
                        self._logger.exception('Error submitting job "%s" to executor "%s"',
                                               job, job.executor)
                    else:
                        event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias,
                                                   past_run_time)
                        events.append(event)

                # Update the job if it has a next execution time.
                # Otherwise remove it from the job store.
                job_next_run = job.trigger.get_next_fire_time(
                    past_run_times[-1] if past_run_times else None, now)
                if job_next_run:
                    job._modify(next_run_time=job_next_run)
                    jobstore.update_job(job)
                else:
                    self.remove_job(job.id, jobstore_alias)

            # Set a new next wakeup time if there isn't one yet or
            # the jobstore has an even earlier one
            jobstore_next_run_time = jobstore.get_next_run_time()
            if jobstore_next_run_time and (next_wakeup_time is None or
                                           jobstore_next_run_time < next_wakeup_time):
                next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

    # Dispatch collected events
    for event in events:
        self._dispatch_event(event)

    # Determine the delay until this method should be called again
    if self.state == STATE_PAUSED:
        wait_seconds = None
        self._logger.debug('Scheduler is paused; waiting until resume() is called')
    elif next_wakeup_time is None:
        wait_seconds = None
        self._logger.debug('No jobs; waiting until a job is added')
    else:
        wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0)
        self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                           next_wakeup_time, wait_seconds)

    return wait_seconds
class BaseScheduler(six.with_metaclass(ABCMeta)):
    """
    Abstract base class for all schedulers.

    Takes the following keyword arguments:
    """

    _stopped = True

    #
    # Public API
    #

    def __init__(self, gconfig={}, **options):
        super(BaseScheduler, self).__init__()
        self._listeners = []
        self._listeners_lock = self._create_lock()
        self._pending_jobs = []
        self._executor_lock = self._create_lock()
        self._executor = None
        self._jobstore_lock = self._create_lock()
        self._jobstore = None
        self._dispatcher_lock = self._create_lock()
        self._dispatcher = None
        self.configure(gconfig, **options)

    def configure(self, gconfig={}, prefix='apscheduler.', **options):
        """
        Reconfigures the scheduler with the given options. Can only be done when the scheduler
        isn't running.

        :param dict gconfig: a "global" configuration dictionary whose values can be overridden
            by keyword arguments to this method
        :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with
            this string (pass an empty string or ``None`` to use all keys)
        :raises SchedulerAlreadyRunningError: if the scheduler is already running
        """
        if self.running:
            raise SchedulerAlreadyRunningError

        # If a non-empty prefix was given, strip it from the keys in the global configuration dict
        if prefix:
            prefixlen = len(prefix)
            gconfig = dict((key[prefixlen:], value) for key, value in six.iteritems(gconfig)
                           if key.startswith(prefix))

        # Create a structure from the dotted options (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}})
        config = {}
        for key, value in six.iteritems(gconfig):
            parts = key.split('.')
            parent = config
            key = parts.pop(0)
            while parts:
                parent = parent.setdefault(key, {})
                key = parts.pop(0)
            parent[key] = value

        # Override any options with explicit keyword arguments
        config.update(options)
        self._configure(config)

    @abstractmethod
    def start(self):
        """
        Starts the scheduler. The details of this process depend on the implementation.

        :raises SchedulerAlreadyRunningError: if the scheduler is already running
        """
        if self.running:
            raise SchedulerAlreadyRunningError

        up = True
        with self._executor_lock:
            if self._executor:
                up = up and self._executor.start(self, 'default')
                if not up:
                    logging.error('executor failed to start')

        with self._dispatcher_lock:
            if self._dispatcher:
                up = up and self._dispatcher.start()
                if not up:
                    logging.error('_dispatcher failed to start')

        with self._jobstore_lock:
            if self._jobstore:
                up = up and self._jobstore.start(self, 'default')
                if not up:
                    logging.error('_jobstore failed to start')
            for job, jobstore_alias, replace_existing in self._pending_jobs:
                self._real_add_job(job, 'default', replace_existing, False)
            del self._pending_jobs[:]

        if up:
            self._stopped = False
            logging.info('Scheduler started')
            # Notify listeners that the scheduler has been started
            self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_START))
        else:
            raise SchedulerInitError()

    @abstractmethod
    def shutdown(self, wait=True):
        """
        Shuts down the scheduler. Does not interrupt any currently running jobs.

        :param bool wait: ``True`` to wait until all currently executing jobs have finished
        :raises SchedulerNotRunningError: if the scheduler has not been started yet
        """
        if not self.running:
            raise SchedulerNotRunningError

        self._stopped = True
        self._dispatcher.shut_down()
        self._executor.shutdown(wait)
        self._jobstore.shutdown()
        logging.info('Scheduler has been shut down')
        self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))

    @property
    def running(self):
        return not self._stopped

    def add_executor(self, executor, alias='default', **executor_opts):
        """
        Adds an executor to this scheduler.
        Any extra keyword arguments will be passed to the executor plugin's constructor, assuming
        that the first argument is the name of an executor plugin.

        :param str|unicode|apscheduler.executors.base.BaseExecutor executor: either an executor
            instance or the name of an executor plugin
        :param str|unicode alias: alias for the scheduler
        :raises ValueError: if there is already an executor by the given alias
        """
        if not self._executor:
            with self._executor_lock:
                if isinstance(executor, BaseExecutor):
                    self._executor = executor
                elif isinstance(executor, six.string_types):
                    self._executor = executor = self._create_plugin_instance(
                        'executor', executor, executor_opts)
                else:
                    raise TypeError(
                        'Expected an executor instance or a string, got %s instead' %
                        executor.__class__.__name__)

                # Start the executor right away if the scheduler is running
                if self.running:
                    executor.start(self, 'default')

        self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_ADDED, alias))

    def remove_executor(self, alias, shutdown=True):
        """
        Removes the executor by the given alias from this scheduler.

        :param str|unicode alias: alias of the executor
        :param bool shutdown: ``True`` to shut down the executor after removing it
        """
        executor = self._executor
        self._executor = None
        if shutdown and executor:
            executor.shutdown()

        self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_REMOVED, alias))

    def add_jobstore(self, jobstore, alias='default', **jobstore_opts):
        if not self._jobstore:
            with self._jobstore_lock:
                if isinstance(jobstore, BaseJobStore):
                    self._jobstore = jobstore
                    logging.info("new jobstore alias=%s type=%s" % (alias, str(type(jobstore))))
                elif isinstance(jobstore, six.string_types):
                    self._jobstore = jobstore = self._create_plugin_instance(
                        'jobstore', jobstore, jobstore_opts)
                else:
                    raise TypeError(
                        'Expected a job store instance or a string, got %s instead' %
                        jobstore.__class__.__name__)

                # Start the job store right away if the scheduler is running
                if self.running:
                    jobstore.start(self, alias)
        else:
            logging.warn('job store already exists; skipping re-initialization')

        # Notify listeners that a new job store has been added
        self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_ADDED, alias))

        # Notify the scheduler so it can scan the new job store for jobs
        if self.running:
            self.wakeup()

    def remove_jobstore(self, alias, shutdown=True):
        """
        Removes the job store by the given alias from this scheduler.

        :param str|unicode alias: alias of the job store
        :param bool shutdown: ``True`` to shut down the job store after removing it
        """
        with self._jobstore_lock:
            jobstore = self._jobstore
            self._jobstore = None
            if shutdown and jobstore:
                jobstore.shutdown()

        self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_REMOVED, alias))

    def add_listener(self, callback, mask=EVENT_ALL):
        """
        add_listener(callback, mask=EVENT_ALL)

        Adds a listener for scheduler events.

        When a matching event occurs, ``callback`` is executed with the event object as its sole
        argument. If the ``mask`` parameter is not provided, the callback will receive events of
        all types.

        :param callback: any callable that takes one argument
        :param int mask: bitmask that indicates which events should be listened to

        .. seealso:: :mod:`apscheduler.events`
        .. seealso:: :ref:`scheduler-events`
        """
        with self._listeners_lock:
            self._listeners.append((callback, mask))

    def remove_listener(self, callback):
        """Removes a previously added event listener."""
        with self._listeners_lock:
            for i, (cb, _) in enumerate(self._listeners):
                if callback == cb:
                    del self._listeners[i]

    def add_job(self, func, trigger=None, args=None, kwargs=None, id=None, name=None,
                misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined,
                next_run_time=undefined, jobstore='default', executor='default',
                replace_existing=False, conf=None, **trigger_args):
        """
        add_job(func, trigger=None, args=None, kwargs=None, id=None, name=None, \
            misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, \
            next_run_time=undefined, jobstore='default', executor='default', \
            replace_existing=False, **trigger_args)

        Adds the given job to the job list and wakes up the scheduler if it's already running.

        Any option that defaults to ``undefined`` will be replaced with the corresponding default
        value when the job is scheduled (which happens when the scheduler is started, or
        immediately if the scheduler is already running).

        The ``func`` argument can be given either as a callable object or a textual reference in
        the ``package.module:some.object`` format, where the first half (separated by ``:``) is an
        importable module and the second half is a reference to the callable object, relative to
        the module.

        The ``trigger`` argument can either be:
          #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which
             case any extra keyword arguments to this method are passed on to the trigger's
             constructor
          #. an instance of a trigger class

        :param func: callable (or a textual reference to one) to run at the given time
        :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when
            ``func`` is called
        :param list|tuple args: list of positional arguments to call func with
        :param dict kwargs: dict of keyword arguments to call func with
        :param str|unicode id: explicit identifier for the job (for modifying it later)
        :param str|unicode name: textual description of the job
        :param int misfire_grace_time: seconds after the designated run time that the job is
            still allowed to be run
        :param bool coalesce: run once instead of many times if the scheduler determines that the
            job should be run more than once in succession
        :param int max_instances: maximum number of concurrently running instances allowed for
            this job
        :param datetime next_run_time: when to first run the job, regardless of the trigger (pass
            ``None`` to add the job as paused)
        :param str|unicode jobstore: alias of the job store to store the job in
        :param str|unicode executor: alias of the executor to run the job with
        :param bool replace_existing: ``True`` to replace an existing job with the same ``id``
            (but retain the number of runs from the existing one)
        :rtype: Job
        """
        job_kwargs = {
            'trigger': trigger,
            'executor': executor,
            'func': func,
            'args': tuple(args) if args is not None else (),
            'kwargs': dict(kwargs) if kwargs is not None else {},
            'id': id,
            'name': name,
            'misfire_grace_time': misfire_grace_time,
            'coalesce': coalesce,
            'max_instances': max_instances,
            'next_run_time': next_run_time
        }
        job_kwargs = dict((key, value) for key, value in six.iteritems(job_kwargs)
                          if value is not undefined)
        job = Job(self, **job_kwargs)
        job.conf = conf

        # Don't really add jobs to job store before the scheduler is up and running
        with self._jobstore_lock:
            if not self.running:
                self._pending_jobs.append((job, jobstore, replace_existing))
                logging.info(
                    'Adding job tentatively -- it will be properly scheduled when the scheduler '
                    'starts')
            else:
                logging.info('Adding job now; it will run as soon as its schedule is due')
                self._real_add_job(job, jobstore, replace_existing, True)

        return job

    def scheduled_job(self, trigger, args=None, kwargs=None, id=None, name=None,
                      misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined,
                      next_run_time=undefined, jobstore='default', executor='default',
                      **trigger_args):
        """
        scheduled_job(trigger, args=None, kwargs=None, id=None, name=None, \
            misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, \
            next_run_time=undefined, jobstore='default', executor='default', **trigger_args)

        A decorator version of :meth:`add_job`, except that ``replace_existing`` is always
        ``True``.

        .. important:: The ``id`` argument must be given if scheduling a job in a persistent job
            store. The scheduler cannot, however, enforce this requirement.
        """
        logging.info("scheduled_job called")

        def inner(func):
            self.add_job(func, trigger, args, kwargs, id, name, misfire_grace_time, coalesce,
                         max_instances, next_run_time, jobstore, executor, True, **trigger_args)
            return func

        return inner

    def modify_job(self, job_id, jobstore=None, **changes):
        """
        Modifies the properties of a single job. Modifications are passed to this method as extra
        keyword arguments.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        """
        self._dispatch_event(JobEvent(EVENT_JOB_MODIFIED, job_id, self._jobstore))

        # Wake up the scheduler since the job's next run time may have been changed
        self.wakeup()

    def reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args):
        """
        Constructs a new trigger for a job and updates its next run time.

        Extra keyword arguments are passed directly to the trigger's constructor.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :param trigger: alias of the trigger type or a trigger instance
        """
        jobstore = jobstore if jobstore else self._jobstore
        logging.info("reschedule_job called")
        now = datetime.now(self.timezone)
        next_run_time = trigger.get_next_fire_time(None, now)
        self.modify_job(job_id, jobstore, trigger=trigger, next_run_time=next_run_time)

    def pause_job(self, job_id, jobstore=None):
        """
        Causes the given job not to be executed until it is explicitly resumed.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        """
        self.modify_job(job_id, jobstore, next_run_time=None)

    def resume_job(self, job_id, jobstore=None):
        """
        Resumes the schedule of the given job, or removes the job if its schedule is finished.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        """
        jobstore = jobstore if jobstore else self._jobstore
        with self._jobstore_lock:
            job, jobstore = self._lookup_job(job_id, jobstore)
            now = datetime.now(self.timezone)
            next_run_time = job.trigger.get_next_fire_time(None, now)
            if next_run_time:
                self.modify_job(job_id, jobstore, next_run_time=next_run_time)
            else:
                self.remove_job(job.id, jobstore)

    def get_jobs(self, jobstore=None, pending=None):
        """
        Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled
        jobs, either from a specific job store or from all of them.
        :param str|unicode jobstore: alias of the job store
        :param bool pending: ``False`` to leave out pending jobs (jobs that are waiting for the
            scheduler start to be added to their respective job store), ``True`` to only include
            pending jobs, anything else to return both
        :rtype: list[Job]
        """
        logging.info("get_jobs called")
        jobstore = jobstore if jobstore else self._jobstore
        with self._jobstore_lock:
            jobs = []
            if pending is not False:
                for job, alias, replace_existing in self._pending_jobs:
                    jobs.append(job)

            if pending is not True and jobstore:
                jobs.extend(jobstore.get_all_jobs())

            return jobs

    def get_job(self, job_id, jobstore=None):
        """
        Returns the Job that matches the given ``job_id``.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that most likely contains the job
        :return: the Job by the given ID, or ``None`` if it wasn't found
        :rtype: Job
        """
        jobstore = jobstore if jobstore else self._jobstore
        with self._jobstore_lock:
            try:
                return self._lookup_job(job_id, jobstore)[0]
            except JobLookupError:
                return

    def remove_job(self, job_id, jobstore=None):
        """
        Removes a job, preventing it from being run any more.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :raises JobLookupError: if the job was not found
        """
        jobstore = jobstore if jobstore else self._jobstore
        with self._jobstore_lock:
            # Check if the job is among the pending jobs
            for i, (job, jobstore_alias, replace_existing) in enumerate(self._pending_jobs):
                if job.id == job_id:
                    del self._pending_jobs[i]
                    break
            else:
                # Otherwise, remove it from the job store
                jobstore.remove_job(job_id)

        # Notify listeners that a job has been removed
        event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore)
        self._dispatch_event(event)

        logging.info('Removed job %s', job_id)

    def remove_all_jobs(self, jobstore=None):
        """
        Removes all jobs from the specified job store, or all job stores if none is given.

        :param str|unicode jobstore: alias of the job store
        """
        with self._jobstore_lock:
            self._pending_jobs = []
            self._jobstore.remove_all_jobs()

        self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore))

    def print_jobs(self, jobstore=None, out=None):
        """
        print_jobs(jobstore=None, out=sys.stdout)

        Prints out a textual listing of all jobs currently scheduled on either all job stores or
        just a specific one.

        :param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores
        :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is
            given)
        """
        out = out or sys.stdout
        with self._jobstore_lock:
            if self._pending_jobs:
                print(six.u('Pending jobs:'), file=out)
                for job, jobstore_alias, replace_existing in self._pending_jobs:
                    print(six.u('    %s') % job, file=out)

            print(six.u('Jobstore %s:') % jobstore, file=out)
            jobs = self._jobstore.get_all_jobs()
            if jobs:
                for job in jobs:
                    print(six.u('    %s') % job, file=out)
            else:
                print(six.u('    No scheduled jobs'), file=out)

    @abstractmethod
    def wakeup(self):
        """
        Notifies the scheduler that there may be jobs due for execution.

        Triggers :meth:`_process_jobs` to be run in an implementation specific manner.
        """

    #
    # Private API
    #

    def _configure(self, config):
        # Set general options
        logging = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler')
        # handler = logging.handlers.RotatingFileHandler('/opt/logs/windmill/app.log',
        #                                                maxBytes=104857600, backupCount=10,
        #                                                encoding='utf_8')
        # handler.setLevel(logging.INFO)
        # handler.setFormatter(logging.Formatter(
        #     '%(asctime)s %(levelname)s: %(message)s [in %(name)s:%(lineno)d]'))
        # logging.addHandler(handler)
        # self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()
        self.timezone = get_localzone()

        # Set the job defaults
        job_defaults = config.get('job_defaults', {})
        self._job_defaults = {
            'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)),
            'coalesce': asbool(job_defaults.get('coalesce', True)),
            'max_instances': asint(job_defaults.get('max_instances', 1))
        }

    def _create_default_executor(self):
        """Creates a default executor, specific to the particular scheduler type."""
        return ThreadPoolExecutor()

    def _create_default_jobstore(self):
        """Creates a default job store, specific to the particular scheduler type."""
        return MysqlJobStore()

    def _lookup_job(self, job_id, jobstore=None):
        """
        Finds a job by its ID.

        :type job_id: str
        :param str jobstore_alias: alias of a job store to look in
        :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case
            of a pending job)
        :raises JobLookupError: if no job by the given ID is found.
        """
        store = jobstore or self._jobstore

        # Check if the job is among the pending jobs
        for job, alias, replace_existing in self._pending_jobs:
            if job.id == job_id:
                return job, None

        # Look in the job store
        job = store.lookup_job(job_id)
        if job is not None:
            return job, 'default'

        raise JobLookupError(job_id)

    def _dispatch_event(self, event):
        """
        Dispatches the given event to interested listeners.

        :param SchedulerEvent event: the event to send
        """
        with self._listeners_lock:
            listeners = tuple(self._listeners)

        for cb, mask in listeners:
            if event.code & mask:
                try:
                    cb(event)
                except:
                    logging.exception('Error notifying listener')

    def _real_add_job(self, job, jobstore_alias, replace_existing, wakeup):
        """
        :param Job job: the job to add
        :param bool replace_existing: ``True`` to use update_job() in case the job already exists
            in the store
        :param bool wakeup: ``True`` to wake up the scheduler after adding the job
        """
        # Fill in undefined values with defaults
        replacements = {}
        for key, value in six.iteritems(self._job_defaults):
            if not hasattr(job, key):
                replacements[key] = value

        # Calculate the next run time if there is none defined
        if not hasattr(job, 'next_run_time'):
            now = datetime.now(self.timezone)
            replacements['next_run_time'] = job.trigger.get_next_fire_time(None, now)

        # Apply any replacements
        job._modify(**replacements)

        # Add the job to the given job store
        try:
            self._jobstore.add_job(job)
        except ConflictingIdError:
            if replace_existing:
                print("failed to add job to store because the id already exists; "
                      "trying to replace it instead.")
                logging.error("failed to add job to store because the id already exists; "
                              "trying to replace it instead.")
                self._jobstore.update_job(job)
            else:
                raise

        # Mark the job as no longer pending
        job._jobstore_alias = jobstore_alias

        # Notify listeners that a new job has been added
        event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias)
        self._dispatch_event(event)

        logging.info('Added job "%s" to job store "%s"', job.name, jobstore_alias)

        # Notify the scheduler about the new job
        if wakeup:
            self.wakeup()

    def _create_plugin_instance(self, type_, alias, constructor_kwargs):
        """Creates an instance of the given plugin type, loading the plugin first if necessary."""
        return type_(**constructor_kwargs)

    def _create_trigger(self, trigger, trigger_args):
        return trigger(**trigger_args)

    def _create_lock(self):
        """Creates a reentrant lock object."""
        return RLock()

    def _process_jobs(self):
        """
        Iterates through jobs in every jobstore, starts jobs that are due and figures out how
        long to wait for the next round.
        """
        if not self._executor:
            logging.error('_executor init error')
            raise SchedulerInitError()
        if not self._dispatcher:
            logging.error('_dispatcher init error')
            raise SchedulerInitError()
        if not self._jobstore:
            logging.error('_jobstore init error')
            raise SchedulerInitError()

        logging.debug('Looking for jobs to run')
        now = datetime.now(self.timezone)
        next_wakeup_time = None

        with self._jobstore_lock:
            for job in self._jobstore.get_due_jobs(now):
                run_times = job._get_run_times(now)
                # run_times = run_times[-1:] if run_times and job.coalesce else run_times
                # restore_strategy 0: restore just one missed run; 1: restore every missed run
                run_times = (run_times[-1:] if len(run_times) > 1 and
                             job.conf.restore_strategy == 0 else run_times)
                # logging.info('run_times after store_strategy processed: %s %s'
                #              % (run_times, job.conf.restore_strategy))
                if run_times:
                    code = None
                    try:
                        logging.info("dispatch_job jobs %s cmd=%s run_time=%s" %
                                     (job.id, job.conf.cmd, run_times[-1]))
                        code = self._dispatcher.dispatch(job, run_times)
                    except MaxInstancesReachedError:
                        logging.warning(
                            'Execution of job "%s" skipped: maximum number of running instances '
                            'reached (%d)', job, job.max_instances)
                    except Exception as e:
                        logging.exception('Error dispatching job "%s": "%s"', job.id, str(e))

                    if code == DispatchCode.DONE:
                        # Update the job if it has a next execution time.
                        # Otherwise remove it from the job store.
                        job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                        if job_next_run:
                            job._modify(next_run_time=job_next_run)
                            self._jobstore.update_job(job)
                        else:
                            self._jobstore.remove_job(job.id)
                            from apscheduler.history import add_log
                            add_log(job.conf,
                                    output='job will NOT be run any more, so remove it.')
                            logging.warn('job will NOT be run any more, so remove it. job_id=%s'
                                         % job.conf.id)

            # Set a new next wakeup time if there isn't one yet or the jobstore has an even
            # earlier one
            jobstore_next_run_time = self._jobstore.get_next_run_time()
            logging.debug("jobstore_next_run_time %s " % jobstore_next_run_time)
            if jobstore_next_run_time and (next_wakeup_time is None or
                                           jobstore_next_run_time < next_wakeup_time):
                next_wakeup_time = jobstore_next_run_time

        # Determine the delay until this method should be called again
        if next_wakeup_time is not None:
            wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0)
            logging.debug('Next wakeup is due at %s (in %f seconds)',
                          next_wakeup_time, wait_seconds)
        else:
            wait_seconds = None
            logging.debug('No jobs; waiting until a job is added')

        return wait_seconds
def _process_jobs(self):
    """
    Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
    to wait for the next round.

    If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least
    ``jobstore_retry_interval`` seconds.
    """
    if self.state == STATE_PAUSED:
        self._logger.debug('Scheduler is paused -- not processing jobs')
        return None

    self._logger.debug('Looking for jobs to run')
    now = datetime.now(self.timezone)
    next_wakeup_time = None
    events = []

    with self._jobstores_lock:
        for jobstore_alias, jobstore in six.iteritems(self._jobstores):
            try:
                due_jobs = jobstore.get_due_jobs(now)
            except Exception as e:
                # Schedule a wakeup at least in jobstore_retry_interval seconds
                self._logger.warning('Error getting due jobs from job store %r: %s',
                                     jobstore_alias, e)
                retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval)
                if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
                    next_wakeup_time = retry_wakeup_time
                continue

            for job in due_jobs:
                # Look up the job's executor
                try:
                    executor = self._lookup_executor(job.executor)
                except BaseException:
                    self._logger.error(
                        'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                        'job store', job.executor, job)
                    self.remove_job(job.id, jobstore_alias)
                    continue

                run_times = job._get_run_times(now)
                run_times = run_times[-1:] if run_times and job.coalesce else run_times
                if run_times:
                    try:
                        # Mutual exclusion across scheduler instances:
                        # use the job's id as a Redis SETNX lock key
                        id = job.id
                        status = RedisTool.setnx("%s.lock" % id, time.time())
                        if status:
                            # Lock acquired -- submit the job, then let the key expire
                            # after 900 ms (the smallest scheduling interval is 1 second)
                            executor.submit_job(job, run_times)
                            RedisTool.pexpire("%s.lock" % id, 900)
                        else:
                            # Lock not acquired -- another instance already ran this cycle's job
                            continue
                    except MaxInstancesReachedError:
                        self._logger.warning(
                            'Execution of job "%s" skipped: maximum number of running '
                            'instances reached (%d)', job, job.max_instances)
                        event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                                   jobstore_alias, run_times)
                        events.append(event)
                    except BaseException:
                        self._logger.exception('Error submitting job "%s" to executor "%s"',
                                               job, job.executor)
                    else:
                        event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias,
                                                   run_times)
                        events.append(event)

                    # Update the job if it has a next execution time.
                    # Otherwise remove it from the job store.
                    job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                    if job_next_run:
                        job._modify(next_run_time=job_next_run)
                        jobstore.update_job(job)
                    else:
                        self.remove_job(job.id, jobstore_alias)

            # Set a new next wakeup time if there isn't one yet or
            # the jobstore has an even earlier one
            jobstore_next_run_time = jobstore.get_next_run_time()
            if jobstore_next_run_time and (next_wakeup_time is None or
                                           jobstore_next_run_time < next_wakeup_time):
                next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

    # Dispatch collected events
    for event in events:
        self._dispatch_event(event)

    # Determine the delay until this method should be called again
    if self.state == STATE_PAUSED:
        wait_seconds = None
        self._logger.debug('Scheduler is paused; waiting until resume() is called')
    elif next_wakeup_time is None:
        wait_seconds = None
        self._logger.debug('No jobs; waiting until a job is added')
    else:
        wait_seconds = min(max(timedelta_seconds(next_wakeup_time - now), 0), TIMEOUT_MAX)
        self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                           next_wakeup_time, wait_seconds)

    return wait_seconds
def _process_jobs(self):
    """
    Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
    to wait for the next round.

    If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least
    ``jobstore_retry_interval`` seconds.
    """
    if self.state == STATE_PAUSED:
        self._logger.debug('Scheduler is paused -- not processing jobs')
        return None

    self._logger.debug('Looking for jobs to run')
    now = datetime.now(self.timezone)
    next_wakeup_time = None
    events = []

    with self._jobstores_lock:
        for jobstore_alias, jobstore in six.iteritems(self._jobstores):
            try:
                due_jobs = jobstore.get_due_jobs(now)
            except Exception as e:
                # Schedule a wakeup at least in jobstore_retry_interval seconds
                self._logger.warning('Error getting due jobs from job store %r: %s',
                                     jobstore_alias, e)
                retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval)
                if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
                    next_wakeup_time = retry_wakeup_time
                continue

            for job in due_jobs:
                # Look up the job's executor
                try:
                    executor = self._lookup_executor(job.executor)
                except:
                    self._logger.error(
                        'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                        'job store', job.executor, job)
                    self.remove_job(job.id, jobstore_alias)
                    continue

                run_times = job._get_run_times(now)
                run_times = run_times[-1:] if run_times and job.coalesce else run_times
                if run_times:
                    try:
                        executor.submit_job(job, run_times)
                    except MaxInstancesReachedError:
                        self._logger.warning(
                            'Execution of job "%s" skipped: maximum number of running '
                            'instances reached (%d)', job, job.max_instances)
                        event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                                   jobstore_alias, run_times)
                        events.append(event)
                    except:
                        self._logger.exception('Error submitting job "%s" to executor "%s"',
                                               job, job.executor)
                    else:
                        event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias,
                                                   run_times)
                        events.append(event)

                    # Update the job if it has a next execution time.
                    # Otherwise remove it from the job store.
                    job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                    if job_next_run:
                        job._modify(next_run_time=job_next_run)
                        jobstore.update_job(job)
                    else:
                        self.remove_job(job.id, jobstore_alias)

            # Set a new next wakeup time if there isn't one yet or
            # the jobstore has an even earlier one
            jobstore_next_run_time = jobstore.get_next_run_time()
            if jobstore_next_run_time and (next_wakeup_time is None or
                                           jobstore_next_run_time < next_wakeup_time):
                next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

    # Dispatch collected events
    for event in events:
        self._dispatch_event(event)

    # Determine the delay until this method should be called again
    if self.state == STATE_PAUSED:
        wait_seconds = None
        self._logger.debug('Scheduler is paused; waiting until resume() is called')
    elif next_wakeup_time is None:
        wait_seconds = None
        self._logger.debug('No jobs; waiting until a job is added')
    else:
        wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0)
        self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                           next_wakeup_time, wait_seconds)

    return wait_seconds