def test_handle_submission_event_not_supported_raises_exception(self, jobstore):
    """An event code the jobstore does not handle raises NotImplementedError."""
    unsupported_event = JobSubmissionEvent(
        events.EVENT_ALL, "test_job", jobstore, [timezone.now()]
    )
    with pytest.raises(NotImplementedError):
        jobstore.handle_submission_event(unsupported_event)
def test_handle_submission_event_for_job_that_no_longer_exists_does_not_raise_exception(self, jobstore):
    """A submission event for a job that has already finished is ignored silently."""
    stale_event = JobSubmissionEvent(
        events.EVENT_JOB_SUBMITTED, "finished_job", jobstore, [timezone.now()]
    )
    # Must not raise, and must not create an execution record either.
    jobstore.handle_submission_event(stale_event)
    execution_created = DjangoJobExecution.objects.filter(
        job_id=stale_event.job_id
    ).exists()
    assert not execution_created
def test_handle_submission_event_creates_job_execution(self, event_code, jobstore, create_add_job):
    """A supported submission event records a DjangoJobExecution for the job."""
    scheduled_job = create_add_job(jobstore, dummy_job, datetime(2016, 5, 3))
    submission = JobSubmissionEvent(
        event_code, scheduled_job.id, jobstore, [timezone.now()]
    )
    jobstore.handle_submission_event(submission)
    assert DjangoJobExecution.objects.filter(job_id=submission.job_id).exists()
def test_delete_old_job_executions(db, scheduler):
    """delete_old_job_executions(1) keeps only executions newer than one second."""
    register_events(scheduler)
    scheduler.add_job(job, trigger="interval", seconds=1, id="job_1")
    scheduler.add_job(job, trigger="interval", seconds=1, id="job_2")
    scheduler.start()

    current_time = datetime.datetime.now(utc)
    moment_before = current_time - datetime.timedelta(seconds=1)
    simulated = (("job_1", moment_before), ("job_2", current_time))

    # Simulate one run per job: all execution events first, then all
    # submission events (same order as the real scheduler would not use,
    # but preserved from the original test).  4096 / 32768 are presumably
    # apscheduler's EVENT_JOB_EXECUTED / EVENT_JOB_SUBMITTED codes.
    for job_id, when in simulated:
        scheduler._dispatch_event(JobExecutionEvent(4096, job_id, None, when))
    for job_id, when in simulated:
        scheduler._dispatch_event(JobSubmissionEvent(32768, job_id, None, [when]))

    assert DjangoJobExecution.objects.count() == 2
    DjangoJobExecution.objects.delete_old_job_executions(1)
    assert DjangoJobExecution.objects.count() == 1
def test_job_events(db, scheduler):
    """One execution event plus one submission event yields exactly one DjangoJobExecution."""
    register_events(scheduler)
    scheduler.add_job(job, trigger="interval", seconds=1, id="job")
    scheduler.start()

    # Push the stored job's next run into the past so the events are relevant.
    stored_job = DjangoJob.objects.last()
    stored_job.next_run_time = stored_job.next_run_time - datetime.timedelta(seconds=2)
    stored_job.save()

    fire_time = datetime.datetime.now(utc)
    # 4096 / 32768 are presumably apscheduler's EVENT_JOB_EXECUTED /
    # EVENT_JOB_SUBMITTED codes.
    scheduler._dispatch_event(JobExecutionEvent(4096, "job", None, fire_time))
    scheduler._dispatch_event(JobSubmissionEvent(32768, "job", None, [fire_time]))

    assert DjangoJobExecution.objects.count() == 1
def _process_jobs(self):
    """
    Iterate through jobs in every jobstore, submit jobs that are due and
    figure out how long to wait for the next round.

    A per-job Redis ``SETNX`` lock makes the submission mutually exclusive
    across scheduler processes sharing the same job store: only the process
    that wins the lock submits the job for the current period.

    If the ``get_due_jobs()`` call raises an exception, a new wakeup is
    scheduled in at least ``jobstore_retry_interval`` seconds.

    :return: seconds to sleep before the next call, or ``None`` when the
        scheduler is paused or no job is scheduled.
    """
    if self.state == STATE_PAUSED:
        self._logger.debug('Scheduler is paused -- not processing jobs')
        return None

    self._logger.debug('Looking for jobs to run')
    now = datetime.now(self.timezone)
    next_wakeup_time = None
    events = []

    with self._jobstores_lock:
        for jobstore_alias, jobstore in six.iteritems(self._jobstores):
            try:
                due_jobs = jobstore.get_due_jobs(now)
            except Exception as e:
                # Schedule a wakeup at least in jobstore_retry_interval seconds
                self._logger.warning(
                    'Error getting due jobs from job store %r: %s',
                    jobstore_alias, e)
                retry_wakeup_time = now + timedelta(
                    seconds=self.jobstore_retry_interval)
                if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
                    next_wakeup_time = retry_wakeup_time
                continue

            for job in due_jobs:
                # Look up the job's executor
                try:
                    executor = self._lookup_executor(job.executor)
                except BaseException:
                    self._logger.error(
                        'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                        'job store', job.executor, job)
                    self.remove_job(job.id, jobstore_alias)
                    continue

                run_times = job._get_run_times(now)
                run_times = run_times[-1:] if run_times and job.coalesce else run_times
                if run_times:
                    try:
                        # Mutual exclusion: Redis SETNX on a per-job lock key.
                        # (Local renamed from "id" to avoid shadowing the builtin;
                        # the original also used a bare string literal as a
                        # comment here, which was a no-op statement.)
                        job_id = job.id
                        status = RedisTool.setnx("%s.lock" % job_id, time.time())
                        if status:
                            # We won the lock -- submit, then let the key expire
                            # after 900 ms (the minimum trigger period is 1 s).
                            executor.submit_job(job, run_times)
                            RedisTool.pexpire("%s.lock" % job_id, 900)
                        else:
                            # Another process already ran this job this period.
                            # NOTE(review): this also skips the next_run_time
                            # bookkeeping below -- presumably the lock winner
                            # updates the shared job store; confirm.
                            continue
                    except MaxInstancesReachedError:
                        self._logger.warning(
                            'Execution of job "%s" skipped: maximum number of running '
                            'instances reached (%d)', job, job.max_instances)
                        event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                                   jobstore_alias, run_times)
                        events.append(event)
                    except BaseException:
                        self._logger.exception(
                            'Error submitting job "%s" to executor "%s"',
                            job, job.executor)
                    else:
                        event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id,
                                                   jobstore_alias, run_times)
                        events.append(event)

                    # Update the job if it has a next execution time.
                    # Otherwise remove it from the job store.
                    job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                    if job_next_run:
                        job._modify(next_run_time=job_next_run)
                        jobstore.update_job(job)
                    else:
                        self.remove_job(job.id, jobstore_alias)

            # Set a new next wakeup time if there isn't one yet or
            # the jobstore has an even earlier one
            jobstore_next_run_time = jobstore.get_next_run_time()
            if jobstore_next_run_time and (
                    next_wakeup_time is None or
                    jobstore_next_run_time < next_wakeup_time):
                next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

    # Dispatch collected events
    for event in events:
        self._dispatch_event(event)

    # Determine the delay until this method should be called again
    if self.state == STATE_PAUSED:
        wait_seconds = None
        self._logger.debug(
            'Scheduler is paused; waiting until resume() is called')
    elif next_wakeup_time is None:
        wait_seconds = None
        self._logger.debug('No jobs; waiting until a job is added')
    else:
        wait_seconds = min(
            max(timedelta_seconds(next_wakeup_time - now), 0), TIMEOUT_MAX)
        self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                           next_wakeup_time, wait_seconds)

    return wait_seconds
def _process_jobs(self):
    """
    Iterate through jobs in every jobstore, submit jobs that are due and
    figure out how long to wait for the next round.

    :return: seconds to sleep before the next call, or ``None`` when the
        scheduler is paused or no job is scheduled.
    """
    if self.state == STATE_PAUSED:
        self._logger.debug('Scheduler is paused -- not processing jobs')
        return None

    self._logger.debug('Looking for jobs to run')
    now = datetime.now(self.timezone)
    next_wakeup_time = None
    events = []

    with self._jobstores_lock:
        for jobstore_alias, jobstore in six.iteritems(self._jobstores):
            for job in jobstore.get_due_jobs(now):
                # Look up the job's executor
                try:
                    executor = self._lookup_executor(job.executor)
                except BaseException:  # was a bare except; made explicit, same semantics
                    self._logger.error(
                        'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                        'job store', job.executor, job)
                    self.remove_job(job.id, jobstore_alias)
                    continue

                # All fire times between the last run and now; with coalescing
                # the backlog collapses into the most recent fire time only.
                run_times = job._get_run_times(now)
                run_times = run_times[-1:] if run_times and job.coalesce else run_times
                if run_times:
                    try:
                        executor.submit_job(job, run_times)
                    except MaxInstancesReachedError:
                        self._logger.warning(
                            'Execution of job "%s" skipped: maximum number of running '
                            'instances reached (%d)', job, job.max_instances)
                        event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                                   jobstore_alias, run_times)
                        events.append(event)
                    except BaseException:  # was a bare except; made explicit, same semantics
                        self._logger.exception(
                            'Error submitting job "%s" to executor "%s"',
                            job, job.executor)
                    else:
                        event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id,
                                                   jobstore_alias, run_times)
                        events.append(event)

                    # Update the job if it has a next execution time.
                    # Otherwise remove it from the job store.
                    job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                    if job_next_run:
                        job._modify(next_run_time=job_next_run)
                        jobstore.update_job(job)
                    else:
                        self.remove_job(job.id, jobstore_alias)

            # Set a new next wakeup time if there isn't one yet or
            # the jobstore has an even earlier one
            jobstore_next_run_time = jobstore.get_next_run_time()
            if jobstore_next_run_time and (
                    next_wakeup_time is None or
                    jobstore_next_run_time < next_wakeup_time):
                next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

    # Dispatch collected events
    for event in events:
        self._dispatch_event(event)

    # Determine the delay until this method should be called again
    if self.state == STATE_PAUSED:
        wait_seconds = None
        self._logger.debug(
            'Scheduler is paused; waiting until resume() is called')
    elif next_wakeup_time is None:
        wait_seconds = None
        self._logger.debug('No jobs; waiting until a job is added')
    else:
        wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0)
        self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                           next_wakeup_time, wait_seconds)

    return wait_seconds
def test_get_event_str_job_submission_event(self):
    """get_event_str renders a JobSubmissionEvent's fields in bracketed form."""
    submission = JobSubmissionEvent("code", "job_id", "jobstore", [])
    rendered = ProgramyScheduler.get_event_str(submission)
    self.assertIsNotNone(rendered)
    self.assertEqual("JobSubmissionEvent [code] [job_id] [jobstore] [[]]", rendered)
def _process_jobs(self):
    """
    Iterates through jobs in every jobstore, starts jobs that are due and
    figures out how long to wait for the next round.

    If the ``get_due_jobs()`` call raises an exception, a new wakeup is
    scheduled in at least ``jobstore_retry_interval`` seconds.

    Mutual exclusion: a non-blocking exclusive ``flock`` on ``scheduler.lock``
    keeps multiple worker processes on the same host from processing jobs at
    once; when the ``is_single`` config value is set and not ``'1'``, a Redis
    key (``SCHEDU_FLAG``) is additionally used as a cross-host flag.

    :return: seconds to sleep before the next call, or ``None`` when the file
        lock could not be acquired, the scheduler is paused, or no job is
        scheduled.
    """
    # Per-host mutex: only the process that acquires the exclusive
    # non-blocking lock on this file proceeds this round.
    f = open("scheduler.lock", "wb")
    wait_seconds = None
    try:
        fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception as exc:
        # Another process holds the lock -- skip this round entirely.
        f.close()
    else:
        if self.state == STATE_PAUSED:
            # NOTE(review): this early return leaves the flock held and the
            # file open until the object is garbage-collected -- confirm
            # that is intended.
            self._logger.debug(
                'Scheduler is paused -- not processing jobs')
            return None
        self._logger.debug('Looking for jobs to run and os.pid is {%s}' %
                           os.getpid())
        now = datetime.now(self.timezone)
        next_wakeup_time = None
        events = []
        # Fresh per-call identifier; only its first two characters are used
        # as the cross-host flag value below.
        uuids = str(uuid.uuid1())
        config = configs()
        is_single = config.get('is_single')
        # Cross-host coordination via Redis, active only when is_single is
        # set to something other than '1'.
        if is_single and is_single != '1':
            if config.get('redis_pwd'):
                pool = redis.ConnectionPool(
                    host=config.get('redis_host'),
                    port=config.get('redis_port') or 6379,
                    db=config.get('redis_db') or 0,
                    password=config.get('redis_pwd'))
            else:
                pool = redis.ConnectionPool(host=config.get('redis_host'),
                                            port=config.get('redis_port') or 6379,
                                            db=config.get('redis_db') or 0)
            r = redis.Redis(connection_pool=pool)
            _result = r.get("SCHEDU_FLAG")
            if _result:
                if _result.decode() == uuids[0:2]:
                    # NOTE(review): uuid1() changes on every call, so this
                    # equality check rarely holds -- the intent of comparing
                    # against a fresh uuid prefix is unclear; verify.  This
                    # return also leaves the flock held and the file open.
                    return
            # Claim the flag (only if absent) and let it expire in 10 minutes.
            r.setnx("SCHEDU_FLAG", uuids[0:2])
            r.expire("SCHEDU_FLAG", 60 * 10)
        with self._jobstores_lock:
            for jobstore_alias, jobstore in six.iteritems(self._jobstores):
                try:
                    due_jobs = jobstore.get_due_jobs(now)
                    self._logger.info("due_jobs:%s os.pid: %s\n" %
                                      (len(due_jobs), os.getpid()))
                except Exception as e:
                    # Schedule a wakeup at least in jobstore_retry_interval seconds
                    self._logger.warning(
                        'Error getting due jobs from job store %r: %s',
                        jobstore_alias, e)
                    retry_wakeup_time = now + timedelta(
                        seconds=self.jobstore_retry_interval)
                    if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
                        next_wakeup_time = retry_wakeup_time
                    continue
                for job in due_jobs:
                    # Look up the job's executor
                    try:
                        executor = self._lookup_executor(job.executor)
                    except BaseException:
                        self._logger.error(
                            'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                            'job store', job.executor, job)
                        # update_cron_info appears to sync external cron state
                        # before removal -- defined elsewhere; confirm semantics.
                        self.update_cron_info(job.id)
                        self.remove_job(job.id, jobstore_alias)
                        continue
                    # All fire times up to now; coalescing keeps only the latest.
                    run_times = job._get_run_times(now)
                    run_times = run_times[
                        -1:] if run_times and job.coalesce else run_times
                    if run_times:
                        try:
                            executor.submit_job(job, run_times)
                        except MaxInstancesReachedError:
                            self._logger.warning(
                                'Execution of job "%s" skipped: maximum number of running '
                                'instances reached (%d)', job, job.max_instances)
                            event = JobSubmissionEvent(
                                EVENT_JOB_MAX_INSTANCES, job.id,
                                jobstore_alias, run_times)
                            events.append(event)
                        except BaseException:
                            self._logger.exception(
                                'Error submitting job "%s" to executor "%s"',
                                job, job.executor)
                        else:
                            event = JobSubmissionEvent(
                                EVENT_JOB_SUBMITTED, job.id,
                                jobstore_alias, run_times)
                            events.append(event)
                        # Update the job if it has a next execution time.
                        # Otherwise remove it from the job store.
                        job_next_run = job.trigger.get_next_fire_time(
                            run_times[-1], now)
                        if job_next_run:
                            job._modify(next_run_time=job_next_run)
                            jobstore.update_job(job)
                        else:
                            try:
                                self.update_cron_info(job.id)
                                self.remove_job(job.id, jobstore_alias)
                            # NOTE(review): bare except swallows everything,
                            # including KeyboardInterrupt/SystemExit.
                            except:
                                self._logger.error(
                                    'Error remove job "%s" to executor "%s"',
                                    job, job.executor)
                # Set a new next wakeup time if there isn't one yet or
                # the jobstore has an even earlier one
                jobstore_next_run_time = jobstore.get_next_run_time()
                if jobstore_next_run_time and (
                        next_wakeup_time is None or
                        jobstore_next_run_time < next_wakeup_time):
                    next_wakeup_time = jobstore_next_run_time.astimezone(
                        self.timezone)
        # Dispatch collected events
        for event in events:
            self._dispatch_event(event)
        # Determine the delay until this method should be called again
        if self.state == STATE_PAUSED:
            self._logger.debug(
                'Scheduler is paused; waiting until resume() is called')
        elif next_wakeup_time is None:
            self._logger.debug('No jobs; waiting until a job is added')
        else:
            wait_seconds = min(
                max(timedelta_seconds(next_wakeup_time - now), 0), TIMEOUT_MAX)
            self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                               next_wakeup_time, wait_seconds)
        if is_single and is_single != '1':
            # Release the cross-host flag so another host may run next round.
            r.delete("SCHEDU_FLAG")
        # Release the per-host file lock.
        fcntl.flock(f, fcntl.LOCK_UN)
        f.close()
    return wait_seconds
def _process_jobs(self):
    """
    Iterate through jobs in every jobstore, submit jobs that are due and
    figure out how long to wait for the next round.

    If the ``get_due_jobs()`` call raises an exception, a new wakeup is
    scheduled in at least ``jobstore_retry_interval`` seconds.

    :return: seconds to sleep before the next call, or ``None`` when the
        scheduler is paused or no job is scheduled.
    """
    if self.state == STATE_PAUSED:
        self._logger.debug('Scheduler is paused -- not processing jobs')
        return None

    self._logger.debug('Looking for jobs to run')
    now = datetime.now(self.timezone)
    next_wakeup_time = None
    events = []

    with self._jobstores_lock:
        # Walk every registered job store.
        for jobstore_alias, jobstore in six.iteritems(self._jobstores):
            try:
                # Jobs in this store whose fire time is due.
                due_jobs = jobstore.get_due_jobs(now)
            except Exception as e:
                # Schedule a wakeup at least in jobstore_retry_interval seconds
                self._logger.warning(
                    'Error getting due jobs from job store %r: %s',
                    jobstore_alias, e)
                retry_wakeup_time = now + timedelta(
                    seconds=self.jobstore_retry_interval)
                if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
                    next_wakeup_time = retry_wakeup_time
                continue

            for job in due_jobs:
                # Look up the job's executor
                try:
                    executor = self._lookup_executor(job.executor)
                except BaseException:  # was a bare except; made explicit, same semantics
                    self._logger.error(
                        'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                        'job store', job.executor, job)
                    self.remove_job(job.id, jobstore_alias)
                    continue

                # How many times the job should have fired by now; with
                # coalescing, the backlog collapses into the latest fire time.
                run_times = job._get_run_times(now)
                run_times = run_times[-1:] if run_times and job.coalesce else run_times
                if run_times:
                    try:
                        # Hand the job to its executor for these fire times.
                        executor.submit_job(job, run_times)
                    except MaxInstancesReachedError:
                        self._logger.warning(
                            'Execution of job "%s" skipped: maximum number of running '
                            'instances reached (%d)', job, job.max_instances)
                        event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                                   jobstore_alias, run_times)
                        events.append(event)
                    except BaseException:  # was a bare except; made explicit, same semantics
                        self._logger.exception(
                            'Error submitting job "%s" to executor "%s"',
                            job, job.executor)
                    else:
                        event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id,
                                                   jobstore_alias, run_times)
                        events.append(event)

                    # Update the job if it has a next execution time.
                    # Otherwise remove it from the job store.
                    job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                    if job_next_run:
                        job._modify(next_run_time=job_next_run)
                        jobstore.update_job(job)
                    else:
                        self.remove_job(job.id, jobstore_alias)

            # Set a new next wakeup time if there isn't one yet or
            # the jobstore has an even earlier one
            jobstore_next_run_time = jobstore.get_next_run_time()
            if jobstore_next_run_time and (
                    next_wakeup_time is None or
                    jobstore_next_run_time < next_wakeup_time):
                next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

    # Dispatch collected events
    for event in events:
        self._dispatch_event(event)

    # Determine the delay until this method should be called again
    if self.state == STATE_PAUSED:
        wait_seconds = None
        self._logger.debug(
            'Scheduler is paused; waiting until resume() is called')
    elif next_wakeup_time is None:
        wait_seconds = None
        self._logger.debug('No jobs; waiting until a job is added')
    else:
        wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0)
        self._logger.debug('Next wakeup is due at %s (in %f seconds)',
                           next_wakeup_time, wait_seconds)

    return wait_seconds