def get_or_create_job_execution(self, job, event):
    """
    Return the id of the job execution matching this submission event,
    creating a new execution row if none exists yet.

    :param job: DjangoJob instance
    :param event: JobSubmissionEvent instance
    :return: JobExecution id
    """
    # type: (DjangoJob, JobSubmissionEvent)->int
    # For blocking schedulers we first get the FINISH event, and only then
    # the SUBMITTED event - so a matching row may already exist.
    job_execution = DjangoJobExecution.objects.filter(
        job=job,
        run_time=serialize_dt(event.scheduled_run_times[0]),
    ).order_by("-id").first()

    if job_execution and job_execution.started is None:
        job_execution.started = time.time()
        # BUG FIX: `finished` can still be None here (e.g. a row created
        # without a finish time - see test_issue_15); the original
        # unconditionally did float(None) and raised TypeError.
        if job_execution.finished is not None:
            job_execution.duration = (
                float(job_execution.finished) - float(job_execution.started)
            )
        else:
            job_execution.duration = None
        job_execution.save()
        return job_execution.id

    return DjangoJobExecution.objects.create(
        job=job,
        status=DjangoJobExecution.SENT,
        started=time.time(),
        run_time=serialize_dt(event.scheduled_run_times[0]),
    ).id
def get_or_create_job_execution(self, job, event):
    """
    Return the id of the job execution matching this submission event,
    creating a new execution row if none exists yet.

    :param job: DjangoJob instance
    :type job: django_apscheduler.models.DjangoJob
    :param event: JobSubmissionEvent instance
    :return: JobExecution id
    """
    # type: (DjangoJob, JobSubmissionEvent)->int
    # For blocking schedulers we first get the FINISH event, and only then
    # the SUBMITTED event - so a matching row may already exist.
    job_execution = DjangoJobExecution.objects.filter(
        job=job,
        run_time=serialize_dt(event.scheduled_run_times[0])
    ).order_by("-id").first()

    if job_execution and job_execution.started is None:
        job_execution.started = time.time()
        try:
            job_execution.duration = float(job_execution.finished) - float(job_execution.started)
        # BUG FIX: narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt). Only the float(None) case from a
        # missing `finished` timestamp is expected here.
        except (TypeError, ValueError):
            job_execution.duration = None
        job_execution.save()
        return job_execution.id

    return DjangoJobExecution.objects.create(
        job=job,
        status=DjangoJobExecution.SENT,
        started=time.time(),
        run_time=serialize_dt(event.scheduled_run_times[0])
    ).id
def update_job(self, job):
    """Persist *job*'s next run time and pickled state; raise if unknown."""
    next_run = serialize_dt(job.next_run_time)
    state = job.__getstate__()
    rows = DjangoJob.objects.filter(name=job.id).update(
        next_run_time=next_run,
        job_state=pickle.dumps(state, self.pickle_protocol),
    )
    LOGGER.debug(
        "Update job %s: next_run_time=%s, job_state=%s", job, next_run, state
    )
    if not rows:
        # No row matched the job id - signal lookup failure to APScheduler.
        LOGGER.info("Job with id %s not found", job.id)
        raise JobLookupError(job.id)
def add_job(self, job):
    """
    Add *job* to the jobstore, refreshing the stored copy if it already exists.

    :param job: apscheduler Job instance
    """
    # FIX: renamed camelCase `dbJob` to PEP 8 snake_case and normalized
    # spacing around `=`; behavior is unchanged.
    db_job, created = DjangoJob.objects.get_or_create(
        defaults=dict(
            next_run_time=serialize_dt(job.next_run_time),
            job_state=pickle.dumps(job.__getstate__(), self.pickle_protocol),
        ),
        name=job.id,
    )
    if not created:
        LOGGER.warning(
            "Job with id %s already in jobstore. I'll refresh it", job.id
        )
        db_job.next_run_time = serialize_dt(job.next_run_time)
        db_job.job_state = pickle.dumps(job.__getstate__(), self.pickle_protocol)
        db_job.save()
def add_job(self, job):
    """
    Add *job* to the jobstore, refreshing the stored copy if it already exists.

    :param job: apscheduler Job instance
    """
    # FIX: renamed camelCase `dbJob` to PEP 8 snake_case and normalized
    # spacing around `=`; behavior is unchanged.
    db_job, created = DjangoJob.objects.get_or_create(
        defaults=dict(
            next_run_time=serialize_dt(job.next_run_time),
            job_state=pickle.dumps(job.__getstate__(), self.pickle_protocol),
        ),
        name=job.id,
    )
    if not created:
        LOGGER.warning(
            "Job with id %s already in jobstore. I'll refresh it", job.id
        )
        db_job.next_run_time = serialize_dt(job.next_run_time)
        db_job.job_state = pickle.dumps(job.__getstate__(), self.pickle_protocol)
        db_job.save()
def add_job(self, job):
    """Persist *job* as a new row; reject duplicates by job id."""
    # APScheduler requires ConflictingIdError for an already-stored id.
    if DjangoJob.objects.filter(name=job.id).exists():
        raise ConflictingIdError(job.id)
    pickled_state = pickle.dumps(job.__getstate__(), self.pickle_protocol)
    DjangoJob.objects.create(
        name=job.id,
        next_run_time=serialize_dt(job.next_run_time),
        job_state=pickled_state,
    )
def update_job(self, job):
    """Write *job*'s next run time and serialized state back to the DB."""
    serialized_run_time = serialize_dt(job.next_run_time)
    updated = DjangoJob.objects.filter(name=job.id).update(
        next_run_time=serialized_run_time,
        job_state=pickle.dumps(job.__getstate__(), self.pickle_protocol),
    )
    LOGGER.debug(
        "Update job %s: next_run_time=%s, job_state=%s",
        job,
        serialized_run_time,
        job.__getstate__(),
    )
    # Zero updated rows means the job id is unknown to this store.
    if updated == 0:
        LOGGER.info("Job with id %s not found", job.id)
        raise JobLookupError(job.id)
def get_due_jobs(self, now):
    """
    Return jobs whose next run time is at or before *now*.

    Best-effort: any lookup failure is logged and an empty list is
    returned so the scheduler keeps running.

    :param now: datetime to compare job run times against
    :return: list of due jobs (empty on error)
    """
    LOGGER.debug("get_due_jobs for time=%s", now)
    try:
        due = self._get_jobs(next_run_time__lte=serialize_dt(now))
    # FIX: narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt; the best-effort contract is kept.
    except Exception:
        LOGGER.exception("Exception during getting jobs")
        return []
    LOGGER.debug("Got %s", due)
    return due
def get_due_jobs(self, now):
    """
    Return jobs whose next run time is at or before *now*.

    Best-effort: any lookup failure is logged and an empty list is
    returned so the scheduler keeps running.

    :param now: datetime to compare job run times against
    :return: list of due jobs (empty on error)
    """
    LOGGER.debug("get_due_jobs for time=%s", now)
    try:
        due = self._get_jobs(next_run_time__lte=serialize_dt(now))
    # FIX: narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt; the best-effort contract is kept.
    except Exception:
        LOGGER.exception("Exception during getting jobs")
        return []
    LOGGER.debug("Got %s", due)
    return due
def register_job_executed(self, job, event):
    """
    Record the outcome of a job execution.

    :param job: DjangoJob instance
    :param event: JobExecutionEvent instance
    :return: JobExecution id, or None if the execution had already finished
    """
    # type: (DjangoJob, JobExecutionEvent)->int
    # (type comment fixed: first parameter is a DjangoJob, not a
    # DjangoJobExecution, per the docstring and the filter below)
    job_execution = DjangoJobExecution.objects.filter(
        job=job,
        status=DjangoJobExecution.SENT,
        run_time=serialize_dt(event.scheduled_run_time),
    ).order_by("id").last()  # type: DjangoJobExecution

    if not job_execution:
        # No SUBMITTED record exists (e.g. blocking scheduler ordering) -
        # create one on the fly so the outcome can still be stored.
        job_execution = DjangoJobExecution.objects.create(
            job=job,
            status=DjangoJobExecution.SENT,
            run_time=serialize_dt(event.scheduled_run_time),
        )

    if job_execution.finished:
        # BUG FIX: was `self.LOGGER`, but LOGGER is used as a module-level
        # logger everywhere else in this file; `self.LOGGER` would raise
        # AttributeError on this path.
        LOGGER.warning("Job already finished! %s", job_execution)
        return

    job_execution.finished = time.time()
    try:
        job_execution.duration = float(job_execution.finished) - float(
            job_execution.started)
    # FIX: narrowed from a bare `except:`; only a missing `started`
    # timestamp (float(None) -> TypeError) is expected here.
    except (TypeError, ValueError):
        job_execution.duration = 0
    job_execution.status = DjangoJobExecution.SUCCESS

    if event.exception:
        job_execution.exception = str(event.exception)
        job_execution.traceback = str(event.traceback)
        job_execution.status = DjangoJobExecution.ERROR

    job_execution.save()
    # FIX: the docstring promises the JobExecution id, but the original
    # implicitly returned None on the success path.
    return job_execution.id
def register_job_executed(self, job, event):
    """
    Record the outcome of a job execution.

    :param job: DjangoJob instance
    :param event: JobExecutionEvent instance
    :return: JobExecution id, or None if the execution had already finished
    """
    # type: (DjangoJob, JobExecutionEvent)->int
    # (type comment fixed: first parameter is a DjangoJob, not a
    # DjangoJobExecution, per the docstring and the filter below)
    job_execution = DjangoJobExecution.objects.filter(
        job=job,
        status=DjangoJobExecution.SENT,
        run_time=serialize_dt(event.scheduled_run_time)
    ).order_by("id").last()  # type: DjangoJobExecution

    if not job_execution:
        # No SUBMITTED record exists (e.g. blocking scheduler ordering) -
        # create one on the fly so the outcome can still be stored.
        job_execution = DjangoJobExecution.objects.create(
            job=job,
            status=DjangoJobExecution.SENT,
            run_time=serialize_dt(event.scheduled_run_time)
        )

    if job_execution.finished:
        # BUG FIX: was `self.LOGGER`, but LOGGER is used as a module-level
        # logger everywhere else in this file; `self.LOGGER` would raise
        # AttributeError on this path.
        LOGGER.warning("Job already finished! %s", job_execution)
        return

    job_execution.finished = time.time()
    try:
        job_execution.duration = float(job_execution.finished) - float(job_execution.started)
    # FIX: narrowed from a bare `except:`; only a missing `started`
    # timestamp (float(None) -> TypeError) is expected here.
    except (TypeError, ValueError):
        job_execution.duration = 0
    job_execution.status = DjangoJobExecution.SUCCESS

    if event.exception:
        job_execution.exception = str(event.exception)
        job_execution.traceback = str(event.traceback)
        job_execution.status = DjangoJobExecution.ERROR

    job_execution.save()
    # FIX: the docstring promises the JobExecution id, but the original
    # implicitly returned None on the success path.
    return job_execution.id
def test_issue_15(db):
    """
    Regression test for
    https://github.com/jarekwg/django-apscheduler/issues/15
    """
    storage = DjangoResultStorage()
    scheduled = datetime.datetime.now()
    job = DjangoJob.objects.create(
        name="test", next_run_time=datetime.datetime.now()
    )
    # Pre-existing execution row with no started/finished timestamps -
    # the condition that originally triggered the bug.
    DjangoJobExecution.objects.create(job=job, run_time=serialize_dt(scheduled))
    event = mock_compat.Mock(scheduled_run_times=[scheduled])
    storage.get_or_create_job_execution(job, event)
def test_issue_15(db):
    """
    Regression test for
    https://github.com/jarekwg/django-apscheduler/issues/15
    """
    run_time = datetime.datetime.now()
    job = DjangoJob.objects.create(
        name="test",
        next_run_time=datetime.datetime.now(),
    )
    # Pre-existing execution row with no started/finished timestamps -
    # the condition that originally triggered the bug.
    DjangoJobExecution.objects.create(job=job, run_time=serialize_dt(run_time))
    DjangoResultStorage().get_or_create_job_execution(
        job,
        mock_compat.Mock(scheduled_run_times=[run_time]),
    )
def update_job(self, job):
    """Store *job*'s next run time and state; raise if no such job exists."""
    match_count = DjangoJob.objects.filter(name=job.id).update(
        next_run_time=serialize_dt(job.next_run_time),
        job_state=pickle.dumps(job.__getstate__(), self.pickle_protocol),
    )
    # An update that touched no rows means the job id is unknown.
    if not match_count:
        raise JobLookupError(job.id)
def get_due_jobs(self, now):
    """
    Return jobs whose next run time is at or before *now*.

    :param now: datetime to compare job run times against
    :return: list of due jobs (empty on error)
    """
    try:
        return self._get_jobs(next_run_time__lte=serialize_dt(now))
    # BUG FIX: the original used a bare `except:`, logged with an empty
    # message, and implicitly returned None - breaking callers that
    # iterate the result. The other get_due_jobs variants in this file
    # return [] on failure; do the same here.
    except Exception:
        logging.exception("Exception during getting due jobs")
        return []