Example #1
 def get_due_jobs(self, now: datetime) -> list:
     now_timestamp = datetime_to_utc_timestamp(now)
     pending = list()
     sorted_items = self._get_sorted_items()
     for item in sorted_items:
         _next_run_time = datetime_to_utc_timestamp(item.job.next_run_time)
         if _next_run_time is None or _next_run_time > now_timestamp:
             break
         item.bind_job(scheduler=self._scheduler, alias=self._alias)
         pending.append(item.job)
     return pending
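All of these examples lean on APScheduler's datetime_to_utc_timestamp / utc_timestamp_to_datetime helpers from apscheduler.util. A minimal sketch of the assumed behavior (None passes through so paused jobs keep a None run time; otherwise the UTC POSIX timestamp of an aware datetime is returned):

from calendar import timegm
from datetime import datetime, timezone

def datetime_to_utc_timestamp(timeval):
    # None stays None (jobs without a next run time are treated as "not due").
    if timeval is not None:
        return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000

def utc_timestamp_to_datetime(timestamp):
    # Inverse conversion back to an aware UTC datetime.
    if timestamp is not None:
        return datetime.fromtimestamp(timestamp, timezone.utc)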
Example #2
    def _run_job_error(self, job, run_time, exc, traceback=None):
        """Called by the dispatcher with the exception if there is an error calling `run_job`."""
#         with self._lock:
#             indices = [i for i, (f, rt) in enumerate(self._state[job.id]) if rt == run_time]
#             for i in indices:
#                 del self._state[job.id][i]
 
        exc_info = (exc.__class__, exc, traceback)
        logging.error('Error running job %s run_time %s' % (job.id, run_time), exc_info=exc_info)
        
        cost_ms = int((datetime_to_utc_timestamp(datetime.now(get_localzone())) - datetime_to_utc_timestamp(run_time)) * 1000)
        add_result(job.conf, datetime_to_utc_timestamp(run_time), result=1, output=exc_info, cost_ms=cost_ms)
Example #3
    def _run_job_success(self, job, run_time, events):
        """Called by the dispatcher with the list of generated events when `run_job` has been successfully called."""
#         with self._lock:
#             #self._state[job.id].remove((f, run_time))
#             indices = [i for i, (f, rt) in enumerate(self._state[job.id]) if rt == run_time]
#             for i in indices:
#                 del self._state[job.id][i]
        
        cost_ms = int((datetime_to_utc_timestamp(datetime.now(get_localzone())) - datetime_to_utc_timestamp(run_time)) * 1000)
        for event in events:
            self._scheduler._dispatch_event(event)
            
        add_result(job.conf, datetime_to_utc_timestamp(run_time), result=0, output='ok', cost_ms=cost_ms)
Example #4
    def get_due_jobs(self, now):
        try:
            timestamp = datetime_to_utc_timestamp(now)
            return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)
        except SQLAlchemyError as e:
            if isinstance(e.orig, InvalidRequestError):
                self.session.rollback()
            elif not isinstance(e, OperationalError):
                raise

            del self.engine 
            self.engine = create_engine(self.url)
            timestamp = datetime_to_utc_timestamp(now)
            return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)
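For context, the jobs_t table this comparison runs against typically stores the UTC timestamp as an indexed float column, roughly as sketched below (modeled on APScheduler's SQLAlchemy job store; the table name and column sizes are assumptions):

from sqlalchemy import Table, Column, MetaData, Unicode, Float, LargeBinary

metadata = MetaData()
jobs_t = Table(
    'apscheduler_jobs', metadata,
    Column('id', Unicode(191), primary_key=True),
    Column('next_run_time', Float(25), index=True),   # UTC timestamp, indexed for the <= range scan
    Column('job_state', LargeBinary, nullable=False)
)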
Example #5
 def get_due_jobs(self, now):
     timestamp = datetime_to_utc_timestamp(now)
     job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
     if job_ids:
         job_states = self.redis.hmget(self.jobs_key, *job_ids)
         return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
     return []
Example #6
 def update_job(self, job):
     job.conf.next_run_time = datetime_to_utc_timestamp(job.next_run_time)
     logging.debug('job %s update next_run_time to %s %s cmd=%s' % (job.conf.id, job.conf.next_run_time, job.next_run_time, job.conf.cmd))
     
     update = self.wm_jobs_t.update().values(**{
         'cmd': job.conf.cmd,
         'cron_str': job.conf.cron_str,
         'name': job.conf.name,
         'desc': job.conf.desc,
         'mails': job.conf.mails,
         'phones': job.conf.phones,
         'team': job.conf.team,
         'owner': job.conf.owner,
         'hosts': job.conf.hosts,
         'host_strategy': job.conf.host_strategy,
         'restore_strategy': job.conf.restore_strategy,
         'retry_strategy': job.conf.retry_strategy,
         'error_strategy': job.conf.error_strategy,
         'exist_strategy': job.conf.exist_strategy,
         'running_timeout_s': job.conf.running_timeout_s,
         'status': job.conf.status,
         'modify_time': job.conf.modify_time,
         'modify_user': job.conf.modify_user,
         'create_time': job.conf.create_time,
         'create_user': job.conf.create_user,
         'start_date': job.conf.start_date,
         'end_date': job.conf.end_date,
         'oupput_match_reg': job.conf.oupput_match_reg,
         'next_run_time': job.conf.next_run_time
     }).where(self.wm_jobs_t.c.id == job.id)
     result = self.engine.execute(update)
     if result.rowcount == 0:
         raise JobLookupError(job.id)
Example #7
 def get_due_jobs(self, now):
     timestamp = datetime_to_utc_timestamp(now)
     documents = []
     snapshots = self.collection.where(u'next_run_time', u'<=', timestamp).get()
     for snap in snapshots:
         documents.append(snap.to_dict())
     return self._reconstituteAndAppendJobs(documents)
Example #8
    def add_job(self, job):
        if isinstance(job, Job):
            if job.id:
                job_exist = list(self.table.get_all(job.id).run(self.conn))
                if job_exist:
                    job_exist = job_exist[0]
            else:
                job_exist = None
        else:
            job_exist = None

        if not job_exist:
            job_dict = {}
            job_dict['id'] = job.id
            job_dict['job_state'] = (
                pickle
                .dumps(job.__getstate__(), self.pickle_protocol)
                .encode("zip")
                .encode("base64")
                .strip()
            )
            job_dict['next_run_time'] = (
                datetime_to_utc_timestamp(job.next_run_time)
            )

            results = self.table.insert(job_dict).run(self.conn)
            if results['errors'] > 0:
                raise ConflictingIdError(job.id)
        else:
            raise ConflictingIdError(job)
Example #9
    def _encode_job(self, job: Job):
        job_state = job.__getstate__()
        job_state["kwargs"]["config"] = None
        job_state["kwargs"]["bot"] = None
        # new_kwargs = job_state["kwargs"]
        # new_kwargs["config"] = None
        # new_kwargs["bot"] = None
        # job_state["kwargs"] = new_kwargs
        encoded = base64.b64encode(
            pickle.dumps(job_state, self.pickle_protocol))
        out = {
            "_id": job.id,
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": encoded.decode("ascii"),
        }
        job_state["kwargs"]["config"] = self.config
        job_state["kwargs"]["bot"] = self.bot
        # new_kwargs = job_state["kwargs"]
        # new_kwargs["config"] = self.config
        # new_kwargs["bot"] = self.bot
        # job_state["kwargs"] = new_kwargs
        # log.debug(f"Encoding job id: {job.id}\n"
        #           f"Encoded as: {out}")

        return out
Example #10
 def get_next_run_time(self) -> Optional[datetime]:
     sorted_items = self._get_sorted_items()
     for item in sorted_items:
         _next_run_time = datetime_to_utc_timestamp(item.job.next_run_time)
         if _next_run_time is not None:
             return utc_timestamp_to_datetime(_next_run_time)
     return None
Example #11
 def dispatch(self, job, run_times):
     # no sense in repeating past runs; only dispatch the most recent run time
     if len(run_times) > 1:
         run_times = run_times[-1:]
     run_time = run_times[0] 
     
     # allow hosts
     hosts = re.split('[,;,; \\r\\n]', job.conf.hosts)  # the character class also contains two full-width (Chinese) separators
     if self._local_hostname not in hosts:
         logging.error('job ignored. local ip %s not in hosts %s' % (self._local_hostname, job.conf.hosts))
         return DispatchCode.IGNORED
     
     #logging.error('run_time %s' % run_time)
     # run in just one of these hosts
     if job.conf.host_strategy == 1: 
         if not self._zk.client_state == KazooState.CONNECTED:
             msg = 'zk state is %s at host %s' % (self._zk.client_state, self.ip )
             logging.error(msg)
             add_log(job.conf, run_time=datetime_to_utc_timestamp(run_time), output=msg)
             return DispatchCode.FAIL_TO_DISPATCH
         
         else:    
             if self._zk.exists(self._get_job_running_path(job)):
                 data, stat = self._zk.get(self._get_job_running_path(job))
                 logging.info('job ignored cause of exist_strategy=1 and the last running still going on. id=%s name=%s run_time=%s host=%s zk data:%s' % (job.id, job.conf.name, run_time, self.ip, data.decode("utf-8") ))
                 run_time = None
                 return DispatchCode.IGNORED
             else:
                 try:
                     self._zk.ensure_path(self._zk_root)
                     self._zk.create(self._get_job_running_path(job), json.dumps({'job': job.id, 'host': self._local_hostname , 'run_time': run_time}), ephemeral=True)
                 except Exception as e:
                     logging.error('job ignored cause of fail to create zk ephemeral node. id=%s name=%s run_time=%s host=%s zk path:%s' % (job.id, job.conf.name, run_time, self.ip, self._get_job_running_path(job) ))
                     run_time = None
                     return DispatchCode.FAIL_TO_DISPATCH
Example #12
 def update_job(self, job):
     changes = {
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
     }
     result = self.collection.document(job.id).update(changes)
     print(result)
Example #13
 def get_due_jobs(self, now):
     timestamp = datetime_to_utc_timestamp(now)
     job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
     if job_ids:
         job_states = self.redis.hmget(self.jobs_key, *job_ids)
         return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
     return []
Example #14
    def add_job(self, job):
        if isinstance(job, Job):
            if job.id:
                job_exist = list(self.table.get_all(job.id).run(self.conn))
                if job_exist:
                    job_exist = job_exist[0]
            else:
                job_exist = None
        else:
            job_exist = None

        if not job_exist:
            job_dict = {}
            job_dict['id'] = job.id
            job_dict['job_state'] = (pickle.dumps(
                job.__getstate__(),
                self.pickle_protocol).encode("zip").encode("base64").strip())
            job_dict['next_run_time'] = (datetime_to_utc_timestamp(
                job.next_run_time))

            results = self.table.insert(job_dict).run(self.conn)
            if results['errors'] > 0:
                raise ConflictingIdError(job.id)
        else:
            raise ConflictingIdError(job)
Example #15
 def update_job(self, job):
     update = self.jobs_t.update().values(**{
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
     }).where(self.jobs_t.c.id == job.id)
     result = self.engine.execute(update)
     if result.rowcount == 0:
         raise JobLookupError(job.id)
Example #16
    def get_due_jobs(self, now):
        if now:
            timestamp = datetime_to_utc_timestamp(now)
            search = (lambda x: x['next_run_time'] <= timestamp)
        else:
            search = None

        return self._get_jobs(search)
Example #17
    def add_job(self, job):
        if job.id in self._jobs_index:
            raise ConflictingIdError(job.id)

        timestamp = datetime_to_utc_timestamp(job.next_run_time)
        index = self._get_job_index(timestamp, job.id)
        self._jobs.insert(index, (job, timestamp))
        self._jobs_index[job.id] = (job, timestamp)
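This in-memory store keeps self._jobs sorted by (next_run_time timestamp, job id), with a missing run time sorting last, which is what lets the due-job scans further down stop at the first non-due entry. A plausible bisection-based sketch of the _get_job_index helper it calls, under that assumption:

    def _get_job_index(self, timestamp, job_id):
        # Binary search for the job's position; None timestamps compare as +inf
        # so paused jobs stay at the end of the sorted list.
        lo, hi = 0, len(self._jobs)
        timestamp = float('inf') if timestamp is None else timestamp
        while lo < hi:
            mid = (lo + hi) // 2
            mid_job, mid_timestamp = self._jobs[mid]
            mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp
            if mid_timestamp > timestamp:
                hi = mid
            elif mid_timestamp < timestamp:
                lo = mid + 1
            elif mid_job.id > job_id:
                hi = mid
            elif mid_job.id < job_id:
                lo = mid + 1
            else:
                return mid
        return lo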
Example #18
 def update_job(self, job):
     try:
         e = Scheduler.find(self.sess, job.id)
     except sa.orm.exc.NoResultFound:
         raise JobLookupError(job.id)
     e.next_time = datetime_to_utc_timestamp(job.next_run_time)
     e.job_state = pickle.dumps(job.__getstate__(), self.pickle_protocol)
     self.sess.flush()
Example #19
 def add_job(self, job):
     e = Scheduler.create(self.sess, job.id, self.user_id)
     e.next_time = datetime_to_utc_timestamp(job.next_run_time)
     e.job_state = pickle.dumps(job.__getstate__(), self.pickle_protocol)
     try:
         self.sess.flush()
     except sa.exc.IntegrityError:
         raise ConflictingIdError(job.id)
Example #20
    def get_due_jobs(self, now):
        if now:
            timestamp = datetime_to_utc_timestamp(now)
            search = (lambda x: x['next_run_time'] <= timestamp)
        else:
            search = None

        return self._get_jobs(search)
Example #21
 def update_job(self, job):
     try:
         e = Scheduler.find(self.sess, job.id)
     except sa.orm.exc.NoResultFound:
         raise JobLookupError(job.id)
     e.next_time = datetime_to_utc_timestamp(job.next_run_time)
     e.job_state = pickle.dumps(job.__getstate__(), self.pickle_protocol)
     self.sess.flush()
Example #22
 def update_job(self, job):
     update = self.jobs_t.update().values(**{
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': dill.dumps(job.__getstate__(), self.pickle_protocol)
     }).where(self.jobs_t.c.id == job.id)
     result = self.engine.execute(update)
     if result.rowcount == 0:
         raise JobLookupError(job.id)
Example #23
 def update_job(self, job):
     changes = {
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
     }
     result = self.collection.update({'_id': job.id}, {'$set': changes})
     if result and result['n'] == 0:
         raise JobLookupError(job.id)
Example #24
 def update_job(self, job):
     changes = {
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
     }
     result = self.collection.update({'_id': job.id}, {'$set': changes})
     if result and result['n'] == 0:
         raise JobLookupError(job.id)
Example #25
 def get_due_jobs(self, now):
     timestamp = datetime_to_utc_timestamp(now)
     jobs = [
         job_def['job'] for job_def in self._get_jobs()
         if job_def['next_run_time'] is not None
         and job_def['next_run_time'] <= timestamp
     ]
     return jobs
Example #26
 def get_due_jobs(self, now):
     timestamp = datetime_to_utc_timestamp(now)
     return self._get_jobs({
         'next_run_time': {
             '$lte': timestamp
         },
         'ip': self.ip
     })
Example #27
    def add_job(self, job):
        if job.id in self._jobs_index:
            raise ConflictingIdError(job.id)

        timestamp = datetime_to_utc_timestamp(job.next_run_time)
        index = self._get_job_index(timestamp, job.id)
        self._jobs.insert(index, (job, timestamp))
        self._jobs_index[job.id] = (job, timestamp)
Example #28
 def add_job(self, job):
     e = Scheduler.create(self.sess, job.id, self.user_id)
     e.next_time = datetime_to_utc_timestamp(job.next_run_time)
     e.job_state = pickle.dumps(job.__getstate__(), self.pickle_protocol)
     try:
         self.sess.flush()
     except sa.exc.IntegrityError:
         raise ConflictingIdError(job.id)
Example #29
 def add_job(self, job):
     try:
         self.collection.insert({
             '_id': job.id,
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
             'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
         })
     except DuplicateKeyError:
         raise ConflictingIdError(job.id)
Example #30
    def get_due_jobs(self, now):
        now_timestamp = datetime_to_utc_timestamp(now)
        pending = []
        for job, timestamp in self._jobs:
            if timestamp is None or timestamp > now_timestamp:
                break
            pending.append(job)

        return pending
Example #31
 def add_job(self, job):
     try:
         self.collection.insert({
             '_id': job.id,
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
             'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
         })
     except DuplicateKeyError:
         raise ConflictingIdError(job.id)
Example #32
    def get_due_jobs(self, now):
        now_timestamp = datetime_to_utc_timestamp(now)
        pending = []
        for job, timestamp in self._jobs:
            if timestamp is None or timestamp > now_timestamp:
                break
            pending.append(job)

        return pending
Example #33
 def add_job(self, job):
     try:
         self.collection.document(job.id).set({
                 '_id': job.id,
                 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
                 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
         })
     except Exception:
         print("error")
Example #34
    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            pipe.execute()
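Because the UTC timestamp is stored as the sorted-set score, the earliest pending run can also be read back from the head of the set. A small sketch in the same style, assuming the same run_times_key layout:

    def get_next_run_time(self):
        # The member with the lowest score is the next job due to run.
        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
        if next_run_time:
            return utc_timestamp_to_datetime(next_run_time[0][1])
        return None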
Example #35
 def add_job(self, job: Job):
     if job.id in self._jobs_index:
         raise ConflictingIdError(job.id)
     # log.debug(f"Check job args: {job.args=}")
     timestamp = datetime_to_utc_timestamp(job.next_run_time)
     index = self._get_job_index(timestamp, job.id)  # This is fine
     self._jobs.insert(index, (job, timestamp))
     self._jobs_index[job.id] = (job, timestamp)
     asyncio.create_task(self._async_add_job(job, index, timestamp))
Example #36
 def update_job(self, job):
     changes = {
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
     }
     results = self.table.get_all(job.id).update(changes).run(self.conn)
     skipped = False in map(lambda x: results[x] == 0, results.keys())
     if results['skipped'] > 0 or results['errors'] > 0 or not skipped:
         raise JobLookupError(job.id)
Example #37
 def add_job(self, job):
     job_dict = {
         'id': job.id,
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
     }
     results = self.table.insert(job_dict).run(self.conn)
     if results['errors'] > 0:
         raise ConflictingIdError(job.id)
Example #38
    def _run_job_success(self, job, run_time, events):
        """Called by the dispatcher with the list of generated events when `run_job` has been successfully called."""
        #         with self._lock:
        #             #self._state[job.id].remove((f, run_time))
        #             indices = [i for i, (f, rt) in enumerate(self._state[job.id]) if rt == run_time]
        #             for i in indices:
        #                 del self._state[job.id][i]

        cost_ms = int(
            (datetime_to_utc_timestamp(datetime.now(get_localzone())) -
             datetime_to_utc_timestamp(run_time)) * 1000)
        for event in events:
            self._scheduler._dispatch_event(event)

        add_result(job.conf,
                   datetime_to_utc_timestamp(run_time),
                   result=0,
                   output='ok',
                   cost_ms=cost_ms)
Example #39
 def update_job(self, job):
     q = self.job_model.update(
         **{
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
             'job_state': pickle.dumps(job.__getstate__(),
                                       self.pickle_protocol)
         }).where(self.job_model.id == job.id)
     rowcount = q.execute()
     if rowcount == 0:
         raise JobLookupError(job.id)
Example #40
 def add_job(self, job):
     insert = self.jobs_t.insert().values(**{
         'id': job.id,
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
     })
     try:
         self.engine.execute(insert)
     except IntegrityError:
         raise ConflictingIdError(job.id)
Example #41
    def update_job(self, job):
        update = self.jobs_t.update().values(**{
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol),
#            'job_json': json.dumps(job.__getstate__(), cls=JobEncoder, ensure_ascii=False)
            'job_json': ''
        }).where(self.jobs_t.c.id == job.id)
        result = self.engine.execute(update)
        if result.rowcount == 0:
            raise JobLookupError(job.id)
Example #42
 def add_job(self, job):
     insert = self.jobs_t.insert().values(**{
         'id': job.id,
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': dill.dumps(job.__getstate__(), self.pickle_protocol)
     })
     try:
         self.engine.execute(insert)
     except IntegrityError:
         raise ConflictingIdError(job.id)
Example #43
    def _run_job_error(self, job, run_time, exc, traceback=None):
        """Called by the dispatcher with the exception if there is an error calling `run_job`."""
        #         with self._lock:
        #             indices = [i for i, (f, rt) in enumerate(self._state[job.id]) if rt == run_time]
        #             for i in indices:
        #                 del self._state[job.id][i]

        exc_info = (exc.__class__, exc, traceback)
        logging.error('Error running job %s run_time %s' % (job.id, run_time),
                      exc_info=exc_info)

        cost_ms = int(
            (datetime_to_utc_timestamp(datetime.now(get_localzone())) -
             datetime_to_utc_timestamp(run_time)) * 1000)
        add_result(job.conf,
                   datetime_to_utc_timestamp(run_time),
                   result=1,
                   output=exc_info,
                   cost_ms=cost_ms)
Example #44
    def remove_job(self, job_id):
        job_id = bytes(job_id)
        if job_id not in self.jobs:
            raise JobLookupError(job_id)

        job_state = self.jobs.get(job_id)
        job = self._reconstitute_job(job_state)
        self.jobs.delete(job_id)

        ts = bytes(int(datetime_to_utc_timestamp(job.next_run_time)))
        self.run_times(ts)
Example #45
    def update_job(self, job):
        if not self.redis.hexists(self.jobs_key, job.id):
            raise JobLookupError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            else:
                pipe.zrem(self.run_times_key, job.id)
            pipe.execute()
Example #46
 def add_job(self, job):
     self._ensure_paths()
     node_path = os.path.join(self.path,  str(job.id))
     value = {
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': job.__getstate__()
     }
     data = pickle.dumps(value, self.pickle_protocol)
     try:
         self.client.create(node_path, value=data)
     except NodeExistsError:
         raise ConflictingIdError(job.id)
Example #47
 def update_job(self, job):
     self._ensure_paths()
     node_path = os.path.join(self.path,  str(job.id))
     changes = {
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': job.__getstate__()
     }
     data = pickle.dumps(changes, self.pickle_protocol)
     try:
         self.client.set(node_path, value=data)
     except NoNodeError:
         raise JobLookupError(job.id)
Example #48
 def _serialize_job(self, job):
     schedule = {f.name: str(f) for f in job.trigger.fields}
     return {
         'id': job.id,
         'name': job.name,
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'task': job.args[0],
         'args': job.args[1:],
         'enabled': job.next_run_time is not None,
         'hidden': job.kwargs['hidden'],
         'protected': job.kwargs['protected'],
         'schedule': schedule
     }
Example #49
    def update_job(self, job):
        old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
        if old_job is None:
            raise JobLookupError(job.id)

        # If the next run time has not changed, simply replace the job in its present index.
        # Otherwise, reinsert the job to the list to preserve the ordering.
        old_index = self._get_job_index(old_timestamp, old_job.id)
        new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
        if old_timestamp == new_timestamp:
            self._jobs[old_index] = (job, new_timestamp)
        else:
            del self._jobs[old_index]
            new_index = self._get_job_index(new_timestamp, job.id)
            self._jobs.insert(new_index, (job, new_timestamp))

        self._jobs_index[old_job.id] = (job, new_timestamp)
Example #50
 def update_job(self, job):
     document = {}
     if isinstance(job, Job):
         next_run_time = (
             datetime_to_utc_timestamp(job.next_run_time)
         )
         document['job_state'] = (
             pickle
             .dumps(job.__getstate__(), self.pickle_protocol)
             .encode("zip")
             .encode("base64")
             .strip()
         )
         document['next_run_time'] = next_run_time
         results = self.table.get_all(job.id).update(document).run(self.conn)
         skipped = False in map(lambda x: results[x] == 0, results.keys())
         if results['skipped'] > 0 or results['errors'] > 0 or not skipped:
             raise JobLookupError(job.id)
     else:
         raise JobLookupError(job.id)
Example #51
 def get_due_jobs(self, now):
     timestamp = datetime_to_utc_timestamp(now)
     jobs = [job_def['job'] for job_def in self._get_jobs()
             if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp]
     return jobs
Example #52
 def get_due_jobs(self, now):
     timestamp = datetime_to_utc_timestamp(now)
     return self._get_jobs(('next_run_time', '<=', timestamp))
Example #53
 def get_due_jobs(self, now):
     timestamp = datetime_to_utc_timestamp(now)
     return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)
Example #54
 def get_due_jobs(self, now):
     return self._get_jobs(r.row['next_run_time'] <= datetime_to_utc_timestamp(now))
Example #55
def test_datetime_to_utc_timestamp(timezone):
    dt = timezone.localize(datetime(2014, 3, 12, 5, 40, 13, 254012))
    timestamp = datetime_to_utc_timestamp(dt)
    dt2 = utc_timestamp_to_datetime(timestamp)
    assert dt2 == dt
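The round-trip test above relies on a timezone fixture being provided by the test suite; a minimal stand-in, assuming pytest and pytz (the zone chosen here is arbitrary):

import pytest
import pytz

@pytest.fixture
def timezone():
    # Any concrete pytz timezone works for the localize/round-trip assertion above.
    return pytz.timezone('Europe/Berlin')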
Example #56
 def get_due_jobs(self, now):
     timestamp = datetime_to_utc_timestamp(now)
     return self._get_jobs({'next_run_time': {'$lte': timestamp}})