def _reconstitute_job(self, job_state):
    """Rebuild a Job instance from its pickled state blob.

    Bypasses Job.__init__ via __new__, restores the payload with
    __setstate__, then re-attaches the scheduler and jobstore alias
    that are not part of the pickled state.
    """
    state = pickle.loads(job_state)
    job = Job.__new__(Job)  # allocate without running __init__
    job.__setstate__(state)
    job._scheduler = self._scheduler
    job._jobstore_alias = self._alias
    return job
async def _decode_job(self, in_job):
    """Deserialize a stored job document back into an APScheduler Job.

    Returns None when no document was found.  Legacy documents carried
    their payload in positional args; those are migrated into kwargs
    before the live ``config``/``bot`` references are injected.
    """
    if in_job is None:
        return None

    state = pickle.loads(base64.b64decode(in_job["job_state"]))
    if state["args"]:
        # Backwards compatibility on args to kwargs
        state["kwargs"] = {**state["args"][0]}
        state["args"] = []
    state["kwargs"]["config"] = self.config
    state["kwargs"]["bot"] = self.bot

    job = Job.__new__(Job)
    job.__setstate__(state)
    job._scheduler = self._scheduler
    job._jobstore_alias = self._alias
    return job
def db_to_scheduler(document, scheduler, alias="beer_garden"):
    """Convert a database job to a scheduler's job."""
    next_run_time = (
        utc.localize(document.next_run_time) if document.next_run_time else None
    )
    state = {
        "id": document.id,
        "func": "brew_view.scheduler:run_job",
        "trigger": construct_trigger(document.trigger_type, document.trigger),
        "executor": "default",
        "args": (),
        "kwargs": {
            "request_template": document.request_template,
            "job_id": str(document.id),
        },
        "name": document.name,
        "misfire_grace_time": document.misfire_grace_time,
        "coalesce": document.coalesce,
        "max_instances": document.max_instances,
        "next_run_time": next_run_time,
    }
    # Build the APScheduler job without invoking its constructor, then
    # wire it to the target scheduler / jobstore.
    job = APJob.__new__(APJob)
    job.__setstate__(state)
    job._scheduler = scheduler
    job._jobstore_alias = alias
    return job
def construct_job(job: Job, scheduler, alias="beer_garden"):
    """Convert a Beergarden job to an APScheduler one."""
    if job is None:
        return None

    if job.next_run_time:
        next_run_time = utc.localize(job.next_run_time)
    else:
        next_run_time = None

    state = {
        "id": job.id,
        "func": "beer_garden.scheduler:run_job",
        "trigger": construct_trigger(job.trigger_type, job.trigger),
        "executor": "default",
        "args": (),
        "kwargs": {"request_template": job.request_template, "job_id": job.id},
        "name": job.name,
        "misfire_grace_time": job.misfire_grace_time,
        "coalesce": job.coalesce,
        "max_instances": job.max_instances,
        "next_run_time": next_run_time,
    }
    # Allocate the APScheduler job without calling __init__ and attach
    # it to the given scheduler/jobstore before restoring its state.
    ap_job = APJob.__new__(APJob)
    ap_job._scheduler = scheduler
    ap_job._jobstore_alias = alias
    ap_job.__setstate__(state)
    return ap_job
def db_to_scheduler(document, scheduler, alias='beer_garden'):
    """Convert a database job to a scheduler's job."""
    next_run_time = (
        utc.localize(document.next_run_time)
        if document.next_run_time else None
    )
    job = APJob.__new__(APJob)
    job.__setstate__({
        'id': document.id,
        'func': 'brew_view.scheduler.runner:run_job',
        'trigger': construct_trigger(document.trigger_type, document.trigger),
        'executor': 'default',
        'args': (),
        'kwargs': {
            'request_template': document.request_template,
            'job_id': str(document.id),
        },
        'name': document.name,
        'misfire_grace_time': document.misfire_grace_time,
        'coalesce': document.coalesce,
        # NOTE(review): max_instances is hard-coded to 3 here, while a
        # sibling converter uses document.max_instances — confirm intended.
        'max_instances': 3,
        'next_run_time': next_run_time,
    })
    job._scheduler = scheduler
    job._jobstore_alias = alias
    return job
def _reconstitute_job(self, job_state):
    """Recreate a Job from an already-unpickled state dict."""
    from apscheduler.job import Job

    # __setstate__ expects the owning jobstore inside the state mapping.
    job_state['jobstore'] = self
    restored = Job.__new__(Job)
    restored.__setstate__(job_state)
    restored._scheduler = self._scheduler
    restored._jobstore_alias = self._alias
    return restored
def _reconstitute_job(self, job_state):
    """Unpickle a job state blob and rebuild the corresponding Job."""
    state = pickle.loads(job_state)
    state['jobstore'] = self  # __setstate__ needs the owning store
    job = Job.__new__(Job)
    job.__setstate__(state)
    # pylint: disable=protected-access
    job._scheduler = self._scheduler
    job._jobstore_alias = self._alias
    return job
def _reconstitute_job(self, row):
    """Rebuild a Job from a wm_jobs_t database row.

    Copies every persisted column onto a JobConf, derives the trigger
    and the timezone-aware next run time, then restores the Job via
    __setstate__ without calling its constructor.
    """
    # Column names mirror the wm_jobs_t create statement; copying them
    # in a loop replaces the original hand-written one-per-line block.
    # ('oupput_match_reg' is the actual column spelling in the schema.)
    conf = JobConf()
    for field in (
        'id', 'cmd', 'cron_str', 'name', 'desc', 'mails', 'phones',
        'team', 'owner', 'hosts', 'host_strategy', 'restore_strategy',
        'retry_strategy', 'error_strategy', 'exist_strategy',
        'running_timeout_s', 'status', 'modify_time', 'modify_user',
        'create_time', 'create_user', 'start_date', 'end_date',
        'oupput_match_reg', 'next_run_time',
    ):
        setattr(conf, field, getattr(row, field))

    job = Job.__new__(Job)
    job.conf = conf
    job.id = job.conf.id
    job._scheduler = self._scheduler
    job._jobstore_alias = self._alias
    job.trigger = self._create_trigger_by_conf(job)

    # next_run_time is a local timestamp; <= 0 means "not scheduled".
    t = (apscheduler.util.local_timestamp_to_datetime(conf.next_run_time)
         if conf.next_run_time > 0 else None)
    t = apscheduler.util.convert_to_ware_datetime(
        t, get_localzone(), 'conf.next_run_time')
    job.__setstate__({
        'version': 1,
        'conf': conf,
        'id': conf.id,
        'name': conf.name,
        'next_run_time': t,
    })
    return job
def _reconstitute_job(self, job_state):
    """Unpickle job state, tolerating payloads pickled under Python 2."""
    try:
        state = pickle.loads(job_state)
    except UnicodeDecodeError:
        # Unpickle py2 objects
        state = pickle.loads(job_state, encoding='latin1')
    state['jobstore'] = self
    job = Job.__new__(Job)
    job.__setstate__(state)
    job._scheduler = self._scheduler
    job._jobstore_alias = self._alias
    return job
def load_jobs(self):
    """Load every row from the jobs table into self.jobs.

    Rows that cannot be restored are logged and skipped rather than
    aborting the whole load.
    """
    jobs = []
    for row in self.engine.execute(select([self.jobs_t])):
        # Build the dict before anything that can raise, so the except
        # block can always name the job (previously `job_dict` was
        # unbound if Job.__new__ itself raised).
        job_dict = dict(row.items())
        try:
            job = Job.__new__(Job)
            job.__setstate__(job_dict)
            jobs.append(job)
        except Exception:
            logger.exception('Unable to restore job "%s"',
                             job_dict.get("name", "(unknown)"))
    self.jobs = jobs
def load_jobs(self):
    """Restore all jobs held in the backing store mapping."""
    restored = []
    for job_dict in itervalues(self.store):
        try:
            job = Job.__new__(Job)
            job.__setstate__(job_dict)
        except Exception:
            # Skip unrestorable entries but keep loading the rest.
            logger.exception('Unable to restore job "%s"',
                             job_dict.get('name', '(unknown)'))
        else:
            restored.append(job)
    self.jobs = restored
def load_jobs(self):
    """Load every row from the jobs table into self.jobs.

    Rows that cannot be restored are logged and skipped rather than
    aborting the whole load.
    """
    jobs = []
    for row in self.engine.execute(select([self.jobs_t])):
        # Build the dict before anything that can raise, so the except
        # block can always name the job (previously `job_dict` was
        # unbound if Job.__new__ itself raised).
        job_dict = dict(row.items())
        try:
            job = Job.__new__(Job)
            job.__setstate__(job_dict)
            jobs.append(job)
        except Exception:
            logger.exception('Unable to restore job "%s"',
                             job_dict.get('name', '(unknown)'))
    self.jobs = jobs
def _reconstitute_job(self, row):
    """Rebuild a Job from a wm_jobs_t database row.

    Copies every persisted column onto a JobConf, derives the trigger
    and the timezone-aware next run time, then restores the Job via
    __setstate__ without calling its constructor.
    """
    # Column names mirror the wm_jobs_t create statement; copying them
    # in a loop replaces the original hand-written one-per-line block.
    # ('oupput_match_reg' is the actual column spelling in the schema.)
    conf = JobConf()
    for field in (
        'id', 'cmd', 'cron_str', 'name', 'desc', 'mails', 'phones',
        'team', 'owner', 'hosts', 'host_strategy', 'restore_strategy',
        'retry_strategy', 'error_strategy', 'exist_strategy',
        'running_timeout_s', 'status', 'modify_time', 'modify_user',
        'create_time', 'create_user', 'start_date', 'end_date',
        'oupput_match_reg', 'next_run_time',
    ):
        setattr(conf, field, getattr(row, field))

    job = Job.__new__(Job)
    job.conf = conf
    job.id = job.conf.id
    job._scheduler = self._scheduler
    job._jobstore_alias = self._alias
    job.trigger = self._create_trigger_by_conf(job)

    # next_run_time is a local timestamp; <= 0 means "not scheduled".
    t = (apscheduler.util.local_timestamp_to_datetime(conf.next_run_time)
         if conf.next_run_time > 0 else None)
    t = apscheduler.util.convert_to_ware_datetime(
        t, get_localzone(), 'conf.next_run_time')
    job.__setstate__({
        'version': 1,
        'conf': conf,
        'id': conf.id,
        'name': conf.name,
        'next_run_time': t,
    })
    return job
def _db_to_job(self, row):
    """Convert a database row into an APScheduler Job.

    Supports 'date', 'cron' and 'interval' trigger types and raises
    ValueError for anything else (previously an unknown type fell
    through and crashed later with an unbound `trigger` NameError).
    Debug print() calls have been removed.
    """
    trigger_type = row['trigger_type']
    if trigger_type == 'date':
        trigger = DateTrigger(run_date=row['run_date'])
    elif trigger_type == 'cron':
        # crontab column holds "<values>,<keys>": the key letters map
        # back to cron field names through the inverted cronMap.
        keys = row['crontab'].split(',')[1]
        values = row['crontab'].split(',')[0].split(' ')
        cronMapRev = {v: k for k, v in cronMap.items()}
        crontab = {cronMapRev[k]: values[i] for i, k in enumerate(keys)}
        trigger = CronTrigger(**crontab)
    elif trigger_type == 'interval':
        trigger = IntervalTrigger(seconds=row['interval'])
    else:
        raise ValueError('Unknown trigger type: %r' % trigger_type)

    job = Job.__new__(Job)
    job.__setstate__({
        'id': row['id'],
        'name': row['name'],
        'func': row['func'],
        'args': json.loads(row['args']) if row['args'] else [],
        'kwargs': json.loads(row['kwargs']) if row['kwargs'] else {},
        'version': 1,
        'trigger': trigger,
        'executor': row['executor'],
        'start_date': row['start_date'],
        'end_date': row['end_date'],
        'next_run_time': utc_timestamp_to_datetime(
            row['next_run_time'].timestamp()),
        'coalesce': row['coalesce'],
        'misfire_grace_time': row['misfire_grace_time'],
        'max_instances': row['max_instances'],
        'jobstore': self,
    })
    job._scheduler = self._scheduler
    job._jobstore_alias = self._alias
    return job
def _reconstitute_job(self, job_state, job_json):
    """Rebuild a Job from either its JSON or its pickle serialization.

    When the store is configured for JSON, ``job_json`` is decoded with
    JobDecoder; otherwise the pickled ``job_state`` blob is used.
    (Replaces the non-idiomatic ``== True`` comparison and removes the
    commented-out debug prints.)
    """
    if self.use_json:
        state = json.loads(job_json, cls=JobDecoder)
    else:
        state = pickle.loads(job_state)
    state['jobstore'] = self
    job = Job.__new__(Job)
    job.__setstate__(state)
    job._scheduler = self._scheduler
    job._jobstore_alias = self._alias
    return job
def load_jobs(self):
    """Restore every job document stored in the Mongo collection."""
    loaded = []
    for job_dict in self.collection.find():
        try:
            job = Job.__new__(Job)
            # Mongo's _id becomes the job id; trigger/args/kwargs are
            # stored pickled and must be materialized before restore.
            job_dict['id'] = job_dict.pop('_id')
            for field in ('trigger', 'args', 'kwargs'):
                job_dict[field] = pickle.loads(job_dict[field])
            job.__setstate__(job_dict)
            loaded.append(job)
        except Exception:
            logger.exception('Unable to restore job "%s"',
                             job_dict.get('name', '(unknown)'))
    self.jobs = loaded
def load_jobs(self):
    """Load all rows from the jobs table, skipping unrestorable ones.

    Fixes: the Python-2 ``print e`` statement (a syntax error under
    Python 3) and the misuse of ``traceback.print_exc(e)`` (its argument
    is a depth limit, not an exception) are replaced by
    logger.exception, which already records the traceback; the dict is
    built up-front so the handler can always name the failing job.
    """
    jobs = []
    for row in self.engine.execute(self.jobs_t.select()):
        job_dict = dict(row.items())
        try:
            job = Job.__new__(Job)
            job.__setstate__(job_dict)
            jobs.append(job)
        except Exception:
            logger.exception('Unable to restore job "%s"',
                             job_dict.get('name', '(unknown)'))
    self.jobs = jobs
    return jobs
def load_jobs(self):
    """Load 'crecord_type: schedule' documents and build jobs from them.

    NOTE(review): this block appears truncated in the source — the
    outer try opened inside the for-loop has no matching except, and
    the collected `jobs` list is never assigned anywhere visible here.
    Python-2-only syntax (`except Exception, err`, `has_key`) is
    preserved byte-for-byte.
    """
    #continue standart execution
    jobs = []
    for job_dict in self.collection.find({'crecord_type': 'schedule'}):
        try:
            job = Job.__new__(Job)
            # Non-root owners may only run the reporting task.
            if job_dict['aaa_owner'] != 'root':
                if job_dict['kwargs']['task'] != 'task_reporting':
                    raise ValueError(
                        "User %s isn\'t allow to run task %s" %
                        (job_dict['aaa_owner'], job_dict['kwargs']['task']))
            #keep memory of id
            job_dict_id = job_dict['_id']
            job_dict['id'] = job_dict.pop('_id')
            if job_dict.has_key('runs'):
                job_dict['runs'] = job_dict['runs']
            else:
                job_dict['runs'] = 0
            job_dict['coalesce'] = False
            #try to get interval
            try:
                if job_dict['interval'] != None:
                    job_dict['trigger'] = IntervalTrigger(
                        timedelta(**job_dict['interval']))
            except Exception, err:
                pass
            #try to get simple
            try:
                if job_dict['date'] != None:
                    job_dict['trigger'] = SimpleTrigger(
                        datetime(*job_dict['date']))
            except Exception, err:
                pass
            #try to get crontab
            try:
                if job_dict['cron'] != None:
                    job_dict['trigger'] = CronTrigger(**job_dict['cron'])
            except Exception, err:
                pass
def pop(self, now):
    """Pop one sync item from the Redis queue.

    Items look like ``"job_id||change_type||job_str"``; ``job_str`` is
    either the literal ``'None'`` or a pickled job state.  Returns
    ``(job_id, change_type, job)`` — ``(0, '', None)`` when the queue
    is empty or the item is malformed.

    Fixes: the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to Exception, and the
    error path now returns a 3-tuple like every other path instead of
    the inconsistent ``(0, '')``.
    """
    item = self.redis.rpop(self.key)
    if item is None:
        return 0, '', None
    try:
        job_id, change_type, job_str = item.split('||')
        job_id = int(job_id)
        if job_str == 'None':
            job = None
        else:
            # NOTE(review): pickle.loads on queue data — only safe if
            # the queue is trusted/internal.
            job_state = pickle.loads(job_str)
            job = Job.__new__(Job)
            job.__setstate__(job_state)
            job.compute_next_run_time(now)
        return job_id, change_type, job
    except Exception:
        logging.getLogger('cron.backend').exception('sync item invalid')
        return 0, '', None
def load_jobs(self):
    """Load 'crecord_type: schedule' documents and build jobs from them.

    NOTE(review): this block appears truncated in the source — the
    outer try opened inside the for-loop has no matching except, and
    the collected `jobs` list is never assigned anywhere visible here.
    Python-2-only syntax (`except Exception, err`, `has_key`) is
    preserved byte-for-byte.
    """
    #continue standart execution
    jobs = []
    for job_dict in self.collection.find({'crecord_type': 'schedule'}):
        try:
            job = Job.__new__(Job)
            # Non-root owners may only run the reporting task.
            if job_dict['aaa_owner'] != 'root':
                if job_dict['kwargs']['task'] != 'task_reporting':
                    raise ValueError(
                        "User %s isn\'t allow to run task %s" %
                        (job_dict['aaa_owner'], job_dict['kwargs']['task']))
            #keep memory of id
            job_dict_id = job_dict['_id']
            job_dict['id'] = job_dict.pop('_id')
            if job_dict.has_key('runs'):
                job_dict['runs'] = job_dict['runs']
            else:
                job_dict['runs'] = 0
            job_dict['coalesce'] = False
            #try to get interval
            try:
                if job_dict['interval'] != None:
                    job_dict['trigger'] = IntervalTrigger(
                        timedelta(**job_dict['interval']))
            except Exception, err:
                pass
            #try to get simple
            try:
                if job_dict['date'] != None:
                    job_dict['trigger'] = SimpleTrigger(
                        datetime(*job_dict['date']))
            except Exception, err:
                pass
            #try to get crontab
            try:
                if job_dict['cron'] != None:
                    job_dict['trigger'] = CronTrigger(**job_dict['cron'])
            except Exception, err:
                pass
def get_job(self, id):
    """Fetch a single job row by id and reconstitute it.

    Returns None when the row is missing, the query fails, or the
    state cannot be restored.  Fixes: a failed query previously fell
    through to ``if row:`` with ``row`` unbound (NameError); the dict
    is now built before anything that can raise so the error handler
    can always name the job.
    """
    select = self.jobs_t.select().where(self.jobs_t.c.id == id)
    try:
        row = self.engine.execute(select).fetchone()
    except Exception as e:
        logger.exception(e)
        return None
    if row:
        job_dict = dict(row.items())
        try:
            job = Job.__new__(Job)
            job.__setstate__(job_dict)
            return job
        except Exception:
            logger.exception("Unable to restore job '%s'",
                             job_dict.get('name', 'unknown'))
    return None
def _prepare_job(job: APSJob) -> APSJob:
    """
    Erase all unpickable data from telegram.ext.Job

    Args:
        job (:obj:`apscheduler.job`): The job to be processed.
    """
    # make new job which is copy of actual job cause
    # modifying actual job also modifies jobs in threadpool
    # executor which are currently running/going to run and
    # we'll get incorrect argument instead of CallbackContext.
    prepped_job = APSJob.__new__(APSJob)
    prepped_job.__setstate__(job.__getstate__())
    # Remove CallbackContext from job args since it references the
    # dispatcher, which is unpickleable; it is recreated in
    # _reconstitute_job.  Guard against an empty args tuple so jobs
    # scheduled without arguments no longer raise IndexError.
    if job.args and isinstance(job.args[0], CallbackContext):
        tg_job = job.args[0].job
        # APScheduler stores args as tuple.
        prepped_job.args = (tg_job.name, tg_job.context)
    return prepped_job
def load_jobs(self):
    """Fetch every job hash from Redis in one pipeline and restore it."""
    restored = []
    pipeline = self.redis.pipeline()
    for key in self.redis.keys(self.key_prefix + "*"):
        pipeline.hgetall(key)
    for job_dict in pipeline.execute():
        job_state = {}
        try:
            job = Job.__new__(Job)
            # The pickled state carries most fields; runs and
            # next_run_time are stored as separate hash entries.
            job_state = pickle.loads(job_dict["job_state".encode()])
            job_state["runs"] = long(job_dict["runs".encode()])
            dateval = job_dict["next_run_time".encode()].decode()
            job_state["next_run_time"] = datetime.strptime(
                dateval, "%Y-%m-%dT%H:%M:%S")
            job.__setstate__(job_state)
            restored.append(job)
        except Exception:
            logger.exception('Unable to restore job "%s"',
                             job_state.get("name", "(unknown)"))
    self.jobs = restored
def load_jobs(self):
    """Fetch every job hash from Redis in one pipeline and restore it."""
    restored = []
    pipeline = self.redis.pipeline()
    for key in self.redis.keys(self.key_prefix + '*'):
        pipeline.hgetall(key)
    for job_dict in pipeline.execute():
        job_state = {}
        try:
            job = Job.__new__(Job)
            # The pickled state carries most fields; runs and
            # next_run_time are stored as separate hash entries.
            job_state = pickle.loads(job_dict['job_state'.encode()])
            job_state['runs'] = long(job_dict['runs'.encode()])
            dateval = job_dict['next_run_time'.encode()].decode()
            job_state['next_run_time'] = datetime.strptime(
                dateval, '%Y-%m-%dT%H:%M:%S')
            job.__setstate__(job_state)
            restored.append(job)
        except Exception:
            logger.exception('Unable to restore job "%s"',
                             job_state.get('name', '(unknown)'))
    self.jobs = restored
async def _decode_job(self, in_job):
    """Deserialize a stored job document, injecting live config/bot refs."""
    if in_job is None:
        return None

    state = pickle.loads(base64.b64decode(in_job["job_state"]))
    # args is a tuple; rebuild it with the first element's mapping
    # updated to carry the current config and bot objects.
    args = list(state["args"])
    args[0]["config"] = self.config
    args[0]["bot"] = self.bot
    state["args"] = tuple(args)

    job = Job.__new__(Job)
    job.__setstate__(state)
    job._scheduler = self._scheduler
    job._jobstore_alias = self._alias
    return job
def from_dict(cls, d: dict) -> 'Storable':
    """Alternate constructor: rebuild the wrapped Job from a state dict."""
    job = Job.__new__(Job)
    job.__setstate__(pickle.loads(d['job_state']))
    return cls(job=job)
def load_jobs(self):
    """Load 'crecord_type: schedule' records into APScheduler jobs.

    Fixes vs. original:
      * the "no trigger" error mixed a ``%s`` placeholder with
        ``str.format``, so the task name was never substituted — now a
        proper ``{}`` placeholder;
      * Python-2-only ``dict.has_key()`` replaced by ``in``;
      * redundant self-assignments of args/kwargs/runs removed.
    """
    jobs = []
    for job_dict in self.collection.find({'crecord_type': 'schedule'}):
        try:
            job = Job.__new__(Job)
            # Only account.root may run anything; other owners are
            # limited to the reporting task.
            if job_dict['aaa_owner'] != 'account.root':
                if job_dict['kwargs']['task'] != 'task_reporting':
                    raise ValueError(
                        "User %s isn\'t allow to run task %s" %
                        (job_dict['aaa_owner'], job_dict['kwargs']['task']))
            # keep memory of id
            job_dict_id = job_dict['_id']
            job_dict['id'] = job_dict.pop('_id')
            if 'runs' not in job_dict:
                job_dict['runs'] = 0
            job_dict['coalesce'] = False
            # Trigger priority: interval, then date, then cron.
            interval = job_dict.get('interval')
            if interval is not None:
                job_dict[TRIGGER] = IntervalTrigger(timedelta(**interval))
            else:
                date = job_dict.get('date')
                if date is not None:
                    job_dict[TRIGGER] = SimpleTrigger(datetime(*date))
                else:
                    cron = job_dict.get('cron')
                    if cron is not None:
                        job_dict[TRIGGER] = CronTrigger(**cron)
            if TRIGGER not in job_dict:
                raise ValueError(
                    "No interval, nor date, nor cron is given in task {}"
                    .format(job_dict['crecord_name']))
            job_dict['next_run_time'] = job_dict['trigger'].get_next_fire_time(
                datetime.now())
            job_dict['max_runs'] = None
            job_dict['max_instances'] = 3
            job_dict['name'] = job_dict['crecord_name']
            job_dict['misfire_grace_time'] = 1
            job_dict['func_ref'] = (
                'apschedulerlibs.aps_to_celery:launch_celery_task')
            job.__setstate__(job_dict)
            jobs.append(job)
            # change flag to true
            self.collection.update(
                {'_id': job_dict_id},
                {"$set": {'loaded': True,
                          'next_run_time': job_dict['next_run_time']}},
                True)
        except Exception:
            logger.exception('Unable to restore job "%s"',
                             job_dict.get('name', '(unknown)'))
    logger.info(' + %s jobs loaded' % len(jobs))
    self.jobs = jobs