def kwargs(self):
    default = {
        "id": self.aps_job_id,
        "func": app.run,
        "replace_existing": True,
        "args": [self.service.id],
        "kwargs": self.run_properties(),
    }
    if self.scheduling_mode == "cron":
        self.periodic = True
        expression = self.crontab_expression.split()
        # Standard crontab numbers days with Sunday as 0 (or 7), while
        # APScheduler's CronTrigger numbers Monday as 0, so translate the
        # day-of-week field to unambiguous names.
        mapping = {
            "0": "sun", "1": "mon", "2": "tue", "3": "wed",
            "4": "thu", "5": "fri", "6": "sat", "7": "sun", "*": "*",
        }
        expression[-1] = ",".join(
            mapping[day] for day in expression[-1].split(",")
        )
        trigger = {"trigger": CronTrigger.from_crontab(" ".join(expression))}
    elif self.frequency:
        self.periodic = True
        frequency_in_seconds = int(self.frequency) * {
            "seconds": 1,
            "minutes": 60,
            "hours": 3600,
            "days": 86400,
        }[self.frequency_unit]
        trigger = {
            "trigger": "interval",
            "start_date": self.aps_date("start_date"),
            "end_date": self.aps_date("end_date"),
            "seconds": frequency_in_seconds,
        }
    else:
        self.periodic = False
        trigger = {"trigger": "date", "run_date": self.aps_date("start_date")}
    return default, trigger
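# A minimal usage sketch for the kwargs() pattern above: the two returned
# dicts are merged into a single APScheduler add_job() call. `scheduler` and
# `task` are hypothetical stand-ins for objects defined elsewhere.
default, trigger = task.kwargs()
scheduler.add_job(**default, **trigger)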
def main(config):
    """Schedule crons with APScheduler in daemon mode.

    All crons are scheduled in UTC.
    """
    cron_entry = parse_crontab(config)
    scheduler = BackgroundScheduler(daemon=True)
    scheduler.start()
    for cron in cron_entry:
        # Resolve the function to run from its module path and attribute name.
        module = getattr(
            importlib.import_module(cron["import_script"]), cron["exec_func"]
        )
        scheduler.add_job(
            module,
            CronTrigger.from_crontab(cron["crontab_time"], timezone="UTC"),
            id=cron["cronname"],
        )
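# Hypothetical shape of one entry that parse_crontab() is assumed to yield
# for the loop above; the key names come straight from the snippet, the
# values are illustrative.
example_entry = {
    "cronname": "nightly-report",
    "import_script": "reports.nightly",  # module path for import_module()
    "exec_func": "run",                  # attribute looked up on the module
    "crontab_time": "0 2 * * *",         # standard five-field crontab string
}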
def __init__(self, config, routine_id, desc, num_cmd, cmd_list=None, crontab_list=None):
    # Avoid mutable default arguments: a shared default list would leak
    # state between instances.
    cmd_list = cmd_list if cmd_list is not None else []
    crontab_list = crontab_list if crontab_list is not None else []
    self.main_conf = config
    self.num_cmd = num_cmd
    self.routine_id = routine_id
    self.cmd_list, self.crontab_list = cmd_list, crontab_list
    self.sched = BackgroundScheduler()
    for cmd, crontab in zip(cmd_list, crontab_list):
        self.sched.add_job(
            self.sendCommand,
            CronTrigger.from_crontab(crontab),
            kwargs={"cmd": cmd},
        )
async def add_cron_job(
    job_id: str = Body(..., title="job id", embed=True),
    crontab: str = Body("*/1 * * * *", title="crontab expression"),
    run_time: int = Body(
        # Note: this default is evaluated once at import time, not per request.
        time.time(),
        title="first run time",
        description="runs immediately by default",
        embed=True,
    ),
):
    res = Schedule.get_job(job_id=job_id)
    if res:
        return resp_fail(msg=f"{job_id} job already exists")
    schedule_job = Schedule.add_job(
        cron_task,
        CronTrigger.from_crontab(crontab),
        args=(job_id,),
        id=job_id,  # job ID
        next_run_time=datetime.fromtimestamp(run_time),
    )
    return resp_ok(data={"job_id": schedule_job.id})
def update_periodic(self, policy):
    """Update scheduled policies using cron-style periodic scheduling."""
    trigger = CronTrigger.from_crontab(policy.data["mode"]["schedule"])
    trigger.jitter = jitter_seconds
    self.scheduler.add_job(
        self.run_policy,
        trigger,
        id=policy.name,
        name=policy.name,
        args=[policy, None, None],
        coalesce=True,
        max_instances=1,
        replace_existing=True,
        misfire_grace_time=20,
    )
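# from_crontab() does not accept a jitter argument, which is why the snippet
# above assigns the attribute after construction. An equivalent sketch builds
# the trigger field-by-field so jitter can be passed directly (the field
# values here are illustrative):
from apscheduler.triggers.cron import CronTrigger

trigger = CronTrigger(minute="*/15", jitter=120)  # same effect as .jitter = 120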
def add(self, metric):
    extra = {"local": __name__, "method": sys._getframe().f_code.co_name}
    try:
        # add_job() drops keyword trigger arguments (such as seconds or
        # jitter) when an explicit trigger instance is passed, so the jitter
        # is set on the trigger itself; seconds has no meaning for a cron
        # trigger and is removed.
        trigger = CronTrigger.from_crontab(metric.cron)
        trigger.jitter = 120
        self.scheduler.add_job(
            self.tick,
            trigger,
            args=[metric],
            id=metric.getUniq(),
            replace_existing=True,
        )
        logMSG = "ADD - UNIQ {} - {}".format(metric.getUniq(), metric)
        self.ctx.log.debug(logMSG, extra=extra)
    except Exception as identifier:
        logMSG = "Fail to ADD in Cron"
        self.ctx.log.exception(logMSG, error=identifier, extra=extra)
        raise
def init_socket():
    global ws
    global _authenticated
    global _connecting
    global socket_connected
    global td_connecting
    try:
        cluster = STOCKS_CLUSTER
        host = DEFAULT_HOST
        url = f"wss://{host}/{cluster}"
        print("Polygon url", url)
        _authenticated = threading.Event()
        if socket_connected:
            print("init_socket - socket previously connected, deleting previous socket.")
            ws.on_message = None
            ws.on_open = None
            ws.close = None
            del ws
            # forcibly set ws to None
            ws = None
        ws = websocket.WebSocketApp(
            url,
            on_message=process_tick_data,
            on_error=process_on_error,
            on_close=process_on_close,
        )
        # websocket.enableTrace(True)
        ws.on_open = on_open
        thread = threading.Thread(target=ws.run_forever)
        thread.start()
        socket_connected = True
        # Remove the check_socket job left over from the previous socket.
        for job in sched.get_jobs():
            if "check_socket" in job.id:
                print("init_socket - removing job 'check_socket' from previous socket connection.")
                sched.remove_job("check_socket")
                break
        # Sometimes on_close is not called when the socket closes, so run a
        # periodic check instead:
        # https://github.com/websocket-client/websocket-client/issues/452
        sched.add_job(
            check_socket,
            CronTrigger.from_crontab("*/1 * * * *", timezone="US/Eastern"),
            id="check_socket",
        )
        print("init_socket completed.", datetime.datetime.now(tz=est_time_zone).strftime("%Y-%m-%d %H:%M:%S"))
    except Exception as e:
        print("init_socket failed", e)
def __init__(
    self,
    client: AsyncClient,
    store,
    room_id: str,
    reminder_text: str,
    start_time: Optional[datetime] = None,
    timezone: Optional[str] = None,
    recurse_timedelta: Optional[timedelta] = None,
    cron_tab: Optional[str] = None,
    target_user: Optional[str] = None,
    alarm: bool = False,
):
    self.client = client
    self.store = store
    self.room_id = room_id
    self.timezone = timezone
    self.start_time = start_time
    self.reminder_text = reminder_text
    self.cron_tab = cron_tab
    self.recurse_timedelta = recurse_timedelta
    self.target_user = target_user
    self.alarm = alarm

    # Schedule the reminder
    # Determine how the reminder is triggered
    if cron_tab:
        # Set up a cron trigger
        trigger = CronTrigger.from_crontab(cron_tab, timezone=timezone)
    elif recurse_timedelta:
        # Use an interval trigger (runs multiple times)
        trigger = IntervalTrigger(
            # timedelta.seconds does NOT give you the timedelta converted to
            # seconds; use a method from apscheduler instead.
            seconds=int(timedelta_seconds(recurse_timedelta)),
            start_date=start_time,
            timezone=timezone,
        )
    else:
        # Use a date trigger (runs only once)
        trigger = DateTrigger(run_date=start_time, timezone=timezone)

    # Note down the job for later manipulation
    self.job = SCHEDULER.add_job(self._fire, trigger=trigger)
    self.alarm_job = None
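# The comment above about timedelta.seconds deserves a concrete example:
# .seconds is only the seconds *component* of the duration, not the whole
# duration converted to seconds.
from datetime import timedelta

td = timedelta(days=1, seconds=30)
print(td.seconds)          # 30 -- just the seconds field
print(td.total_seconds())  # 86430.0 -- the whole duration in seconds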
def validate(self, attrs):
    name = attrs.get("name")
    cron = attrs.get("cron")
    job_function = None
    # Look up the task function by name among the members of the tasks module.
    for obj in getmembers(tasks):
        if isfunction(obj[1]) and obj[0] == name:
            job_function = obj[1]
            attrs["job_function"] = job_function
            break
    if job_function is None:
        raise serializers.ValidationError("Scheduled task function does not exist")
    try:
        trigger = CronTrigger.from_crontab(cron)
        attrs["trigger"] = trigger
    except ValueError:
        raise serializers.ValidationError("Malformed cron expression")
    return attrs
def createTrigger(self, tag):
    if not tag:
        raise ValueError("Tag cannot be empty")
    customparams = self.batchconfigreader.getConfiguration(tag)
    if not any(customparams):
        print("no custom params")
    else:
        customparams.update(self.globalparam)
    # Create the trigger, falling back to the daily default expression.
    print("...", customparams, "...")
    if not customparams[IniFileTags.CRONEXPRESSION]:
        customparams[IniFileTags.CRONEXPRESSION] = ConfigConstants.DAILY_CRON_EXPRESSION
    return CronTrigger.from_crontab(customparams[IniFileTags.CRONEXPRESSION])
def add_job(ctx: DipDupContext, scheduler: AsyncIOScheduler, job_name: str, job_config: JobConfig) -> None:
    async def _atomic_wrapper(ctx, args):
        async with in_global_transaction():
            await job_config.callback_fn(ctx, args)

    trigger = CronTrigger.from_crontab(job_config.crontab)
    scheduler.add_job(
        func=_atomic_wrapper if job_config.atomic else job_config.callback_fn,
        id=job_name,
        name=job_name,
        trigger=trigger,
        kwargs=dict(
            ctx=ctx,
            args=job_config.args,
        ),
    )
async def startup_event():
    cleanupSettings = SETTINGS["database"]["cleanup"]
    if cleanupSettings["automatic"]["enable"]:
        cleanupService = DatabaseCleanupService(cleanupSettings)
        try:
            cronTrigger = CronTrigger.from_crontab(
                cleanupSettings["automatic"]["cronSchedule"])
        except ValueError as e:
            raise ValueError(
                f'Invalid syntax for settings option "cronSchedule": {str(e)}'
            ) from e

        from logic import JobScheduler
        JobScheduler.SCHEDULER.schedule_automatic_job(
            cleanupService.cleanup, [next(get_database())], cronTrigger)
def startup():
    name = request.values["name"] if "name" in request.values else ""
    extendJobDefine = JobDefine.objects(Q(name=name)).first()
    if extendJobDefine is None:
        return XaResult.error(msg="Please provide the name of an existing job")
    try:
        cron = CronTrigger.from_crontab(extendJobDefine.jobCron)
        scheduler.add_job(
            id=name,
            func=job_task_load.to_job_instance,
            trigger=cron,
            args=(name,),
        )
        JobDefine.objects(name=name).update_one(isRun=True)
        app.logger.info("%s job started successfully!", name)
    except Exception as e:
        app.logger.error("%s job failed to start! Error: %s", name, e)
        return XaResult.error(msg=str(e))
def getTriggerArgs(trigger_args):
    # Trigger examples:
    #   ['cron', '* * * * *']
    #   ['interval', 'minutes', '6']
    try:
        targs = {}
        trigger_type = trigger_args[0]
        if trigger_type == "cron":
            # Pass the built trigger itself as the trigger value; APScheduler
            # drops extra keyword trigger arguments when given an instance,
            # so stashing it under a separate 'cron' key would never work.
            targs["trigger"] = CronTrigger.from_crontab(trigger_args[1])
        elif trigger_type == "interval":
            targs["trigger"] = trigger_type
            targs[trigger_args[1]] = int(trigger_args[2])
        elif trigger_type == "date":
            targs["trigger"] = trigger_type
            targs["run_date"] = trigger_args[1]
        return targs
    except Exception as e:
        print(str(e))
        return {}
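# A minimal usage sketch for getTriggerArgs(), assuming the returned dict is
# unpacked into APScheduler's add_job(); `scheduler` and `my_task` are
# hypothetical.
scheduler.add_job(my_task, **getTriggerArgs(["cron", "*/5 * * * *"]))
scheduler.add_job(my_task, **getTriggerArgs(["interval", "minutes", "6"]))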
def add_task(self, run_id, task_id, periodic_config):
    if "cron" in periodic_config:
        self.sc.add_job(
            id=self._generate_job_id(run_id, task_id),
            func=trigger_periodic_task,
            args=(self.mailbox, run_id, task_id),
            trigger=CronTrigger.from_crontab(periodic_config["cron"]),
        )
    elif "interval" in periodic_config:
        interval_config: dict = periodic_config["interval"]
        seconds = interval_config.get("seconds", 0)
        minutes = interval_config.get("minutes", 0)
        hours = interval_config.get("hours", 0)
        days = interval_config.get("days", 0)
        weeks = interval_config.get("weeks", 0)
        if seconds < 10 and minutes <= 0 and hours <= 0 and days <= 0 and weeks <= 0:
            self.log.error("Interval must be at least 10 seconds")
            return
        self.sc.add_job(
            id=self._generate_job_id(run_id, task_id),
            func=trigger_periodic_task,
            args=(self.mailbox, run_id, task_id),
            trigger=IntervalTrigger(
                seconds=seconds, minutes=minutes, hours=hours, days=days, weeks=weeks
            ),
        )
    else:
        self.log.error(
            "Periodic config must be of type cron or interval; "
            "current periodic config: {}".format(periodic_config)
        )
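# Hypothetical periodic_config payloads accepted by add_task() above,
# inferred from the two branches; the values are illustrative.
example_cron = {"cron": "0 */2 * * *"}           # cron branch
example_interval = {"interval": {"minutes": 5}}  # interval branch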
def schedule_run(data):
    # data = request.get_json()
    response = {}
    env = {}
    command = []
    renku = False
    container = data["container"]
    response["container"] = container
    print(container)
    tool = data["tool"]
    response["tool"] = tool
    print(tool)
    dataset = data["dataset"]
    response["dataset"] = dataset
    print(dataset)
    if "env" in data:
        env = data["env"]
    if "command" in data:
        command = data["command"]
    if data["renku"]:
        renku = True
    if data["cron"]:
        freq = data["freq"]
        if freq == "daily":
            job = scheduler.add_job(
                run_container, "interval", days=1,
                args=[container, command, env, tool, dataset, renku],
                id=tool, replace_existing=True,
                misfire_grace_time=3600, coalesce=True)
        elif freq == "weekly":
            job = scheduler.add_job(
                run_container, "interval", weeks=1,
                args=[container, command, env, tool, dataset, renku],
                id=tool, replace_existing=True,
                misfire_grace_time=3600, coalesce=True)
        else:
            # Anything other than daily/weekly is treated as a crontab string.
            job = scheduler.add_job(
                run_container, CronTrigger.from_crontab(freq),
                args=[container, command, env, tool, dataset, renku],
                id=tool, replace_existing=True,
                misfire_grace_time=3600, coalesce=True)
        response["job"] = job.id
        return response
    else:
        response["exec_result"] = run_container(container, command, env, tool, dataset, renku)
        return response
def add_all_jobs(scheduler, jobs):
    for name, options in jobs.items():
        trigger = CronTrigger.from_crontab(options["crontab"])
        scheduler.add_job(
            id=name,
            name=name,
            func=enqueue_dramatiq_job,
            kwargs={
                "queue_name": options.get("queue_name", "default"),
                "actor_name": options["func"],
                "args": options.get("args", []),
                "kwargs": options.get("kwargs", {}),
                "options": {"max_retries": 0},
            },
            trigger=trigger,
            replace_existing=True,
            misfire_grace_time=10,
        )
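# Hypothetical shape of the jobs mapping consumed by add_all_jobs() above;
# the key names mirror the lookups in the loop, the values are illustrative.
jobs = {
    "send-digest": {
        "crontab": "0 8 * * mon-fri",
        "func": "send_digest_email",  # dramatiq actor name
        "queue_name": "emails",       # optional, defaults to "default"
        "args": [],
        "kwargs": {},
    },
}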
def kwargs(self) -> Tuple[dict, dict]:
    default = {
        "id": self.aps_job_id,
        "func": threaded_job,
        "replace_existing": True,
        "args": [
            self.job.id,
            self.aps_job_id,
            self.compute_targets(),
            self.payload,
        ],
    }
    if self.scheduling_mode == "cron":
        self.periodic = True
        trigger = {"trigger": CronTrigger.from_crontab(self.crontab_expression)}
    elif self.frequency:
        self.periodic = True
        frequency_in_seconds = int(self.frequency) * {
            "seconds": 1,
            "minutes": 60,
            "hours": 3600,
            "days": 86400,
        }[self.frequency_unit]
        trigger = {
            "trigger": "interval",
            "start_date": self.aps_date("start_date"),
            "end_date": self.aps_date("end_date"),
            "seconds": frequency_in_seconds,
        }
    else:
        self.periodic = False
        trigger = {"trigger": "date", "run_date": self.aps_date("start_date")}
    return default, trigger
def handle(self, *args, **options):
    scheduler = BackgroundScheduler()
    tasks = models.Task.objects.all()
    for task in tasks:
        scheduler.add_job(
            function_wrap(task.command),
            CronTrigger.from_crontab(task.cron_expression, timezone=pytz.UTC),
            id=str(task),
            next_run_time=datetime.datetime.now(tz=pytz.UTC),
        )
    post_delete.connect(on_delete_task, sender=models.Task, dispatch_uid="on_delete_task")
    post_save.connect(on_save_task, sender=models.Task, dispatch_uid="on_save_task")
    scheduler.start()
    while True:
        time.sleep(1000)
def run_task_cron(self, job) -> Job:
    task = BackgroundJobs.get_job_ref(job.task_name)
    cron_args = {k: v for k, v in job.cron_options.dict().items() if v}
    if "expression" in cron_args:
        # A full crontab expression takes precedence over individual fields.
        trigger = CronTrigger.from_crontab(cron_args.pop("expression"))
    else:
        trigger = "cron"
    tsk_kwargs = {"kwargs": job.task_args, **self.default_task_options()}
    if job.meta_data:
        tsk_kwargs.update({**job.meta_data})
    job_params = dict(
        kwargs=tsk_kwargs,
        replace_existing=True,
        id=tsk_kwargs["message_id"],
        trigger=trigger,
    )
    if isinstance(trigger, str):
        # Individual cron fields are forwarded to add_job() as trigger args.
        job_params.update(**cron_args)
    job = self._scheduler.add_job(task.send_with_options, **job_params)
    self.tasks[tsk_kwargs["message_id"]] = job
    return job
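# Hypothetical cron_options payloads for run_task_cron() above: either a
# full crontab expression, or individual CronTrigger fields forwarded to
# add_job(); the values are illustrative.
options_expression = {"expression": "30 6 * * *"}
options_fields = {"hour": "6", "minute": "30"}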
def schedule(obj, dry_run, create, prune, info, progress):
    # Align APScheduler's loggers with our own log level.
    for name in ["apscheduler.scheduler", "apscheduler.executors.default"]:
        logging.getLogger(name).setLevel(logger.getEffectiveLevel())

    executors = {"default": ThreadPoolExecutor(1)}
    job_defaults = {"coalesce": True, "max_instances": 1}
    scheduler = BlockingScheduler(executors=executors, job_defaults=job_defaults)

    def job(task, repo, logger, verbose, dry_run, create, prune, info, progress):
        logger.info("Running scheduled job")
        handle_backup_task(task, repo, logger, verbose, dry_run, create, prune, info, progress)
        time.sleep(65)

    tasks = obj["config"]["tasks"]
    verbose = obj["verbose"]
    logger.info("Preparing scheduling of %d tasks", len(tasks))
    for task in tasks:
        repos = task["repositories"]
        logger.info("Task has %d target repositories", len(repos))
        for repo in repos:
            crontab = repo["schedule"]
            logger.debug("Scheduling backup task with: %s", crontab)
            args = (task, repo, logger, verbose, dry_run, create, prune, info, progress)
            scheduler.add_job(job, args=args, trigger=CronTrigger.from_crontab(crontab))

    try:
        logger.info("Starting scheduler")
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
async def groups_sending_control():
    # Get the list of tuples describing all groups
    all_groups = await db.get_all_groups_info()
    for group in all_groups:
        group_params = get_group_params(group)
        cron_obj = scheduler.get_job(job_id=str(group_params["id"]))
        if cron_obj is not None:
            scheduler.remove_job(job_id=str(group_params["id"]))
        if group_params["status"] == 1:
            # Normalize "8, 12, 18" from the database into "8,12,18" for cron.
            from_db_hours = ",".join(group_params["send_hours"].split(", "))
            # Pick a random minute (1-3) so groups do not all fire at once.
            scheduler.add_job(
                group_regular_sending,
                CronTrigger.from_crontab(f"{random.randint(1, 3)} {from_db_hours} * * *"),
                args=(group_params,),
                id=str(group_params["id"]),
            )
def on_save_task(sender, instance, created, **kwargs):
    if not created:
        try:
            scheduler.remove_job(str(instance))
            logger.info(f"task '{instance}' was removed successfully")
        except Exception as e:  # noqa
            logger.error(f"task '{instance}' could not be removed")
            logger.exception(e)
    if instance.enable:
        try:
            scheduler.add_job(
                function_wrap(instance.command),
                CronTrigger.from_crontab(instance.cron_expression, timezone=pytz.UTC),
                id=str(instance),
                next_run_time=datetime.datetime.now(tz=pytz.UTC),
            )
            logger.info(f"task '{instance}' was created successfully")
        except Exception as e:
            logger.error(f"task '{instance}' could not be created")
            logger.exception(e)
def main(cfg):
    with open(cfg) as _in:
        # safe_load avoids arbitrary object construction from untrusted YAML;
        # plain yaml.load without a Loader is deprecated.
        config = yaml.safe_load(_in)
    server_cfg = config.pop("server")
    for section in ("db", "dump", "backend"):
        APP.config[section] = config.get(section)
    dictConfig(config)
    SCHEDULER.start()
    if "cron" in APP.config["dump"]:
        SCHEDULER.add_job(
            func=dump,
            args=(APP, LOGGER),
            trigger=CronTrigger.from_crontab(APP.config["dump"]["cron"]),
            id="cron_dumper",
        )
    # For now serving is available only for the fs backend, since serving
    # public files from s3 makes no sense.
    API.add_resource(Jobs, "/jobs", endpoint="jobs")
    API.add_resource(Health, "/health", endpoint="health")
    API.add_resource(StaticContent, "/{}".format(C.ZIP_NAME), endpoint="Json")
    return serve(APP, **server_cfg)
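# Hypothetical structure of the YAML config consumed by main() above,
# expressed as the dict it would parse to; the section names come from the
# function body, the values are illustrative. Whatever remains after the
# known sections are read is handed to logging's dictConfig().
example_config = {
    "server": {"host": "0.0.0.0", "port": 8080},  # popped and passed to serve()
    "db": {},
    "backend": {},
    "dump": {"cron": "0 3 * * *"},  # optional: schedules the cron_dumper job
}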
def get_trigger(
    schedule_config: ScheduleConfig,
) -> Optional[Union[IntervalTrigger, CronTrigger]]:
    """Get the appropriate APScheduler trigger for a given ScheduleConfig.

    Args:
        schedule_config (~brut.config.ScheduleConfig):
            The config to interpret as an APScheduler trigger.

    Returns:
        Optional[Union[
            ~apscheduler.triggers.interval.IntervalTrigger,
            ~apscheduler.triggers.cron.CronTrigger
        ]]: The APScheduler trigger, if one can be determined.
    """
    if schedule_config.crontab is not None:
        return CronTrigger.from_crontab(schedule_config.crontab)
    elif schedule_config.interval is not None:
        return IntervalTrigger(**file_config.to_dict(schedule_config.interval))
    return None
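# A minimal sketch of dispatching on the trigger returned above; the
# ScheduleConfig fields (crontab/interval) come from the function body,
# while `scheduler` and `fetch` are hypothetical.
trigger = get_trigger(schedule_config)
if trigger is not None:
    scheduler.add_job(fetch, trigger=trigger)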
def create(self, request, *args, **kwargs):
    name = request.data.get("name")
    try:
        plan = Plan.objects.get(name=name)
        return Response(f"plan {plan.name} already exists", status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except ObjectDoesNotExist:
        pass
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    self.perform_create(serializer)
    headers = self.get_success_headers(serializer.data)
    plan = Plan.objects.get(name=request.data.get("name"))
    project_id = request.data.get("projectId")
    task_run_env = request.data.get("taskRunEnv")
    task_status = request.data.get("taskStatus")
    task_crontab = request.data.get("taskCrontab")
    task_added = ""
    if task_status == "1":
        # Scheduled-task switch is on: create the scheduled task.
        run_user_nickname = "scheduled task"
        user_id = "task"
        task_added = scheduler.add_job(
            func=run_plan_engine,
            trigger=CronTrigger.from_crontab(task_crontab),
            id=str(plan.id),
            args=[project_id, plan.id, task_run_env, run_user_nickname, user_id],
            max_instances=1,
            replace_existing=True,
        )
    data = serializer.data
    data["taskAdded"] = str(task_added)
    return Response(data, status=status.HTTP_201_CREATED, headers=headers)
def test_new_circuit_with_frequency(
    self, validate_mock, scheduler_add_job_mock, trigger_mock
):
    """Test adding a new circuit with a frequency."""
    scheduler_add_job_mock.return_value = True
    validate_mock.return_value = True
    frequency = "* * * * *"
    circuit_scheduler = CircuitSchedule(action="create", frequency=frequency)
    trigger = CronTrigger.from_crontab(circuit_scheduler.frequency, timezone=utc)
    trigger_mock.return_value = trigger
    options = {
        "controller": get_controller_mock(),
        "name": "my evc1",
        "uni_a": "uni_a",
        "uni_z": "uni_z",
        "start_date": "2019-08-09T19:25:06",
        "circuit_scheduler": [circuit_scheduler],
    }
    evc = EVC(**options)
    self.scheduler.add(evc)
    expected_parameters = {
        "id": circuit_scheduler.id,
        "end_date": None,
        "start_date": datetime.datetime(
            2019, 8, 9, 19, 25, 6, 0, tzinfo=datetime.timezone.utc
        ),
    }
    scheduler_add_job_mock.assert_called_once_with(
        evc.deploy, trigger, **expected_parameters
    )
def timer(task_id):
    """Timer entry point.

    Note: pymongo is thread-safe, but sharing a client across processes
    raises a warning; setting connect=False is the commonly suggested fix,
    though it does not seem to help much.

    Task types:
    1. Alert polling task (the default, type=1): runs the alert logic at the
       configured minute interval.
    2. Periodic statistics task: runs the configured queries according to
       the configured crontab rule.
    3. One-shot count task: runs once and exits.
    """
    pid = os.getpid()
    logger.info("task run pid:{}".format(pid))
    # Record the PID of this task's worker so the control process can skip
    # its check while the scheduled job in this process polls periodically.
    db.tasks.find_one_and_update({"_id": task_id}, {"$set": {"pid": pid}})
    # Config for the polling task running in this process.
    task_config = db.tasks.find_one({"_id": task_id})
    type = task_config.get("type")
    if type == 1:
        # Periodic alert polling task.
        timeCell = task_config.get("timeCell")  # polling period in minutes
        sched = BlockingScheduler()
        sched.add_job(logAlert_update_info, "interval", seconds=timeCell * 60, args=(task_id,))
        logger.info("logAlert_update_info function will start, task_id is {}, timeCell is {}min".format(task_id, timeCell))
        sched.start()
    elif type == 2:
        # Periodic statistics task: the crontab rule decides the statistics
        # window (daily, weekly, or monthly determine the query's start and
        # end times); everything is aggregated into a single email, with no
        # SMS option anymore.
        interval = task_config.get("interval")
        crontab = task_config.get("crontab")
        logger.info(crontab)
        sched = BlockingScheduler()
        sched.add_job(count_info_interval, CronTrigger.from_crontab(crontab), args=(task_id,))
        logger.info("count_info_interval will start and interval = {}, task_id is {}".format(interval, task_id))
        sched.start()
    elif type == 3:
        # Count log errors within a fixed time window: not a polling task,
        # it runs once, fetches its parameters, and exits.
        logger.info("count_info_once will start, task_id is {}".format(task_id))
        count_once_info(task_id)
def add(self, circuit):
    """Add all circuit_schedule entries from a specific circuit."""
    for circuit_schedule in circuit.circuit_rules:
        data = {"id": circuit_schedule.id}
        action = None
        if circuit_schedule.action == "create":
            action = circuit.deploy
        elif circuit_schedule.action == "remove":
            action = circuit.remove
        if circuit_schedule.date:
            data.update({
                "run_time": circuit_schedule.date,
                "start_date": circuit.start_date,
                "end_date": circuit.end_date,
            })
            self.scheduler.add_job(action, "date", **data)
        if circuit_schedule.interval:
            # dict.update() returns None, so its result must not be
            # reassigned to data.
            data.update(circuit_schedule.interval)
            self.scheduler.add_job(action, "interval", **data)
        if circuit_schedule.frequency:
            cron = CronTrigger.from_crontab(circuit_schedule.frequency)
            self.scheduler.add_job(action, cron, **data)
def run_job(cls, newJob: JobCreate):
    """Wrapper to run this job in a static context."""
    job_id = str(uuid4())
    newJob.ctime = datetime.now(timezone.utc)
    if newJob.recurring == "recurring":
        # from_crontab() returns a CronTrigger instance, not a string, and
        # add_job() drops keyword trigger arguments when given a trigger
        # instance, so the jitter is applied on the trigger itself.
        trigger = CronTrigger.from_crontab(newJob.cronExpression)
        trigger.jitter = newJob.jitter
        return AfishScheduler.add_job(
            cls.run,
            trigger,
            id=job_id,
            name=newJob.name,
            kwargs=newJob.dict(),
        )
    elif newJob.recurring == "single":
        return AfishScheduler.add_job(
            cls.run,
            name=newJob.name,
            id=job_id,
            kwargs=newJob.dict(),
            next_run_time=None,
        )
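# A minimal sketch of invoking run_job() above. The JobCreate field names
# (name, recurring, cronExpression, jitter) are taken from the function
# body; the values and the owning class name are hypothetical.
job = JobCreate(
    name="nightly-fish-count",
    recurring="recurring",
    cronExpression="0 1 * * *",
    jitter=60,
)
SomeJobClass.run_job(job)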