def start_scheduler(pk):
    """Schedule the daily 6 AM notification e-mail for the object with this pk."""
    # The pk doubles as the job id, so re-registering the same object
    # replaces its existing job instead of duplicating it.
    scheduler.add_job(
        send_notification_email,
        'cron',
        args=[pk],  # was passed positionally; keyword makes the intent explicit
        hour='6',
        replace_existing=True,
        id=str(pk),
    )
    register_events(scheduler)
def start():
    """Boot a background scheduler that refreshes rankings every 48 hours."""
    background = BackgroundScheduler()
    background.add_jobstore(DjangoJobStore(), "default")
    background.add_job(update_rankings, 'interval', hours=48)  # test
    register_events(background)
    background.start()
def start_scheduler():
    """Start the module-level scheduler unless it is already running."""
    # APScheduler reports STATE_STOPPED as 0.
    if scheduler.state != 0:
        print('Attempted to start scheduler, but scheduler was already running.')
        return
    register_events(scheduler)
    scheduler.start()
    print('Started scheduler.')
def load(request, token, cid, hour):
    """(Re)load the periodic 'buy' job for the Cast identified by ``cid``.

    The job fires every ``hour`` hours at minute 5, second 30, keyed by the
    cast's primary key. Requests must carry a valid token.

    :param token: access token checked by ``matchtoken``
    :param cid:   primary key of the Cast to schedule
    :param hour:  cron step for the hour field (``'*/hour'``)
    :return: HttpResponse with a status message
    """
    if not matchtoken(token):
        return HttpResponse('非法请求')
    try:
        cast = Cast.objects.get(pk=cid)
    except Cast.DoesNotExist:
        # Was a bare ``except:`` — only "object not found" is the expected
        # failure here; anything else should surface, not be swallowed.
        return HttpResponse('任务不存在')
    job = scheduler.get_job(job_id=str(cast.id))
    if job:
        job.remove()
        message = '任务已重载'
    else:
        message = '任务已加载'
    scheduler.add_job(
        buy, "cron",
        id=str(cast.id),
        day='*',
        hour='*/' + str(hour),
        minute='5',
        second='30',
        misfire_grace_time=3600,  # tolerate up to 1h of scheduler downtime
        kwargs={'cid': cast.id},
    )
    register_events(scheduler)
    return HttpResponse(message)
def start():
    """Start the scheduler and register the weekly job-execution cleanup."""
    if settings.DEBUG:
        # Surface apscheduler's internal logging while developing.
        logging.basicConfig()
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    # Midnight on Monday, before the start of the next work week.
    weekly = CronTrigger(day_of_week="mon", hour="00", minute="00")
    scheduler.add_job(
        delete_old_job_executions,
        trigger=weekly,
        id="delete_old_job_executions",
        max_instances=1,
        replace_existing=True,
    )
    logger.info("Added weekly job: 'delete_old_job_executions'.")

    # Expose the scheduled jobs in the Django admin interface.
    register_events(scheduler)
    scheduler.start()
def start():
    """Register events, queue the nmap job, and start the scheduler."""
    register_events(scheduler)
    # No trigger is given, so the job fires once immediately.
    # NOTE(review): the original comment claimed "every 1 seconds" — confirm
    # whether an interval trigger was intended.
    scheduler.add_job(nmap_job)
    scheduler.start()
    print("Scheduler started...", file=sys.stdout)
def create_scheduler():
    """Build, populate, and start the background scheduler for the alarm,
    history-model, operation-record, and housekeeping jobs."""
    scheduler = BackgroundScheduler(daemonic=True)
    scheduler.add_jobstore(DjangoJobStore(), "default")
    now = dt.datetime.now()

    # One-shot jobs that kick off immediately.
    scheduler.add_job(cal_kde_value, "date", run_date=now,
                      id='alarm_proj', args=[], replace_existing=True)
    scheduler.add_job(his_model_update, "date", run_date=now,
                      id='his_model_up', args=[], replace_existing=True)
    scheduler.add_job(seperate_operate_record.main, "date", run_date=now,
                      id='operate_parsing', args=[], replace_existing=True)

    # Daily housekeeping at 16:04.
    scheduler.add_job(clear_database, 'cron', hour='16', minute='04',
                      id='clear_database', replace_existing=True)

    # Recurring operation matcher, once a minute.
    scheduler.add_job(so_run, "interval", minutes=1,
                      id='operate_match', args=[], replace_existing=True)

    scheduler.start()
    logger.info('start scheduler task')
    print("=======================定时任务启动==========================")
    print(scheduler.get_jobs())
    print(scheduler.state)
    logger.info('start task register,check on admin platform!')
    register_events(scheduler)
def start():
    """Spin up a background scheduler that checks and sends mail hourly."""
    print("scheduler initializing...")
    background = BackgroundScheduler()
    background.add_jobstore(DjangoJobStore(), "default")
    background.add_job(check_and_send_mail, 'interval', hours=1,
                       name='checking_and_sending_mails', jobstore='default')
    register_events(background)
    background.start()
    print("Scheduler started...", file=sys.stdout)
def test_register_events_raises_deprecation_warning(scheduler, jobstore):
    """register_events() is deprecated and must emit a DeprecationWarning."""
    with warnings.catch_warnings(record=True) as w:
        # Warnings are deduplicated per call site via __warningregistry__;
        # without this filter the warning may not be recorded when it was
        # already triggered earlier in the test session, making the test flaky.
        warnings.simplefilter("always")
        register_events(scheduler, jobstore)

    assert len(w) == 1
    assert issubclass(w[-1].category, DeprecationWarning)
    assert "deprecated" in str(w[-1].message)
def start():
    """Schedule the daily stale-order cleanup on a fresh background scheduler."""
    background = BackgroundScheduler()
    background.add_jobstore(DjangoJobStore(), "default")
    # Fires every 24 hours; the fixed id plus replace_existing keeps the job
    # a singleton across restarts.
    background.add_job(check_obsolete_orders, 'interval', hours=24,
                       name='clean_orders', jobstore='default',
                       id="check_obsolete_orders", replace_existing=True)
    register_events(background)
    background.start()
    print("Scheduler started...", file=sys.stdout)
def start():
    """Start a background scheduler that deactivates expired accounts."""
    background = BackgroundScheduler()
    background.add_jobstore(DjangoJobStore(), "default")
    # NOTE(review): this runs every 10 seconds, although the original comment
    # claimed "every 24 hours" — confirm which interval was intended.
    background.add_job(deactivate_expired_accounts, 'interval', seconds=10,
                       name='clean_accounts', jobstore='default')
    register_events(background)
    background.start()
    print("Scheduler started...", file=sys.stdout)
def start():
    """Run check15min every quarter hour on a background scheduler."""
    background = BackgroundScheduler()
    background.add_job(check15min, 'interval', minutes=15,
                       name='clean_accounts', replace_existing=True)
    register_events(background)
    background.start()
def start_scheduler():
    """Start the shared scheduler exactly once, guarded by the module lock."""
    with lock:
        # APScheduler reports STATE_STOPPED as 0.
        if scheduler.state != 0:
            print(
                "Attempted to start scheduler, but scheduler was already running."
            )
            return
        register_events(scheduler)
        scheduler.start()
        print("Started scheduler.")
def __init__(self, scheduler):
    """
    Initialize the manager thread around an existing scheduler.

    Registers admin events and starts the scheduler immediately; the manager
    thread itself is marked as a daemon so it won't block interpreter exit.

    :param scheduler: the APScheduler instance to register and start
    """
    super(SchedulerManager, self).__init__()
    self.scheduler = scheduler
    register_events(self.scheduler)
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # attribute directly instead.
    self.daemon = True
    self.scheduler.start()
def start():
    """Register scheduled jobs with the Django admin and start the scheduler."""
    if settings.DEBUG:
        # Hook into the apscheduler logger for verbose output in development.
        logging.basicConfig()
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    register_events(scheduler)  # expose jobs in the Django admin interface
    scheduler.start()
def add_sqlcronjob(job_id, run_date, workflowId, url):
    """Schedule a one-shot SQL-review execution at ``run_date``.

    :param job_id:     unique job identifier (replaces any existing job)
    :param run_date:   datetime at which ``execute_job`` fires
    :param workflowId: workflow primary key passed through to the job
    :param url:        detail URL passed through to the job
    """
    background = BackgroundScheduler()
    background.add_jobstore(DjangoJobStore(), "default")
    background.add_job(execute_job, 'date', run_date=run_date,
                       args=[workflowId, url], id=job_id,
                       replace_existing=True)
    register_events(background)
    try:
        background.start()
    except SchedulerAlreadyRunningError:
        logger.debug("Scheduler is already running!")
    logger.debug('add_sqlcronjob:' + job_id + " run_date:"
                 + run_date.strftime('%Y-%m-%d %H:%M:%S'))
def start():
    """Start a scheduler whose cron job sends mail every five minutes."""
    background = BackgroundScheduler()
    background.add_jobstore(DjangoJobStore(), 'djangojobstore')
    register_events(background)

    @background.scheduled_job('cron', minute='*/5', name='auto_mail')
    def auto_mail():
        # Delegates to the project's mail helper.
        send_meilmail()

    background.start()
def start():
    """Start a background scheduler that reports accounts every minute."""
    background = BackgroundScheduler()
    background.add_jobstore(DjangoJobStore(), "default")
    background.add_job(report, 'interval', minutes=1,
                       name='report_accounts', jobstore='default')
    register_events(background)
    background.start()
    print("Scheduler has started")
def start():
    """Schedule the quarter-hourly greeting post and start the scheduler."""
    if settings.DEBUG:
        # Hook into the apscheduler logger.
        logging.basicConfig()
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    # Fires at minutes 0, 15, 30, and 45 of every hour.
    scheduler.add_job(
        greeting_job,
        "cron",
        id="send_post",
        minute='0,15,30,45',
        replace_existing=True,
    )

    # Add the scheduled jobs to the Django admin interface.
    register_events(scheduler)
    scheduler.start()
def scheduleJobs():
    """Register configured periodic jobs, start the scheduler, and return an
    HTML-ready summary (newlines replaced with <br>) of everything scheduled.
    """
    if settings.DEBUG:
        # Hook into the apscheduler logger
        logging.basicConfig()
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    # get job configurations
    # [(job_id, job_desc, job_rate, timestamp)]
    job_configs = getJobConfig()
    try:
        # Only description and timestamp are ignored; rate is in days.
        for job_id, _, job_rate, _ in job_configs:
            # if job_id == 'task_yelpScraper' and job_rate > 0:
            #     scheduler.add_job(
            #         task_yelpScraper,
            #         'interval',
            #         days=job_rate,
            #         jitter=43200,  # 43,200 sec = 12 hours
            #         id='task_yelpScraper',
            #         max_instances=1,
            #         replace_existing=True,
            #         misfire_grace_time=100)
            # elif job_id == "task_getVizdata" and job_rate > 0:
            if job_id == "task_getVizdata" and job_rate > 0:
                scheduler.add_job(
                    task_getVizdata,
                    'interval',
                    days=job_rate,
                    id="task_getVizdata",
                    max_instances=1,
                    replace_existing=True,
                    misfire_grace_time=100)
        register_events(scheduler)
        scheduler.start()
    except Exception as e:
        # Best-effort startup: failures are printed, not raised.
        print(e)

    # Print out scheduled job list
    text = ''
    for job in scheduler.get_jobs():
        text = text + str(job) + '\n'
    text = ("""\n\n\
========================================================
Jobs scheduled: \n""" + text + """\
========================================================
\n""")
    print(text)
    return text.replace('\n', '<br>')
def create_scheduler(message_queue):
    """Build and start the background scheduler for the alarm and SCATS jobs.

    ``message_queue`` is accepted for interface compatibility; this function
    does not read it directly.
    """
    scheduler = BackgroundScheduler(daemonic=True)
    scheduler.add_jobstore(DjangoJobStore(), "default")
    now = dt.datetime.now()

    # Alarm pipeline: fire once, immediately.
    scheduler.add_job(main, "date", run_date=now,
                      id='alarm_proj', args=[], replace_existing=True)
    # Operation matcher: every minute.
    scheduler.add_job(so_run, "interval", minutes=1,
                      id='operate_match', args=[], replace_existing=True)

    try:
        group, int_list, scats_input = get_scats_int()
    except Exception as e:
        # Without the SCATS base data the recurring SCATS jobs are skipped.
        logger.error(e)
        print(e)
    else:
        logger.info("get scats basic inf successfully!")
        scheduler.add_job(thread_creat, "interval", minutes=5,
                          id='scats_salklist',
                          args=[group, int_list, scats_input],
                          replace_existing=True)
        scheduler.add_job(RequestDynaDataFromInt, "interval", minutes=5,
                          id='scats_volumns', args=[int_list],
                          replace_existing=True)
        scheduler.add_job(get_operate, "interval", minutes=3,
                          id='scats_operate', args=[], replace_existing=True)

    scheduler.start()
    logger.info('start scheduler task')
    print("=======================定时任务启动==========================")
    print(scheduler.get_jobs())
    print(scheduler.state)
    logger.info('start task register,check on admin platform!')
    register_events(scheduler)
def add_sqlcronjob(request):
    """Create (or replace) a one-shot scheduled execution for a SQL workflow.

    Validates the requested run time and workflow status, registers a 'date'
    job with APScheduler, flips the workflow into the timed-task state, and
    redirects back to the workflow detail page. Validation failures render
    error.html with a Chinese error message.
    """
    workflowId = request.POST.get('workflowid')
    run_date = request.POST.get('run_date')
    # Both fields are required.
    if run_date is None or workflowId is None:
        context = {'errMsg': '时间不能为空'}
        return render(request, 'error.html', context)
    # NOTE(review): this is a lexicographic string comparison; it is only
    # valid while both sides share the '%Y-%m-%d %H:%M:%S' format — confirm
    # the client always posts that format.
    elif run_date < datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'):
        context = {'errMsg': '时间不能小于当前时间'}
        return render(request, 'error.html', context)
    workflowDetail = workflow.objects.get(id=workflowId)
    # Only approved or already-timed workflows may be (re)scheduled.
    if workflowDetail.status not in ['审核通过', '定时执行']:
        context = {'errMsg': '必须为审核通过或者定时执行状态'}
        return render(request, 'error.html', context)
    # Normalize the date string via a strptime round-trip; a malformed
    # timestamp raises ValueError here (uncaught).
    run_date = str(datetime.datetime.strptime(run_date, "%Y-%m-%d %H:%M:%S"))
    url = getDetailUrl(request) + str(workflowId) + '/'
    job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)
    try:
        scheduler = BackgroundScheduler()
        scheduler.add_jobstore(DjangoJobStore(), "default")
        # mysql_add = "mysql://*****:*****@localhost:3308/archer3"
        # scheduler.add_jobstore(SQLAlchemyJobStore(url=mysql_add))
        scheduler.add_job(execute_job, 'date', run_date=run_date,
                          args=[workflowId, url], id=job_id,
                          replace_existing=True)
        register_events(scheduler)
        try:
            scheduler.start()
            logger.debug("Scheduler started!")
        except SchedulerAlreadyRunningError:
            logger.debug("Scheduler is already running!")
        # Mark the workflow as a timed task only after scheduling succeeded.
        workflowDetail.status = Const.workflowStatus['tasktiming']
        workflowDetail.save()
    except Exception as e:
        context = {'errMsg': '任务添加失败,错误信息:' + str(e)}
        return render(request, 'error.html', context)
    else:
        # logger.debug('add_sqlcronjob:' + job_id + "run_date:" + run_date.strftime('%Y-%m-%d %H:%M:%S'))
        logger.debug('add_sqlcronjob:' + job_id + "run_date:" + run_date)
    return HttpResponseRedirect(
        reverse('sql:detail',
                kwargs={
                    'workflowId': workflowId,
                    'workflowType': 0
                }))
def start():
    """Start the scheduler with the daily resolved-ticket close-out job."""
    # Hook into the apscheduler logger when developing.
    if settings.DEBUG:
        logging.basicConfig()
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    # Set all resolved tickets to closed, once per day.
    scheduler.add_job(set_resolved_tickets_closed, 'interval',
                      hours=24, name='Close Tickets')
    register_events(scheduler)
    scheduler.start()
def test_job_events(db, scheduler):
    """One submission + one execution event yields a single DjangoJobExecution."""
    register_events(scheduler)
    scheduler.add_job(job, trigger="interval", seconds=1, id="job")
    scheduler.start()

    # Push the stored job's next run into the past so the events apply to it.
    stored = DjangoJob.objects.last()
    stored.next_run_time -= datetime.timedelta(seconds=2)
    stored.save()

    moment = datetime.datetime.now(utc)
    scheduler._dispatch_event(JobExecutionEvent(4096, "job", None, moment))
    scheduler._dispatch_event(JobSubmissionEvent(32768, "job", None, [moment]))

    assert DjangoJobExecution.objects.count() == 1
def start():
    """Refresh tweets every hour via a persistent background scheduler."""
    background = BackgroundScheduler()
    background.add_jobstore(DjangoJobStore(), "default")
    # Hourly job; the fixed id plus replace_existing keeps it a singleton,
    # and max_instances=1 prevents overlapping runs.
    background.add_job(update_tweets_hourly, 'interval', minutes=60,
                       name='update_tweets_hourly', id="update_tweets_hourly",
                       max_instances=1, replace_existing=True,
                       jobstore='default')
    register_events(background)
    background.start()
    print("Scheduler started...", file=sys.stdout)
def start():
    """Schedule hourly data processing and start the scheduler."""
    if settings.DEBUG:
        logging.basicConfig()
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    # Fires at the top of every hour.
    scheduler.add_job(process_data_task, "cron", id="Process data",
                      hour="*/1", minute="0", replace_existing=True)
    logging.info("Job started successfully!")
    register_events(scheduler)
    scheduler.start()
def start():
    """Register the market/crypto cron jobs and start the scheduler."""
    if settings.DEBUG:
        # Hook into the apscheduler logger.
        logging.basicConfig()
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    # The two weekday windows together cover every minute from 09:30
    # through 15:59: minutes 30-59 of hours 9-15, plus minutes 0-29 of
    # hours 10-15.
    scheduler.add_job(stockMarketExecute, 'cron', day_of_week='mon-fri',
                      hour='9-15', minute='30-59',
                      id="stockMarketExecute1", replace_existing=True)
    scheduler.add_job(stockMarketExecute, 'cron', day_of_week='mon-fri',
                      hour='10-15', minute='0-29',
                      id="stockMarketExecute2", replace_existing=True)
    # Crypto runs around the clock, every minute.
    scheduler.add_job(cryptoExecute, 'cron', day_of_week='*', hour='*',
                      minute='*', id="cryptoExecute", replace_existing=True)
    # Midnight cleanup on Tue-Sat (i.e. after each Mon-Fri trading day).
    scheduler.add_job(pendDayDelete, 'cron', day_of_week='tue-sat', hour='0',
                      id="pendDayDelete", replace_existing=True)

    # Add the scheduled jobs to the Django admin interface.
    register_events(scheduler)
    scheduler.start()
def init_scheduler(scheduler):
    """Wire the Django job store into ``scheduler``, (re)load all spider jobs
    from the Configuration table, and start the scheduler.

    Failures are printed (best-effort startup); the scheduler is returned
    either way so callers always get a handle back.

    :param scheduler: an APScheduler instance to initialize
    :return: the same scheduler instance
    """
    try:
        scheduler.add_jobstore(DjangoJobStore(), "default")
        scheduler.remove_all_jobs()
        from . import models
        spider_configurations = models.Configuration.objects.all()
        for instance in spider_configurations:
            save_job_from_instance(scheduler, instance)
        register_events(scheduler)
        scheduler.start()
    except Exception as e:
        print(e)
        # Only shut down a scheduler that actually started: calling
        # shutdown() on a never-started scheduler raises
        # SchedulerNotRunningError, which would mask the original error.
        if scheduler.running:
            scheduler.shutdown()
    return scheduler
def cronevent(request):
    """Toggle the periodic status-scan job via the ``comshow`` query parameter.

    ``comshow=yes`` schedules ``scanstatus`` once a minute; ``comshow=no``
    removes the job; any other value returns 403. Requests without the
    parameter are redirected to the index page.
    """
    if not request.GET.get('comshow'):
        return redirect('/index')

    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(DjangoJobStore(), "default")
    scheduler.start()
    register_events(scheduler)
    if request.GET.get('comshow') == 'yes':
        # replace_existing avoids ConflictingIdError when the 'status' job
        # is still present in the persistent Django job store (e.g. the
        # toggle is switched on twice in a row).
        scheduler.add_job(scanstatus, 'interval', minutes=1, id='status',
                          replace_existing=True)  # scheduled-task interval
        return HttpResponse(status=200)
    elif request.GET.get('comshow') == 'no':
        scheduler.remove_job('status')
        return HttpResponse(status=200)
    else:
        return HttpResponse(status=403)
def start():
    """Schedule the greeting post to fire every minute and start the scheduler."""
    if settings.DEBUG:
        # Hook into the apscheduler logger.
        logging.basicConfig()
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    # The original enumerated all sixty minutes ('0,1,...,59'); in cron
    # syntax that is exactly '*' — fire every minute.
    scheduler.add_job(
        greeting_job,
        "cron",
        id="send_post",
        minute='*',
        replace_existing=True,
    )

    # Add the scheduled jobs to the Django admin interface.
    register_events(scheduler)
    scheduler.start()
def test_delete_old_job_executions(db, scheduler):
    """delete_old_job_executions(1) drops executions older than one second."""
    register_events(scheduler)
    scheduler.add_job(job, trigger="interval", seconds=1, id="job_1")
    scheduler.add_job(job, trigger="interval", seconds=1, id="job_2")
    scheduler.start()

    now = datetime.datetime.now(utc)
    earlier = now - datetime.timedelta(seconds=1)

    # Simulate one submission/execution pair per job, one second apart.
    scheduler._dispatch_event(JobExecutionEvent(4096, "job_1", None, earlier))
    scheduler._dispatch_event(JobExecutionEvent(4096, "job_2", None, now))
    scheduler._dispatch_event(JobSubmissionEvent(32768, "job_1", None, [earlier]))
    scheduler._dispatch_event(JobSubmissionEvent(32768, "job_2", None, [now]))
    assert DjangoJobExecution.objects.count() == 2

    # Only the older execution falls outside the 1-second retention window.
    DjangoJobExecution.objects.delete_old_job_executions(1)
    assert DjangoJobExecution.objects.count() == 1
def scheduler_job():
    """
    Check scheduled tasks once per minute (每分钟检查一次定时任务).

    For every Task whose last update is older than its ``scheduler_at``
    interval (interpreted as minutes), (re)run the spider on the task's
    Scrapyd client and record success/failure on the model.
    :return:
    """
    models = Task.objects.all()
    for model in models:
        scheduler_at = model.scheduler_at
        updated_at = model.updated_at
        # scheduler_at is treated as a number of minutes; convert to seconds.
        scheduler_at_time_stamp = scheduler_at * 60
        # NOTE(review): mktime() interprets updated_at as *local* time —
        # confirm this matches how updated_at is stored (UTC vs local).
        updated_at_time_stamp = time.mktime(updated_at.timetuple())
        if time.time() - updated_at_time_stamp > scheduler_at_time_stamp:
            client_id = model.client_id
            project_name = model.project_name
            spider_name = model.spider_name
            client = Client.objects.get(id=client_id)
            scrapyd = ScrapydAPI(scrapyd_url(client.ip, client.port))
            try:
                job = scrapyd.schedule(project_name, spider_name)
                model.success = 1
            except ConnectionError:
                # Scrapyd unreachable: record the failure but keep going.
                model.success = 0
            finally:
                model.save()
    # NOTE(review): indentation reconstructed from a collapsed source line —
    # these statements appear to run once after the loop; confirm against
    # the original file.
    register_events(scheduler)
    # scheduler.start()
    logger.info("Scheduler started!")
    print("Scheduler started!")