Example #1
    def handle(self, *args, **options):
        scheduler = BackgroundScheduler(timezone=settings.TIME_ZONE)
        scheduler.add_jobstore(DjangoJobStore(), "default")

        scheduler.add_job(fetch_tweets,
                          trigger=CronTrigger(hour='*/3'),
                          id="fetch_tweets",
                          max_instances=1,
                          replace_existing=True)
        logger.info("Added job 'fetch_tweets'.")

        scheduler.add_job(delete_old_job_executions,
                          trigger=CronTrigger(day_of_week="mon",
                                              hour="00",
                                              minute="00"),
                          id="delete_old_job_executions",
                          max_instances=1,
                          replace_existing=True)
        logger.info("Added weekly job: 'delete_old_job_executions'.")
        try:
            logger.info("Starting scheduler...")
            scheduler.start()
        except KeyboardInterrupt:
            logger.info("Stopping scheduler...")
            scheduler.shutdown()
            logger.info("Scheduler shut down successfully!")
Example #2
    def test_run_selected_jobs_enforces_timeout(self, rf, monkeypatch):
        monkeypatch.setattr(settings, "APSCHEDULER_RUN_NOW_TIMEOUT",
                            1)  # Shorten timeout to reduce test runtime

        scheduler = BackgroundScheduler()
        scheduler.add_jobstore(DjangoJobStore())
        scheduler.start()

        job = scheduler.add_job(print, trigger="interval", seconds=60)

        admin = DjangoJobAdmin(DjangoJob, None)

        r = rf.get("/django_apscheduler/djangojob/")
        # Add support for Django messaging framework
        r._messages = mock.MagicMock(BaseStorage)
        r._messages.add = mock.MagicMock()

        with mock.patch(
                "django_apscheduler.admin.BackgroundScheduler.add_listener"):
            admin.run_selected_jobs(r, DjangoJob.objects.filter(id=job.id))

        assert DjangoJobExecution.objects.count() == 0
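        # 40 below is the numeric value of django.contrib.messages.constants.ERROR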
        r._messages.add.assert_called_with(
            40,
            format_html(
                "Maximum runtime of {} seconds exceeded! Not all jobs could be completed successfully. "
                "Pending jobs: {}",
                admin._job_execution_timeout,
                ",".join({job.id}),
            ),
            "",
        )

        scheduler.shutdown()
Example #3
    def run_selected_jobs(self, request, queryset):
        scheduler = BackgroundScheduler()
        scheduler.add_jobstore(self._memory_jobstore)
        scheduler.add_listener(self._handle_execution_event, events.EVENT_JOB_EXECUTED)

        scheduler.start()

        num_jobs_scheduled = 0
        self._jobs_executed = []
        start_time = timezone.now()

        for item in queryset:
            django_job = self._django_jobstore.lookup_job(item.id)

            if not django_job:
                msg_dict = {"job_id": item.id}
                msg = _(
                    "Could not find job {job_id} in the database! Skipping execution..."
                )
                self.message_user(
                    request, format_html(msg, **msg_dict), messages.WARNING
                )
                continue

            scheduler.add_job(
                django_job.func_ref,
                trigger=None,  # Run immediately
                args=django_job.args,
                kwargs=django_job.kwargs,
                id=django_job.id,
                name=django_job.name,
                misfire_grace_time=django_job.misfire_grace_time,
                coalesce=django_job.coalesce,
                max_instances=django_job.max_instances,
            )

            num_jobs_scheduled += 1

        while len(self._jobs_executed) < num_jobs_scheduled:
            # Wait for selected jobs to be executed.
            if timezone.now() > start_time + timedelta(
                seconds=self._job_execution_timeout
            ):
                msg = _(
                    "Maximum runtime exceeded! Not all jobs could be completed successfully."
                )
                self.message_user(request, msg, messages.ERROR)

                scheduler.shutdown(wait=False)
                return None

            time.sleep(0.1)

        for job_id in self._jobs_executed:
            msg_dict = {"job_id": job_id}
            msg = _("Executed job '{job_id}'!")
            self.message_user(request, format_html(msg, **msg_dict))

        scheduler.shutdown()
        return None
Example #4
    def handle(self, *args, **options):
        scheduler = BackgroundScheduler()
        scheduler.add_jobstore(DjangoJobStore(), "default")
        # Run this job once a day: cron hours run 0-23, so hour="24" is invalid;
        # hour="0" fires daily at midnight
        scheduler.add_job(main,  # pass the callable itself, don't call it here
                          trigger=CronTrigger(hour="0"),
                          id="my_job",
                          jobstore='default',
                          replace_existing=True)
        print("Added job 'my_job'.")

        scheduler.add_job(
            delete_old_job_executions,
            trigger=CronTrigger(
                day_of_week="mon", hour="00", minute="00"
            ),  # Midnight on Monday, before start of the next work week.
            id="delete_old_job_executions",
            max_instances=1,
            replace_existing=True,
        )
        print(
            "Added weekly job: 'delete_old_job_executions'."
        )
        try:
            print("Starting scheduler...")
            scheduler.start()
        except KeyboardInterrupt:
            print("Stopping scheduler...")
            scheduler.shutdown()
            print("Scheduler shut down successfully!")
Example #5
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(DjangoJobStore(), "default")
    # Recompute rankings every 48 hours
    scheduler.add_job(update_rankings, 'interval', hours=48)
    register_events(scheduler)
    scheduler.start()
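
Note that this job is added without an explicit id, so each process restart stores a fresh copy in the persistent DjangoJobStore. A hedged variant that keeps a single entry across restarts:

    scheduler.add_job(update_rankings, 'interval', hours=48,
                      id='update_rankings', replace_existing=True)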
Example #6
    def test_run_selected_jobs_creates_job_execution_entry(
            self, rf, monkeypatch):
        monkeypatch.setattr(settings, "APSCHEDULER_RUN_NOW_TIMEOUT",
                            1)  # Shorten timeout to reduce test runtime

        scheduler = BackgroundScheduler()
        scheduler.add_jobstore(DjangoJobStore())
        scheduler.start()

        job = scheduler.add_job(print, trigger="interval", seconds=60)

        admin = DjangoJobAdmin(DjangoJob, None)

        r = rf.get("/django_apscheduler/djangojob/")
        # Add support for Django messaging framework
        r._messages = mock.MagicMock(BaseStorage)
        r._messages.add = mock.MagicMock()

        assert not DjangoJobExecution.objects.filter(job_id=job.id).exists()

        admin.run_selected_jobs(r, DjangoJob.objects.filter(id=job.id))

        assert DjangoJobExecution.objects.filter(job_id=job.id).exists()
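        # 20 below is the numeric value of django.contrib.messages.constants.INFO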
        r._messages.add.assert_called_with(20, f"Executed job '{job.id}'!", "")

        scheduler.shutdown()
Example #7
def get_singel_api_task_time(request):
    username = request.session.get('user', '')
    tasks = process_apis_task.objects.all()
    if request.method == "POST":
        singel_task_date = request.POST.get('date')
        singel_task_frequency = request.POST.get('frequency')
        if singel_task_date & singel_task_frequency != '':
            try:
                scheduler = BackgroundScheduler()
                # 指定时间执行
                scheduler.add_jobstore(start_singel_apis_task,
                                       'date',
                                       run_date=singel_task_date)
                #调度开始
                scheduler.start()
                print('Press Ctrl+{0} to exit'.format('Break' if os.name ==
                                                      'nt' else 'C'))
            except (KeyboardInterrupt, SystemExit):  #键盘打断则退出
                scheduler.shutdown()
        else:
            try:
                scheduler = BackgroundScheduler()
                # 指定间隔时间执行一次
                scheduler.add_jobstore(start_singel_apis_task,
                                       'interval',
                                       minutes=singel_task_frequency)
                #调度开始
                scheduler.start()
            except (KeyboardInterrupt, SystemExit):  #键盘打断则退出
                scheduler.shutdown()
        return render(request, "singel_periodic_task.html", {
            "user": username,
            "tasks": tasks
        })
Example #8
def create_scheduler():
    # manage = SchedulerManage()
    scheduler = BackgroundScheduler(daemonic=True)
    scheduler.add_jobstore(DjangoJobStore(), "default")
    date = dt.datetime.now()
    # Alarm job
    scheduler.add_job(cal_kde_value,
                      "date",
                      run_date=date,
                      id='alarm_proj',
                      args=[],
                      replace_existing=True)
    scheduler.add_job(his_model_update,
                      "date",
                      run_date=date,
                      id='his_model_up',
                      args=[],
                      replace_existing=True)
    scheduler.add_job(seperate_operate_record.main,
                      "date",
                      run_date=date,
                      id='operate_parsing',
                      args=[],
                      replace_existing=True)
    scheduler.add_job(clear_database,
                      'cron',
                      hour='16',
                      minute='04',
                      id='clear_database',
                      replace_existing=True)
    # scheduler.add_job(operate_resolve, "date", run_date=date, id='alarm_proj', args=[], replace_existing=True)
    # scheduler.add_job(seperate_operate_record.main, "interval", minutes=1, id='operate_proj', args=[])
    # scheduler.add_job(time_task, "interval", seconds=5, id='mytask2', args=['mytask2',], replace_existing=True)
    scheduler.add_job(so_run,
                      "interval",
                      minutes=1,
                      id='operate_match',
                      args=[],
                      replace_existing=True)
    # try:
    #     group, int_list, scats_input = get_scats_int()
    # except Exception as e:
    #     logger.error(e)
    #     print(e)
    # else:
    #     logger.info("get scats basic inf successfully!")
    #     scheduler.add_job(thread_creat, "interval", minutes=5, id='scats_salklist', args=[group, int_list, scats_input],
    #                       replace_existing=True)
    #     scheduler.add_job(RequestDynaDataFromInt, "interval", minutes=5, id='scats_volumns', args=[int_list],
    #                       replace_existing=True)
    #     scheduler.add_job(get_operate, "interval", minutes=3, id='scats_operate', args=[],
    #                       replace_existing=True)
    scheduler.start()
    logger.info('Scheduler started')
    print("======================= Scheduled tasks started ==========================")
    print(scheduler.get_jobs())
    print(scheduler.state)
    logger.info('Job registration started; check the admin platform!')
    register_events(scheduler)
Example #9
def start():
    print("scheduler initializing...")
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(DjangoJobStore(), "default")
    scheduler.add_job(check_and_send_mail, 'interval', hours=1, name='checking_and_sending_mails', jobstore='default')
    register_events(scheduler)
    scheduler.start()
    print("Scheduler started...", file=sys.stdout)
Example #10
 def __init__(self,interval=2):
     global se_scheduler
     se_scheduler = BackgroundScheduler()
     se_scheduler.add_jobstore("mongodb", database="felix_se",
                               collection="ScheduledJobs")
     se_scheduler.start()
     super(SESchedulerService, self).__init__("SESchedulerService",interval)
     self.first_time = True
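
The string alias 'mongodb' above is shorthand that APScheduler resolves to MongoDBJobStore; an equivalent explicit form (same database and collection names):

from apscheduler.jobstores.mongodb import MongoDBJobStore

se_scheduler.add_jobstore(MongoDBJobStore(database="felix_se",
                                          collection="ScheduledJobs"))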
Example #11
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(DjangoJobStore(), "default")
    # run this job every 10 seconds
    scheduler.add_job(deactivate_expired_accounts, 'interval', seconds=10, name='clean_accounts', jobstore='default')
    register_events(scheduler)
    scheduler.start()
    print("Scheduler started...", file=sys.stdout)
Example #12
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(DjangoJobStore(), "default")
    # run this job every 24 hours
    scheduler.add_job(check_obsolete_orders, 'interval', hours=24, name='clean_orders', jobstore='default', id="check_obsolete_orders", replace_existing=True)
    register_events(scheduler)
    scheduler.start()
    print("Scheduler started...", file=sys.stdout)
Example #13

def start():
    backscheduler = BackgroundScheduler()
    backscheduler.add_jobstore(DjangoJobStore(), "default")
    backscheduler.add_job(scheduleScrape,  # pass the callable, not its return value
                          trigger="interval",
                          name='scrape_all',
                          seconds=300,
                          jobstore="default")
    backscheduler.start()
Example #14

File: jobs.py  Project: xftbee/archeryold
def add_sqlcronjob(job_id, run_date, workflowId, url):
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(DjangoJobStore(), "default")
    scheduler.add_job(execute_job, 'date', run_date=run_date, args=[workflowId, url], id=job_id, replace_existing=True)
    register_events(scheduler)
    try:
        scheduler.start()
    except SchedulerAlreadyRunningError:
        logger.debug("Scheduler is already running!")
    logger.debug('add_sqlcronjob:' + job_id + " run_date:" + run_date.strftime('%Y-%m-%d %H:%M:%S'))
Example #15
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(DjangoJobStore(), 'djangojobstore')
    register_events(scheduler)

    @scheduler.scheduled_job('cron', minute='*/5', name='auto_mail')
    def auto_mail():
        send_meilmail()

    scheduler.start()
Example #17
class ApsScheduler(Scheduler):
    def __init__(self):
        self._scheduler = BackgroundScheduler()

    def start(self):
        self._scheduler.add_jobstore(DjangoJobStore(), 'default')
        self._scheduler.start()

    def add_job(self, func: Callable, func_kwargs: dict,
                cron_schedule: CronSchedule) -> str:
        job = self._scheduler.add_job(func=func,
                                      kwargs=func_kwargs,
                                      trigger='cron',
                                      start_date=cron_schedule.start_date,
                                      end_date=cron_schedule.end_date,
                                      year=cron_schedule.year,
                                      month=cron_schedule.month,
                                      day=cron_schedule.day,
                                      week=cron_schedule.week,
                                      day_of_week=cron_schedule.day_of_week,
                                      second=cron_schedule.second,
                                      minute=cron_schedule.minute,
                                      hour=cron_schedule.hour)

        return job.id

    def modify(self, cron_schedule: CronSchedule):
        for job_id in self._get_all_affected_job_ids(cron_schedule.pk):
            self._scheduler.reschedule_job(
                job_id,
                trigger='cron',
                start_date=cron_schedule.start_date,
                end_date=cron_schedule.end_date,
                year=cron_schedule.year,
                month=cron_schedule.month,
                day=cron_schedule.day,
                week=cron_schedule.week,
                day_of_week=cron_schedule.day_of_week,
                second=cron_schedule.second,
                minute=cron_schedule.minute,
                hour=cron_schedule.hour)

    def delete(self, cron_schedule_id: int):
        for job_id in self._get_all_affected_job_ids(cron_schedule_id):
            self._scheduler.remove_job(job_id)

    def delete_job(self, job_id):
        self._scheduler.remove_job(job_id)

    @staticmethod
    def _get_all_affected_job_ids(cron_schedule_id):
        objs = Timelapse.objects.filter(schedule_id=cron_schedule_id)
        for obj in objs:
            assert isinstance(obj, Timelapse)
            yield obj.schedule_job_id
Example #18

def start():
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(DjangoJobStore(), "default")
    scheduler.add_job(report,
                      'interval',
                      minutes=1,
                      name='report_accounts',
                      jobstore='default')
    register_events(scheduler)
    scheduler.start()
    print("Scheduler has started")
Example #19
def __init__():
    global g_main_scheduler
    g_main_scheduler = BackgroundScheduler(timezone=utc)
    g_main_scheduler.add_jobstore('sqlalchemy', url='sqlite:///jobs.sqlite')
    g_main_scheduler.start()

    log_jobs()

    import threading
    myLogger("g_main_scheduler in this thread:{}".format(
        threading.current_thread()))
Example #20
def wechat_sendfile_server():
    data = {}
    itchat.auto_login()
    scheduler = BackgroundScheduler()
    # Use DjangoJobStore() as the scheduler's job store
    scheduler.add_jobstore(DjangoJobStore(), "default")
    # trigger = DateTrigger(run_date='2019-05-13 15:25:30')
    job = scheduler.add_job(send_file_by_time, trigger='date', run_date='2019-05-13 14:06:30')
    # job = scheduler.add_job(send_file_by_time, trigger)
    scheduler.start()
    job.remove()
Example #21
class SchedulerProcess(ZMQProcess):
    """ØMQ tasks scheduler process"""
    def __init__(self, zmq_address, handler, auth, clients, registry):
        # pylint: disable=too-many-arguments
        super().__init__(zmq_address, handler, auth, clients, registry)
        self.scheduler = BackgroundScheduler()
        self.jobstore = MemoryJobStore()

    def run(self):
        if self.scheduler is not None:
            self.scheduler.add_jobstore(self.jobstore, 'default')
            self.scheduler.start()
        ZMQProcess.run(self)
Example #22
def create_scheduler():
    from apscheduler.schedulers.background import BackgroundScheduler
    scheduler = BackgroundScheduler()
    mongo_client_kwargs = dict(
        host=CONF.MONGO_HOST,
        port=CONF.MONGO_PORT,
    )
    scheduler.add_jobstore(
        'mongodb',
        collection=CONF.SCHEDULER_MONGO_COLLECTION,
        **mongo_client_kwargs
    )
    return scheduler
Example #23
def main():
    context = daemon.DaemonContext(
        working_directory='/opt/sendlater',
        pidfile=daemon.pidfile.PIDLockFile('/var/run/sendlater.pid'),
    )

    with context:
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5),
        }

        scheduler = BackgroundScheduler(executors=executors)
        scheduler.add_jobstore(
            'redis',
            jobs_key='sendlater:jobs',
            run_times_key='sendlater:run_times',
        )
        scheduler.start()

        datetime_parser = parsedatetime.Calendar()
        def process(text):
            data = json.loads(text)

            recipient = data.get('recipient')
            sender = data.get('sender')
            when = data.get('when')
            message = data.get('message')

            parsed_tuples = datetime_parser.nlp(when)
            parsed_tuple = parsed_tuples[0]
            dt, flags, start_pos, end_pos, matched_text = parsed_tuple

            trigger = DateTrigger(dt)
            scheduler.add_job(
                send_message,
                trigger=trigger,
                args=[recipient, sender, when, message],
            )

            with open('/opt/sendlater/logs', 'a') as f:
                f.write('SCHEDULED {} {} {} {}\n'.format(recipient, sender, when, message))

        r = redis.Redis()

        while True:
            queue_name, text = r.blpop('sendlater:messages')
            try:
                process(text)
            except Exception:
                # Skip malformed messages rather than crash the daemon
                pass
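
For reference, a hypothetical producer for the loop above: it pushes a JSON message onto the 'sendlater:messages' list that the daemon blocks on with blpop (the field values are made up):

import json
import redis

r = redis.Redis()
r.rpush('sendlater:messages', json.dumps({
    'recipient': 'alice@example.com',   # hypothetical addresses
    'sender': 'bob@example.com',
    'when': 'in 2 hours',               # parsed by parsedatetime's nlp()
    'message': 'Hello from the past!',
}))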
Example #24
def create_scheduler(message_queue):
    manage = SchedulerManage()
    scheduler = BackgroundScheduler(daemonic=True)
    scheduler.add_jobstore(DjangoJobStore(), "default")
    date = dt.datetime.now()
    scheduler.add_job(main,
                      "date",
                      run_date=date,
                      id='alarm_proj',
                      args=[message_queue],
                      replace_existing=True)
    # scheduler.add_job(manage.message_accept, "date", run_date=date, id='alarm_proj', args=[message_queue], replace_existing=True)
    # scheduler.add_job(seperate_operate_record.main, "interval", minutes=1, id='operate_proj', args=[])
    # scheduler.add_job(time_task, "interval", seconds=5, id='mytask2', args=['mytask2',], replace_existing=True)
    scheduler.add_job(so_run,
                      "interval",
                      minutes=1,
                      id='operate_match',
                      args=[message_queue],
                      replace_existing=True)

    try:
        group, int_list, scats_input = get_scats_int()
    except Exception as e:
        logger.error(e)
        print(e)
    else:
        logger.info("scats基础信息获取成功")
        scheduler.add_job(thread_creat,
                          "interval",
                          minutes=5,
                          id='scats_salklist',
                          args=[group, int_list, scats_input],
                          replace_existing=True)
        scheduler.add_job(RequestDynaDataFromInt,
                          "interval",
                          minutes=5,
                          id='scats_volumns',
                          args=[int_list],
                          replace_existing=True)
        scheduler.add_job(get_operate,
                          "interval",
                          minutes=3,
                          id='scats_operate',
                          args=[],
                          replace_existing=True)

    scheduler.start()
    logger.info('Scheduled tasks started')
    print("======================= Scheduled tasks started ==========================")
    print(scheduler.get_jobs())
Example #25
def add_sqlcronjob(request):
    workflowId = request.POST.get('workflowid')
    run_date = request.POST.get('run_date')
    if run_date is None or workflowId is None:
        context = {'errMsg': 'Run date and workflow id must not be empty'}
        return render(request, 'error.html', context)
    elif run_date < datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'):
        context = {'errMsg': 'Run date cannot be earlier than the current time'}
        return render(request, 'error.html', context)
    workflowDetail = workflow.objects.get(id=workflowId)
    if workflowDetail.status not in ['审核通过', '定时执行']:  # 'approved' or 'scheduled to run'
        context = {'errMsg': "Workflow status must be 'approved' (审核通过) or 'scheduled to run' (定时执行)"}
        return render(request, 'error.html', context)

    run_date = str(datetime.datetime.strptime(run_date, "%Y-%m-%d %H:%M:%S"))
    url = getDetailUrl(request) + str(workflowId) + '/'
    job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)

    try:
        scheduler = BackgroundScheduler()
        scheduler.add_jobstore(DjangoJobStore(), "default")
        # mysql_add = "mysql://*****:*****@localhost:3308/archer3"
        # scheduler.add_jobstore(SQLAlchemyJobStore(url=mysql_add))
        scheduler.add_job(execute_job,
                          'date',
                          run_date=run_date,
                          args=[workflowId, url],
                          id=job_id,
                          replace_existing=True)
        register_events(scheduler)
        try:
            scheduler.start()
            logger.debug("Scheduler started!")
        except SchedulerAlreadyRunningError:
            logger.debug("Scheduler is already running!")
        workflowDetail.status = Const.workflowStatus['tasktiming']
        workflowDetail.save()
    except Exception as e:
        context = {'errMsg': 'Failed to add the job, error: ' + str(e)}
        return render(request, 'error.html', context)
    else:
        # run_date is already a string here, so no strftime() call is needed
        logger.debug('add_sqlcronjob:' + job_id + " run_date:" + run_date)

    return HttpResponseRedirect(
        reverse('sql:detail',
                kwargs={
                    'workflowId': workflowId,
                    'workflowType': 0
                }))
Example #26

File: views.py  Project: EAFA0/muxin
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        Config = get_config()
        mysql_url = (f'mysql://{Config.MYSQL_USER}:{Config.MYSQL_PASSWORD}'
                     f'@{Config.MYSQL_HOST}:{Config.MYSQL_PORT}'
                     f'/{Config.MYSQL_DATABASE}')

        jobstore = SQLAlchemyJobStore(url=mysql_url)

        scheduler = BackgroundScheduler()
        scheduler.add_jobstore(jobstore)
        scheduler.start()

        self.scheduler = scheduler
Example #27
def start_worker(config):
    connection_string = "mysql://%s:%s@%s/%s" % (
        config.get('mysql', 'user'),
        config.get('mysql', 'passwd'),
        config.get('mysql', 'host'),
        config.get('mysql', 'database')
    )
    
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(SQLAlchemyJobStore(url=connection_string), alias='db')
    scheduler.start()
    scheduler.add_job(
        update_all_job, args=[config], replace_existing=True, id='worker',
        trigger='cron', minute='*/5', jobstore='db', timezone='UTC')
Example #28
class Scheduler(metaclass=Singleton):
    def __init__(self):
        _settings, _ = import_settings()
        self._scheduler = BackgroundScheduler()
        if 'REDIS_URL' in _settings:
            redis_config = gen_config_dict(_settings)
            self._scheduler.add_jobstore('redis', **redis_config)

    def __getattr__(self, item):
        return getattr(self._scheduler, item)

    @staticmethod
    def get_instance():
        return Scheduler()
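
Because __getattr__ delegates unknown attributes to the wrapped BackgroundScheduler, the singleton exposes the full APScheduler API. A hypothetical usage sketch:

sched = Scheduler.get_instance()
sched.start()                                 # delegated to BackgroundScheduler.start()
sched.add_job(print, 'interval', seconds=30,  # delegated to BackgroundScheduler.add_job()
              args=['tick'])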
Example #29
    def get_paused_scheduler(cls):
        """Return paused scheduler for feeding jobs

        :return: scheduler instance
        """
        # As we would like to feed default jobs, not to run any job, make sure we start scheduler in paused mode.
        if cls._scheduler is not None:
            return cls._scheduler
        else:
            scheduler = BackgroundScheduler()
            scheduler.add_jobstore('sqlalchemy',
                                   url=get_postgres_connection_string())
            scheduler.start(paused=True)
            return scheduler
Example #30
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore(DjangoJobStore(), "default")
    # run this job every hour
    scheduler.add_job(update_tweets_hourly,
                      'interval',
                      minutes=60,
                      name='update_tweets_hourly',
                      id="update_tweets_hourly",
                      max_instances=1,
                      replace_existing=True,
                      jobstore='default')
    register_events(scheduler)
    scheduler.start()
    print("Scheduler started...", file=sys.stdout)
Example #31

def add_periodic_job(sched_sql_loc, function_to_run, time_df, id_modifier,
                     args):
    """
    Adds a job to the scheduler database. This function must have the same
        arguements for all runs.
    
    :param: sched_sql_loc: location of the sql job database generated by 
        this program and used by the scheduler
    :type: sched_sql_loc: string
    
    :param: function_to_run: The function that is being scheduled
    :type: csv_path_in: function   

    :param: time_df: pandas dataframe that contains when the function should
        run
    :type: time_df: pandas dataframe
    
    :param: id_modifier: string that will be added to the id for add job
    :type: string
    
    :param: args: list of arguements that are used by function_to_run
    :type: args: list   
    """
    # opne the scheduler object and associate the job database with it
    scheduler = BackgroundScheduler()
    scheduler.add_jobstore('sqlalchemy', url='sqlite:///%s' % sched_sql_loc)
    sched_time_hours = time_df['hours'].values
    sched_time_minutes = time_df['minutes'].values
    sched_time_seconds = time_df['seconds'].values
    sched_day_code = time_df['day_code'].values
    sched_index = time_df.index.values
    for ind in range(len(sched_time_hours)):
        # misfire_grace_time - seconds after the designated runtime that
        # the job is still allowed to be run
        tempargs = args.copy()
        tempargs.append(sched_index[ind])
        scheduler.add_job(function_to_run,
                          'cron',
                          day_of_week=sched_day_code[ind],
                          hour=int(sched_time_hours[ind]),
                          minute=int(sched_time_minutes[ind]),
                          second=int(sched_time_seconds[ind]),
                          misfire_grace_time=120,
                          id=(id_modifier + str(sched_index[ind])),
                          args=tempargs)
    scheduler.print_jobs()
    scheduler.start()
    scheduler.shutdown()
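
A hypothetical call to add_periodic_job, with a one-row schedule that runs a stand-in my_task on Mondays at 06:30:00 (the column names match those the function reads above):

import pandas as pd

time_df = pd.DataFrame({
    'hours': [6], 'minutes': [30], 'seconds': [0], 'day_code': ['mon'],
})
add_periodic_job('jobs.sqlite', my_task, time_df, 'daily_', ['arg1'])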
Example #32
class SchedulerManager(SingletonMixin):
    def __init__(self):
        self._scheduler = BackgroundScheduler()

    def start(self, db_url):
        self._scheduler.add_jobstore('sqlalchemy', url=db_url)
        self._scheduler.start()

    def shutdown(self):
        self._scheduler.shutdown()

    def add_interval_job(self, func, args, seconds=60):
        self._scheduler.add_job(func, 'interval', args=args, seconds=seconds)

    def add_cron_job(self, func, hour, minute):
        self._scheduler.add_job(func, 'cron', hour=hour, minute=minute)
Example #33
def schedule_job(message):
    log.info("Received request to schedule the exporter job %s with job_id %s"
             % (message['exporter_name'], message['exporter_id']))
    metrics_list = message['metrics_list']

    for i in metrics_list:
        scheduler = BackgroundScheduler()
        scheduler.add_jobstore('sqlalchemy', url=CONF.gexporter.scheduler_db_url)
        job_id = message['exporter_id']
        scheduler.add_job(create_job, args=[i, message["time_interval"],
                                            job_id],
                          trigger='interval',
                          minutes=int(message["time_interval"]), id=job_id)
        try:
            print("Starting scheduler")
            scheduler.start()
        except Exception as ex:
            log.error("Scheduler has hit an exception: %s" % ex)
Example #34
    def __init__(self):
        """
        Constructor of the service.
        """
        self.config = ConfParser("ro.conf")
        self.scheduler = self.config.get("scheduler")
        interval = int(self.scheduler.get("frequency"))
        master_ro = self.config.get("master_ro")
        mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))
        db_name = "felix_ro"
        if mro_enabled:
            db_name = "felix_mro"
        global ro_scheduler
        ro_scheduler = BackgroundScheduler()
        ro_scheduler.add_jobstore(
            MongoDBJobStore(database=db_name, collection="scheduler.jobs"))
        ro_scheduler.start()

        super(ROSchedulerService, self).__init__(
            "ROSchedulerService", interval)
        self.first_time = True
Example #35
class CommandScheduler(object):

    def __init__(self, start=True):
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }

        self.scheduler = BackgroundScheduler(executors=executors)
        self.scheduler.add_jobstore('redis',
            jobs_key='shh:jobs',
            run_times_key='shh:run_times'
        )

        if start:
            self.scheduler.start()

        self.datetime_parser = parsedatetime.Calendar()

    def parse(self, at):
        return self.datetime_parser.parseDT(at)[0]

    def already_scheduled(self, what):
        # TODO(Bieber): Improve efficiency with a dict
        for scheduled_at, scheduled_what in self.get_jobs():
            if what == scheduled_what:
                return True
        return False

    def schedule(self, at, what):
        dt = self.parse(at)
        trigger = DateTrigger(dt)
        self.scheduler.add_job(execute_command,
            trigger=trigger,
            args=[what.strip()],
        )

    def get_jobs(self):
        jobs = self.scheduler.get_jobs()
        return [(job.next_run_time, job.args[0]) for job in jobs]
Example #36
def add_schedule_backup_job():
    # if __name__ == '__main__':
    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    scheduler = BackgroundScheduler()  # defaults to the in-memory job store

    url = "sqlite:////home/apps/dbajob.sqlite"

    scheduler.add_jobstore("sqlalchemy", url=url, alias="sqlite_js")

    scheduler.print_jobs()

    print("a")

    scheduler.remove_all_jobs(jobstore="sqlite_js")

    scheduler.print_jobs()

    print("remove")

    # v_current_jobs = scheduler.get_jobs()

    # print(v_current_jobs)

    # if v_current_jobs:  # if the job already exists, remove it first

    #     scheduler.remove_job('backup')

    # Connect to the configuration database to fetch backup intervals, etc.
    db = Connection("/tmp/mysql3306.sock", config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone="+8:00")

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id """

    print(v_sql)

    bak_server_list = db.query(v_sql)

    if bak_server_list:  # there are servers to configure

        i = 0

        # Mark scheduled backups that never started as manually ended (backup_result_type=4)
        v_manual_end_sql = "update mysql_ins_bak_log set backup_result_type=4 where backup_result_type=0"

        db.execute(v_manual_end_sql)

        for bak_server in bak_server_list:

            instance_id = bak_server["instance_id"]

            from_host = bak_server["ip"]

            # print from_host

            mysql_port = bak_server["port"]

            backup_interval_type = bak_server["backup_interval_type"]

            backup_start_time = bak_server["backup_start_time"]

            str_start_date = time.strftime("%Y-%m-%d") + " " + backup_start_time

            print(str_start_date)

            v_job_id = "backup_%s_%s" % (from_host, str(mysql_port))

            if backup_interval_type == 1:  # every day

                # scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')

                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    days=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )

            elif backup_interval_type == 2:  # every week weeks=1

                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    weeks=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )

            elif backup_interval_type == 3:  # every hour hours=1

                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    hours=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )

            else:
                pass
            # Record the job's scheduling status in the database.
            # 0: the scheduled task has started, but the actual backup has not begun yet.

            v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
            values(%d,0)""" % (
                instance_id
            )

            db.execute(v_sche_start_sql)

            i = i + 1

        scheduler.print_jobs()

        print("b")

        scheduler.start()

        scheduler.print_jobs()

        print("c")

    db.close()
Example #37
import requests

logging.basicConfig()
import click  # used by @click.option below
app = Flask(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'db', 'notifications.db'),
    SECRET_KEY='secret',
    USERNAME='******',
    PASSWORD='******'
))

# scheduler
sched = BackgroundScheduler(timezone='America/Los_Angeles')
sched.add_jobstore('sqlalchemy', url='sqlite:///db/notifications.db')
sched.add_executor('threadpool')
sched.start()

def connect_db():
    """Connects to the specific database."""
    rv = sqlite3.connect(app.config['DATABASE'])
    rv.row_factory = dict_factory
    return rv

@app.cli.command('initdb')
@click.option('--file',help='the schema file')
def initdb_command(file):
    initdb(file)

def initdb(file):
Example #38

File: app.py  Project: thnkloud9/Tandem
    return True

if __name__ == '__main__':
    app_dir = os.path.dirname(os.path.realpath(__file__))
    filename= app_dir + '/../log/scheduler.log'
    f = open(filename, 'w+')
    f.close()
    logging.basicConfig(
        filename=filename,
        format='%(asctime)s - %(name)s - %(levelname)s: %(message)s', 
        datefmt='%m/%d/%Y %I:%M:%S %p',
        level=logging.DEBUG)
    logging.info('TandemApi Scheduler Started')

    scheduler = BackgroundScheduler()
    scheduler.add_jobstore('mongodb', database="tandem", collection='jobs')
    if len(sys.argv) > 1 and sys.argv[1] == '--clear':
        scheduler.remove_all_jobs()

    scheduler.start()
    print('To clear the alarms, run this example with the --clear argument.')
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    client = MongoClient('localhost', 27017)
    db = client['tandem']
    new_jobs_queue = db['new_jobs_queue']
 
    try:
        while True:
            time.sleep(2)
            addJobs(new_jobs_queue)
Example #39
class ScheduleCommand(Command):
    schedule_sender = None
    def __init__(self, logger, message_sender):
        super().__init__(logger, message_sender)
        # Message scheduler configuration
        self.scheduler = BackgroundScheduler()
        configuration = self.message_sender.get_configuration()
        sdb_url = configuration.get('Message Scheduler', 'db_path')
        self.scheduler.add_jobstore('sqlalchemy', url=sdb_url)
        self.scheduler.start()
        ScheduleCommand.schedule_sender = message_sender

    def process(self, chat_id, user_id, username, arguments):
        if len(arguments) < 1:
            return self.help()
        
        operation = arguments[0] 
        command_arguments = arguments[1:]

        if operation == "add":
            return self.add_message(username, chat_id, command_arguments)
        elif operation == "time":
            return self.get_local_time()
        elif operation == "remove":
            return self.remove_message(username, command_arguments)
        else:
            return self.help()

    def get_local_time(self):
        date_str =  str(datetime.today().strftime('%Y-%m-%d %H:%M:%S'))
        return "My time is {}".format(date_str)

    def add_message(self, username, chat_id, arguments):
        if len(arguments) <2:
            return self.help()
        date = None
        message = None
        if arguments[0] == 'relative':
            if len(arguments)<3:
                return self.help()
            date = self.on_delta_parse(arguments[1])
            message = arguments[2]
        else:
            date = self.on_date_parse(arguments[0])
            message = arguments[1]
        
        if message.startswith('"') and message.endswith('"'):
            message = message[1:-1]

        reference = self.user_reference(username)
        if date is None:
            return "{}Date format not recognized".format(reference)

        current_date = datetime.today()
        if date < current_date:
            current_str = self.get_human_string_date(current_date)
            return "{}Sorry, I can't travel to the past, my current date is: {}".format(reference, current_str)
 
        message_id = str(random.randrange(0, 999999999))
        self.scheduler.add_job(ScheduleCommand.send_programmed_message, \
                'date', run_date=date, args=[username, \
                chat_id, message], id = message_id) 
        
        date_str = self.get_human_string_date(date)
        return "{}The message [{}] has been successfully scheduled on {}"\
                .format(reference,  message_id, date_str)

    def send_programmed_message(username, chat_id, message):
        reference = ScheduleCommand.user_reference(username)
        text = "{}{}".format(reference, message)
        ScheduleCommand.schedule_sender.send_message(chat_id, text)    
    send_programmed_message = staticmethod(send_programmed_message)

    def remove_message(self, username, arguments): 
        if len(arguments) < 1:
            return self.help()
        
        founds = 0
        ids = self.get_comma_arguments(arguments[0])
        
        for message_id in ids:
            job = self.scheduler.get_job(message_id)
            if job == None:
                continue
            if job.args[0] != username:
                continue
            self.scheduler.remove_job(message_id)
            founds += 1
        
        reference = self.user_reference(username)
        if founds == len(ids):
            if len(ids) ==1:
                return "{}The scheduled message was canceled.".format(reference)
            else:
                return "{}The scheduled messages were canceled.".format(reference)
        elif founds > 0:
            return "{}Some scheduled messages were canceled, but some others were not found or you aren't the owner.".format(reference)
        else:
            if len(ids) == 1:
                return "{}The scheduled message was not found or you aren't the owner.".format(reference)
            else:
                return "{}The scheduled messages were not found or you aren't the owner.".format(reference)

    def get_human_string_date(self,datetime):
        return str(datetime.strftime('%Y-%m-%d %H:%M:%S'))

    def on_delta_parse(self, text_date):
        regex = re.compile(r'^((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?$')
        parts = regex.match(text_date)
        if not parts:
            return None
        parts = parts.groupdict()
        time_params = {}
        valid_params = 0
        for (name, param) in parts.items():
            if param:
                time_params[name] = int(param)
                valid_params = valid_params + 1
        if valid_params == 0:
            return None
        return datetime.now() + timedelta(**time_params)

    def on_date_parse(self, text_date):
        try:
            return parser.parse(text_date, dayfirst = True, yearfirst = True)
        except ValueError:
            return None

    def user_reference(username):
        if username is None:
            return ""
        else:
            return "@{}: ".format(username)
    user_reference = staticmethod(user_reference)

    def help(self):
        self.logger.info("Printing help for schedule message command.")
        return self.get_file_help(__file__, "schedule_message.man")
    def name(self):
        return "schedule_message"
    
    def description(self):
        return "Schedule a message to be sent in a specified date."     
Example #40
 def initializeScheduler(self):
     scheduler = BackgroundScheduler()
     connect_url = self.config.get(self.ORCH_SECTION,"sched_connect_url")
     scheduler.add_jobstore('sqlalchemy',url=connect_url)
     return scheduler
Example #41

@event.listens_for(Engine, "connect")
def connect(dbapi_connection, connection_record):
	connection_record.info['pid'] = os.getpid()

@event.listens_for(Engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
	pid = os.getpid()
	if 'pid' not in connection_record.info or connection_record.info['pid'] != pid:
		connection_record.connection = connection_proxy.connection = None
		raise exc.DisconnectionError(
				"Connection record belongs to pid %s, "
				"attempting to check out in pid %s" %
				(connection_record.info['pid'], pid)
		)

scheduler = BackgroundScheduler()
scheduler.add_jobstore('sqlalchemy', url='mysql+pymysql://{}:{}@{}/{}'.format(config.db_user, config.db_pass, config.db_host, config.db_name))

def post_diamondtime():
	now = datetime.now(timezone('US/Eastern'))
	episode_date = now + relativedelta(weekday=TU, hour=22, minute=0, second=0, microsecond=0)
	title = u'Weekly Diamond Time Submissions - For Episode on {0:%b %d, %Y}'.format(episode_date)

	f = open('diamondtime-text.md', 'r')
	text = f.read()

	sub = r.submit('diamondclub',title,text,save=False)
	sub.set_contest_mode(True)
	sub.set_flair(flair_text=u'Diamond Time',flair_css_class=u'diamondtime')
	sub.sticky(bottom=False)
	sub.distinguish(as_made_by=u'mod')
Example #42
def init_scheduler():
    global _scheduler
    if _scheduler is None:
        _scheduler = BackgroundScheduler()
        _scheduler.add_jobstore('sqlalchemy', url=SQLALCHEMY_DATABASE_URI)
    return _scheduler
Example #43
def add_single_backup_job(v_setup_id):  # id of the row just updated/inserted in mysql_ins_bak_setup

    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    scheduler = BackgroundScheduler()  # defaults to the in-memory job store

    url = "sqlite:////home/apps/dbajob.sqlite"

    scheduler.add_jobstore("sqlalchemy", url=url, alias="sqlite_js")

    scheduler.print_jobs()

    print("a")

    # Connect to the configuration database to fetch backup intervals, etc.
    db = Connection("/tmp/mysql3306.sock", config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone="+8:00")

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id and a.id=%d""" % (
        v_setup_id
    )

    print(v_sql)

    bak_server = db.get(v_sql)

    instance_id = bak_server["instance_id"]

    from_host = bak_server["ip"]

    # print from_host

    mysql_port = bak_server["port"]

    backup_interval_type = bak_server["backup_interval_type"]

    backup_start_time = bak_server["backup_start_time"]

    str_start_date = time.strftime("%Y-%m-%d") + " " + backup_start_time

    print(str_start_date)

    v_job_id = "backup_%s_%s" % (from_host, str(mysql_port))

    if backup_interval_type == 1:  # every day

        # scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')

        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            days=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )

    elif backup_interval_type == 2:  # every week weeks=1

        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            weeks=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )

    elif backup_interval_type == 3:  # every hour hours=1

        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            hours=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )

    else:
        pass

    scheduler.print_jobs()

    print("b")

    scheduler.start()

    scheduler.print_jobs()

    print("c")

    # Record the job's scheduling status in the database.
    # 0: the scheduled task has started, but the actual backup has not begun yet.

    v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
    values(%d,0)""" % (
        instance_id
    )

    db.execute(v_sche_start_sql)

    db.close()
Example #44

class HackathonScheduler(object):
    """A helper class for APScheduler"""
    jobstore = "ohp"

    def get_scheduler(self):
        """Return the apscheduler instance in case you have to call it directly

        :return the instance of APScheduler

        .. notes:: the return value might be None in flask debug mode
        """
        return self.__apscheduler

    def add_once(self, feature, method, context=None, id=None, replace_existing=True, run_date=None, **delta):
        """Add a job to APScheduler and executed only once

        Job will be executed at 'run_date' or after certain timedelta.

        :Example:
            scheduler = RequiredFeature("scheduler")

            # execute task once in 5 minutes:
            context = Context(user_id=1)
            scheduler.add_once("user_manager","get_user_by_id",context=context, minutes=5)
            # 5 minutes later, user_manager.get_user_by_id(context) will be executed

        :type feature: str|unicode
        :param feature: the feature used to look up the instance through hackathon_factory. All features are defined in __init__.py

        :type method: str|unicode
        :param method: the method name defined in the instance

        :type context: Context, see hackathon/__init__.py
        :param context: the execution context. Actually the parameters of 'method'

        :type id: str
        :param id: id for APScheduler job. Random id will be generated if not specified by caller

        :type replace_existing: bool
        :param replace_existing: if true, existing job with the same id will be replaced. If false, exception will be raised

        :type run_date: datetime | None
        :param run_date: job run date. If None, job run date will be datetime.now()+timedelta(delta)

        :type delta: kwargs for timedelta
        :param delta: kwargs for timedelta. For example: minutes=5. Will be ignored if run_date is not None
        """
        if not run_date:
            run_date = get_now() + timedelta(**delta)

        if self.__apscheduler:
            self.__apscheduler.add_job(scheduler_executor,
                                       trigger='date',
                                       run_date=run_date,
                                       id=id,
                                       max_instances=1,
                                       replace_existing=replace_existing,
                                       jobstore=self.jobstore,
                                       args=[feature, method, context])

    def add_interval(self, feature, method, context=None, id=None, replace_existing=True, next_run_time=undefined,
                     **interval):
        """Add an interval job to APScheduler and executed.

        Job will be executed firstly at 'next_run_time'. And then executed in interval.

        :Example:
            scheduler = RequiredFeature("scheduler")

            context = Context(user_id=1)
            scheduler.add_interval("user_manager","get_user_by_id", context=context, minutes=10)
            # user_manager.get_user_by_id(context) will be called every 10 minutes

        :type feature: str|unicode
        :param feature: the feature used to look up the instance through hackathon_factory. All features are defined in __init__.py

        :type method: str|unicode
        :param method: the method name defined in the instance

        :type context: Context, see hackathon/__init__.py
        :param context: the execution context. Actually the parameters of 'method'

        :type id: str
        :param id: id for APScheduler job. Random id will be generated if not specified by caller

        :type replace_existing: bool
        :param replace_existing: if true, existing job with the same id will be replaced. If false, exception will be raised

        :type next_run_time: datetime | undefined
        :param next_run_time: the first time the job will be executed. Leave undefined to defer execution until the first interval has elapsed

        :type interval: kwargs for "interval" trigger
        :param interval: kwargs for "interval" trigger. For example: minutes=5.
        """
        if self.__apscheduler:
            self.__apscheduler.add_job(scheduler_executor,
                                       trigger='interval',
                                       id=id,
                                       max_instances=1,
                                       replace_existing=replace_existing,
                                       next_run_time=next_run_time,
                                       jobstore=self.jobstore,
                                       args=[feature, method, context],
                                       **interval)

    def remove_job(self, job_id):
        """Remove job from APScheduler job store

        :type job_id: str | unicode
        :param job_id: the id of job
        """
        if self.__apscheduler:
            try:
                self.__apscheduler.remove_job(job_id, self.jobstore)
            except JobLookupError:
                log.debug("remove job failed because job %s not found" % job_id)
            except Exception as e:
                log.error(e)

    def has_job(self, job_id):
        """Check the existence of specific job """
        if self.__apscheduler:
            job = self.__apscheduler.get_job(job_id, jobstore=self.jobstore)
            return job is not None
        return False

    def __init__(self, app):
        """Initialize APScheduler

        :type app: Flask
        :param app: the Flask app
        """
        self.app = app
        self.__apscheduler = None

        # Only instantiate outside Flask debug mode, or in the Werkzeug reloader's
        # child process; this avoids APScheduler being instantiated twice
        if not app.debug or os.environ.get("WERKZEUG_RUN_MAIN") == "true":
            self.__apscheduler = BackgroundScheduler(timezone=utc)

            # add MySQL job store
            job_store_type = safe_get_config("scheduler.job_store", "memory")
            if job_store_type == "mysql":
                log.debug("add aps_cheduler job store based on mysql")
                self.__apscheduler.add_jobstore('sqlalchemy',
                                                alias=self.jobstore,
                                                url=get_config("scheduler.job_store_url"))
            elif job_store_type == "mongodb":
                log.debug("add aps_cheduler job store based on mongodb")
                self.__apscheduler.add_jobstore('mongodb',
                                                alias=self.jobstore,
                                                database=safe_get_config("scheduler.database", "apscheduler"),
                                                collection=safe_get_config("scheduler.collection", "jobs"),
                                                host=safe_get_config("scheduler.host", "localhost"),
                                                port=safe_get_config("scheduler.port", 27017))

            # add event listener
            self.__apscheduler.add_listener(scheduler_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_ADDED)
            log.info("APScheduler loaded")
            self.__apscheduler.start()
Example #45
class BileanScheduler(object):
    """Billing scheduler based on apscheduler"""

    job_types = (
        NOTIFY, DAILY, FREEZE,
    ) = (
        'notify', 'daily', 'freeze',
    )
    trigger_types = (DATE, CRON) = ('date', 'cron')

    def __init__(self, **kwargs):
        super(BileanScheduler, self).__init__()
        self._scheduler = BackgroundScheduler()
        self.notifier = notifier.Notifier()
        self.engine_id = kwargs.get('engine_id', None)
        self.context = kwargs.get('context', None)
        if not self.context:
            self.context = bilean_context.get_admin_context()
        if cfg.CONF.bilean_task.store_ap_job:
            self._scheduler.add_jobstore(cfg.CONF.bilean_task.backend,
                                         url=cfg.CONF.bilean_task.connection)

    def init_scheduler(self):
        """Init all jobs related to the engine from db."""
        jobs = db_api.job_get_all(self.context, engine_id=self.engine_id)
        if not jobs:
            LOG.info(_LI("No job found from db"))
            return True
        for job in jobs:
            if self.is_exist(job.id):
                continue
            task_name = "_%s_task" % (job.job_type)
            task = getattr(self, task_name)
            self.add_job(task, job.id,
                         job_type=job.job_type,
                         params=job.parameters)

    def add_job(self, task, job_id, trigger_type='date', **kwargs):
        """Add a job to the scheduler.

        :param task: the callable to run
        :param str|unicode job_id: the job identifier ('<job_type>-<user_id>')
        :param str trigger_type: 'date' for one-shot jobs, 'cron' for daily jobs
        :param kwargs: trigger options ('run_date' for date jobs,
                       'hour'/'minute' for cron jobs)
        """
        mg_time = cfg.CONF.bilean_task.misfire_grace_time
        job_time_zone = cfg.CONF.bilean_task.time_zone
        # job_id is '<job_type>-<user_id>'; split once so dashes in the user id survive
        user_id = job_id.split('-', 1)[1]
        if trigger_type == 'date':
            run_date = kwargs.get('run_date')
            if run_date is None:
                msg = "Param run_date cannot be None for trigger type 'date'."
                raise exception.InvalidInput(reason=msg)
            self._scheduler.add_job(task, 'date',
                                    timezone=job_time_zone,
                                    run_date=run_date,
                                    args=[user_id],
                                    id=job_id,
                                    misfire_grace_time=mg_time)
            return True

        # Add a cron type job
        hour = kwargs.get('hour')
        minute = kwargs.get('minute')
        # 0 is a valid hour/minute, so test for None rather than falsiness
        if hour is None or minute is None:
            hour, minute = self._generate_timer()
        self._scheduler.add_job(task, 'cron',
                                timezone=job_time_zone,
                                hour=hour,
                                minute=minute,
                                args=[user_id],
                                id=job_id,
                                misfire_grace_time=mg_time)
        return True

    def modify_job(self, job_id, **changes):
        """Modifies the properties of a single job.

        Modifications are passed to this method as extra keyword arguments.

        :param str|unicode job_id: the identifier of the job
        """

        self._scheduler.modify_job(job_id, **changes)

    def remove_job(self, job_id):
        """Removes a job, preventing it from being run any more.

        :param str|unicode job_id: the identifier of the job
        """

        self._scheduler.remove_job(job_id)

    def start(self):
        LOG.info(_('Starting Billing scheduler'))
        self._scheduler.start()

    def stop(self):
        LOG.info(_('Stopping Billing scheduler'))
        self._scheduler.shutdown()

    def is_exist(self, job_id):
        """Returns if the Job exists that matches the given ``job_id``.

        :param str|unicode job_id: the identifier of the job
        :return: True|False
        """

        job = self._scheduler.get_job(job_id)
        return job is not None

    def _notify_task(self, user_id):
        user = user_mod.User.load(self.context, user_id=user_id)
        msg = {'user': user.id, 'notification': 'The balance is almost used up'}
        self.notifier.info('billing.notify', msg)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'notify'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))
        self._add_freeze_job(user)

    def _daily_task(self, user_id):
        user = user_mod.User.load(self.context, user_id=user_id)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'daily'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def _freeze_task(self, user_id):
        user = user_mod.User.load(self.context, user_id=user_id)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'freeze'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def _add_notify_job(self, user):
        if not user.rate:
            return False
        total_seconds = user.balance / user.rate
        prior_notify_time = cfg.CONF.bilean_task.prior_notify_time * 3600
        notify_seconds = max(total_seconds - prior_notify_time, 0)
        run_date = timeutils.utcnow() + timedelta(seconds=notify_seconds)
        job_id = self._generate_job_id(user.id, self.NOTIFY)
        self.add_job(self._notify_task, job_id, run_date=run_date)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.NOTIFY,
               'engine_id': self.engine_id,
               'parameters': {'run_date': run_date}}
        db_api.job_create(self.context, job)

    def _add_freeze_job(self, user):
        if not user.rate:
            return False
        total_seconds = user.balance / user.rate
        run_date = timeutils.utcnow() + timedelta(seconds=total_seconds)
        job_id = self._generate_job_id(user.id, self.FREEZE)
        self.add_job(self._freeze_task, job_id, run_date=run_date)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.FREEZE,
               'engine_id': self.engine_id,
               'parameters': {'run_date': run_date}}
        db_api.job_create(self.context, job)
        return True

    def _add_daily_job(self, user):
        job_id = self._generate_job_id(user.id, self.DAILY)
        params = {'hour': random.randint(0, 23),
                  'minute': random.randint(0, 59)}
        self.add_job(self._daily_task, job_id, trigger_type='cron',
                     **params)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.DAILY,
               'engine_id': self.engine_id,
               'parameters': params}
        db_api.job_create(self.context, job)
        return True

    def _delete_all_job(self, user):
        for job_type in self.job_types:
            job_id = self._generate_job_id(user.id, job_type)
            if self.is_exist(job_id):
                self.remove_job(job_id)
            try:
                db_api.job_delete(self.context, job_id)
            except exception.NotFound as e:
                LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def update_user_job(self, user):
        """Update user's billing job"""
        if user.status not in [user.ACTIVE, user.WARNING]:
            self._delete_all_job(user)
            return

        for job_type in self.NOTIFY, self.FREEZE:
            job_id = self._generate_job_id(user.id, job_type)
            if self.is_exist(job_id):
                self.remove_job(job_id)
            try:
                db_api.job_delete(self.context, job_id)
            except exception.NotFound as e:
                LOG.warn(_("Failed to delete job: %s") % six.text_type(e))

        daily_job_id = self._generate_job_id(user.id, self.DAILY)
        if not self.is_exist(daily_job_id):
            self._add_daily_job(user)

        if user.status == user.ACTIVE:
            self._add_notify_job(user)
        else:
            self._add_freeze_job(user)

    def _generate_timer(self):
        """Generate a random timer include hour and minute."""
        hour = random.randint(0, 23)
        minute = random.randint(0, 59)
        return hour, minute

    def _generate_job_id(self, user_id, job_type):
        """Generate job id by given user_id and job type"""
        return "%s-%s" % (job_type, user_id)
示例#46
0
class MainRunner(object):

    class FilterAllLog(logging.Filter):
        # By default we filter out records from apscheduler.executors.default
        # and apscheduler.scheduler; the filtered logger names can be
        # configured in config.json.
        def filter(self, record):
            return False

    def __init__(self, input_cmd_config_fp, input_job_config_fp, input_config_fp):

        # init value
        cmd_config_fp = os.path.abspath(input_cmd_config_fp)
        job_config_fp = os.path.abspath(input_job_config_fp)
        config_fp = os.path.abspath(input_config_fp)

        # load configuration json files
        self.cmd_config = CommonUtil.load_json_file(cmd_config_fp)
        self.job_config = CommonUtil.load_json_file(job_config_fp)
        self.config = CommonUtil.load_json_file(config_fp)

        # init schedulers
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_jobstore('sqlalchemy', url=self.config['job_store_url'])
        self.scheduler.start()

        # init variables
        manager = Manager()
        self.sync_queue = manager.Queue()
        self.async_queue = manager.Queue()
        self.current_job_list = []

        # Slack sending queue
        # TODO: if the Slack bot is disabled, the sending queue can fill up
        # and hold messages in memory; guard against that.
        self.slack_sending_queue = manager.Queue(50)

        # init logger
        self.set_logging(self.config['log_level'], self.config['log_filter'])

    def set_logging(self, log_level, log_filter_list):
        default_log_format = '%(asctime)s %(levelname)s [%(name)s.%(funcName)s] %(message)s'
        default_datefmt = '%Y-%m-%d %H:%M'
        if log_level.lower() == "debug":
            logging.basicConfig(level=logging.DEBUG, format=default_log_format, datefmt=default_datefmt)
        else:
            logging.basicConfig(level=logging.INFO, format=default_log_format, datefmt=default_datefmt)

        my_filter = self.FilterAllLog()
        for target_logger in log_filter_list:
            logging.getLogger(target_logger).addFilter(my_filter)

    def scheduler_del_job(self, **kwargs):
        input_cmd_str = kwargs.get("input_cmd_str", "")
        cmd_str_list = input_cmd_str.split(" ")
        if len(cmd_str_list) == 2:
            job_id = cmd_str_list[1]
            current_job_list = self.scheduler.get_jobs()
            current_job_id_list = [j.id for j in current_job_list]
            if job_id in current_job_id_list:
                self.scheduler.remove_job(job_id)
            else:
                logging.error("Cannot find the specify job id [%s]" % job_id)
        else:
            logging.error("Incorrect cmd format! [%s]" % input_cmd_str)

    def scheduler_list_job(self, **kwargs):
        self.scheduler.print_jobs()

    def scheduler_shutdown(self, **kwargs):
        self.scheduler.shutdown()
        sys.exit(0)

    def list_all_commands(self, **kwargs):
        print("Currently supported commands are listed below:")
        print("-" * 80)
        for cmd_str in self.cmd_config['cmd-settings']:
            print('{:30s} {:50s}'.format(cmd_str, self.cmd_config['cmd-settings'][cmd_str]['desc']))
        print("-" * 80)

    def scheduler_job_handler(self, input_cmd_obj, input_cmd_str):
        cmd_match_pattern = next(iter(input_cmd_obj))
        func_point = getattr(self, input_cmd_obj[cmd_match_pattern]['func-name'])
        func_point(cmd_configs=input_cmd_obj[cmd_match_pattern]['configs'], input_cmd_str=input_cmd_str)

    def cmd_queue_composer(self, input_cmd_str):
        for cmd_pattern in self.cmd_config['cmd-settings']:
            re_compile_obj = re.compile(cmd_pattern)
            re_match_obj = re_compile_obj.search(input_cmd_str)
            if re_match_obj:
                current_command_obj = self.cmd_config['cmd-settings'][cmd_pattern]
                logging.debug("job matched [%s]" % cmd_pattern)
                target_queue_type = current_command_obj.get('queue-type', None)
                if target_queue_type == "async":
                    self.async_queue.put({"cmd_obj": current_command_obj, "cmd_pattern": cmd_pattern, "input_cmd_str": input_cmd_str})
                elif target_queue_type == "sync":
                    self.sync_queue.put({"cmd_obj": current_command_obj, "cmd_pattern": cmd_pattern, "input_cmd_str": input_cmd_str})
                else:
                    self.scheduler_job_handler({cmd_pattern: current_command_obj}, input_cmd_str)
                break

    def load_default_jobs(self, input_scheduler, input_job_config):
        current_jobs = input_scheduler.get_jobs()
        current_jobs_name = [job.name for job in current_jobs]
        for job_name in input_job_config:
            if input_job_config[job_name]['default-loaded']:
                if job_name not in current_jobs_name:
                    func_point = getattr(importlib.import_module(input_job_config[job_name]['module-path']), job_name)
                    input_scheduler.add_job(func_point, input_job_config[job_name]['trigger-type'],
                                           id=job_name,
                                           seconds=input_job_config[job_name]['interval'],
                                           max_instances=input_job_config[job_name]['max-instances'],
                                           kwargs={
                                               'async_queue': self.async_queue,
                                               'sync_queue': self.sync_queue,
                                               'slack_sending_queue': self.slack_sending_queue,
                                               'configs': input_job_config[job_name]['configs'],
                                               'cmd_config': self.cmd_config}
                                           )

    def job_exception_listener(self, event):
        if event.exception:
            logging.error("Job [%s] crashed [%s]" % (event.job_id, event.exception))
            logging.error(event.traceback)

    def add_event_listener(self):
        self.scheduler.add_listener(self.job_exception_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

    def run(self):
        # load the default jobs into the scheduler if they are not scheduled yet
        self.load_default_jobs(self.scheduler, self.job_config)

        # add event listener into scheduler
        self.add_event_listener()

        # enter the loop to receive the interactive command
        while True:
            user_input = input()
            self.cmd_queue_composer(user_input)
            time.sleep(3)
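load_default_jobs above expects job entries shaped roughly like the sketch below (a hypothetical config inferred from the keys the method reads; the job name and module path are illustrative):

# Hypothetical job_config entry; keys mirror what load_default_jobs reads.
job_config = {
    "check_sync_queue": {               # must match a function in module-path
        "default-loaded": True,         # schedule on startup if not present
        "module-path": "jobs.status",   # module that defines check_sync_queue
        "trigger-type": "interval",     # passed straight to scheduler.add_job
        "interval": 30,                 # seconds between runs
        "max-instances": 1,
        "configs": {},
    },
}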
示例#47
0
class isardScheduler():
    def __init__(self):
        '''
        JOB SCHEDULER
        '''
#<<<<<<< HEAD
#~ host=app.config['RETHINKDB_HOST'],
                                         #~ port=app.config['RETHINKDB_PORT'],
                                         #~ auth_key=app.config['RETHINKDB_AUTH']
        #~ rConn=r.connect(host=app.config['RETHINKDB_HOST'],
                         #~ port=app.config['RETHINKDB_PORT'],
                         #~ auth_key=app.config['RETHINKDB_AUTH'],
                         #~ db=app.config['RETHINKDB_DB'])
        self.rStore=RethinkDBJobStore()
#=======
        # ~ self.rStore=RethinkDBJobStore(host=app.config['RETHINKDB_HOST'],
                                         # ~ port=app.config['RETHINKDB_PORT'],
                                         # ~ auth_key=app.config['RETHINKDB_AUTH'])
#>>>>>>> fe171dc30ddd8a2dabafa7b2085cbb60e6432c35
        self.scheduler = BackgroundScheduler(timezone=pytz.timezone('UTC'))
        self.scheduler.add_jobstore('rethinkdb',self.rStore, database='isard', table='scheduler_jobs',host=app.config['RETHINKDB_HOST'],
                         port=app.config['RETHINKDB_PORT'],
                         auth_key=app.config['RETHINKDB_AUTH'])
        self.scheduler.remove_all_jobs()
        #~ scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()])
        #~ app.sched.shutdown(wait=False)
        self.turnOn()
        
        
    def add_scheduler(self, kind, action, hour, minute):
        job_id = kind + '_' + action + '_' + str(hour) + str(minute)
        function = getattr(isardScheduler, action)
        if kind == 'cron':
            self.scheduler.add_job(function, kind, hour=int(hour), minute=int(minute), jobstore='rethinkdb', replace_existing=True, id=job_id)
        if kind == 'interval':
            self.scheduler.add_job(function, kind, hours=int(hour), minutes=int(minute), jobstore='rethinkdb', replace_existing=True, id=job_id)
        if kind == 'date':
            alarm_time = datetime.now() + timedelta(hours=int(hour), minutes=int(minute))
            self.scheduler.add_job(function, kind, run_date=alarm_time, jobstore='rethinkdb', replace_existing=True, id=job_id)
        with app.app_context():
            r.table('scheduler_jobs').get(job_id).update({'kind': kind, 'action': action, 'name': action.replace('_', ' '), 'hour': hour, 'minute': minute}).run(db.conn)
        return True

    '''
    Scheduler actions
    '''
    @staticmethod
    def stop_domains():
        with app.app_context():
            r.table('domains').get_all('Started',index='status').update({'status':'Stopping'}).run(db.conn)
        
    @staticmethod
    def stop_domains_without_viewer():
        with app.app_context():
            r.table('domains').get_all('Started',index='status').filter({'viewer':{'client_since':False}}).update({'status':'Stopping'}).run(db.conn)
          
    @staticmethod
    def delete_old_stats(reduce_interval=300, delete_interval=86400):  # keep 24h of history
        with app.app_context():
            # domains_status
            r.table('domains_status_history').filter(r.row['when'] < int(time.time()) - delete_interval).delete().run(db.conn)
            reduced=[]
            cursor = r.table('domains_status').filter(r.row['when'] < int(time.time()) - reduce_interval).order_by('when').run(db.conn)
            r.table('domains_status').filter(r.row['when'] < int(time.time()) - reduce_interval).delete().run(db.conn)
            i=0
            for c in cursor:
                if i % 50 == 0: reduced.append(c)
                i+=1
            r.table('domains_status_history').insert(reduced).run(db.conn)
            
            
            # Hypervisors_status
            r.table('hypervisors_status_history').filter(r.row['when'] < int(time.time()) - delete_interval).delete().run(db.conn)
            reduced=[]
            cursor = r.table('hypervisors_status').filter(r.row['when'] < int(time.time()) - reduce_interval).order_by('when').run(db.conn)
            r.table('hypervisors_status').filter(r.row['when'] < int(time.time()) - reduce_interval).delete().run(db.conn)
            i=0
            for c in cursor:
                if i % 50 == 0: reduced.append(c)
                i+=1
            r.table('hypervisors_status_history').insert(reduced).run(db.conn)
            
            # Hypervisors_events (does not grow at the same speed)
            r.table('hypervisors_events').filter(r.row['when'] < int(time.time()) - delete_interval).delete().run(db.conn)
      
    def turnOff(self):
        self.scheduler.shutdown()
    
    def turnOn(self):
        self.scheduler.start()
    
    def removeJobs(self):
        self.scheduler.remove_all_jobs()
    '''
    BULK ACTIONS
    '''
    def bulk_action(self, table, tbl_filter, tbl_update):
        with app.app_context():
            log.info('BULK ACTION: Table {}, Filter {}, Update {}'.format(table, tbl_filter, tbl_update))
            r.table(table).filter(tbl_filter).update(tbl_update).run(db.conn)
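A brief usage sketch for the scheduler above (the schedule values are illustrative):

sched = isardScheduler()

# Stop all started domains every day at 23:00 UTC.
sched.add_scheduler('cron', 'stop_domains', 23, 0)

# Compact the stats tables every 4 hours.
sched.add_scheduler('interval', 'delete_old_stats', 4, 0)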
示例#48
0
login_manager = LoginManager()
login_manager.init_app(app)
bcrypt = Bcrypt(app)
toolbar = DebugToolbarExtension(app)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)

####################
#### Scheduler  ####
####################


from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
scheduler.add_jobstore("sqlalchemy", url=app.config["SQLALCHEMY_DATABASE_URI"])


###################
### blueprints ####
###################

# from project.user.views import user_blueprint
# app.register_blueprint(user_blueprint)

from project.main.views import main_blueprint
from project.song.views import song_blueprint
from project.event.views import event_blueprint
from project.job.views import job_blueprint

app.register_blueprint(main_blueprint)
示例#49
0
File: main.py  Project: aychoi/gymcall
import logging
logging.basicConfig(filename='example.log',level=logging.DEBUG)


# configuration
USERNAME = '******'
PASSWORD = '******'
db_url = 'sqlite:////tmp/test.db'

app = Flask(__name__)
app.debug = True
app.secret_key = '\xf0\xc0\xd7\xbf\x82\xb4\xbbz2N\xa31\xc7k\xb0\x1e\xc9\x1e\xe9\x9egtOn'
app.config['SQLALCHEMY_DATABASE_URI'] = db_url
db = SQLAlchemy(app)
scheduler = BackgroundScheduler(timezone=pytz.utc)
scheduler.add_jobstore('sqlalchemy', url = db_url)
scheduler.start()   


# database models
class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    phone_number = db.Column(db.String(11), unique=True)
    timezone = db.Column(db.String(32))
    verification_code = db.Column(db.Integer)
    is_deleted = db.Column(db.Boolean)

    def __init__(self, phone_number, timezone):
        self.phone_number = phone_number
        self.timezone = timezone
        self.verification_code = random.randint(100000, 999999)
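A sketch of how a one-shot job might be scheduled against the store above (the function, arguments, and run time are illustrative assumptions, not part of the original project):

from datetime import datetime, timedelta

def call_user(phone_number):
    pass  # placeholder: place the reminder call

scheduler.add_job(call_user, 'date',
                  run_date=datetime.utcnow() + timedelta(minutes=5),
                  args=['15551234567'], id='call-15551234567',
                  replace_existing=True)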
示例#50
0
from django_apscheduler.jobstores import DjangoJobStore, register_events
from apscheduler.executors.pool import ThreadPoolExecutor
from scrapyd_api import ScrapydAPI

from gerapy.server.core.utils import scrapyd_url
from gerapy.server.core.models import Task, Client

logger = logging.getLogger(__name__)

db_time_format = "%Y-%m-%d %H:%M:%S"

executors = {
    'default': ThreadPoolExecutor(20)
}
scheduler = BackgroundScheduler(executors=executors)
scheduler.add_jobstore(DjangoJobStore(), "default")


@scheduler.scheduled_job("interval", seconds=60, id="scheduler_job")
def scheduler_job():
    """
    Check the scheduled tasks once per minute.
    :return:
    """
    models = Task.objects.all()
    for model in models:
        scheduler_at = model.scheduler_at
        updated_at = model.updated_at
        scheduler_at_time_stamp = scheduler_at * 60
        updated_at_time_stamp = time.mktime(updated_at.timetuple())
        if time.time() - updated_at_time_stamp > scheduler_at_time_stamp:
示例#51
0
from functions import safe_get_config, get_config
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
from . import app
import os
from log import log


def scheduler_listener(event):
    if event.code == EVENT_JOB_ERROR:
        print('The job crashed :(')
        log.warn("The scheduled job crashed because of %s" % repr(event.exception))
    else:
        print('The job executed :)')
        log.debug("The scheduled job %s executed and returned '%s'" % (event.job_id, event.retval))


if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
    scheduler = BackgroundScheduler()

    # job store
    if safe_get_config("scheduler.job_store", "memory") == "mysql":
        scheduler.add_jobstore('sqlalchemy', url=get_config("scheduler.job_store_url"))

    # listener
    # do we need to listen for EVENT_JOB_MISSED as well?
    scheduler.add_listener(scheduler_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

    scheduler.start()
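Jobs can then be registered on this module-level scheduler from anywhere in the app; a minimal sketch (the function and interval are illustrative assumptions):

def health_check():
    log.debug("health check ran")

scheduler.add_job(health_check, 'interval', minutes=5,
                  id='health_check', replace_existing=True)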