def test_end_date(self, timezone):
     """Tests that the interval trigger won't return any datetimes past the set end time."""
     start_date = timezone.localize(datetime(2014, 5, 26))
     # end_date is 7 minutes after start, so only the 0- and 5-minute fires fit.
     trigger = IntervalTrigger(minutes=5, start_date=start_date,
                               end_date=datetime(2014, 5, 26, 0, 7), timezone=timezone)
     assert trigger.get_next_fire_time(None, start_date + timedelta(minutes=2)) == \
         start_date.replace(minute=5)
     # Past end_date no further fire time may be produced.
     assert trigger.get_next_fire_time(None, start_date + timedelta(minutes=6)) is None
    def test_dst_change(self):
        """
        Verify IntervalTrigger across both US/Eastern DST transitions.

        Comparisons are done on string representations on purpose: comparing
        datetime objects directly would normalise both sides to UTC and hide
        the wall-clock difference this test is about.
        """
        tz = pytz.timezone('US/Eastern')
        trigger = IntervalTrigger(hours=1, start_date=datetime(2013, 3, 1), timezone=tz)

        # Spring forward (2013-03-10): 2:xx does not exist, next fire is 3:00.
        previous = tz.localize(datetime(2013, 3, 10, 1, 5), is_dst=False)
        expected = tz.localize(datetime(2013, 3, 10, 3), is_dst=True)
        assert str(trigger.get_next_fire_time(None, previous)) == str(expected)

        # Fall back (2013-11-03): 1:00 occurs twice; the second occurrence is next.
        previous = tz.localize(datetime(2013, 11, 3, 1, 5), is_dst=True)
        expected = tz.localize(datetime(2013, 11, 3, 1), is_dst=False)
        assert str(trigger.get_next_fire_time(None, previous)) == str(expected)
    def manage_master_daemons(self):
        """Start master-only background jobs, or tear them down on demotion.

        When this device is the master and no master jobs are registered yet,
        schedule the slave-data pull job and register its cleanup at exit.
        When the device is no longer master but jobs are still registered,
        remove them.
        """
        if not self.master_jobs and self.config.is_master():
            # Poll slave devices every 2 seconds and publish their data.
            slave_data_collection = self.scheduler.add_job(
                func=self.pull_and_publish_data,
                trigger=IntervalTrigger(seconds=2),
                id='Slave_Data_Pull',
                name='Slave_Data_Pull',
                replace_existing=True)
            self.master_jobs.append(slave_data_collection)

            # Make sure the jobs are also removed when the process exits.
            atexit.register(self.cleanup_master_jobs)
        elif self.master_jobs and not self.config.is_master():
            # BUG FIX: the original referenced the method without calling it
            # (`self.cleanup_master_jobs`), so demotion never cleaned up.
            self.cleanup_master_jobs()
示例#4
0
def main():
    """Create the scheduler, register every configured job and run it."""
    init_logging()
    scheduler = BlockingScheduler()

    for job_info in JOBS:
        job_name, interval = job_info.split(':')
        job = import_job(job_name)
        if not job:
            LOGGER.error('Couldn\'t find job data for job: %s', job_name)
            continue
        LOGGER.info('Adding job: %s, cadence each %s minutes', job_name,
                    interval)
        scheduler.add_job(job.run, IntervalTrigger(minutes=int(interval)))
    scheduler.start()
示例#5
0
 async def start_fetch_wall(self, job_data: tuple):
     """Begin periodically fetching a VK wall for a user.

     ``job_data`` is a 5-tuple of
     ``(user_id, wall_id, to_chat_id, timeout, fetch_count)`` where
     ``timeout`` is the polling interval in minutes.
     """
     user_id, wall_id, to_chat_id, timeout, fetch_count = job_data
     await self._setup_user_cache(user_id, wall_id, to_chat_id)
     self.scheduler.add_job(
         self.fetch_public_wall,
         IntervalTrigger(minutes=timeout),
         args=(
             wall_id,
             fetch_count,
         ),
         name="vk_fetch_wall",
         # One job per (user, wall, chat); re-adding replaces the old one.
         id=f"{user_id}:{wall_id}:{to_chat_id}",
         replace_existing=True,
         # Fire once immediately, not only after the first interval elapses.
         next_run_time=datetime.now(),
     )
示例#6
0
    def test_pickle(self, timezone):
        """Round-trip the trigger through pickle and compare every slot."""
        original = IntervalTrigger(weeks=2, days=6, minutes=13, seconds=2,
                                   start_date=date(2016, 4, 3),
                                   timezone=timezone, jitter=12)
        restored = pickle.loads(pickle.dumps(original, 2))

        for attr in IntervalTrigger.__slots__:
            assert getattr(restored, attr) == getattr(original, attr)
示例#7
0
def initScheduler():
    """Schedule the twice-daily notifier jobs on a background scheduler."""
    MyLogger.logMsg("init scheduler" + str(os.getpid()))
    # NOTE(review): gating on a literal PID of 10 looks environment-specific
    # (e.g. a fixed PID inside a container/app server) -- confirm intent.
    if os.getpid() == 10:
        MyLogger.logMsg("up starting")
        scheduler = BackgroundScheduler()
        scheduler.start()
        # Daily job anchored at 08:40 Asia/Calcutta via start_date + 24h interval.
        scheduler.add_job(func=startNotifier,
                          trigger=IntervalTrigger(
                              hours=24,
                              start_date='2017-05-04 08:40:00',
                              timezone='Asia/Calcutta'),
                          id='notifiying_job_morning',
                          name='Notifiy at 0840AM',
                          replace_existing=True)
        # Second daily job anchored at 13:52 Asia/Calcutta.
        scheduler.add_job(func=startNotifier,
                          trigger=IntervalTrigger(
                              hours=24,
                              start_date='2017-05-04 13:52:00',
                              timezone='Asia/Calcutta'),
                          id='notifiying_job_noon',
                          name='Notifiy at 0152PM',
                          replace_existing=True)
        # Shut down the scheduler when exiting the app
        atexit.register(lambda: scheduler.shutdown())
示例#8
0
def schedule_job(function, name, hours=0, minutes=0):
    """
    Add, reschedule or remove the named background task.

    The job is created when any interval is configured, rescheduled when the
    configured interval differs from the running one, and removed when the
    interval settings were changed to zero.
    """
    job = SCHED.get_job(name)
    wanted = datetime.timedelta(hours=hours, minutes=minutes)
    if job:
        if hours == 0 and minutes == 0:
            SCHED.remove_job(name)
            logger.info("Removed background task: %s", name)
        elif job.trigger.interval != wanted:
            SCHED.reschedule_job(
                name, trigger=IntervalTrigger(hours=hours, minutes=minutes))
            logger.info("Re-scheduled background task: %s", name)
    elif hours > 0 or minutes > 0:
        SCHED.add_job(function, id=name,
                      trigger=IntervalTrigger(hours=hours, minutes=minutes))
        logger.info("Scheduled background task: %s", name)
示例#9
0
def construct_trigger(trigger_args):
    """Build an APScheduler trigger from a ``{'type': ..., 'args': {...}}`` spec.

    Raises InvalidTriggerArgs for an unknown type or bad trigger arguments.
    """
    trigger_type = trigger_args['type']
    trigger_args = trigger_args['args']
    factories = {'date': DateTrigger,
                 'interval': IntervalTrigger,
                 'cron': CronTrigger}
    try:
        factory = factories.get(trigger_type)
        if factory is None:
            raise InvalidTriggerArgs(
                'Invalid scheduler type {0} with args {1}.'.format(trigger_type, trigger_args))
        return factory(**trigger_args)
    except (KeyError, ValueError, TypeError):
        raise InvalidTriggerArgs('Invalid scheduler arguments')
示例#10
0
 def __init__(self):
     """Load persisted history and start the background maintenance jobs."""
     # history: persisted record store loaded from disk.
     self.history = self._load_persistence()
     self._update_history_count(True)
     # Live bookkeeping, keyed in-memory only.
     self.instance_list = {}
     self.token_list = {}
     self.scheduler = BackgroundScheduler()
     self.scheduler.start()
     # Drop instances whose heartbeat is older than the 5-minute window.
     self.scheduler.add_job(
         func=self._remove_staleinstances,
         trigger=IntervalTrigger(seconds=300),
         id='stale_instance_remover',
         name='Remove stale instances if no heartbeat in 5 minutes',
         replace_existing=True)
     # Refresh the client/instance counters twice a minute.
     self.scheduler.add_job(
         func=self._update_history_count,
         trigger=IntervalTrigger(seconds=30),
         id='update history',
         name='update client and instance count every 30 seconds',
         replace_existing=True)
     # Flush history to disk every 15 seconds.
     self.scheduler.add_job(func=self._persist,
                            trigger=IntervalTrigger(seconds=15),
                            id='persist history',
                            name='persists the history to disk',
                            replace_existing=True)
示例#11
0
def before_first_request():
    """Initialise record keeping and schedule periodic metric snapshots."""
    # Load record keeping config settings
    load_record_conf()

    # Record one initial "hours" data point so the JS charts have something
    # to draw during the first hour of running.
    record_metrics('hours')

    # https://stackoverflow.com/questions/21214270/scheduling-a-function-to-run-every-hour-on-flask
    scheduler = BackgroundScheduler()
    scheduler.start()
    # The JS charting code assumes exactly these intervals (1 hour / 1 day /
    # 1 week); only adjust them temporarily to demo functionality.
    snapshot_jobs = (
        ('hours', IntervalTrigger(hours=1), 'hourly_metrics',
         'Records current metrics on an hourly scope'),
        ('days', IntervalTrigger(days=1), 'daily_metrics',
         'Records current metrics on a daily scope'),
        ('weeks', IntervalTrigger(weeks=1), 'weekly_metrics',
         'Records current metrics on a weekly scope'),
    )
    for scope, trigger, job_id, job_name in snapshot_jobs:
        scheduler.add_job(func=record_metrics,
                          args=[scope],
                          trigger=trigger,
                          id=job_id,
                          name=job_name,
                          replace_existing=True)
    # Shut down the scheduler when exiting the app
    atexit.register(lambda: scheduler.shutdown())
示例#12
0
def rss_trigger(times: int, rss: RSS_class.rss):
    """Schedule ``check_update`` to run every ``times`` minutes for one feed."""
    # Interval trigger with +-10s jitter so feeds don't all fire at once.
    trigger = IntervalTrigger(minutes=times, jitter=10)
    scheduler.add_job(
        func=check_update,        # job callable; its arguments go in ``args``
        trigger=trigger,
        args=(rss, ),             # single-element tuple -- keep the trailing comma
        id=rss.name,              # one job per feed, keyed by feed name
        misfire_grace_time=60,    # tolerate up to 60s of scheduling delay
        job_defaults={'max_instances': 10},
    )
示例#13
0
    def update_cron(self, cron_job_id, project_id, cron_info):
        """Update an existing scheduled job's trigger and its Cron payload.

        :param cron_job_id: id of the APScheduler job to modify (str).
        :param project_id: owning project id (str).
        :param cron_info: dict describing the new schedule; must contain
            ``triggerType`` ('interval' or 'date') plus either ``interval``
            (seconds) or ``runDate``.
        :raises TypeError: on bad argument types or any failure while
            updating the job.
        """
        if not isinstance(cron_job_id, str):
            raise TypeError('cron_id must be str')

        if not isinstance(project_id, str):
            raise TypeError('project_id must be str')

        if not isinstance(cron_info, dict):
            raise TypeError('cron_info must be dict')

        trigger_type = cron_info.get('triggerType')
        interval = cron_info.get('interval')
        run_date = cron_info.get('runDate')
        test_suite_id_list = cron_info.get('testSuiteIdList')
        include_forbidden = cron_info.get('includeForbidden')
        test_env_id = cron_info.get('testEnvId')
        always_send_mail = cron_info.get('alwaysSendMail')
        alarm_mail_group_list = cron_info.get('alarmMailGroupList')
        try:
            if trigger_type == 'interval' and int(interval) > 0:
                # BUG FIX: ``interval`` may arrive as a string -- it was
                # validated with int() in the condition but passed through
                # raw, which IntervalTrigger rejects.  Convert explicitly.
                self.scheduler.modify_job(job_id=cron_job_id,
                                          trigger=IntervalTrigger(seconds=int(interval)))
            elif trigger_type == 'date':
                # TODO validate the type/format of run_date
                self.scheduler.modify_job(job_id=cron_job_id, trigger=DateTrigger(run_date=run_date))
            else:
                raise TypeError('更新定时任务触发器失败!')
            if run_date:
                cron = Cron(test_suite_id_list=test_suite_id_list,
                            project_id=project_id,
                            test_env_id=test_env_id,
                            include_forbidden=include_forbidden,
                            always_send_mail=always_send_mail,
                            alarm_mail_group_list=alarm_mail_group_list,
                            trigger_type=trigger_type,  # display-only here; has no effect on the actual trigger
                            run_date=run_date)  # display-only here; has no effect on the actual trigger
            else:
                cron = Cron(test_suite_id_list=test_suite_id_list,
                            project_id=project_id,
                            include_forbidden=include_forbidden,
                            test_env_id=test_env_id,
                            always_send_mail=always_send_mail,
                            alarm_mail_group_list=alarm_mail_group_list,
                            trigger_type=trigger_type,  # display-only here; has no effect on the actual trigger
                            seconds=interval)  # display-only here; has no effect on the actual trigger
            # Quirk of the scheduler backend: the job must be updated via
            # ``args``, not by replacing ``func``.
            self.scheduler.modify_job(job_id=cron_job_id, coalesce=True, args=[cron])
        except BaseException as e:
            # NOTE(review): BaseException also catches KeyboardInterrupt and
            # SystemExit -- consider narrowing to Exception.
            raise TypeError('更新定时任务失败: %s' % e)
示例#14
0
def logindeck():
    """Log a Deck account into TweetDeck and schedule its auto-retweet jobs.

    Expects JSON ``{"deckid": ...}``.  Returns a JSON result whose ``status``
    is 1 on success, 0 when a verification code is needed, -1 for an unknown
    deck and -2 when the Twitter login failed.
    """
    result = {}
    json_data = request.json
    # NOTE(review): the admin check is commented out -- this endpoint is
    # currently unauthenticated; confirm that is intentional.
    # if (is_admin()):
    deck = Deck.query.filter_by(id=json_data['deckid']).first()
    # Idiom fix: identity comparison with None instead of ``== None``.
    if deck is None:
        result['status'] = -1
        result['msg'] = 'Deck is not exist.'
    else:
        driver = create_driver()
        login_code = login_tweetdeck(driver, deck.name, deck.password)['code']
        Deck.query.filter_by(id=json_data['deckid']).update(
            {"login_code": login_code})
        db.session.commit()
        if login_code < 0:
            # Login failed outright; release the browser session.
            driver.quit()
            result['msg'] = 'Unable to login to twitter.'
            result['status'] = -2
        else:
            if login_code == 0:
                result['status'] = 0
                result['msg'] = 'Please input verification code.'
            else:
                # Schedule one recurring retweet job per linked profile.
                profiles = (db.session.query(
                    Profile.id, Profile.minutes,
                    Link.deckid).join(Link)).filter_by(deckid=deck.id).all()
                for profile in profiles:
                    scheduler.add_job(
                        func=auto_rt,
                        kwargs={
                            'profile_id': profile.id,
                            'driver': driver
                        },
                        trigger=IntervalTrigger(minutes=profile.minutes),
                        id='retweet_%s' % (profile.id),
                        replace_existing=False,
                        misfire_grace_time=app.config['MISFIRE_GRACE_TIME'])
                    Profile.query.filter_by(id=profile.id).update(
                        {"run_status": 1})
                db.session.commit()
                # TODO: "Succefully" typo kept -- clients may match on it.
                result['msg'] = "Succefully Logged Deck."
                result['status'] = 1
            adddriver(deck.name, driver)
    # else:
    #     result['status'] = -1
    #     result['msg'] = 'Please login now.'
    return jsonify({'result': result})
示例#15
0
def delcache_trigger():
    """Schedule the periodic deletion of cached images."""
    # Fire every config.DELCACHE days, with +-10s jitter.
    trigger = IntervalTrigger(days=config.DELCACHE, jitter=10)
    scheduler.add_job(
        func=del_img,             # job callable; its arguments go in ``args``
        trigger=trigger,
        args=(1, ),               # single-element tuple -- keep the trailing comma
        id='DELCACHE',
        misfire_grace_time=60,    # tolerate up to 60s of scheduling delay
    )
示例#16
0
def reschedule_job(id, minutes):
    """Change a job's polling interval, enforcing per-job minimum intervals."""
    # 'relations' requires at least 10 minutes; everything else at least 2.
    if id == 'relations':
        if minutes < 10:
            print("ERRO! Intervalo mínimo é 10 minutos!")
            return
    elif minutes < 2:
        print("ERRO! Intervalo mínimo é 2 minutos!")
        return
    try:
        scheduler.reschedule_job(job_id=id,
                                 trigger=IntervalTrigger(minutes=minutes))
        print("Intervalo de " + id + " modificado para " + str(minutes))
    except Exception as e:
        print(e)
示例#17
0
 def get_trigger(self, task):
     """Build an IntervalTrigger from the task's loop-schedule settings."""
     if not self.marker_interface.providedBy(task):
         raise Exception(
             _("Task is not configured for loop-style scheduling!"))
     info = self.schema(task, None)
     if info is None:
         return None
     # Translate the schema fields one-to-one into trigger arguments.
     spans = {name: getattr(info, name)
              for name in ('weeks', 'days', 'hours', 'minutes', 'seconds')}
     return IntervalTrigger(start_date=tztime(date_to_datetime(info.start_date)),
                            end_date=tztime(date_to_datetime(info.end_date)),
                            **spans)
示例#18
0
def start_scheduler():
    """Create the async download scheduler, queue one immediate and one
    periodic download, then block on the event loop forever."""
    global scheduler, loop

    poll_seconds = int(APP_CONFIG.get('download.interval', 1800))
    loop = asyncio.new_event_loop()
    scheduler = AsyncIOScheduler(event_loop=loop)
    urls = (APP_CONFIG['download.root_path'], )
    # One-shot download right after server start...
    scheduler.add_job(download,
                      trigger=DateTrigger(run_date=datetime.now() + timedelta(seconds=1)),
                      args=(loop, False, urls))
    # ...then repeat on the configured cadence.
    scheduler.add_job(download,
                      trigger=IntervalTrigger(seconds=poll_seconds),
                      args=(loop, False, urls))
    scheduler.start()
    asyncio.set_event_loop(loop)
    loop.run_forever()
示例#19
0
def resume_torrent(scheduler, counter: Counter):
    """Resume stalled torrents and adapt the polling interval via ``counter``."""
    api = QbittrentClient()
    print(arrow.now().format(), '登陆了Qbittrent...')
    api.login()
    had_errors = api.resume_torrent()
    api.logout()
    if had_errors:
        # Something went wrong: notify and fall back to the shortest interval.
        api.sent_to_server_chan()
        counter.time_reset()
    else:
        # All good: back off by lengthening the interval.
        counter.time_multiplicate()
    scheduler.reschedule_job(
        'resume_torrent',
        trigger=IntervalTrigger(minutes=counter.interval_minutes))
示例#20
0
 def jobs(
     self
 ) -> Iterable[Tuple[IntervalTrigger, Callable[[], Iterable[Dict[str, Any]]]]]:
     """Yield (trigger, coroutine) pairs for the periodic news broadcast.

     Returns an empty tuple when every feed is disabled or when no group
     or private chat is subscribed.
     """
     if not any(self.setting.get(s, True) for s in self.rss.keys()):
         return tuple()
     sub_groups = self.setting.get("notify_groups", [])
     sub_users = self.setting.get("notify_privates", [])
     if not (sub_groups or sub_users):
         return tuple()
     interval = self.setting.get("news_interval_minutes", 30)
     # Delay the first run by a minute so startup isn't blocked by fetching.
     first_run = datetime.datetime.now() + datetime.timedelta(seconds=60)
     trigger = IntervalTrigger(minutes=interval, start_date=first_run)
     return ((trigger, self.send_news_async), )
示例#21
0
def get_rescheduler():
    """Build a blocking scheduler that periodically recovers delayed executions."""
    timer = BlockingScheduler()

    rescheduling_trigger = IntervalTrigger(
        seconds=cfg.CONF.scheduler.rescheduling_interval,
        timezone=aps_utils.astimezone('UTC'))

    timer.add_job(recover_delayed_executions,
                  trigger=rescheduling_trigger,
                  max_instances=1,
                  misfire_grace_time=60,
                  # Kick off immediately instead of waiting one full interval.
                  next_run_time=date_utils.get_datetime_utc_now(),
                  replace_existing=True)

    return timer
示例#22
0
 def _db_to_job(self, row):
     """Rebuild an APScheduler ``Job`` instance from one jobs-table row."""
     trigger_type = row['trigger_type']
     if trigger_type == 'date':
         trigger = DateTrigger(run_date=row['run_date'])
     if trigger_type == 'cron':
         # "crontab" is stored as "<space-separated values>,<field keys>";
         # invert cronMap to translate each key back to its cron field name.
         parts = row['crontab'].split(',')
         field_keys = parts[1]
         field_values = parts[0].split(' ')
         name_by_key = {v: k for k, v in cronMap.items()}
         trigger = CronTrigger(**{name_by_key[k]: field_values[i]
                                  for i, k in enumerate(field_keys)})
     if trigger_type == 'interval':
         trigger = IntervalTrigger(seconds=row['interval'])
     # NOTE(review): an unrecognised trigger_type leaves ``trigger`` unbound
     # and raises NameError below -- confirm rows are always well-formed.
     job = Job.__new__(Job)
     job.__setstate__({
         'id': row['id'],
         'name': row['name'],
         'func': row['func'],
         'args': json.loads(row['args']) if row['args'] else [],
         'kwargs': json.loads(row['kwargs']) if row['kwargs'] else {},
         'version': 1,
         'trigger': trigger,
         'executor': row['executor'],
         'start_date': row['start_date'],
         'end_date': row['end_date'],
         'next_run_time': utc_timestamp_to_datetime(row['next_run_time'].timestamp()),
         'coalesce': row['coalesce'],
         'misfire_grace_time': row['misfire_grace_time'],
         'max_instances': row['max_instances'],
         'jobstore': self,
     })
     job._scheduler = self._scheduler
     job._jobstore_alias = self._alias
     return job
示例#23
0
def main():
    """Register this service with Eureka and keep its lease renewed forever."""
    args = parse_args()

    if args.debug:
        logger.setLevel(logging.DEBUG)

    scheme = 'https' if args.secure else 'http'
    status_url = '{}://{}:{}{}'.format(scheme, args.ip, args.port,
                                       args.status_path)
    health_url = '{}://{}:{}{}'.format(scheme, args.ip, args.port,
                                       args.health_path)
    eureka = EurekaClient(args.name,
                          args.port,
                          args.ip,
                          eureka_url=args.eureka,
                          instance_id=args.instance_id,
                          status_page_url=status_url,
                          health_check_url=health_url)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(eureka.register())
    logger.info('Registered with eureka as %s', eureka.instance_id)

    scheduler = AsyncIOScheduler({'event_loop': loop})

    # Renew the lease every args.interval seconds; if Eureka has forgotten
    # us (404) re-register instead of renewing.
    @scheduler.scheduled_job(IntervalTrigger(seconds=args.interval))
    async def renew_lease():
        try:
            logger.debug('Attempting to renew the lease...')
            await eureka.renew()
            logger.info('Lease renewed')
        except EurekaException as e:
            if e.status == HTTPStatus.NOT_FOUND:
                logger.info('Lease expired, re-registering.')
                await eureka.register()
                return
            logger.error('Error performing renewal: %s', e)

    scheduler.start()
    try:
        logger.info('Running')
        with contextlib.suppress(KeyboardInterrupt):
            loop.run_forever()
    finally:
        # Always deregister cleanly, even on Ctrl-C.
        scheduler.shutdown()
        loop.run_until_complete(eureka.deregister())
        loop.close()
示例#24
0
    def __init__(
        self,
        client: AsyncClient,
        store,
        room_id: str,
        reminder_text: str,
        start_time: Optional[datetime] = None,
        timezone: Optional[str] = None,
        recurse_timedelta: Optional[timedelta] = None,
        cron_tab: Optional[str] = None,
        target_user: Optional[str] = None,
        alarm: bool = False,
    ):
        """Create a reminder and schedule it immediately.

        Args:
            client: Matrix client the reminder will use when it fires.
            store: persistence backend kept for later use.
            room_id: room the reminder belongs to.
            reminder_text: text of the reminder.
            start_time: first (or only) fire time for date/interval triggers.
            timezone: timezone name passed through to the trigger.
            recurse_timedelta: if set, repeat at this interval.
            cron_tab: if set, use crontab semantics instead (takes priority
                over ``recurse_timedelta``).
            target_user: optional user this reminder targets (stored only).
            alarm: whether this reminder is an alarm (stored only).
        """
        self.client = client
        self.store = store
        self.room_id = room_id
        self.timezone = timezone
        self.start_time = start_time
        self.reminder_text = reminder_text
        self.cron_tab = cron_tab
        self.recurse_timedelta = recurse_timedelta
        self.target_user = target_user
        self.alarm = alarm

        # Schedule the reminder

        # Determine how the reminder is triggered
        if cron_tab:
            # Set up a cron trigger
            trigger = CronTrigger.from_crontab(cron_tab, timezone=timezone)
        elif recurse_timedelta:
            # Use an interval trigger (runs multiple times)
            trigger = IntervalTrigger(
                # timedelta.seconds does NOT give you the timedelta converted to seconds
                # Use a method from apscheduler instead
                seconds=int(timedelta_seconds(recurse_timedelta)),
                start_date=start_time,
                timezone=timezone,
            )
        else:
            # Use a date trigger (runs only once)
            trigger = DateTrigger(run_date=start_time, timezone=timezone)

        # Note down the job for later manipulation
        self.job = SCHEDULER.add_job(self._fire, trigger=trigger)

        # alarm_job is created elsewhere once the alarm actually starts.
        self.alarm_job = None
示例#25
0
def _run_forever(reader: Reader, loader: Loader, recognizer: Recognizer,
                 updater: Updater):
    """Wire up the URL -> image -> recognition pipeline and poll for new URLs."""
    url_queue, image_queue = Queue(300), Queue(300)
    # 15 downloader threads feed images; one recognizer thread consumes them.
    workers = [
        Thread(target=_load_images, args=(loader, url_queue, image_queue),
               daemon=True)
        for _ in range(15)
    ]
    workers.append(Thread(target=_recognize_images,
                          args=(recognizer, updater, image_queue),
                          daemon=True))
    for worker in workers:
        worker.start()
    scheduler = BlockingScheduler()
    # Poll the reader for fresh URLs every 15 seconds.
    scheduler.add_job(_read_urls, IntervalTrigger(seconds=15), (reader, url_queue))
    try:
        scheduler.start()
    except KeyboardInterrupt:
        # Drain the pipeline before exiting.
        scheduler.shutdown()
        url_queue.join()
        image_queue.join()
示例#26
0
    def test_compute_run_times(self):
        """get_run_times returns exactly the fire times that are due, in order."""
        first = self.RUNTIME + timedelta(seconds=1)
        second = self.RUNTIME + timedelta(seconds=2)
        self.job.trigger = IntervalTrigger(timedelta(seconds=1), self.RUNTIME)
        self.job.compute_next_run_time(first)
        eq_(self.job.next_run_time, first)

        # Before the first fire time nothing is due.
        eq_(self.job.get_run_times(self.RUNTIME), [])

        # Exactly at the first fire time, only that one is due.
        eq_(self.job.get_run_times(first), [first])

        # At the second fire time both are due.
        eq_(self.job.get_run_times(second), [first, second])
    def push_metrics(self, observer):
        """Collect metrics once, then keep polling Prometheus in the background."""
        self._get_metrics_from_prometheus(
            observer)  # Initial run for metric collection
        # Scheduler schedules a background job that needs to be run regularly
        scheduler = BackgroundScheduler()
        scheduler.start()
        scheduler.add_job(
            func=lambda: self._get_metrics_from_prometheus(
                observer
            ),  # Poll for new metric data every trigger_interval_secs seconds
            trigger=IntervalTrigger(seconds=self.trigger_interval_secs),
            id='update_metric_data',
            name='Ticker to collect new data from prometheus',
            replace_existing=True)

        atexit.register(lambda: scheduler.shutdown()
                        )  # Shut down the scheduler when exiting the app
示例#28
0
def init():
    """Initialise the timetable data cache and schedule daily refreshes.

    Fetches the data once up front so the app starts with content, then
    refreshes it in the background once per day.
    """
    scheduler.start()
    scheduler.add_job(
        refresh_data,
        IntervalTrigger(days=1),
        id="refresh_data_job",
        name="pull data from course academic timetable",
        replace_existing=True,
    )
    atexit.register(lambda: scheduler.shutdown())
    # Block until an initial data set is available.
    refresh_data()
示例#29
0
def verifydeck():
    """Complete a TweetDeck login with a verification code and start retweet jobs.

    Expects JSON ``{"deckid": ..., "code": ...}``.  Returns a JSON result
    whose ``status`` is 1 when verified, 0 when verification failed and -1
    for an unknown deck or a missing browser driver.
    """
    result = {}
    json_data = request.json
    deck = Deck.query.filter_by(id=json_data['deckid']).first()
    # Idiom fix: identity comparison with None instead of ``== None``.
    if deck is None:
        result['status'] = -1
        result['msg'] = 'Deck does not existed.'
    else:
        driver = getdriver(deck.name)
        if driver is not None:
            login_code = verify_tweetdeck(deck.id, driver,
                                          json_data['code'])['code']
            if login_code == 1:
                Deck.query.filter_by(id=json_data['deckid']).update(
                    {"login_code": login_code})
                # Schedule one recurring retweet job per linked profile.
                profiles = (db.session.query(
                    Profile.id, Profile.minutes,
                    Link.deckid).join(Link)).filter_by(deckid=deck.id).all()
                for profile in profiles:
                    scheduler.add_job(
                        func=auto_rt,
                        kwargs={
                            'profile_id': profile.id,
                            'driver': driver
                        },
                        trigger=IntervalTrigger(minutes=profile.minutes),
                        id='retweet_%s' % (profile.id),
                        replace_existing=False,
                        misfire_grace_time=app.config['MISFIRE_GRACE_TIME'])
                    Profile.query.filter_by(id=profile.id).update(
                        {"run_status": 1})
                db.session.commit()
                result['status'] = 1
                result['msg'] = 'Success Verify Deck Account'
            elif login_code == 0:
                result['status'] = 0
                result[
                    'msg'] = 'Cannot login to tweetdeck. Please check your password.'
            else:
                result['status'] = 0
                result['msg'] = 'Failed verification.'
        else:
            result['status'] = -1
            result['msg'] = 'Driver is not available.'
    return jsonify({'result': result})
示例#30
0
def init_commands(args):
    """Start background maintenance commands (skipped in web-only mode)."""
    if not args.only_web:
        # Periodic release/update check; interval (minutes) comes from config,
        # falling back to the config default when unset.
        upd_int = config.check_release_interval.value or config.check_release_interval.default
        upd_id = services.Scheduler.generic.add_command(meta_cmd.CheckUpdate(),
                                                        IntervalTrigger(minutes=upd_int))
        log.i("Initiating background", meta_cmd.CheckUpdate.__name__)
        services.Scheduler.generic.start_command(upd_id, push=True)

        # Size-bounded cleaner for the thumbnail cache directory.
        log.i("Initiating background thumbnail", io_cmd.CacheCleaner.__name__)
        thumb_id = services.TaskService.generic.add_command(io_cmd.CacheCleaner())
        constants.task_command.thumbnail_cleaner = services.TaskService.generic.start_command(
            thumb_id, constants.dir_thumbs, size=config.auto_thumb_clean_size.value, silent=True)

        # Size-bounded cleaner for the temp directory.
        log.i("Initiating background temp", io_cmd.CacheCleaner.__name__)
        temp_id = services.TaskService.generic.add_command(io_cmd.CacheCleaner())
        constants.task_command.temp_cleaner = services.TaskService.generic.start_command(
            temp_id, constants.dir_temp, size=config.auto_temp_clean_size.value, silent=True)
def init(frequency, debug):
    """Start a background scheduler that logs RabbitMQ queue metrics
    every ``frequency`` seconds."""
    logger.debug("Starting scheduler...")
    scheduler = BackgroundScheduler()
    scheduler.start()

    rabbitmq = RabbitMQ(os.getenv('RABBITMQ_SERVICE_NAME', 'rabbitmq'))

    scheduler.add_job(func=lambda: rabbitmq.log_metrics(),
                      trigger=IntervalTrigger(seconds=int(frequency)),
                      id='rabbit_metrics',
                      name='Print queue stats for all RabbitMQ queues.',
                      replace_existing=True)

    # APScheduler is chatty; outside debug mode silence everything below ERROR.
    if not debug:
        logging.getLogger('apscheduler').setLevel(logging.ERROR)

    atexit.register(lambda: scheduler.shutdown())
示例#32
0
def initialize_scheduler():
    """
    Start the scheduled background tasks. Because this method can be called
    multiple times, the old tasks will be first removed.
    """

    # BUG FIX: ``versioncheck`` is used below but was missing from this
    # local import, which would raise NameError when CHECK_GITHUB is enabled
    # (unless it also happens to be imported at module level).
    from headphones import updater, searcher, librarysync, postprocessor, \
        versioncheck, torrentfinished

    with SCHED_LOCK:
        # Remove all jobs first, because this method is also invoked when the
        # settings are saved.
        count = len(SCHED.get_jobs())

        if count > 0:
            logger.debug("Current number of background tasks: %d", count)
            SCHED.shutdown()
            SCHED.remove_all_jobs()

        # Regular jobs -- each is only scheduled when its interval is enabled.
        if CONFIG.UPDATE_DB_INTERVAL > 0:
            SCHED.add_job(
                updater.dbUpdate,
                trigger=IntervalTrigger(hours=CONFIG.UPDATE_DB_INTERVAL))
        if CONFIG.SEARCH_INTERVAL > 0:
            SCHED.add_job(
                searcher.searchforalbum,
                trigger=IntervalTrigger(minutes=CONFIG.SEARCH_INTERVAL))
        if CONFIG.LIBRARYSCAN_INTERVAL > 0:
            SCHED.add_job(
                librarysync.libraryScan,
                trigger=IntervalTrigger(hours=CONFIG.LIBRARYSCAN_INTERVAL))
        if CONFIG.DOWNLOAD_SCAN_INTERVAL > 0:
            SCHED.add_job(
                postprocessor.checkFolder,
                trigger=IntervalTrigger(minutes=CONFIG.DOWNLOAD_SCAN_INTERVAL))

        # Update check
        if CONFIG.CHECK_GITHUB:
            SCHED.add_job(
                versioncheck.checkGithub,
                trigger=IntervalTrigger(minutes=CONFIG.CHECK_GITHUB_INTERVAL))

        # Remove Torrent + data if Post Processed and finished Seeding
        if CONFIG.TORRENT_REMOVAL_INTERVAL > 0:
            SCHED.add_job(torrentfinished.checkTorrentFinished,
                          trigger=IntervalTrigger(
                              minutes=CONFIG.TORRENT_REMOVAL_INTERVAL))

        # Start scheduler
        logger.info("(Re-)Scheduled %d background tasks",
                    len(SCHED.get_jobs()))
        SCHED.start()
示例#33
0
 def test_no_start_date(self, timezone):
     """Without a start_date the first fire time is at most one interval away."""
     trigger = IntervalTrigger(seconds=2, timezone=timezone)
     now = datetime.now(timezone)
     assert (trigger.get_next_fire_time(None, now) - now) <= timedelta(seconds=2)