def test_repr(self, timezone, serializer):
    """An AndTrigger's repr survives a serialize/deserialize round trip intact."""
    base = datetime(2020, 5, 16, 14, 17, 30, 254212, tzinfo=timezone)
    children = [
        IntervalTrigger(seconds=4, start_time=base, timezone=timezone),
        IntervalTrigger(seconds=4, start_time=base + timedelta(seconds=2),
                        timezone=timezone),
    ]
    trigger = AndTrigger(children)
    if serializer:
        # Round-trip through the serializer under test before checking repr.
        trigger = serializer.deserialize(serializer.serialize(trigger))
    expected = (
        "AndTrigger([IntervalTrigger(seconds=4, "
        "start_time='2020-05-16 14:17:30.254212+02:00'), IntervalTrigger(seconds=4, "
        "start_time='2020-05-16 14:17:32.254212+02:00')], threshold=1.0, max_iterations=10000)"
    )
    assert repr(trigger) == expected
def __init__(self, ac: AppConfig, sender: MessageSender, scheduler: BaseScheduler,
             logger: Logger,
             casc_path: str = "/usr/local/share/OpenCV/haarcascades/"
                              "haarcascade_frontalface_default.xml"):
    """
    Constructor. Arguments are meant to be dependency injected.

    :param ac: app configuration
    :param sender: message sender module
    :param scheduler: Scheduler
    :param logger: logger
    :param casc_path: path to the OpenCV Haar cascade XML used for face
        detection; defaults to the previously hard-coded install location,
        so existing callers are unaffected
    """
    self.config = ac
    self.sender = sender
    self.scheduler = scheduler
    # Polling triggers built from the injected configuration.
    self.trigger = IntervalTrigger(seconds=self.config.interval)
    self.post_find_trigger = IntervalTrigger(
        seconds=self.config.post_find_interval)
    # Generalized: cascade location is now injectable (tests, non-default
    # OpenCV installs) instead of being hard-coded inside the body.
    self.face_casc = cv2.CascadeClassifier(casc_path)
    self.logger = logger
def schedule_temperature_binders():
    """Register the periodic channel-state and thermometer-polling jobs."""
    # Updates all the channels every 5 minutes to match their temperature settings.
    SCHEDULER.add_job(
        update_all_states,
        trigger=IntervalTrigger(minutes=5),
        id='update_channels_state_job',
        name='Update channel state every 5 minutes.',
        replace_existing=True
    )
    # Read temperature and humidity data from all sensors every 30 seconds.
    # BUGFIX: the job name previously carried a no-op `.format(min)` call —
    # the string has no placeholders and `min` is the builtin — so the call
    # only obscured the value; it is removed (resulting string is identical).
    SCHEDULER.add_job(
        fetch_thermometers_data,
        trigger=IntervalTrigger(seconds=30),
        id='fetch_thermometers_data_job',
        name='Update thermometers infos every 30 seconds.',
        replace_existing=True
    )
def MyScheduler():
    """Start a daemonized scheduler running `sensor` every 20 minutes (Asia/Shanghai)."""
    sched = BackgroundScheduler(daemon=True)
    every_20_min = IntervalTrigger(minutes=20,
                                   start_date='2019-4-24 08:00:00',
                                   end_date='2099-4-24 08:00:00',
                                   timezone='Asia/Shanghai')
    sched.add_job(sensor, trigger=every_20_min)
    sched.start()
    return ''
def start(self):
    """start to watch the data nodes"""
    logger.info('starting the watch-dog ...')
    # Poll node status every `hunger_time` seconds.
    poll = IntervalTrigger(seconds=self.hunger_time)
    self.scheduler.add_job(self.get_node_status, trigger=poll)
    # Run the scheduler loop off the calling thread.
    Thread(target=self.scheduler.start).start()
def MyScheduler():
    """Start a daemonized scheduler running `sensor` every 15 minutes (Asia/Shanghai)."""
    sched = BackgroundScheduler(daemon=True)
    every_15_min = IntervalTrigger(minutes=15,
                                   start_date='2019-4-24 08:00:00',
                                   end_date='2099-4-24 08:00:00',
                                   timezone='Asia/Shanghai')
    sched.add_job(sensor, trigger=every_15_min)
    sched.start()
    # Stop the scheduler cleanly when the interpreter exits.
    atexit.register(lambda: sched.shutdown())
def __init__(
    self,
    client: AsyncClient,
    store,
    room_id: str,
    reminder_text: str,
    start_time: Optional[datetime] = None,
    timezone: Optional[str] = None,
    recurse_timedelta: Optional[timedelta] = None,
    cron_tab: Optional[str] = None,
    target_user: Optional[str] = None,
    alarm: bool = False,
):
    """Create a reminder and register its firing job on the global SCHEDULER.

    Exactly one scheduling mode is selected, in priority order: a cron
    expression (``cron_tab``), a repeating interval (``recurse_timedelta``),
    or a one-shot fire at ``start_time``.

    :param client: Matrix client used when the reminder fires
    :param store: storage backend kept for later use by the reminder
    :param room_id: room this reminder belongs to
    :param reminder_text: message body sent when the reminder fires
    :param start_time: first (or only) fire time; required by the interval
        and one-shot modes — assumes callers guarantee it, TODO confirm
    :param timezone: timezone name used to localize ``start_time``
    :param recurse_timedelta: repeat period; enables the interval mode
    :param cron_tab: crontab expression; enables the cron mode
    :param target_user: user the reminder targets, if any
    :param alarm: whether this reminder should also raise an alarm
    """
    self.client = client
    self.store = store
    self.room_id = room_id
    self.timezone = timezone
    self.start_time = start_time
    self.reminder_text = reminder_text
    self.cron_tab = cron_tab
    self.recurse_timedelta = recurse_timedelta
    self.target_user = target_user
    self.alarm = alarm

    # Schedule the reminder

    # Determine how the reminder is triggered
    if cron_tab:
        # Set up a cron trigger
        trigger = CronTrigger.from_crontab(cron_tab, timezone=timezone)
    elif recurse_timedelta:
        # Use an interval trigger (runs multiple times)

        # If the start_time of this reminder was in daylight savings for this
        # timezone, and we are no longer in daylight savings, alter the
        # start_time by the appropriate offset.
        # TODO: Ideally this would be done dynamically instead of on reminder
        # construction
        tz = pytz.timezone(timezone)
        start_time = start_time.astimezone(tz)
        now = tz.localize(datetime.now())
        if start_time.dst() != now.dst():
            start_time += start_time.dst()

        trigger = IntervalTrigger(
            # timedelta.seconds does NOT give you the timedelta converted to
            # seconds. Use a method from apscheduler instead
            seconds=int(timedelta_seconds(recurse_timedelta)),
            start_date=start_time,
        )
    else:
        # Use a date trigger (runs only once)
        trigger = DateTrigger(run_date=start_time, timezone=timezone)

    # Note down the job for later manipulation
    self.job = SCHEDULER.add_job(self._fire, trigger=trigger)
    self.alarm_job = None
def init():
    """Start the background RSS fetcher: runs getFeed every 30 minutes."""
    sched = BackgroundScheduler()
    sched.start()
    sched.add_job(func=getFeed,
                  trigger=IntervalTrigger(minutes=30),
                  id='get_rss_fed',
                  name='Get Feed every 30 minutes',
                  replace_existing=True)
    # Stop the scheduler cleanly when the interpreter exits.
    atexit.register(lambda: sched.shutdown())
def reschedule_job(self, job_id, interval=60):
    """Re-arm *job_id* on a fresh interval (seconds).

    Returns the marshalled job, or None when the job id is unknown.
    """
    self._logger.info(f"reschedule_job({job_id})")
    try:
        job = self._scheduler.reschedule_job(
            job_id, trigger=IntervalTrigger(seconds=interval))
    except JobLookupError as jle:
        # Unknown job id: log it (without a traceback) and signal failure.
        self._logger.exception(jle, exc_info=False)
        return None
    return self._marshal_job(job)
def run():
    """
    Top-level function for radio controller
    """
    logger.info("Starting grpc server")
    config = get_config()

    # Background job: periodically forward service metrics to metricsd.
    scheduler = BackgroundScheduler()
    metricsd_client = get_metricsd_client()
    scheduler.add_job(
        process_metrics,
        args=[metricsd_client, config.SERVICE_HOSTNAME, "radio_controller"],
        trigger=IntervalTrigger(
            seconds=config.METRICS_PROCESSING_INTERVAL_SEC,
        ),
        # Never let two metrics-processing runs overlap.
        max_instances=1,
        name="metrics_processing_job",
    )
    scheduler.start()

    logger.info(f"grpc port is: {config.GRPC_PORT}")

    # Database engine and session manager shared by all gRPC services below.
    db_engine = create_engine(
        url=config.SQLALCHEMY_DB_URI,
        encoding=config.SQLALCHEMY_DB_ENCODING,
        echo=config.SQLALCHEMY_ECHO,
        future=config.SQLALCHEMY_FUTURE,
        pool_size=config.SQLALCHEMY_ENGINE_POOL_SIZE,
        max_overflow=config.SQLALCHEMY_ENGINE_MAX_OVERFLOW,
    )
    session_manager = SessionManager(db_engine)

    # Register the three servicers on one thread-pooled gRPC server.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    add_RadioControllerServicer_to_server(
        RadioControllerService(session_manager=session_manager), server,
    )
    add_ActiveModeControllerServicer_to_server(
        ActiveModeControllerService(session_manager=session_manager), server,
    )
    add_DPServiceServicer_to_server(
        DPService(
            session_manager=session_manager,
            now_func=datetime.now,
            fluentd_client=FluentdClient(),
        ),
        server,
    )
    server.add_insecure_port(f"[::]:{config.GRPC_PORT}")
    server.start()
    logger.info(f"GRPC Server started on port {config.GRPC_PORT}")

    def handle_sigterm(*_):
        # Graceful shutdown: stop accepting new RPCs, give in-flight ones up
        # to 30 s to drain, then wait up to another 30 s for completion.
        logger.info("Received shutdown signal")
        all_rpcs_done_event = server.stop(30)
        all_rpcs_done_event.wait(30)
        logger.info("Shut down gracefully")

    signal(SIGTERM, handle_sigterm)
    # Block the main thread until the server terminates.
    server.wait_for_termination()
def test_start_end_times_string(self, timezone, monkeypatch):
    """String start/end dates are parsed and localized to the local zone."""
    # Pin the local zone the trigger will use for naive datetimes.
    monkeypatch.setattr('apscheduler.triggers.interval.get_localzone',
                        Mock(return_value=timezone))
    trigger = IntervalTrigger(start_date='2016-11-05 05:06:53',
                              end_date='2017-11-05 05:11:32')
    expected_start = timezone.localize(datetime(2016, 11, 5, 5, 6, 53))
    expected_end = timezone.localize(datetime(2017, 11, 5, 5, 11, 32))
    assert trigger.start_date == expected_start
    assert trigger.end_date == expected_end
def start_stream_monitor(channelsjson, influxdbcfg): """Start monitoring the streams.""" # Cached channel data channels = {} for c in channelsjson.keys(): if "mpd_host" in channelsjson[c] and "mpd_port" in channelsjson[c]: channels[c] = channelsjson[c] channels[c]["client"] = None channels[c]["cache"] = ("Not connected to MPD yet.", 500) # Cached livestream data livestream = db.load_pickle({ 'active': False, 'current_dj': None, 'last_played': [], 'CHANNEL': 'cyberia', }) livestream['STREAM_DELAY'] = 7 # Update caches regularly bg_scheduler = BackgroundScheduler() bg_scheduler.start() playlist_update_counter = 0 influx_client = influxdb.InfluxDBClient(host=influxdbcfg["host"], port=influxdbcfg["port"], username=influxdbcfg["user"], password=influxdbcfg["pass"], database=influxdbcfg["db"]) def playlist_info_update_task(): nonlocal channels, livestream, playlist_update_counter, influx_client for channel in channels: if livestream['active'] and channel == livestream['CHANNEL']: playlist_update_counter = (playlist_update_counter + 1) % 5 if playlist_update_counter == 1: update_livestream_info(channels, livestream) else: continue else: update_mpd_info(channel, channels[channel], influx_client) bg_scheduler.add_job(func=playlist_info_update_task, trigger=IntervalTrigger(seconds=1), id='playlist_update', name='Update [channel].json\'s', replace_existing=True) # Shut down cleanly atexit.register(lambda: bg_scheduler.shutdown()) atexit.register(lambda: db.save_pickle(livestream)) # Return the state which will be mutated by the bg_scheduler return channels, livestream
def __radarr_update_task(self):
    """Register the periodic Radarr movie-list sync job (when Radarr is enabled)."""
    if not settings.general.getboolean('use_radarr'):
        return
    self.aps_scheduler.add_job(
        update_movies,
        IntervalTrigger(minutes=5),
        max_instances=1,
        coalesce=True,
        misfire_grace_time=15,
        id='update_movies',
        name='Update Movie list from Radarr',
    )
def _get_sunset_sunrise(self):
    """Fetch today's sun times for Leeds, schedule the dawn/sunrise/sunset/dusk
    jobs, and seed the current day factor from the time of day.
    """
    a = Astral()
    leeds = a['Leeds']
    today = datetime.date.today()
    self._today_sun_data = leeds.sun(date=today, local=True)
    self.timezone = leeds.timezone
    logging.info(pprint.pformat(self._today_sun_data))
    self.dawn = self._today_sun_data['dawn']
    self.sunrise = self._today_sun_data['sunrise']
    self.sunset = self._today_sun_data['sunset']
    self.dusk = self._today_sun_data['dusk']

    # One-shot cron triggers at each solar event ...
    at_dawn = _get_cron_trigger_for_datetime(self.dawn)
    at_sunrise = _get_cron_trigger_for_datetime(self.sunrise)
    at_sunset = _get_cron_trigger_for_datetime(self.sunset)
    at_dusk = _get_cron_trigger_for_datetime(self.dusk)
    # ... plus 5-second ticks over the dawn->sunrise and sunset->dusk windows.
    during_sunrise = IntervalTrigger(seconds=5, start_date=self.dawn,
                                     end_date=self.sunrise)
    during_sunset = IntervalTrigger(seconds=5, start_date=self.sunset,
                                    end_date=self.dusk)
    self._scheduler.add_job(func=self._at_dawn, trigger=at_dawn)
    self._scheduler.add_job(func=self._during_sunrise, trigger=during_sunrise)
    self._scheduler.add_job(func=self._at_sunrise, trigger=at_sunrise)
    self._scheduler.add_job(func=self._at_sunset, trigger=at_sunset)
    self._scheduler.add_job(func=self._during_sunset, trigger=during_sunset)
    self._scheduler.add_job(func=self._at_dusk, trigger=at_dusk)

    # Seed the day factor for "now": 0 before dawn, ramping up to 1 between
    # dawn and sunrise, 1 through the day, ramping back down toward dusk.
    now = datetime.datetime.now(tz)
    if now <= self.dawn:
        day_factor = 0.0
    elif self.dawn < now <= self.sunrise:
        day_factor = colour_helper.get_day_factor(self.dawn, now,
                                                  self.sunrise, True)
    elif self.sunrise < now <= self.sunset:
        day_factor = 1.0
    elif self.sunset < now <= self.dusk:
        day_factor = colour_helper.get_day_factor(self.sunset, now,
                                                  self.dusk, False)
    elif now < self.dusk:
        # NOTE(review): unreachable — every `now <= self.dusk` case is handled
        # above, so control after dusk always falls through to the 0.25 branch
        # below. Was `now > self.dusk` (-> 0.0) intended? Confirm with author.
        day_factor = 0.0
    else:
        day_factor = 0.25
    self._set_day_factor(day_factor)
def taskname2():
    # This is an example for the set up of a autonomous/prescheduled function
    def wrapper():
        return True

    sched = BackgroundScheduler()
    sched.start()
    # 5 * 3600 seconds == every five hours.
    sched.add_job(func=wrapper, trigger=IntervalTrigger(seconds=5 * 3600))
    return True
def schedule_scrapping():
    """Schedule scrapping the movies website every week"""
    scheduler = BackgroundScheduler()
    scheduler.start()
    # BUGFIX: the job id/name were copy-pasted from a "print every five
    # seconds" demo ('printing_job' / 'Print date and time every five
    # seconds') and mis-described this weekly scraping job; they now match
    # what the job actually does. (Scheduler is in-memory, so no persisted
    # job under the old id is orphaned.)
    scheduler.add_job(func=scrape_movies,
                      trigger=IntervalTrigger(days=7),
                      id='scrape_movies_job',
                      name='Scrape the movies website every week',
                      replace_existing=True)
    # Shut down the scheduler when exiting the app.
    atexit.register(lambda: scheduler.shutdown())
def initialize():
    """Schedule daily retraining of the models on a background scheduler."""
    sched = BackgroundScheduler()
    sched.start()
    sched.add_job(func=train_models,
                  trigger=IntervalTrigger(hours=24),
                  id='Training_Job',
                  name='Update models every 24 hours',
                  replace_existing=True)
    # Shut down the scheduler when exiting the app.
    atexit.register(lambda: sched.shutdown())
def start_routine(func):
    """Run *func* every five seconds in the background until the app exits."""
    sched = BackgroundScheduler()
    sched.add_job(func=func,
                  trigger=IntervalTrigger(seconds=5),
                  id='coletor_de_memoria',
                  name='Limpa os dados em memoria enviados pelo servidor',
                  replace_existing=True)
    sched.start()
    # Stop the scheduler cleanly when the interpreter exits.
    atexit.register(lambda: sched.shutdown())
def configure_scheduler():
    """ Configures and starts jobs that need to run on a schedule """
    if config.ENVIRONMENT != 'production':
        return

    # Shared token lets the scheduled jobs authenticate with the app.
    token = binascii.hexlify(os.urandom(32))
    redis_store.set('SCHEDULED_JOB_TOKEN', token)
    bot_log("Configuring scheduled jobs...")

    def _start_scheduled_job(func, trigger, job_id, name):
        # One BackgroundScheduler per job (mirrors the original layout),
        # shut down cleanly at interpreter exit.
        sched = BackgroundScheduler()
        sched.start()
        sched.add_job(func=func, trigger=trigger, id=job_id, name=name,
                      replace_existing=True)
        atexit.register(lambda: sched.shutdown())

    # Refactor: the scheduler/add_job/atexit boilerplate was triplicated;
    # it now lives in _start_scheduled_job above.
    from hootbot.jobs.daily_tip_job import daily_tip_job
    _start_scheduled_job(
        lambda: daily_tip_job(app, token),
        CronTrigger(day_of_week='mon-fri', hour='16', timezone='UTC'),
        'daily_tips_job',
        "Sends daily scheduled tips at 9am PST")

    from hootbot.jobs.user_objective_expiry_job import user_objective_expiry_job
    _start_scheduled_job(
        lambda: user_objective_expiry_job(app, token),
        IntervalTrigger(hours=1, timezone="UTC"),
        'user_objective_expiry_job',
        "Periodically clears stale user_objective entries")

    from hootbot.jobs.admin_token_expiry_job import admin_token_expiry_job
    _start_scheduled_job(
        lambda: admin_token_expiry_job(app, token),
        IntervalTrigger(hours=8, timezone="UTC"),
        'admin_token_expiry_job',
        "Periodically clears stale admin auth tokens")
def schedule_update_job():
    """Register Bazarr's self-update job and the release-info refresh job."""
    update_name = ('Update Bazarr from source on Github'
                   if not args.release_update
                   else 'Update Bazarr from release on Github')
    if not args.no_update:
        if settings.general.getboolean('auto_update'):
            # Auto-update enabled: check and apply updates every 6 hours.
            scheduler.add_job(check_and_apply_update, IntervalTrigger(hours=6),
                              max_instances=1, coalesce=True,
                              misfire_grace_time=15, id='update_bazarr',
                              name=update_name, replace_existing=True)
        else:
            # Auto-update disabled: keep the job visible but effectively
            # dormant via year='2100'.
            # BUGFIX: `hour=4` was previously passed to add_job(), where
            # apscheduler silently ignores trigger kwargs when an explicit
            # trigger instance is supplied; it belongs inside the CronTrigger.
            scheduler.add_job(check_and_apply_update,
                              CronTrigger(year='2100', hour=4),
                              id='update_bazarr', name=update_name,
                              replace_existing=True)
    # The release-info job runs in every configuration; it was duplicated
    # verbatim in both branches of the original.
    scheduler.add_job(check_releases, IntervalTrigger(hours=6),
                      max_instances=1, coalesce=True, misfire_grace_time=15,
                      id='update_release', name='Update Release Info',
                      replace_existing=True)
def cron_job():
    """Start a background scheduler that periodically runs print_time."""
    sched = BackgroundScheduler()
    sched.start()
    # NOTE(review): the interval (150000 s, ~41.7 h) contradicts the job name
    # ("every 2 seconds") — one of the two looks stale; confirm intent.
    sched.add_job(func=print_time,
                  trigger=IntervalTrigger(seconds=150000),
                  id='printing_time_job',
                  name='Print time every 2 seconds',
                  replace_existing=True)
    # Shut down the scheduler when exiting the app.
    atexit.register(lambda: sched.shutdown())
def _add_proc(self, procname, interval):
    """Create or update the watchdog job for *procname*, firing every
    *interval* minutes, and persist the change to the YAML config.
    """
    # IDs for the current jobs are equal to the procname
    if procname in [j.id for j in self.thread_scheduler.get_jobs()]:
        # It already exists: just change its cadence.
        self.thread_scheduler.modify_job(
            job_id=procname, trigger=IntervalTrigger(minutes=interval))
        # BUGFIX: the original compared each config entry (a dict such as
        # {procname: interval}) against the procname *string*
        # (`proc == procname`), which is never true, so the stored interval
        # was never updated. Match on the dict's key instead.
        for entry in self.yaml_config:
            if procname in entry:
                entry[procname] = interval
    else:
        # New process: register the job and record it in the config.
        self.thread_scheduler.add_job(
            lambda: self._check_for_processes(procname),
            id=procname,
            trigger=IntervalTrigger(minutes=interval))
        self.yaml_config.append({procname: interval})
    self._save_config()
    self._update_processes()
def __init__(self, fluentd_utils, method, poll_interval, args):
    """Base scheduler: invoke *method* every *poll_interval* seconds.

    :param fluentd_utils: fluentd helper kept for use by subclasses
    :param method: callable to run periodically
    :param poll_interval: polling cadence, in seconds
    :param args: positional arguments forwarded to *method*
    """
    # Make apscheduler's own messages visible at INFO level.
    logging.basicConfig()
    logging.getLogger('apscheduler').setLevel(logging.INFO)
    self.fluentd_utils = fluentd_utils
    self.message_dumper = MessageDumper()
    self.scheduler = BackgroundScheduler()
    self.scheduler.add_job(method, IntervalTrigger(seconds=poll_interval),
                           args=args)
def parse_period_seconds(self, policy):
    """Parse a periodicity spec such as '30', '30s', '5m', '2h' or '1d'
    into a number of seconds.

    :raises Exception: when the spec is empty or malformed.
    """
    match = re.compile("^([0-9]*)([smhd])?$").match(policy)
    # A malformed spec (no regex match) or a missing number is rejected with
    # the same descriptive error (the original crashed with AttributeError
    # on non-matching input).
    if match is None or not match.group(1):
        raise Exception(
            "The periodicity of your task is not well defined.")
    units = {None: 1, 's': 1, 'm': 60, 'h': 3600, 'd': 86400}
    return int(match.group(1)) * units[match.group(2)]

def _getTrigger(self, policy):
    """Build an IntervalTrigger from a periodicity spec.

    BUGFIX: the original regex captured the s/m/h/d unit suffix but then
    ignored it, so '5m' meant 5 *seconds*; the unit is now honored.
    """
    return IntervalTrigger(seconds=self.parse_period_seconds(policy))
def scheduler():
    """Refresh the local CSV from the repo once a minute in the background."""
    sched = BackgroundScheduler()
    sched.start()
    sched.add_job(func=update_csv_from_repo,
                  trigger=IntervalTrigger(minutes=1),
                  id='update_csv_from_repo',
                  replace_existing=True)
    # Shut down the scheduler when exiting the app.
    atexit.register(lambda: sched.shutdown())
def test_jitter_produces_different_valid_results(self, timezone):
    """With jitter=3 on a 5 s interval, fire times vary but stay within ±3 s."""
    trigger = IntervalTrigger(seconds=5, timezone=timezone, jitter=3)
    now = datetime.now(timezone)
    seen = set()
    for _ in range(100):
        fire_time = trigger.get_next_fire_time(None, now)
        seen.add(fire_time)
        # 5 s interval +/- 3 s jitter => always within [2 s, 8 s] of now.
        assert timedelta(seconds=2) <= (fire_time - now) <= timedelta(seconds=8)
    # Jitter must actually produce more than one distinct fire time.
    assert 1 < len(seen)
def _init(self):
    """Start the scheduler and register one dispatch job per active detection."""
    self.scheduler.start()
    for detection in Detection.objects.filter(is_active=True):
        self.scheduler.add_job(
            dispatch,
            IntervalTrigger(minutes=int(detection.rate),
                            timezone=self.timezone),
            id=str(detection.id),
            args=(detection.type, detection.addr, detection.extra),
        )
def initialize():
    """Start a background job refreshing coin USD prices every 100 seconds."""
    sched = BackgroundScheduler()
    sched.start()
    sched.add_job(func=get_coin_data,
                  trigger=IntervalTrigger(seconds=100),
                  id='printing_job',
                  name='Get current USD price of all coins in list',
                  replace_existing=True)
    # Shutdown the scheduler when the app exits.
    atexit.register(lambda: sched.shutdown())
def configure_schedule():
    """Start a background job that prints the date/time every ten seconds."""
    scheduler = BackgroundScheduler()
    scheduler.start()
    scheduler.add_job(func=print_date_time,
                      trigger=IntervalTrigger(seconds=10),
                      id='printing_job',
                      # BUGFIX: the job name claimed "every five seconds"
                      # while the trigger fires every ten; the name now
                      # matches the actual cadence.
                      name='Print date and time every ten seconds',
                      replace_existing=True)
    # Shut down the scheduler when exiting the app.
    atexit.register(lambda: scheduler.shutdown())
def initialize():
    """Register the battle-report evaluation job on a background scheduler."""
    sched = BackgroundScheduler()
    sched.start()
    # NOTE(review): 216000 s is 2.5 days; if a 6-hour cadence was intended
    # this should be 21600 — confirm with the author.
    sched.add_job(func=evaluate,
                  trigger=IntervalTrigger(seconds=216000),
                  id='eval_job',
                  name='Evaluate battle report',
                  replace_existing=True)
    # Shut down the scheduler when exiting the app.
    atexit.register(lambda: sched.shutdown())