def get_scheduler(): """ :rtype scheduler.Scheduler """ from .utils import TaskWrapper schedule_list = getattr(settings, 'ROBUST_SCHEDULE', None) if not schedule_list: raise RuntimeError("can't run beat with empty schedule") scheduler = Scheduler() for interval, task in schedule_list: task_cls = import_string(task) if not isinstance(task_cls, type) or not issubclass( task_cls, TaskWrapper): raise RuntimeError('{} is not decorated with @task'.format(task)) if isinstance(interval, datetime.timedelta): # noinspection PyUnresolvedReferences scheduler.every(int(interval.total_seconds())) \ .seconds.do(schedule_task, task, task_cls.tags) else: interval(scheduler).do(schedule_task, task, task_cls.tags) return scheduler
class World:

    def __init__(self, cursor):
        self.cursor = cursor

    def run(self, posting_interval, request_interval, coefs):
        self.request_generator = Request_generator(request_interval, self.cursor)
        self.scheduler = Scheduler(posting_interval, coefs, self.cursor)

        app = QApplication(sys.argv)
        self.viewer = Viewer(self.cursor, posting_interval, request_interval, coefs)

        self.request_generator.new_request_signal.connect(self.viewer.on_request)
        self.request_generator.new_request_signal.connect(self.scheduler.on_request)
        self.scheduler.new_post_signal.connect(self.viewer.on_post)
        self.viewer.posting_state_signal.connect(self.scheduler.on_posting_state)
        self.viewer.requesting_state_signal.connect(self.request_generator.on_requesting_state)
        self.viewer.posting_interval_signal.connect(self.scheduler.on_interval_change)
        self.viewer.requesting_interval_signal.connect(self.request_generator.on_interval_change)
        self.viewer.change_coef_signal.connect(self.scheduler.on_coef_change)

        self.request_generator.start()
        self.scheduler.start()

        sys.exit(app.exec_())
def __init__(self):
    Scheduler.__init__(self)
    fc = app.test_client()  # Flask client emulator
    # Schedule uses the local timezone, which has been set to CST.
    self.every().friday.at("16:10").do(fc.get, '/newdoc')
    self.every().sunday.at("07:27").do(fc.get, '/updateweather')
    self.every().wednesday.at("07:27").do(fc.get, '/updateweather')
def start_scheduler():
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    flag = GlobalFlag.objects.get(name="SchedulerStarted")
    if flag is not None:
        if not flag.active:
            scheduler = Scheduler()
            if scheduler is not None:
                print("start_scheduler called at " + dt_string)
                scheduler.every(1).minutes.do(pulse_interlocks)
                scheduler.every().day.at("22:00").do(daily_validate_transactions)
                scheduler.every(15).minutes.do(print_status)
                # end_run = scheduler.run_continuously()
                flag.active = True
                flag.save()
            else:
                print("There was a problem creating the Scheduler")
        else:
            print("The scheduler has already been started")
    else:
        print("No global flag named SchedulerStarted has been found")
def _run_job(self, job):
    try:
        Scheduler._run_job(self, job)
    except Exception:
        logging.exception("Exception:")
        job.last_run = datetime.now()
        job._schedule_next_run()
def run_continuously():
    scheduler = Scheduler()
    for task in settings.CELERYBEAT_SCHEDULE.values():
        apply_async = TaskWorker.resolve_callable(task['task']).apply_async
        total_seconds = task['schedule'].total_seconds()
        scheduler.every(total_seconds).seconds.do(apply_async)
    return scheduler.run_continuously()
def __init__(self, db_engine, max_workers=100):
    # Mark if background jobs already running
    self.working = False
    self.administration = {}
    self.status = {}
    for site in site_helper:
        self.administration[site] = True
        self.status[site] = False
    self.db_engine = db_engine
    # Period Schedule
    self.timer = None
    self.scheduler = Scheduler()
    # Query necessary accounts for checkin jobs (Flow Control)
    self.commander = ThreadPoolExecutor(max_workers=2 * len(site_helper) + 2)
    # ThreadPool for checkin jobs running
    self.executor = ThreadPoolExecutor(max_workers=max_workers)
    # Exclude the thread for handle_process_queue and handle_result_queue
    self.batch = max_workers / len(site_helper)
    self.process_queue = Queue.Queue()
    self.result_queue = Queue.Queue()
def _run_job(self, job):
    try:
        Scheduler._run_job(self, job)
    except Exception:
        print(format_exc())
        job.last_run = datetime.datetime.now()
        job._schedule_next_run()
def __init__(self, reschedule_on_failure=True):
    """
    If reschedule_on_failure is True, jobs will be rescheduled
    for their next run as if they had completed successfully.
    If False, they'll run on the next run_pending() tick.
    """
    self.reschedule_on_failure = reschedule_on_failure
    Scheduler.__init__(self)
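# A minimal sketch (not taken from any of the snippets above) of how the
# _run_job / reschedule_on_failure fragments are usually combined into a
# "safe" scheduler. The class name SafeScheduler and the logging calls are
# assumptions; only schedule.Scheduler, Job.last_run and Job._schedule_next_run
# come from the schedule library itself.
import logging
from datetime import datetime

from schedule import Scheduler


class SafeScheduler(Scheduler):
    """Scheduler that keeps running when a job raises an exception."""

    def __init__(self, reschedule_on_failure=True):
        self.reschedule_on_failure = reschedule_on_failure
        super().__init__()

    def _run_job(self, job):
        try:
            super()._run_job(job)
        except Exception:
            logging.exception("Scheduled job failed:")
            if self.reschedule_on_failure:
                # Pretend the job ran so it is queued for its next regular slot
                # instead of being retried on the very next run_pending() tick.
                job.last_run = datetime.now()
                job._schedule_next_run()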
def __init__(self, id, containers=[], data_persistence=None,
             scheduler=Scheduler(name='schedule.local.DefaultApplianceScheduler'),
             *args, **kwargs):
    self.__id = id
    self.__containers = list(containers)
    self.__data_persistence = data_persistence \
        if not data_persistence or isinstance(data_persistence, DataPersistence) \
        else DataPersistence(**data_persistence)
    self.__scheduler = scheduler if isinstance(scheduler, Scheduler) else Scheduler(**scheduler)
def __init__(self):
    self.schedule = Scheduler()
    self.cease_continuous_run = self.run_continously()
    self.stored_menu = {}
    self.updateMenu()
    # Only refresh the menu on weekdays (Monday=0 .. Friday=4).
    self.schedule.every().day.at('11:00').do(
        lambda: self.updateMenu() if datetime.datetime.today().weekday() < 5 else False)
def run(self): """Execute the game loop """ pygame.init() screen = Screen(self.window_size) clock = pygame.time.Clock() world = World() scheduler = Scheduler() graphics_system = GraphicsSystem(world, screen) load_assets(graphics_system) tile_system = TileSystem(world, 5) mouse_system = MouseSystem(world) animation_system = AnimationSystem(world) light_system = LightSystem(world) fear_system = FearSystem(world) clock.tick(self.fps) playing = [True] def end_game(): playing[0] = False create_title_screen(world, scheduler, end_game) while playing[0]: for event in pygame.event.get(): if event.type == pygame.QUIT: playing[0] = False elif event.type == pygame.MOUSEBUTTONDOWN: mouse_system.on_mouse_down( event.pos, to_mouse_button(event.button) ) elif event.type == pygame.MOUSEMOTION: mouse_system.on_mouse_motion(event.pos) clock.tick(self.fps) time_elapsed = float(clock.get_time()) / 1000.0 scheduler.update(time_elapsed) animation_system.update(time_elapsed) tile_system.update_tile_positions() light_system.update() fear_system.update() graphics_system.draw_entities() pygame.display.set_caption( "The Family's Treasure Tale --- " + str(clock.get_fps())) pygame.quit()
def __init__(self, broker=None):
    self.schedule = Scheduler()
    self.triggers = {}
    self.pollers = []
    self.introspect()
    self.setup_pollers()
    self.broker = broker
    if self.broker is not None:
        self.username = self.broker.username
        self.messages = self.broker.messages
def run(self): """Execute the game loop """ pygame.init() screen = Screen(self.window_size) clock = pygame.time.Clock() world = World() scheduler = Scheduler() graphics_system = GraphicsSystem(world, screen) load_assets(graphics_system) tile_system = TileSystem(world, 5) mouse_system = MouseSystem(world) animation_system = AnimationSystem(world) light_system = LightSystem(world) fear_system = FearSystem(world) clock.tick(self.fps) playing = [True] def end_game(): playing[0] = False create_title_screen(world, scheduler, end_game) while playing[0]: for event in pygame.event.get(): if event.type == pygame.QUIT: playing[0] = False elif event.type == pygame.MOUSEBUTTONDOWN: mouse_system.on_mouse_down(event.pos, to_mouse_button(event.button)) elif event.type == pygame.MOUSEMOTION: mouse_system.on_mouse_motion(event.pos) clock.tick(self.fps) time_elapsed = float(clock.get_time()) / 1000.0 scheduler.update(time_elapsed) animation_system.update(time_elapsed) tile_system.update_tile_positions() light_system.update() fear_system.update() graphics_system.draw_entities() pygame.display.set_caption("The Family's Treasure Tale --- " + str(clock.get_fps())) pygame.quit()
def init_schedule(self, scheduler: schedule.Scheduler) -> tuple:
    return (
        scheduler.every(30).minutes.do(
            self.unique_task_queue.push,
            self._check_db,
            priority=TaskPriorities.LOW,
        ),
        scheduler.every(2).hours.do(
            self.unique_task_queue.push,
            Signal.backup,
            priority=TaskPriorities.LOW,
        ),
    )
def __init__(self):
    Scheduler.__init__(self)
    fc = app.test_client()  # Flask client emulator
    from Bot import Chatbot
    cb = Chatbot()
    # Schedule uses the local timezone, which has been set to CST.
    self.every().friday.at("16:10").do(fc.get, '/newdoc')
    self.every().sunday.at("07:27").do(fc.get, '/updateweather')
    j = self.every().tuesday.at("09:00").do(startd, cb.run, cb.quit)
    if week.DaysTo('last Tuesday') == 0:
        j.run()
    self.every().tuesday.at("22:00").do(cb.quit)
    self.every().wednesday.at("07:27").do(fc.get, '/updateweather')
def __init__(self): Thread.__init__(self, name="Updater") self.logger = logging.getLogger(self.getName()) print("Thread started {}: {}".format(self.__class__, "Updater")) self.communication_queue = deque(tuple(), 512) self.scheduler = Scheduler() self.scheduler.every(12).hours.do(self.go) # self.scheduler.every(30).minutes.do(self.upload_log) self.stopper = Event() self.sshkey = SSHManager() self.identifiers = set() self.temp_identifiers = set() self.setupmqtt()
def __init__(self, db_engine, smtp_configs):
    self.scheduler = Scheduler()
    self.timer = None
    self.db_engine = db_engine
    self.pool = MailSMTPPool()
    for server, user, password in smtp_configs:
        self.pool.add_resource(server, user, password)
    self.administration = False
    self.monitor = {0: False, 1: False}
    self.waiting = 0
    self.executor = ThreadPoolExecutor(max_workers=2)
class Server:

    def __init__(self):
        self.scheduler = Scheduler()
        self.semaphore = threading.Semaphore()
        self.con = taps_control.TapsControl()
        self.read_config()

    def read_config(self):
        config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.json')
        with open(config_path, 'r') as f:
            data = json.load(f)
        for job in data.values():
            self.handle_job(job)

    def handle_job(self, job):
        when = job['when']
        con = self.con
        if when == 'daily':
            logging.info('Queued a daily job on channel ' + str(job['channel']))
            self.scheduler.every().day.at(job['start']).do(
                job_func, int(job['channel']), int(job['duration']), con)
        elif when == 'every':
            self.scheduler.every(int(job['interval'])).seconds.do(
                job_func, int(job['channel']), int(job['duration']), con)
        elif when == "other":
            self.scheduler.every(2).days.do(
                job_func, int(job['channel']), int(job['duration']), con)

    def run(self):
        while True:
            self.scheduler.run_pending()
class LocalController(Thread):

    def __init__(self):
        Thread.__init__(self, name='Local Timer')
        self.__stop = Event()
        self.__days, self.__start_time = parse_config()
        self.__scheduler = Scheduler()

    def stop(self):
        if not self.__stop.is_set():
            self.__stop.set()
            self.join()

    def next_run(self):
        return self.__scheduler.next_run

    def __run_cycle(self):
        state.run_zone_action((ZoneAction.RUN_CYCLE, 0))

    def __schedule_job(self):
        self.__scheduler.clear()
        if in_production():
            for day in self.__days:
                job = Job(1, self.__scheduler)
                job.start_day = day.name.lower()
                job.unit = 'weeks'
                job.at(self.__start_time.strftime("%H:%M")).do(self.__run_cycle)
        else:
            self.__scheduler.every(3).minutes.do(self.__run_cycle)
        logging.info('Next run scheduled for {0}.'.format(self.__scheduler.next_run))

    def control_mode_changed(self):
        mode = state.active_controller_mode()
        if mode is not ControllerMode.LOCAL:
            self.__scheduler.clear()
        elif mode is ControllerMode.LOCAL:
            self.__schedule_job()

    def run(self):
        logging.info('Local cycle run controller started.')
        self.__schedule_job()
        while not self.__stop.is_set():
            if state.active_controller_mode() is ControllerMode.LOCAL:
                self.__scheduler.run_pending()
            sleep(1)
        self.__scheduler.clear()
        logging.info('Local cycle run controller stopped.')
def schedule_updates(self) -> threading.Event:
    scheduler = Scheduler()
    scheduler.every().day.at('04:30').do(self.update_all)
    cease_run = threading.Event()

    class ScheduleThread(threading.Thread):
        def run(self) -> None:
            while not cease_run.is_set():
                scheduler.run_pending()
                time.sleep(1)

    schedule_thread = ScheduleThread()
    schedule_thread.start()
    return cease_run
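# A self-contained sketch of the same stop-via-Event pattern as
# schedule_updates() above, written as a free function. The names
# run_continuously and _loop are illustrative, not from the original source.
import threading
import time

from schedule import Scheduler


def run_continuously(scheduler: Scheduler, interval: float = 1.0) -> threading.Event:
    """Run scheduler.run_pending() on a daemon thread until the returned Event is set."""
    cease_run = threading.Event()

    def _loop() -> None:
        while not cease_run.is_set():
            scheduler.run_pending()
            time.sleep(interval)

    threading.Thread(target=_loop, daemon=True).start()
    return cease_run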
def __init__(self):
    self.scheduler = Scheduler()
    self.funcs_time_attrs = []
    self.timer_type_map = {
        's': 'seconds',
        'm': 'minutes',
        'h': 'hours',
        'd': 'days',
        'w': 'weeks',
        'mon': 'monday',
        'tue': 'tuesday',
        'wed': 'wednesday',
        'thu': 'thursday',
        'fri': 'friday',
        'sat': 'saturday',
        'sun': 'sunday'
    }
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    for key, value in kwargs.items():
        setattr(self, key, value)
    self.loop.create_task(self.run_scheduler())
    self._guild = kwargs['DISCORD_GUILD']
    self._scheduler = Scheduler()
async def _pull(self) -> None:
    def _callback() -> None:
        loop = asyncio.get_event_loop()
        if loop.is_running():
            asyncio.ensure_future(self._run_schedule())

    self._scheduler = Scheduler()
    self._configure_scheduler(self._scheduler, _callback)

    if self._instant_run:
        self._scheduler.run_all()

    while not self.stopped:
        self._scheduler.run_pending()
        await self._sleep(0.5)

    while self._is_running:
        # Keep the loop alive until the job is finished
        await asyncio.sleep(0.1)
def init_schedule(self, scheduler: schedule.Scheduler) -> tuple:
    return (
        scheduler.every(10).seconds.do(
            self.unique_task_queue.push,
            self._save_cpu_temperature,
            priority=TaskPriorities.LOW,
        ),
        scheduler.every(5).minutes.do(
            self.unique_task_queue.push,
            self._save_weather_data,
            priority=TaskPriorities.LOW,
        ),
        scheduler.every(10).minutes.do(
            self.unique_task_queue.push,
            self._save_ram_usage,
            priority=TaskPriorities.LOW,
        ),
    )
def create_schedule(script):
    def wrap_job(func):
        @functools.wraps(func)
        def wrapper():
            logger.info('Running %s', func.__name__)
            func(script, None, None)
        return wrapper

    schedule = Scheduler()

    # hourly jobs
    schedule.every(55).to(65).minutes.do(wrap_job(run_merge_missing_mbids))
    schedule.every(55).to(65).minutes.do(wrap_job(run_update_lookup_stats))

    # daily jobs
    schedule.every(23).to(25).hours.do(wrap_job(run_update_stats))
    schedule.every(23).to(25).hours.do(wrap_job(run_update_user_agent_stats))
    schedule.every(23).to(25).hours.do(wrap_job(run_cleanup_perf_stats))

    return schedule
def schedule_every_monday_at(process, str_time, run_at_start=True):
    scheduler1 = Scheduler()
    scheduler1.every().monday.at(str_time).do(process)
    if run_at_start:
        # Run the job now
        scheduler1.run_all()
    while True:
        scheduler1.run_pending()
        time.sleep(1)
class Cron:
    pattern = re.compile(r'every (\d+ )?(\w+)(?: at (\d\d:\d\d))?$')

    def __init__(self, app=None):
        self.app = None
        self.scheduler = Scheduler()
        self.stopped = True
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        app.extensions['cron'] = self
        app.cli.add_command(Command('cron', callback=self.run))

    def task(self, when):
        def decorator(func):
            match = self.pattern.match(when)
            interval = match.group(1)
            if interval is not None:
                job = self.scheduler.every(int(interval))
            else:
                job = self.scheduler.every()
            getattr(job, match.group(2))
            time_str = match.group(3)
            if time_str is not None:
                job.at(time_str)
            job.do(func)
            return func
        return decorator

    def run(self):
        self.app.logger.info('Starting cron')
        self.stopped = False
        signal(SIGINT, self.stop)
        signal(SIGTERM, self.stop)
        while not self.stopped:
            self.scheduler.run_pending()
            sleep(self.scheduler.idle_seconds)
        self.app.logger.info('Terminating cron')

    def stop(self, signo=None, frame=None):
        self.stopped = True
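# A hypothetical usage sketch for the Cron extension above; the Flask app and
# the two job functions are assumptions, not from the original source. The
# string passed to cron.task() must match the pattern "every [N] <unit> [at HH:MM]".
from flask import Flask

app = Flask(__name__)
cron = Cron(app)


@cron.task('every 10 minutes')
def refresh_cache():
    app.logger.info('cache refreshed')


@cron.task('every day at 03:00')
def nightly_report():
    app.logger.info('nightly report generated')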
class ChatBot(object):

    def __init__(self, broker=None):
        self.schedule = Scheduler()
        self.triggers = {}
        self.pollers = []
        self.introspect()
        self.setup_pollers()
        self.broker = broker
        if self.broker is not None:
            self.username = self.broker.username
            self.messages = self.broker.messages

    def on_message(self, iteration_nbr, message):
        self.iteration_nbr = iteration_nbr
        text = message['text'].lower()
        for trigger in self.triggers:
            if trigger in text:
                response = self.triggers[trigger]()
                if response is not None:
                    self.on_posted(self.broker.post(response)['message'])
                return response

    def on_posted(self, message):
        """Called with broker response to just posted message"""
        return

    def setup_pollers(self):
        for poller in self.pollers:
            self.schedule.every().minute.do(poller)

    def run_pending(self):
        self.schedule.run_pending()

    def introspect(self):
        for name, method in inspect.getmembers(self, predicate=inspect.ismethod):
            if name.startswith('on_'):
                if getattr(method, 'is_trigger', False) is True:
                    event_name = name[3:]
                    self.triggers[event_name] = method
            if getattr(method, 'every_minute', False) is True:
                self.pollers.append(method)
def _build_scheduler(self):
    # Possibilities:
    #
    # - Scheduler().every(10).minutes.do(job)
    # - Scheduler().every(5).to(10).days.do(job)
    # - Scheduler().every().hour.do(job, message='things')
    # - Scheduler().every().day.at("10:30").do(job)
    sched = Scheduler().every().day.at("00:00").do(lambda x: x)
    return sched
def create_schedule(script):
    # type: (Script) -> Scheduler

    def wrap_job(func):
        # type: (Callable[[Script, Any, Any], None]) -> Callable[[], None]
        @functools.wraps(func)
        def wrapper():
            logger.info('Running %s', func.__name__)
            func(script, None, None)
        return wrapper

    schedule = Scheduler()

    # schedule.every().minute.do(wrap_job(run_backfill_meta_created))
    schedule.every(3).to(9).minutes.do(wrap_job(run_update_lookup_stats))
    # schedule.every(55).to(65).minutes.do(wrap_job(run_merge_missing_mbids))
    schedule.every(15).to(30).minutes.do(wrap_job(run_cleanup_perf_stats))
    schedule.every().day.at("00:10").do(wrap_job(run_update_stats))
    schedule.every().day.at("00:10").do(wrap_job(run_update_user_agent_stats))

    return schedule
def __init__(self): Thread.__init__(self, name="Updater") self.logger = logging.getLogger(self.getName()) self.communication_queue = deque(tuple(), 512) self.scheduler = Scheduler() self.scheduler.every(60).seconds.do(self.go) # self.scheduler.every(30).minutes.do(self.upload_log) self.stopper = Event() self.sshkey = SSHManager() self.identifiers = set() self.temp_identifiers = set()
def get_scheduler(): """ :rtype scheduler.Scheduler """ from .utils import TaskWrapper schedule_list = getattr(settings, 'ROBUST_SCHEDULE', None) if not schedule_list: raise RuntimeError("can't run beat with empty schedule") scheduler = Scheduler() for interval, task in schedule_list: task_cls = import_string(task) if not isinstance(task_cls, type) or not issubclass(task_cls, TaskWrapper): raise RuntimeError('{} is not decorated with @task'.format(task)) # noinspection PyUnresolvedReferences scheduler.every(int(interval.total_seconds())) \ .seconds.do(schedule_task, task, task_cls.tags) return scheduler
def thread_task():
    schedule = Scheduler()
    schedule.every(30).minutes.do(task)
    while True:
        schedule.run_pending()
        time.sleep(1)
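# A hypothetical launch sketch for thread_task() above (the thread name and
# daemon flag are assumptions): the polling loop never returns, so it is
# normally started on a background thread rather than called directly.
import threading

worker = threading.Thread(target=thread_task, name='scheduler-worker', daemon=True)
worker.start()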
def run(self, essid, connect):
    self._schedule = Scheduler()
    self._connect = connect
    self._alive = True
    self._timeout_job = None

    self._ap = network.WLAN(network.AP_IF)
    self._ap.active(True)
    self._ap.config(essid=essid)  # You can't set values before calling active(...).

    poller = select.poll()
    addr = self._ap.ifconfig()[0]
    slim_server = self._create_slim_server(poller, essid)
    dns = self._create_dns(poller, addr)
    _logger.info("captive portal web server and DNS started on %s", addr)

    # If no timeout is given `ipoll` blocks and the for-loop goes forever.
    # With a timeout the for-loop exits every time the timeout expires,
    # i.e. the underlying iterable reports that it has no more elements.
    while self._alive:
        # Under the covers polling is done with a non-blocking ioctl call and the timeout
        # (or blocking forever) is implemented with a hard loop, so there's nothing to be
        # gained (e.g. reduced power consumption) by using a timeout greater than 0.
        for (s, event) in poller.ipoll(0):
            # If event has bits other than POLLIN or POLLOUT then print it.
            if event & ~(select.POLLIN | select.POLLOUT):
                self._print_select_event(event)
            slim_server.pump(s, event)
            dns.pump(s, event)
        slim_server.pump_expire()  # Expire inactive client sockets.
        self._schedule.run_pending()

    slim_server.shutdown(poller)
    dns.shutdown(poller)
    self._ap.active(False)
self.plugin_repo = Repo(
    cfg['module']['login'],
    cfg['module']['repo'],
    cfg['module']['branch'],
    self.credentials
)

credentials = Credentials(username, password)
model = Model(config_login, config_repo, config_branch, config_filename, credentials)
cfg = json.loads(model.config.data)
plugins = cfg['plugins'].keys()
args = cfg['plugins'].values()
scheduler = Scheduler()

for name, arg in zip(plugins, args):
    data_filename = os.path.join(model.uuid, name)
    data_file = Datastore(model.data_repo, data_filename)
    plug_filename = name + ".py"
    plug_file = Datastore(model.plugin_repo, plug_filename)
    plug = Plugin(plug_file, data_file, name)

    # Scheduled for running according to some interval
    schedule_string = arg.get('schedule', None)
    if schedule_string is not None:
        exec "scheduler.%s.do(plug.run, arg)" % schedule_string

    # Always run immediately
    plug.run(arg)
class Updater(Thread):
    def __init__(self):
        Thread.__init__(self, name="Updater")
        self.logger = logging.getLogger(self.getName())
        self.communication_queue = deque(tuple(), 512)
        self.scheduler = Scheduler()
        self.scheduler.every(60).seconds.do(self.go)
        # self.scheduler.every(30).minutes.do(self.upload_log)
        self.stopper = Event()
        self.sshkey = SSHManager()
        self.identifiers = set()
        self.temp_identifiers = set()

    def upload_logs(self):
        """
        uploads rotated logs to the server.

        :return:
        """
        isonow = SysUtil.get_isonow()
        validation_msg = isonow + "," + self.sshkey.sign_message(isonow)
        logs_fp = SysUtil.get_log_files()
        files = {l: open(l, 'rb') for l in logs_fp}
        a = requests.post("https://{}/raspberrypi{}/logs",
                          data={"sig_msg": isonow, "signature": validation_msg},
                          files=files)
        # clear log files if 200 returned
        if a.status_code == 200:
            SysUtil.clear_files(logs_fp)

    def add_to_identifiers(self, identifier: str):
        """
        adds an identifier to the set of identifiers.

        :param identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of permanent identifiers.".format(identifier))
        self.identifiers.add(identifier)

    def add_to_temp_identifiers(self, temp_identifier: str):
        """
        adds an identifier to the set of temporary identifiers that may disappear.

        :param temp_identifier: identifier to add
        :return:
        """
        self.logger.debug("Adding {} to list of transient identifiers.".format(temp_identifier))
        self.temp_identifiers.add(temp_identifier)

    def go(self):
        try:
            data = self.gather_data()
            data["signature"] = self.sshkey.sign_message(json.dumps(data, sort_keys=True))
            uri = 'https://{}/api/camera/check-in/{}'.format(remote_server, SysUtil.get_machineid())
            response = requests.post(uri, json=data)
            # do backwards change if response is valid later.
            try:
                if response.status_code == 200:
                    # do config modify/parse of command here.
                    data = response.json()
                    for key, value in data.copy().items():
                        if value == {}:
                            del data[str(key)]
                    if len(data) > 0:
                        self.set_config_data(data)
                else:
                    self.logger.error("Unable to authenticate with the server.")
            except Exception as e:
                self.logger.error("Error getting data from config/status server: {}".format(str(e)))
        except Exception as e:
            self.logger.error("Error collecting data to post to server: {}".format(str(e)))

    def set_config_data(self, data: dict):
        for identifier, update_data in data.items():
            # don't rewrite empty update data
            if not len(update_data):
                continue
            if identifier == "meta":
                hostname = update_data.get("hostname", None)
                if hostname:
                    SysUtil.set_hostname(hostname)
                if update_data.get("update", False):
                    SysUtil.update_from_git()
            config = SysUtil.ensure_config(identifier)
            sections = set(config.sections()).intersection(set(update_data.keys()))
            for section in sections:
                update_section = update_data[section]
                options = set(config.options(section)).intersection(set(update_section.keys()))
                for option in options:
                    config.set(section, option, str(update_section[option]))
            SysUtil.write_config(config, identifier)

    def set_yaml_data(self, data):
        pass

    def process_deque(self, cameras=None):
        if not cameras:
            cameras = dict()
        while len(self.communication_queue):
            item = self.communication_queue.pop()
            c = cameras.get(item['identifier'], None)
            if not c:
                cameras[item['identifier']] = item
                continue
            if item.get("last_capture", 0) > c.get("last_capture", 0):
                cameras[item['identifier']].update(item)
            if item.get("last_upload", 0) > c.get("last_upload", 0):
                cameras[item['identifier']].update(item)
        return cameras

    def gather_data(self):
        free_mb, total_mb = SysUtil.get_fs_space_mb()
        onion_address, cookie_auth, cookie_client = SysUtil.get_tor_host()
        cameras = SysUtil.configs_from_identifiers(self.identifiers | self.temp_identifiers)
        self.logger.debug("Announcing for {}".format(str(list(self.identifiers | self.temp_identifiers))))
        camera_data = dict(
            meta=dict(
                version=SysUtil.get_version(),
                machine=SysUtil.get_machineid(),
                internal_ip=SysUtil.get_internal_ip(),
                external_ip=SysUtil.get_external_ip(),
                hostname=SysUtil.get_hostname(),
                onion_address=onion_address,
                client_cookie=cookie_auth,
                onion_cookie_client=cookie_client,
                free_space_mb=free_mb,
                total_space_mb=total_mb
            ),
            cameras=self.process_deque(cameras=cameras),
        )
        return camera_data

    def stop(self):
        self.stopper.set()

    def run(self):
        while True and not self.stopper.is_set():
            self.scheduler.run_pending()
            time.sleep(1)