def send_reminder_mail(sender, **kwargs):
    """Signal handler: queue a one-off reminder mail for the saved instance.

    Fires `send_reminder_mail_job` on the instance's start date (midnight
    of that day) via a freshly started scheduler.
    """
    instance = kwargs['instance']
    scheduler = Scheduler()
    fire_on = instance.start_date.date()
    scheduler.add_date_job(send_reminder_mail_job, fire_on, [instance])
    scheduler.start()
def main():
    """Set up one-shot report jobs (Friday 6pm / 1st of month 10am) and keep
    the process alive long enough for the scheduler to run them."""
    sched = Scheduler()
    sched.start()
    c_date = datetime.today()
    date_ = '2013-11-30'
    delay = timedelta(minutes=2)
    all_portfolios = combine_lists(PROGRESS_PORTFOLIOS, FIS_GROUP_PORTFOLIOS)
    # end of week jobs
    #-----------------
    # BUG FIX: weekday is a method; the original wrote `c_date.weekday == 4`,
    # which compares the bound method object to 4 and is always False, so the
    # Friday job never ran.
    if c_date.weekday() == 4:
        # runs at 6pm Friday evening
        sched_date = datetime(c_date.year, c_date.month, c_date.day, 18, 0, 0)
        sched.add_date_job(axys_job, sched_date,
                           [MODEL_PORTFOLIOS, all_portfolios, date_])
    # monthly jobs
    #-------------
    if c_date.day == 1:
        # runs at 10 am
        sched_date = datetime(c_date.year, c_date.month, c_date.day, 10, 0, 0)
        sched.add_date_job(axys_job, sched_date,
                           [MODEL_PORTFOLIOS, all_portfolios, date_])
        sched_date = sched_date + delay
    # keep script 'running' in order to allow the scheduler to stay open and
    # run the jobs added
    time.sleep(60)
    sched.shutdown()
def scheduleNotification(username, password, receivers, subject, message, attachments, timestring):
    """Queue a one-off email for delivery at the time given by *timestring*.

    *timestring* format: YYYY-MM-DDTHH:MM. *receivers* is a comma-separated
    string of addresses.
    """
    logging.basicConfig()
    scheduler = Scheduler()
    scheduler.start()
    send_at = datetime.datetime.strptime(timestring, "%Y-%m-%dT%H:%M")
    recipient_list = receivers.split(",")
    scheduler.add_date_job(
        emailUser, send_at,
        [username, password, recipient_list, subject, message, attachments])
    # Ensure the background scheduler thread is stopped at interpreter exit.
    atexit.register(lambda: scheduler.shutdown(wait=False))
def pre_eva_start(self, conf):
    """Build an APScheduler from the 'scheduler.jobs' config section and start it.

    Supports three job types: 'date' (one-off at a datetime string),
    'interval' (repeating), and 'cron' (cron-style fields). Each job is
    invoked with [conf] as its arguments. Plugin hooks fire before and
    after loading.
    """
    # Give plugins a chance to inject jobs before the config is read.
    self.invoke('pre_scheduler_load_jobs')
    sched = APScheduler()
    conf['scheduler']['scheduler'] = sched
    for job_name, job in conf['scheduler']['jobs'].items():
        job_type = job.get('type')
        if job_type == 'date':
            # 'datetime' is a datetime string, e.g. '2012-11-06 14:25:10.8880'
            sched.add_date_job(job['func'], job['datetime'], args=[conf])
        elif job_type == 'interval':
            spec = job['interval']
            sched.add_interval_job(job['func'],
                                   seconds=spec.get('seconds', 0),
                                   minutes=spec.get('minutes', 0),
                                   hours=spec.get('hours', 0),
                                   days=spec.get('days', 0),
                                   weeks=spec.get('weeks', 0),
                                   start_date=spec.get('start_date'),
                                   args=[conf])
        elif job_type == 'cron':
            # NOTE: cron fields live under the 'interval' key in the config.
            spec = job['interval']
            sched.add_cron_job(job['func'],
                               second=spec.get('second'),
                               minute=spec.get('minute'),
                               hour=spec.get('hour'),
                               day=spec.get('day'),
                               week=spec.get('week'),
                               month=spec.get('month'),
                               year=spec.get('year'),
                               day_of_week=spec.get('day_of_week'),
                               args=[conf])
    sched.start()
    self.invoke('post_scheduler_load_jobs')
def add_job(self, command, hour, minute, sec=0):
    """Schedule *command* to run once today at hour:minute:sec, persisted in
    a shelve job store so it survives restarts."""
    logger.info("2. scheduler adding job command: %s at %s:%s:%s" % (
        command, hour, minute, sec))
    sched = Scheduler(standalone=True)
    # Make sure the db file exists for the shelve job store.
    # BUG FIX: the original opened the shelve and never closed it, leaking
    # the file handle; close it immediately after creation.
    db = shelve.open(os.path.join(os.path.dirname(__file__), 'example.db'))
    db.close()
    sched.add_jobstore(ShelveJobStore('example.db'), 'shelve')
    # BUG FIX: call date.today() once — the original called it three times,
    # which could straddle midnight and produce an inconsistent date.
    today = date.today()
    exec_time = datetime(today.year, today.month, today.day,
                         int(hour), int(minute), int(sec))
    #test
    #exec_time = datetime.now() + timedelta(seconds=5)
    sched.add_date_job(job, exec_time, name='alarm', jobstore='shelve',
                       args=[command])
    sched.start()
class SimpleScheduler:
    """Thin wrapper around an APScheduler Scheduler that tracks jobs by name."""

    def __init__(self):
        self._sched = Scheduler()
        self._sched.start()
        self._jobs = {}  # job name -> job object

    def schedule(self, job):
        """Register *job* under its cron schedule.

        Returns True on success, False if the name is taken or the schedule
        is invalid.
        """
        if job.name in self._jobs:
            logger.warn("Already have job with name: %s" % job.name)
            return False
        try:
            self._sched.add_cron_job(job._execute_and_store, **job.schedule)
        except TypeError:
            logger.error("Invalid schedule for job with name: %s" % job.name +
                         " schedule: %s" % job.schedule)
            # BUG FIX: a job with an invalid schedule used to be recorded in
            # self._jobs anyway and reported as success; reject it instead.
            return False
        self._jobs[job.name] = job
        return True

    def schedules(self):
        """Return a mapping of job name -> schedule for all registered jobs."""
        return {job.name: job.schedule for job in self._jobs.values()}

    def execute(self, name):
        """Run the named job once, one second from now."""
        return self._sched.add_date_job(self._jobs[name]._execute_and_store,
                                        datetime.now() + timedelta(seconds=1))
def add_items(request):
    """Django view: index a new auction item and schedule its auto-sale
    five seconds later. Returns an HttpResponse on every path."""
    seller = request.user
    if seller:
        if request.method == "POST":
            item_name = request.POST['name']
            #find way to pass null to image_url
            item = {
                "item_name": item_name,
                "seller": seller,
                "status": "Available",
                "date_added": datetime.now(),
                "min_bid": request.POST['min_bid'],
            }
            res = add_doc(index='item', type=Item, id=item_name, doc=item)
            from apscheduler.scheduler import Scheduler
            sched = Scheduler()
            sched.start()
            exec_time = datetime.now() + timedelta(seconds=5)
            job1 = sched.add_date_job(sell_items, exec_time, [item_name])
            return HttpResponse("Added Item: {}".format(item_name), status=200)
        # BUG FIX: the original fell through and returned None (an error in
        # Django) for non-POST requests from a logged-in user.
        return HttpResponse("Only POST is supported", status=405)
    else:
        return HttpResponse("Please log in")  #redirect to login
def set_config_deffered(request, ip, config):
    """Apply an SNMP config to *ip* after a 5-second delay.

    Returns a Dajax JSON payload that alerts the user and stops the
    page's loading indicator.
    """
    dajax = Dajax()
    scheduler = Scheduler()
    scheduler.start()
    run_at = datetime.now() + timedelta(seconds=5)
    job = scheduler.add_date_job(snmp_config.set_config, run_at, [ip, config])
    dajax.alert("Config set deferred for 5 seconds")
    dajax.script("stopLoading();")
    return dajax.json()
class Main(Daemon):
    """Daemon that schedules a daily Taobao check-in at a random time
    inside a configured time window."""

    def __init__(self, pidfile, cfgfile):
        # Parse the YAML config file and precompute the check-in window.
        Daemon.__init__(self, pidfile)
        self.jobs = {}              # job config keyed by section name ('taobao')
        self.immediately = False    # run the check-in once at startup if True
        self.scheduler = Scheduler(daemonic=False)
        self.logger = logging.getLogger(self.__class__.__name__)
        if os.path.exists(cfgfile):
            with open(cfgfile, 'rt') as f:
                # NOTE(review): yaml.load without a Loader is unsafe on
                # untrusted input — presumably the config file is trusted.
                config = yaml.load(f.read())
                for k1 in config.keys():
                    if k1 == 'version':
                        pass
                    if k1 == 'immediately':
                        self.immediately = config[k1]
                    elif k1 == 'taobao':
                        self.jobs[k1] = config[k1]
                        self.jobs[k1]['id'] = None
                        if 'chktime' in self.jobs[k1].keys():
                            # 'chktime' is "HH:MM-HH:MM"; split into window
                            # begin/end struct_times.
                            self.jobs[k1]['btime'] = time.strptime(self.jobs[k1]['chktime'].split('-')[0], '%H:%M')
                            self.jobs[k1]['etime'] = time.strptime(self.jobs[k1]['chktime'].split('-')[1], '%H:%M')
                            if self.jobs[k1]['btime'] >= self.jobs[k1]['etime']:
                                raise ValueError('"chktime" is illegal')
                        else:
                            raise ValueError('There is no "chktime" be found in configure.')
                    else:
                        pass
        else:
            self.logger.error('{0} not found'.format(cfgfile))

    def job_main(self):
        # Pick a random moment inside today's [btime, etime) window and
        # schedule the check-in there; roll over to tomorrow if it already passed.
        st_beg = self.jobs['taobao']['btime']
        st_end = self.jobs['taobao']['etime']
        dt_beg = datetime.now().replace(hour=st_beg.tm_hour, minute=st_beg.tm_min)
        dt_end = datetime.now().replace(hour=st_end.tm_hour, minute=st_end.tm_min)
        td_rnd = dt_end - dt_beg
        dt_rnd = dt_beg + timedelta(seconds=randint(1, td_rnd.days * 86400 + td_rnd.seconds - 1))
        if dt_rnd <= datetime.now():
            dt_rnd += timedelta(days=1)
        self.jobs['taobao']['id'] = self.scheduler.add_date_job(lambda: self.job_taobao(), dt_rnd)

    def job_taobao(self):
        # Log in and check in with every configured account.
        for v in self.jobs['taobao']['account']:
            taobao = Taobao(v['username'], v['password'])
            if taobao.login():
                taobao.checkin()

    def run(self):
        # Daemon entry point: optionally check in immediately, then schedule
        # job_main daily at 00:01 and block until stopevent is set.
        if self.immediately:
            self.job_taobao()
            self.immediately = False
        self.scheduler.add_cron_job(lambda: self.job_main(), hour='0', minute='1')
        self.scheduler.start()
        stopevent.wait()
        self.scheduler.shutdown()
def schedule_false(request):
    """Schedule auto_false(request) to run five minutes from now."""
    # Start the scheduler
    sched = Scheduler()
    sched.start()
    # Fire the job 5 minutes from now (the original comment claimed 1 minute).
    now = datetime.now()
    mini = timedelta(minutes=5)
    jobi_date = now + mini
    # BUG FIX: add_date_job takes the job's positional arguments as a
    # sequence; the original passed the bare request object, which the
    # scheduler would try to unpack as an argument list.
    job = sched.add_date_job(auto_false, jobi_date, [request])
class TimeScheduler:
    """Process-wide singleton facade over an APScheduler Scheduler."""
    instance = None

    def __init__(self):
        '''
        '''

    @staticmethod
    def getInstance():
        # Lazily create the shared singleton instance.
        if TimeScheduler.instance is None:
            TimeScheduler.instance = TimeScheduler()
        return TimeScheduler.instance

    def init(self, threadpool=None):
        """Create the underlying scheduler, optionally with a custom thread pool."""
        if threadpool is None:
            self.sched = Scheduler({'apscheduler.threadpool.core_threads': 1,
                                    'apscheduler.threadpool.max_threads': 1,
                                    'apscheduler.threadpool.keepalive': 1})
        else:
            self.sched = Scheduler({'apscheduler.threadpool': threadpool})
        self.sched.daemonic = False

    def registerCronExp(self, handler, year=None, month=None, day=None,
                        hour=None, minute=None, second=None, start_date=None):
        """Cron-style registration without week/day-of-week fields."""
        # BUG FIX: the caller's start_date used to be silently dropped
        # (a literal None was passed in its position).
        return self.sched.add_cron_job(handler.execute, year, month, day,
                                       None, None, hour, minute, second,
                                       start_date)

    def registerCron(self, handler, year=None, month=None, day=None, week=None,
                     day_of_week=None, hour=None, minute=None, second=None,
                     start_date=None):
        """Full cron-style registration of handler.execute."""
        # BUG FIX: the original passed a literal None for every keyword
        # (year=None, month=None, ...), discarding all caller arguments so
        # the requested schedule was never applied.
        return self.sched.add_cron_job(handler.execute, year=year, month=month,
                                       day=day, week=week,
                                       day_of_week=day_of_week, hour=hour,
                                       minute=minute, second=second,
                                       start_date=start_date)

    '''
    register interval task
    '''
    def registerInterval(self, handler, weeks=0, days=0, hours=0, minutes=0,
                         seconds=0, start_date=None):
        return self.sched.add_interval_job(handler.execute, weeks, days, hours,
                                           minutes, seconds, start_date)

    def registerDate(self, handler, date):
        """Register a one-off task running handler.execute at *date*."""
        return self.sched.add_date_job(handler.execute, date)

    def unregister(self, job):
        """Remove a previously scheduled job."""
        self.sched.unschedule_job(job)

    def start(self):
        self.sched.start()
) except: System.out.println(" error in connecting to mysql") # Start the scheduler sched = Scheduler() sched.start() current_year = datetime.datetime.today().year current_month = datetime.now().month for x in range(current_month,13): exec_date = date(y, x, 1) # Store the job in a variable in case we want to cancel it job = sched.add_date_job(mailUsers, exec_date) # The job will be executed given date x,year y and on 1st of he month job = sched.add_date_job(mailUsers, datetime(y, x,1, 0, 0 , 0)) for y in range(current_year+1,year+10): for x in range(1,13): exec_date = date(y, x, 1) # Store the job in a variable in case we want to cancel it job = sched.add_date_job(mailUsers, exec_date) # The job will be executed given date x,year y and on 1st of he month job = sched.add_date_job(mailUsers, datetime(y, x,1, 0, 0 , 0))
#UPDATE
schedule_job(sched, update_frasi, 600, '2014-09-19 22:00:00', args=[''])
schedule_job(sched, update_sensors, 300, '2014-09-19 22:00:00', args=[''])
schedule_job(sched, update_weather, 3000, '2014-09-19 22:00:00', args=[''])
schedule_job(sched, update_forecast, 86400, '2014-09-20 8:00:00', args=[''])

#PERIODIC
# BUG FIX: five of these timestamps read '8:10::00' (double colon), which is
# not a parseable time string and would make the scheduler reject the jobs.
schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['realtime'])
schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['history'])
schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['weather'])
schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['biometeo'])
schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['engagement'])
schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10:00', args=['proverbial'])

#FIXED
job_I1 = sched.add_date_job(send_message, '2014-09-20 8:01:00', args=['identity', 'I1'])
job_I2 = sched.add_date_job(send_message, '2014-09-20 9:01:00', args=['identity', 'I2'])
# BUG FIX: the I3 job was assigned to job_I2, clobbering the I2 job handle.
job_I3 = sched.add_date_job(send_message, '2014-09-20 11:00:00', args=['identity', 'I3'])
job_aqualta = sched.add_date_job(send_message, '2014-09-20 9:05:00', args=['acqualta', 'AQ1'])

# Keep the main thread alive so the scheduler can fire the jobs.
# BUG FIX: `while True: pass` busy-spun at 100% CPU; sleep instead.
import time
while True:
    time.sleep(60)

##############################################################
#
class EventScheduler():
    logging.basicConfig()
    """Class to scheduler regular events in a similar manner to cron."""
    __mysql_url = 'mysql+pymysql://powermonitor:%s@localhost/powermonitor' \
                  % str(base64.b64decode(bytes('cDB3M3JtMG4xdDBy')))
    '''This determines the number of seconds after the designated run time that the job is still allowed to be
    run. If jobs are not being run, try increasing this in increments of 1.'''
    __GRACE_PERIOD = 31536000  # Amazing grace! Time in seconds before the job is considered misfired. Currently a year
    __COALESCE = True  # Force the job to only run once instead of retrying multiple times
    '''If there is a problem with thread concurrency, play around with these values. You'd think with all these
    threads in the pool that the filter would get clogged up!'''
    __threadpool_corethreads = 0  # Maximum number of persistent threads in the pool
    __threadpool_maxthreads = 20  # Maximum number of total threads in the pool
    __threadpool_keepalive = 1  # Seconds to keep non-core worker threads in the pool

    def __init__(self, start=True):
        """Create the scheduler backed by an SQLAlchemy job store; start it
        immediately unless start=False."""
        try:
            config = {
                'apscheduler.daemon': True,
                'apscheduler.standalone': False,
                'apscheduler.threadpool.core_threads': self.__threadpool_corethreads,
                'apscheduler.threadpool.max_threads': self.__threadpool_maxthreads,
                'apscheduler.threadpool.keepalive': self.__threadpool_keepalive,
                'apscheduler.coalesce': self.__COALESCE
            }
            self.__sched = Scheduler(config)
            # Add the SQLAlchemy job store as the default.
            self.__sched.add_jobstore(
                SQLAlchemyJobStore(url=self.__mysql_url, tablename='SCHEDULE'),
                'default')
            atexit.register(lambda: self.__sched.shutdown(wait=False)
                            )  # Stop the scheduler when the program exits
            if start:
                self.__sched.start()
        except KeyError:
            logging.warning('An error occurred starting the scheduler.')

    def start_scheduler(self):
        self.__sched.start()

    def add_cron_event(self, func, name, year=None, month=None, week=None,
                       day=None, day_of_week=None, hour=None, minute=None,
                       second=None, start_date=None, *args, **kwargs):
        """Add a cron like event to the schedule. Each job must be given a name
        in case it needs to be removed. The following expressions can be used
        in each field:
        Expression Field Description
        *          any   Fire on every value
        */a        any   Fire on every 'a' values, starting from the minimum
        a-b        any   Fire on any value in the 'a-b' range (a must be smaller than b
        a-b/c      any   Fire every 'c' values within the 'a-b' range
        xth y      day   Fire on the x -th occurrence of weekday y within the month
        last x     day   Fire on the last occurrence of weekday 'x' within the month
        last       day   Fire on the last day within the month
        x,y,z      any   Fire on any matching expression; can combine any number of any of the above expressions
        If you want to add **options to the event, use kwargs (keyword arguments dictionary)"""
        if self.__sched is not None:
            event_exists = False
            if self.__find_event(name) is not None:
                event_exists = True
            if not event_exists:
                self.__sched.add_cron_job(
                    func=func, name=name, year=year, month=month, day=day,
                    week=week, day_of_week=day_of_week, hour=hour,
                    minute=minute, second=second, start_date=start_date,
                    args=args, kwargs=kwargs,
                    misfire_grace_time=self.__GRACE_PERIOD)
                logging.info('New cron event added')
            else:
                # Every event needs a unique name so we can keep track of it.
                logging.warning('add_cron_event: Event already exists')
                raise EventExistsError('A job with name %s already exists' % name)
        else:
            raise SchedulerNotFoundError(
                'add_cron_event: Scheduler does not exist. It may have not started.'
            )

    def __find_event(self, event_name):
        """Return the job named *event_name*, or None when no job matches."""
        if self.__sched is not None:
            events = self.__sched.get_jobs()
            for event in events:
                if event.name == event_name:
                    return event
            else:
                return None
        else:
            logging.warning(
                '__find_event: Scheduler does not exist. It may have not started.'
            )
            raise SchedulerNotFoundError(
                'Scheduler does not exist. It may have not started.')

    def add_onceoff_event(self, func, name, date, args=None):
        """Add a once off event to the schedule. The job is executed once at
        the specified date and time. Date/time format: YYYY-MM-DD HH:MM:SS"""
        if self.__sched is not None:
            try:
                if args is None:  # If there are no arguments to be passed to the function
                    self.__sched.add_date_job(
                        func=func, name=name, date=date,
                        misfire_grace_time=self.__GRACE_PERIOD)
                else:  # If there are arguments to be passed to the function
                    # BUG FIX: the original wrote 'arge=args' — an unknown
                    # keyword, so the arguments were never forwarded to the job.
                    self.__sched.add_date_job(
                        func=func, name=name, date=date, args=args,
                        misfire_grace_time=self.__GRACE_PERIOD)
            except ValueError:
                # If the event is in the past, it will not run. This program is
                # not capable of manipulating space and time.
                raise EventWontRunError(
                    'The event will not run: Event time has expired.')
            logging.info('New once off event added')
        else:
            logging.warning(
                'add_onceoff_event: Scheduler does not exist. It may have not started.'
            )
            raise SchedulerNotFoundError(
                'Scheduler does not exist. It may have not started.')

    def remove_event(self, event_name):
        """Remove the event 'event_name' from the schedule."""
        if self.__sched is not None:
            removed = False
            event = self.__find_event(event_name=event_name)
            if event is not None:  # If the event exists, remove it
                self.__sched.unschedule_job(event)
                removed = True
            if not removed:
                # Raise an error so that it can be handled correctly.
                logging.warning('remove_event: Event not found for removal.')
                raise EventNotFoundError('Event not found for removal: %s'
                                         % event_name)
        else:
            raise SchedulerNotFoundError(
                'remove_event: Scheduler does not exist. It may have not started.'
            )

    def get_jobs(self):
        """Get the list of events currently in the job store."""
        if self.__sched is not None:
            return self.__sched.get_jobs()
        else:
            raise SchedulerNotFoundError(
                'get_events: Scheduler does not exist. It may have not started.'
            )

    def get_job_names(self):
        """
        Get the names of all the jobs in the job store
        :return: list
        """
        jobs = self.get_jobs()
        job_list = []
        if jobs:
            for job in jobs:
                job_list.append(job.name)
        return job_list

    def get_scheduler(self):
        """Returns the Scheduler object. Rather add functionality to this class
        than call this method."""
        if self.__sched is not None:
            return self.__sched
        else:
            raise SchedulerNotFoundError(
                'get_scheduler: Scheduler does not exist. It may have not started.'
            )
class EventScheduler():
    """Class to scheduler regular events in a similar manner to cron."""
    __mysql_url = 'mysql+pymysql://powermonitor:%s@localhost/powermonitor' \
                  % str(base64.b64decode(bytes('cDB3M3JtMG4xdDBy')))
    '''This determines the number of seconds after the designated run time that the job is still allowed to be
    run. If jobs are not being run, try increasing this in increments of 1.'''
    __GRACE_PERIOD = 31536000  # Amazing grace! Time in seconds before the job is considered misfired. Currently a year
    __COALESCE = True  # Force the job to only run once instead of retrying multiple times
    '''If there is a problem with thread concurrency, play around with these values. You'd think with all these
    threads in the pool that the filter would get clogged up!'''
    __threadpool_corethreads = 0  # Maximum number of persistent threads in the pool
    __threadpool_maxthreads = 20  # Maximum number of total threads in the pool
    __threadpool_keepalive = 1  # Seconds to keep non-core worker threads in the pool

    def __init__(self, start=True):
        """Create the scheduler backed by an SQLAlchemy job store; start it
        immediately unless start=False."""
        try:
            config = {'apscheduler.daemon': True, 'apscheduler.standalone': False,
                      'apscheduler.threadpool.core_threads': self.__threadpool_corethreads,
                      'apscheduler.threadpool.max_threads': self.__threadpool_maxthreads,
                      'apscheduler.threadpool.keepalive': self.__threadpool_keepalive,
                      'apscheduler.coalesce': self.__COALESCE}
            self.__sched = Scheduler(config)
            # Add the SQLAlchemy job store as the default.
            self.__sched.add_jobstore(SQLAlchemyJobStore(url=self.__mysql_url, tablename='SCHEDULE'), 'default')
            atexit.register(lambda: self.__sched.shutdown(wait=False))  # Stop the scheduler when the program exits
            if start:
                self.__sched.start()
        except KeyError:
            logging.warning('An error occurred starting the scheduler.')

    def start_scheduler(self):
        self.__sched.start()

    def add_cron_event(self, func, name, year=None, month=None, week=None, day=None, day_of_week=None, hour=None,
                       minute=None, second=None, start_date=None, *args, **kwargs):
        """Add a cron like event to the schedule. Each job must be given a name in case it needs to be removed.
        The following expressions can be used in each field:
        Expression Field Description
        *          any   Fire on every value
        */a        any   Fire on every 'a' values, starting from the minimum
        a-b        any   Fire on any value in the 'a-b' range (a must be smaller than b
        a-b/c      any   Fire every 'c' values within the 'a-b' range
        xth y      day   Fire on the x -th occurrence of weekday y within the month
        last x     day   Fire on the last occurrence of weekday 'x' within the month
        last       day   Fire on the last day within the month
        x,y,z      any   Fire on any matching expression; can combine any number of any of the above expressions
        If you want to add **options to the event, use kwargs (keyword arguments dictionary)"""
        if self.__sched is not None:
            event_exists = False
            if self.__find_event(name) is not None:
                event_exists = True
            if not event_exists:
                self.__sched.add_cron_job(func=func, name=name, year=year, month=month, day=day, week=week,
                                          day_of_week=day_of_week, hour=hour, minute=minute, second=second,
                                          start_date=start_date, args=args, kwargs=kwargs,
                                          misfire_grace_time=self.__GRACE_PERIOD)
                logging.info('New cron event added')
            else:
                # Every event needs a unique name so we can keep track of it.
                logging.warning('add_cron_event: Event already exists')
                raise EventExistsError('A job with name %s already exists' % name)
        else:
            raise SchedulerNotFoundError('add_cron_event: Scheduler does not exist. It may have not started.')

    def __find_event(self, event_name):
        """Return the job named *event_name*, or None when no job matches."""
        if self.__sched is not None:
            events = self.__sched.get_jobs()
            for event in events:
                if event.name == event_name:
                    return event
            else:
                return None
        else:
            logging.warning('__find_event: Scheduler does not exist. It may have not started.')
            raise SchedulerNotFoundError('Scheduler does not exist. It may have not started.')

    def add_onceoff_event(self, func, name, date, args=None):
        """Add a once off event to the schedule. The job is executed once at the specified date and time.
        Date/time format: YYYY-MM-DD HH:MM:SS"""
        if self.__sched is not None:
            try:
                if args is None:  # If there are no arguments to be passed to the function
                    self.__sched.add_date_job(func=func, name=name, date=date,
                                              misfire_grace_time=self.__GRACE_PERIOD)
                else:  # If there are arguments to be passed to the function
                    # BUG FIX: the original wrote 'arge=args' — an unknown
                    # keyword, so the arguments were never forwarded to the job.
                    self.__sched.add_date_job(func=func, name=name, date=date, args=args,
                                              misfire_grace_time=self.__GRACE_PERIOD)
            except ValueError:
                # If the event is in the past, it will not run. This program is
                # not capable of manipulating space and time.
                raise EventWontRunError('The event will not run: Event time has expired.')
            logging.info('New once off event added')
        else:
            logging.warning('add_onceoff_event: Scheduler does not exist. It may have not started.')
            raise SchedulerNotFoundError('Scheduler does not exist. It may have not started.')

    def remove_event(self, event_name):
        """Remove the event 'event_name' from the schedule."""
        if self.__sched is not None:
            removed = False
            event = self.__find_event(event_name=event_name)
            if event is not None:  # If the event exists, remove it
                self.__sched.unschedule_job(event)
                removed = True
            if not removed:
                # Raise an error so that it can be handled correctly.
                logging.warning('remove_event: Event not found for removal.')
                raise EventNotFoundError('Event not found for removal: %s' % event_name)
        else:
            raise SchedulerNotFoundError('remove_event: Scheduler does not exist. It may have not started.')

    def get_jobs(self):
        """Get the list of events currently in the job store."""
        if self.__sched is not None:
            return self.__sched.get_jobs()
        else:
            raise SchedulerNotFoundError('get_events: Scheduler does not exist. It may have not started.')

    def get_job_names(self):
        """
        Get the names of all the jobs in the job store
        :return: list
        """
        jobs = self.get_jobs()
        job_list = []
        if jobs:
            for job in jobs:
                job_list.append(job.name)
        return job_list

    def get_scheduler(self):
        """Returns the Scheduler object. Rather add functionality to this class than call this method."""
        if self.__sched is not None:
            return self.__sched
        else:
            raise SchedulerNotFoundError('get_scheduler: Scheduler does not exist. It may have not started.')
This example demonstrates the use of persistent job stores. On each run, it adds a new alarm that fires after ten seconds. You can exit the program, restart it and observe that any previous alarms that have not fired yet are still active. """ from datetime import datetime, timedelta from apscheduler.scheduler import Scheduler from apscheduler.jobstores.shelve_store import ShelveJobStore def alarm(time): print('Alarm! This alarm was scheduled at %s.' % time) if __name__ == '__main__': scheduler = Scheduler(standalone=True) scheduler.add_jobstore(ShelveJobStore('example.db'), 'shelve') alarm_time = datetime.now() + timedelta(seconds=10) scheduler.add_date_job(alarm, alarm_time, name='alarm', jobstore='shelve', args=[datetime.now()]) print('To clear the alarms, delete the example.db file.') print('Press Ctrl+C to exit') try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass
class OutcallAction:
    """Drives a talk-show program: calls the host, bridges the station, and
    manages community callers via DTMF commands from the host's phone."""

    def __init__(self, host_id, start_time, duration, program):
        self.__host_id = host_id
        self.start_time = start_time
        self.duration = duration          # show duration in seconds
        self.program = program
        self.__scheduler = Scheduler()    # used for delayed callbacks/hangups
        self.__available_calls = dict()   # last-10-digits of number -> answer_info
        self.__in_talkshow_setup = False  # True while waiting for host readiness
        self.__host = None
        self.__community_call_UUIDs = dict()  # caller number -> call UUID
        self.__call_handler = self.program.radio_station.call_handler
        self.__phone_status = PhoneStatus.QUEUING
        self.__interested_participants = Set([])
        # NOTE(review): self.__warning_time is read by warn_number() and
        # __schedule_warning() but is never initialized here — those paths
        # would raise AttributeError if exercised. Confirm intended value.

    def start(self):
        # Begin setup: look up the host, start the scheduler, register for
        # telephony events, then dial the host.
        self.__in_talkshow_setup = True
        self.__host = self.__get_host(self.__host_id)
        # self.program.set_running_action(self)
        self.__scheduler.start()
        self.__call_handler.register_for_incoming_calls(self)
        self.__call_handler.register_for_incoming_dtmf(self, str(self.__host.phone.raw_number))
        self.__call_handler.register_for_host_call(self, str(self.__host.phone.raw_number))
        self.request_host_call()

    def stop(self, graceful=True, call_info=None):
        # Tear everything down and notify the program that this action ended.
        self.hangup_call()
        # Stop scheduler
        self.__scheduler.shutdown()
        # deregister from any triggers
        self.__call_handler.deregister_for_incoming_calls(self)
        self.__call_handler.deregister_for_incoming_dtmf(str(self.__host.phone.raw_number))
        self.program.notify_program_action_stopped(graceful, call_info)

    def __get_host(self, host_id):
        # Look up the host Person record in the program's database session.
        host = self.program.db.query(Person).filter(Person.id == host_id).first()
        return host

    def request_host_call(self):
        # Dial the host; readiness is then asked via DTMF once answered.
        self.__in_talkshow_setup = True
        result = self.__call_handler.call(self, self.__host.phone.raw_number, None, None, 15)  # call ends in 15 mins max
        self.program.log_program_activity("result of host call is " + str(result))

    def request_station_call(self):  # call the number specified thru plivo
        result = self.__call_handler.call(self, self.program.radio_station.station.primary_transmitter_phone.number,
                                          'play', self.__host.phone.raw_number, self.duration)
        self.program.log_program_activity("result of station call is " + str(result))

    def notify_call_answered(self, answer_info):
        # Record the answered call keyed by the last 10 digits of the number.
        if self.__host.phone.raw_number not in self.__available_calls:
            self.__available_calls[answer_info['Caller-Destination-Number'][-10:]] = answer_info
            self.__inquire_host_readiness()
            self.program.log_program_activity("host call has been answered")
        else:  # This notification is from answering the host call
            self.__available_calls[answer_info['Caller-Destination-Number'][-10:]] = answer_info
            # result1 = self.__schedule_warning()
            # result2 = self.__schedule_hangup()
            self.__call_handler.register_for_call_hangup(self, answer_info['Caller-Destination-Number'][-10:])

    def warn_number(self):
        # Tell the host how many seconds remain before the scheduled hangup.
        # NOTE(review): relies on self.__warning_time, which __init__ never
        # sets — verify before re-enabling __schedule_warning().
        seconds = self.duration - self.__warning_time
        if self.__host.phone.raw_number in self.__available_calls and 'Channel-Call-UUID' in self.__available_calls[
                self.__host.phone.raw_number]:
            result = self.__call_handler.speak(
                'Your call will end in ' + str(seconds) + 'seconds',
                self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            self.program.log_program_activity("result of warning is " + result)

    def __pause_call(self):  # hangup and schedule to call later
        self.__schedule_host_callback()
        self.hangup_call()

    def notify_call_hangup(self, event_json):
        # A community caller hanging up is routine; a host/station hangup
        # terminates the whole program action.
        if 'Caller-Destination-Number' in event_json:
            if event_json['Caller-Destination-Number'] in self.__community_call_UUIDs:  # a community caller is hanging up
                del self.__community_call_UUIDs[event_json['Caller-Destination-Number']]
                self.__call_handler.deregister_for_call_hangup(event_json['Caller-Destination-Number'])
            else:  # It is a hangup by the station or the host
                self.program.log_program_activity(
                    "Program terminated because {0} hangup".format(event_json['Caller-Destination-Number']))
                self.stop(True)

    def __inquire_host_readiness(self):
        # Ask the host (via TTS on their call) to confirm readiness by DTMF.
        self.__call_handler.speak(
            'You are scheduled to host a talk show at this time. If you are ready, press one, if not ready, press two',
            self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
        self.program.log_program_activity("Asking if host is ready")

    def hangup_call(self):  # hangup the ongoing call
        # Hang up every tracked call and clear the registry.
        for available_call in self.__available_calls:
            self.__call_handler.deregister_for_call_hangup(available_call)
            self.__call_handler.hangup(self.__available_calls[available_call]['Channel-Call-UUID'])
        self.__available_calls = dict()  # empty available calls. they all are hung up

    def notify_incoming_dtmf(self, dtmf_info):
        # DTMF command dispatcher for the host's keypad:
        # 1/2 = ready/not-ready during setup, 3 = toggle auto-answer,
        # 4 = toggle queuing, 5 = dial one queued caller, 6 = drop community
        # callers, 7 = 5-minute break.
        dtmf_json = dtmf_info
        dtmf_digit = dtmf_json["DTMF-Digit"]
        if dtmf_digit == "1" and self.__in_talkshow_setup:
            self.program.log_program_activity("Host is ready, we are calling the station")
            self.request_station_call()
            self.__in_talkshow_setup = False
        elif dtmf_digit == "2" and self.__in_talkshow_setup:  # stop the music, put this live on air
            self.program.log_program_activity("Host is not ready. We will hangup Arghhh!")
            self.hangup_call()
            self.__in_talkshow_setup = False
        elif dtmf_digit == "3":  # put the station in auto_answer
            if self.__phone_status != PhoneStatus.ANSWERING:
                self.__phone_status = PhoneStatus.ANSWERING
                self.__call_handler.speak('All incoming calls will be automatically answered',
                                          self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            else:
                self.__phone_status = PhoneStatus.REJECTING
                self.__call_handler.speak('All incoming calls will be rejected',
                                          self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
        elif dtmf_digit == "4":  # disable auto answer, reject and record all incoming calls
            if self.__phone_status != PhoneStatus.QUEUING:
                self.__phone_status = PhoneStatus.QUEUING
                self.__call_handler.speak(
                    'All incoming calls will be queued for call back',
                    self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            else:
                self.__phone_status = PhoneStatus.REJECTING
                self.__call_handler.speak(
                    'All incoming calls will be rejected',
                    self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
        elif dtmf_digit == "5":  # dequeue and call from queue of calls that were queued
            # Only one queued participant is dialed per key press (the return
            # exits after the first iteration).
            for caller in self.__interested_participants:
                result = self.__call_handler.call(self, caller, None, None, self.duration)
                self.program.log_program_activity("result of participant call is {0}".format(str(result)))
                self.__community_call_UUIDs[caller] = result[1]
                self.__call_handler.register_for_call_hangup(self, caller)
                self.__interested_participants.discard(caller)
                return
        elif dtmf_digit == "6":  # terminate the current caller
            for community_call_UUID in self.__community_call_UUIDs:
                self.__call_handler.hangup(self.__community_call_UUIDs[community_call_UUID])
            pass
        elif dtmf_digit == "7":  # Take a 5 min music break
            self.__call_handler.speak('You will be called back in 5 minutes',
                                      self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            self.program.log_program_activity("Host is taking a break")
            self.__pause_call()

    def notify_host_call(self, call_info):
        # The host called back in: drop that inbound call and restart setup.
        # hangup the call
        self.__call_handler.hangup(call_info['Channel-Call-UUID'])
        # reset program
        # self.stop()
        # restart program
        self.start()

    def notify_incoming_call(self, call_info):
        # Route a community call according to the current phone status.
        if self.__phone_status == PhoneStatus.ANSWERING:  # answer the phone call, join it to the conference
            # Only auto-answer when no community call is already bridged.
            if len(self.__community_call_UUIDs) == 0:
                self.__call_handler.bridge_incoming_call(
                    call_info['Channel-Call-UUID'],
                    "{0}_{1}".format(self.program.id, self.program.radio_station.id))
                self.__call_handler.register_for_call_hangup(self, call_info['Caller-Destination-Number'])
                self.__community_call_UUIDs[call_info['Caller-Destination-Number']] = call_info['Channel-Call-UUID']
                self.program.log_program_activity(
                    "Call from community caller {0} was auto-answered".format(call_info['Caller-Destination-Number']))
        elif self.__phone_status == PhoneStatus.QUEUING:  # Hangup the phone, call back later
            self.__interested_participants.add(call_info['Caller-ANI'])
            self.__call_handler.speak(
                'You have a new caller on the line',
                self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            self.__call_handler.hangup(call_info['Channel-Call-UUID'])
            self.program.log_program_activity(
                "Call from community caller {0} was queued".format(call_info['Caller-Destination-Number']))
        elif self.__phone_status == PhoneStatus.REJECTING:  # Hangup the call
            self.__call_handler.hangup(call_info['Channel-Call-UUID']);
            self.program.log_program_activity(
                "Call from community caller {0} was rejected".format(call_info['Caller-Destination-Number']))

    def __schedule_host_callback(self):
        # Call the host back 30 seconds from now (used after a music break).
        time_delta = timedelta(seconds=30)
        now = datetime.now()
        callback_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'request_host_call'), callback_time)

    def __schedule_warning(self):
        # Schedule the end-of-show warning (see NOTE on __warning_time above).
        time_delta = timedelta(seconds=self.__warning_time)
        now = datetime.utcnow()
        warning_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'warn_number'), warning_time)

    def __schedule_hangup(self):
        # Schedule the automatic hangup at the end of the show's duration.
        time_delta = timedelta(seconds=self.duration)
        now = datetime.utcnow()
        hangup_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'hangup_call'), hangup_time)

    def __deregister_listeners(self):
        # Remove all hangup/call/DTMF registrations for this action.
        for available_call in self.__available_calls:
            self.__call_handler.deregister_for_call_hangup(available_call)
        self.__call_handler.deregister_for_incoming_calls(self)
        self.__call_handler.deregister_for_incoming_dtmf(str(self.__host.phone.raw_number))
class SchedulerDaemon(Daemon):
    """Daemon that mirrors Django `Schedule` rows into an APScheduler instance
    and submits due jobs to an external processing framework.

    Responsibilities visible in this class:
    - schedule date/cron jobs from `Schedule` rows and submit them via
      `sendFrameworkCommand` (defined elsewhere in this project);
    - upload file-feeder inputs over SFTP before submission;
    - poll the framework to sync `Job` row statuses;
    - listen on a notification bus for `JobFinished` events.
    """

    def __init__(self, pid, config):
        super( SchedulerDaemon, self ).__init__(pid)
        self.config = config
        # set DaemonArgs for CommandDispatcher
        daemonArgs = DaemonArgs(config)
        # setup logger
        self.logger = None
        if os.path.exists(daemonArgs.log_file):
            logging.config.fileConfig(daemonArgs.log_file)
            self.logger = logging.getLogger('framework')
        # sftp settings; `or None` normalizes empty config strings to None
        self.sftpHost = self.config.get("sftp", "host")
        self.sftpPort = int(self.config.get("sftp", "port"))
        self.sftpRemotePath = self.config.get("sftp", "remote_path")
        self.sftpUsername = self.config.get("sftp", "username")
        self.sftpPassword = self.config.get("sftp", "password") or None
        self.sftpPrivateKey = self.config.get("sftp", "pkey") or None
        self.sftpPrivateKeyPassword = self.config.get("sftp", "pkey_password") or None
        self.sftpPrivateKeyType = self.config.get("sftp", "pkey_type") or None
        # NOTE(review): if "pkey_type" is empty, sftpPrivateKeyType is None and
        # .lower() below raises AttributeError - confirm config always sets it
        if self.sftpPrivateKeyType.lower() != 'rsa' \
                and self.sftpPrivateKeyType.lower() != 'dss':
            self.sftpPrivateKeyType = None
        # intervals in seconds / minutes respectively (see run()); `or` supplies
        # defaults only when the configured value parses to 0
        self.jobSubmitInterval = int(self.config.get("scheduler", "jobsubmit_interval")) or 10
        self.jobCleanupInterval = int(self.config.get("scheduler", "jobcleanup_interval")) or 30
        # daemonic=True: scheduler thread won't block process exit
        self.scheduler = Scheduler(daemonic=True)
        # field order for mapping a whitespace-split cron expression to
        # add_cron_job keyword arguments
        self.cronScheduleSequence = ('minute', 'hour', 'day', 'month', 'day_of_week')

    @transaction.commit_on_success
    def saveJob(self, status, frameworkJobId, scheduledJob):
        """Create or update a `Job` row for *scheduledJob* with *status*.

        If *frameworkJobId* is given, get-or-create the row keyed on the
        framework id and overwrite its fields; otherwise create a fresh row
        (used for failures that never reached the framework).
        """
        now = datetime.now()
        newJob = None
        #create new job
        if frameworkJobId is not None:
            newJob, created = Job.objects.get_or_create( frameworkid=frameworkJobId )
            newJob.name = scheduledJob.job_name
            newJob.started = now
            newJob.workflow = scheduledJob.workflow
            newJob.is_public = scheduledJob.is_public
            newJob.owner = scheduledJob.created_by
            newJob.schedule = scheduledJob
            newJob.status = status
        else:
            newJob = Job( name=scheduledJob.job_name,
                          started = now,
                          workflow = scheduledJob.workflow,
                          is_public = scheduledJob.is_public,
                          owner = scheduledJob.created_by,
                          schedule = scheduledJob,
                          status = status )
        newJob.save()

    @transaction.commit_on_success
    def submitJobToFramework(self, **kwargs):
        """Submit one scheduled job (kwargs['unScheduledJob']) to the framework.

        Builds the parameter list, uploads any file-feeder input over SFTP,
        then issues the 'submit' command. Date-based (one-shot) jobs are
        deactivated afterwards regardless of outcome.
        """
        jobCommand = 'job'
        daemonArgs = DaemonArgs(self.config)
        daemonArgs.command = jobCommand
        unScheduledJob = kwargs['unScheduledJob']
        is_fileFeeder = False
        fileFeederUploadedFile = None
        # reset any parameters left over from a previous dispatch
        del daemonArgs.param[:]
        # go through all parameters
        for parameter in unScheduledJob.parameters.all():
            # add parameter to daemonArgs.param
            if parameter.service and parameter.param_key and parameter.param_value:
                # check if a file feeder is used
                if parameter.service == settings.FILE_FEEDER_ID:
                    is_fileFeeder = True
                    fileFeederUploadedFile = parameter.param_value
                    # the framework must see the file at its remote SFTP path
                    remoteFeederFile = os.path.join(self.sftpRemotePath, parameter.param_value)
                    parameterString = '%s.%s=%s' % ( parameter.service, parameter.param_key, remoteFeederFile )
                else:
                    parameterString = '%s.%s=%s' % ( parameter.service, parameter.param_key, parameter.param_value )
                self.logger.debug("add parameter string: %s" % parameterString)
                daemonArgs.param.append([parameterString])
        # in case of a filefeeder upload file to framework server
        if is_fileFeeder:
            self.logger.debug("is file feeder")
            sftp = None
            transport = None
            try:
                transport = Transport((self.sftpHost, self.sftpPort))
                if self.sftpPassword:
                    transport.connect(username=self.sftpUsername, password=self.sftpPassword)
                else:
                    # no password configured: authenticate with a private key
                    privateKey = None
                    if self.sftpPrivateKeyType and self.sftpPrivateKeyType.lower() == 'rsa':
                        privateKey = RSAKey.from_private_key_file(self.sftpPrivateKey,
                                                                  password=self.sftpPrivateKeyPassword )
                    if self.sftpPrivateKeyType and self.sftpPrivateKeyType.lower() == 'dss':
                        privateKey = DSSKey.from_private_key_file(self.sftpPrivateKey,
                                                                  password=self.sftpPrivateKeyPassword )
                    transport.connect(username=self.sftpUsername, pkey=privateKey)
                sftp = SFTPClient.from_transport(transport)
                filePath = os.path.join( settings.MEDIA_ROOT, fileFeederUploadedFile )
                remotePath = os.path.join( self.sftpRemotePath, fileFeederUploadedFile )
                self.logger.debug("uploading file from %s to %s on remote machine" % (filePath, remotePath))
                sftp.put(filePath, remotePath)
                # sftp.put(filePath, remotePath, confirm=False)
                sftp.chmod( remotePath, 0644 )
                self.logger.debug("put OK")
            except IOError as e:
                self.logger.error("IOError: %s. Will continue with next scheduled job." % e)
                self.saveJob(Job.FAILED_STATUS, None, unScheduledJob)
            except PasswordRequiredException as e:
                self.logger.error("PasswordRequiredException: %s. Will continue with next scheduled job." % e)
                self.saveJob(Job.FAILED_STATUS, None, unScheduledJob)
            except SSHException as e:
                self.logger.error("SSH Exception: %s. Will continue with next scheduled job." % e)
                self.saveJob(Job.FAILED_STATUS, None, unScheduledJob)
            except Exception as e:
                self.logger.error("Unkown SFTP problem. Will continue with next scheduled job. %s" % e)
                self.saveJob(Job.FAILED_STATUS, None, unScheduledJob)
            finally:
                if sftp is not None:
                    sftp.close()
                if transport is not None:
                    transport.close()
        # set job workflow
        daemonArgs.jd_workflow = unScheduledJob.workflow.name
        frameworkJobId = None
        try:
            setattr(daemonArgs, jobCommand, 'submit')
            frameworkJobId = self.sendFrameworkCommand(jobCommand, daemonArgs)
            self.saveJob(Job.PROCESSING_STATUS, frameworkJobId, unScheduledJob)
        except WorkflowNotDeployedException:
            # The workflow is not deployed in the framework. To prevent the
            # scheduler retrying continuously we disable this job
            unScheduledJob.status = Schedule.DEACTIVATE_STATUS
            unScheduledJob.save()
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
            self.saveJob(Job.FAILED_STATUS, None, unScheduledJob)
        finally:
            daemonArgs.clean(jobCommand)
        # one-shot (date-based) jobs have fired; deactivate them
        if unScheduledJob.scheduled_start is not None:
            unScheduledJob.status = Schedule.DEACTIVATED_STATUS
            unScheduledJob.save()

    def updateProcessingJobs(self):
        """Sync `Job` rows in PROCESSING state against the framework's job list.

        Jobs missing from the framework list are marked COMPLETED; jobs whose
        status changed get their status and end time from a 'details' query.
        """
        jobCommand = 'job'
        processingJobs = Job.objects.filter(status=Job.PROCESSING_STATUS)
        daemonArgs = DaemonArgs(self.config)
        if len(list(processingJobs)) != 0:
            jobs_dict = {}
            try:
                setattr(daemonArgs, jobCommand, 'list')
                jobs_dict = self.sendFrameworkCommand(jobCommand, daemonArgs)
            except:
                # framework unreachable: try again on the next cleanup tick
                return
            finally:
                daemonArgs.clean(jobCommand)
            for processingJob in processingJobs:
                if processingJob.frameworkid in jobs_dict \
                        and int(processingJob.status) != int(jobs_dict[processingJob.frameworkid]):
                    try:
                        setattr(daemonArgs, jobCommand, 'details')
                        setattr(daemonArgs, 'gjd_id', processingJob.frameworkid)
                        job_details = self.sendFrameworkCommand(jobCommand, daemonArgs)
                    except:
                        continue
                    finally:
                        daemonArgs.clean(jobCommand)
                        daemonArgs.clean('gjd_id')
                    processingJob.status = jobs_dict[processingJob.frameworkid]
                    processingJob.finished = job_details['job_end_time']
                    processingJob.save()
                elif processingJob.frameworkid not in jobs_dict:
                    # framework no longer knows the job: treat as completed
                    processingJob.status = Job.COMPLETED_STATUS
                    processingJob.finished = None
                    processingJob.save()

    def checkJobs(self):
        """Periodic task: reconcile `Schedule` rows with the APScheduler state.

        Unschedules rows flagged DELETE/DEACTIVATE and (re)schedules rows
        flagged NEW/UPDATE as date or cron jobs.
        """
        scheduledJobs = self.scheduler.get_jobs()
        # remove scheduled jobs which are set to be deleted or deactivated
        deleteAndDeactivateJobs = Schedule.objects.filter(
            Q(status=Schedule.DELETE_STATUS) | Q(status=Schedule.DEACTIVATE_STATUS) )
        for deleteAndDeactivateJob in deleteAndDeactivateJobs:
            for scheduledJob in scheduledJobs:
                if scheduledJob.name == deleteAndDeactivateJob.job_name:
                    self.scheduler.unschedule_job(scheduledJob)
            # NOTE(review): status is updated even when no matching scheduler
            # job was found - confirm that is intended
            deleteAndDeactivateJob.status = Schedule.DEACTIVATED_STATUS\
                if deleteAndDeactivateJob.status == Schedule.DEACTIVATE_STATUS\
                else Schedule.DELETED_STATUS
            deleteAndDeactivateJob.save()
        # add/update unscheduled jobs
        split_re = re.compile("\s+")
        unScheduledJobs = Schedule.objects.filter(
            Q(status=Schedule.NEW_STATUS) | Q(status=Schedule.UPDATE_STATUS) )
        for unScheduledJob in unScheduledJobs:
            if unScheduledJob.status == Schedule.UPDATE_STATUS:
                # drop the stale scheduler entry before re-adding it
                for scheduledJob in scheduledJobs:
                    if scheduledJob.name == unScheduledJob.job_name:
                        self.scheduler.unschedule_job(scheduledJob)
            if unScheduledJob.scheduled_start is not None:
                # one-shot job at a fixed datetime
                schedule = { 'kwargs': { 'unScheduledJob': unScheduledJob },
                             'name': unScheduledJob.job_name }
                try:
                    newJob = self.scheduler.add_date_job(self.submitJobToFramework,
                                                         unScheduledJob.scheduled_start,
                                                         **schedule)
                    self.logger.debug( 'Job will run on %s' % newJob.next_run_time )
                except Exception as e:
                    self.logger.error("Unknown error while submitting jobs to framework: %s" % str(e))
                    # NOTE(review): `raise Exception` discards the original error
                    raise Exception
                else:
                    unScheduledJob.status = Schedule.ACTIVE_STATUS
                    unScheduledJob.save()
            else:
                # recurring job: split the cron expression into the kwarg
                # fields expected by add_cron_job
                cronList = split_re.split(unScheduledJob.cron_expression)
                schedule = dict(itertools.izip(self.cronScheduleSequence, cronList))
                schedule['kwargs'] = { 'unScheduledJob': unScheduledJob }
                schedule['name'] = unScheduledJob.job_name
                try:
                    newJob = self.scheduler.add_cron_job(self.submitJobToFramework, **schedule)
                    self.logger.debug( 'First run of job will be on %s' % newJob.next_run_time )
                except Exception as e:
                    self.logger.error("Unknown error while submitting jobs to framework: %s" % str(e))
                    raise Exception
                else:
                    unScheduledJob.status = Schedule.ACTIVE_STATUS
                    unScheduledJob.save()

    def cleanup(self):
        """Periodic task: refresh statuses of jobs still marked PROCESSING."""
        try:
            self.updateProcessingJobs()
        except Exception as e:
            self.logger.error("Unknown error while updating processing jobs: %s" % str(e))
            raise Exception

    def onNotification(self, eventType, body):
        """Bus callback: persist the final status of a finished framework job.

        Returns True so the bus keeps the subscription alive.
        """
        if eventType == 'JobFinished':
            # sleep is added, because a failing job can be quicker than
            # Django save the frameworkid of that job
            time.sleep(1)
            event = JobFinished()
            event.ParseFromString(body)
            self.logger.debug('Job with ID %s is finished with status %s',
                              str(event.job), str(event.status))
            Job.objects.update()
            finishedJob = Job.objects.get(frameworkid=event.job)
            finishedJob.status = event.status
            finishedJob.finished = datetime.now()
            finishedJob.save()
        return True

    def run(self):
        """Daemon entry point: load ACTIVE schedules, start the scheduler with
        its periodic check/cleanup jobs, then block on the notification bus."""
        self.logger.info('Started scheduler')
        # add active schedules to scheduler
        split_re = re.compile("\s+")
        scheduledJobs = Schedule.objects.filter( status=Schedule.ACTIVE_STATUS )
        for scheduledJob in scheduledJobs:
            if scheduledJob.scheduled_start is not None:
                schedule = { 'kwargs': { 'unScheduledJob': scheduledJob },
                             'name': scheduledJob.job_name }
                try:
                    newJob = self.scheduler.add_date_job(self.submitJobToFramework,
                                                         scheduledJob.scheduled_start,
                                                         **schedule)
                except Exception as e:
                    self.logger.error("Unknown error while submitting jobs to framework: %s" % str(e))
                    raise Exception
            else:
                cronList = split_re.split(scheduledJob.cron_expression)
                schedule = dict(itertools.izip(self.cronScheduleSequence, cronList))
                schedule['kwargs'] = { 'unScheduledJob': scheduledJob }
                schedule['name'] = scheduledJob.job_name
                try:
                    newJob = self.scheduler.add_cron_job(self.submitJobToFramework, **schedule)
                except Exception as e:
                    self.logger.error("Unknown error while submitting jobs to framework: %s" % str(e))
                    raise Exception
        # add job scheduling mechanism and cleanup to scheduler and start scheduler
        try:
            self.scheduler.add_interval_job(self.checkJobs, seconds=self.jobSubmitInterval)
            self.scheduler.add_interval_job(self.cleanup, minutes=self.jobCleanupInterval)
            self.scheduler.start()
        except Exception as e:
            self.logger.error("Unknown error while initializing scheduler: %s" % str(e))
            raise Exception
        # initialize bus instance for receiving job notifications
        try:
            notificationBus = Bus.createConfigurableBus(self.logger, self.config, 'notifications')
            notificationBus.openFwChannel()
            # presumably attachToMonitoring blocks, delivering events to
            # onNotification - confirm against the Bus implementation
            notificationBus.attachToMonitoring(self.onNotification)
            notificationBus.close()
        except BusException, e:
            self.logger.error("Cannot connect to HSN2 Bus because '%s'" % e)
            raise Exception
        except BusTimeoutException, e:
            self.logger.error("Response timeout")
            raise Exception
class OutcallAction:
    """Drives a call-out talk show: calls the host, bridges the radio station,
    and manages community callers via DTMF commands from the host's phone.

    Collaborators (project types): `PhoneStatus`, `PlayStatus`, `PromptEngine`,
    the station's call handler, and an APScheduler instance for delayed
    callbacks/warnings/hangups.
    """

    def __init__(self, host_id, start_time, duration, program):
        self.__host_id = host_id
        self.start_time = start_time
        # duration is used as a call timeout in seconds below
        self.duration = duration
        self.program = program
        self.__scheduler = None
        # phone number (last 12 digits) -> answer_info of live call legs
        self.__available_calls = dict()
        self.__in_talkshow_setup = False
        self.__host = None
        # caller number -> channel UUID for community / invited callers
        self.__community_call_UUIDs = dict()
        self.__invitee_call_UUIDs = dict()
        self.__call_handler = self.program.radio_station.call_handler
        self.__phone_status = PhoneStatus.QUEUING
        # callers queued for a call-back while the host is busy
        self.__interested_participants = Set([])
        self.__collecting_digits_to_call = False
        self.__invitee_number = ""
        self.__prompt_engine = PromptEngine(self.program.radio_station)
        # NOTE(review): warn_number/__schedule_warning read self.__warning_time,
        # which is never initialized here - confirm it is set elsewhere

    def start(self):
        """Begin the show: start a scheduler, register call/DTMF listeners for
        the host's number, and dial the host."""
        try:
            self.__in_talkshow_setup = True
            self.__host = self.__get_host(self.__host_id)
            if self.__host is None or self.__host.phone is None:
                self.stop(PlayStatus.no_media)
                return
            self.program.set_running_action(self)
            self.__scheduler = Scheduler()
            self.__scheduler.start()
            self.__call_handler.register_for_incoming_calls(self)
            # listeners are keyed on the last 9 digits of the host's number
            self.__call_handler.register_for_incoming_dtmf(
                self, str(self.__host.phone.raw_number)[-9:])
            self.__call_handler.register_for_host_call(
                self, str(self.__host.phone.raw_number)[-9:])
            self.request_host_call()
        except Exception as e:
            self.program.log_program_activity(
                "Error in OutcallAction.start: {0}".format(e.message))
            print e

    def stop(self, graceful=PlayStatus.success, call_info=None):
        """Tear down: hang up all legs, stop the scheduler, deregister
        listeners and notify the program of the outcome."""
        self.hangup_call()
        # Stop scheduler
        self.__scheduler.shutdown()
        # deregister from any triggers
        self.__call_handler.deregister_for_incoming_calls(self)
        self.__call_handler.deregister_for_incoming_dtmf(
            str(self.__host.phone.raw_number)[-9:])
        self.program.notify_program_action_stopped(graceful, call_info)

    def __get_host(self, host_id):
        # Look up the host Person row by id; None if not found.
        host = self.program.radio_station.db.query(Person).filter(
            Person.id == host_id).first()
        return host

    def request_host_call(self, guest_triggered=False):
        """Dial the host. On failure, stop the show unless a guest (community
        caller) triggered this attempt."""
        self.__in_talkshow_setup = True
        result = self.__call_handler.call(self, self.__host.phone.raw_number,
                                          None, False, 15)  # call ends in 15 mins max
        self.program.log_program_activity("result of host call is " + str(result))
        if not result[0] and not guest_triggered:
            self.stop(PlayStatus.failed)

    def __request_station_call(self):
        # call the number specified thru plivo
        # Check if the call exists, start with the least likely number to be called
        if self.program.radio_station.station.secondary_transmitter_phone is not None \
                and self.__call_handler.call_exists(
                    self.program.radio_station.station.secondary_transmitter_phone.raw_number):
            result = self.__call_handler.call(
                self,
                self.program.radio_station.station.secondary_transmitter_phone.raw_number,
                self.program.name, False, self.duration)
            return result
        elif self.program.radio_station.station.primary_transmitter_phone is not None \
                and self.__call_handler.call_exists(
                    self.program.radio_station.station.primary_transmitter_phone.raw_number):
            result = self.__call_handler.call(
                self,
                self.program.radio_station.station.primary_transmitter_phone.raw_number,
                self.program.name, False, self.duration)
            return result
        elif self.program.radio_station.station.sip_username is not None \
                and self.__call_handler.call_exists(
                    self.program.radio_station.station.sip_username):
            result = self.__call_handler.call(
                self, self.program.radio_station.station.sip_username,
                self.program.name, True, self.duration)
            self.program.log_program_activity(
                "result of station call via SIP is " + str(result))
            return result
        # At this point we are sure that no call to the station exists.
        # We will try to initiate a new call
        if self.program.radio_station.station.is_high_bandwidth:
            result = self.__call_station_via_sip()
            if result is None or not result[0]:
                # Now try calling the SIM (ideally do primary, then secondary)
                result = self.__call_station_via_goip()
        else:
            result = self.__call_station_via_goip()
        return result

    def __call_station_via_sip(self):
        # Returns the handler's call result tuple, or None if no SIP username.
        result = None
        # Try a high bandwidth call first
        if self.program.radio_station.station.sip_username is not None:
            result = self.__call_handler.call(
                self, self.program.radio_station.station.sip_username,
                self.__host.phone.raw_number, True, self.duration)
            self.program.log_program_activity(
                "result of station call via SIP is " + str(result))
        return result

    def __call_station_via_goip(self):
        # Dial the station's GoIP lines: primary first, then secondary.
        result = None
        if self.program.radio_station.station.primary_transmitter_phone is not None:
            result = self.__call_handler.call(
                self,
                self.program.radio_station.station.primary_transmitter_phone.raw_number,
                self.__host.phone.raw_number, False, self.duration)
            self.program.log_program_activity(
                "result of station call (primary) via GoIP is " + str(result))
        # NOTE(review): if no primary phone exists, result is still None here
        # and result[0] raises TypeError - confirm primary is always configured
        if not result[0] and self.program.radio_station.station.secondary_transmitter_phone is not None:
            # Go for the secondary line of the station, if duo SIM phone
            result = self.__call_handler.call(
                self,
                self.program.radio_station.station.secondary_transmitter_phone.raw_number,
                self.__host.phone.raw_number, False, self.duration)
            self.program.log_program_activity(
                "result of station call (secondary) via GoIP is " + str(result))
        return result

    def notify_call_answered(self, answer_info):
        """Call-handler callback when any dialed leg is answered (host first,
        then invitee or station legs)."""
        if self.__host.phone.raw_number not in self.__available_calls:
            # first answer must be the host leg
            self.__available_calls[answer_info['Caller-Destination-Number'][-12:]] = answer_info
            self.__inquire_host_readiness()
            self.program.log_program_activity("host call has been answered")
        elif 'Caller-Destination-Number' in answer_info and \
                answer_info['Caller-Destination-Number'][-12:] == self.__invitee_number:
            # an invited guest picked up
            self.__available_calls[answer_info['Caller-Destination-Number'][-12:]] = answer_info
            self.__invitee_number = ""
            self.__collecting_digits_to_call = False
        else:
            # This notification is from station answering call
            self.__available_calls[answer_info['Caller-Destination-Number'][-12:]] = answer_info
            self.__prompt_engine.play_prompt(
                self.__prompt_engine.ON_AIR_PROMPT, self.__call_handler,
                self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            #self.__call_handler.speak('You are now on air',
            #self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            # result1 = self.__schedule_warning()
            # result2 = self.__schedule_hangup()
            # NOTE(review): placement of this registration inside the station
            # branch reconstructed from context - confirm against history
            self.__call_handler.register_for_call_hangup(
                self, answer_info['Caller-Destination-Number'][-12:])

    def warn_number(self):
        """Play the end-of-call warning prompt on the host leg, if still live."""
        seconds = self.duration - self.__warning_time
        if self.__host.phone.raw_number in self.__available_calls and \
                'Channel-Call-UUID' in self.__available_calls[self.__host.phone.raw_number]:
            result = self.__prompt_engine.play_prompt(
                self.__prompt_engine.CALL_END_WARNING, self.__call_handler,
                self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            #result = self.__call_handler.speak(
            #'Your call will end in ' + str(seconds) + 'seconds',
            #self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            # NOTE(review): string concatenation assumes play_prompt returns a
            # str - confirm; otherwise this raises TypeError
            self.program.log_program_activity("result of warning is " + result)

    def __pause_call(self):
        # hangup and schedule to call later
        self.__schedule_host_callback()
        self.hangup_call()

    def notify_call_hangup(self, event_json):
        """Call-handler callback on hangup of any registered leg: clean up the
        matching registry, or stop the show if host/station hung up."""
        if 'Caller-Destination-Number' in event_json:
            if event_json['Caller-Destination-Number'] in self.__community_call_UUIDs:
                # a community caller is hanging up
                del self.__community_call_UUIDs[event_json['Caller-Destination-Number']]
                self.__call_handler.deregister_for_call_hangup(
                    event_json['Caller-Destination-Number'])
            elif event_json['Caller-Destination-Number'] in self.__invitee_call_UUIDs:
                del self.__invitee_call_UUIDs[event_json['Caller-Destination-Number']]
                self.__call_handler.deregister_for_call_hangup(
                    event_json['Caller-Destination-Number'])
            elif event_json['Caller-Destination-Number'] == self.__host.phone.raw_number or \
                    event_json['Caller-Destination-Number'] == self.program.radio_station.station.sip_username or \
                    (self.program.radio_station.station.primary_transmitter_phone is not None and
                     event_json['Caller-Destination-Number'] == self.program.radio_station.station.primary_transmitter_phone.raw_number) or \
                    (self.program.radio_station.station.secondary_transmitter_phone is not None and
                     event_json['Caller-Destination-Number'] == self.program.radio_station.station.secondary_transmitter_phone.raw_number):
                # It is a hangup by the station or the host
                self.program.log_program_activity(
                    "Program terminated because {0} hangup".format(
                        event_json['Caller-Destination-Number']))
                self.stop(PlayStatus.success)

    def __inquire_host_readiness(self):
        # Prompt the host (on their live leg) to confirm readiness; the prompt
        # differs when a waiting caller triggered the wake-up.
        if self.__phone_status == PhoneStatus.WAKE:
            self.__prompt_engine.play_prompt(
                self.__prompt_engine.CALLER_ON_THE_LINE, self.__call_handler,
                self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            #self.__call_handler.speak(
            #'You have a caller on the line. To connect to the station, press one, to cancel, press two',
            #self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
        else:
            self.__prompt_engine.play_prompt(
                self.__prompt_engine.INQUIRE_HOST_READY, self.__call_handler,
                self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            #self.__call_handler.speak(
            #'You are scheduled to host a talk show at this time. If you are ready, press one, if not ready, press two',
            #self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
        self.program.log_program_activity("Asking if host is ready")

    def hangup_call(self):
        # hangup the ongoing call
        for available_call in self.__available_calls:
            self.__call_handler.deregister_for_call_hangup(available_call)
            self.__call_handler.hangup(
                self.__available_calls[available_call]['Channel-Call-UUID'])
        self.__available_calls = dict()  # empty available calls. they all are hung up

    def notify_incoming_dtmf(self, dtmf_info):
        """Dispatch host key presses.

        '*' toggles digit-collection mode for dialing a guest. Outside that
        mode: 1/2 answer the readiness prompt during setup; 1 wake, 3 auto-
        answer, 4 queuing (each toggling back to rejecting); 5 dials back a
        queued caller; 6/7 hang up community/invitee calls; 9 takes a break.
        In collection mode '#' dials the collected number, other digits are
        appended to it.
        """
        dtmf_json = dtmf_info
        dtmf_digit = dtmf_json["DTMF-Digit"]
        if dtmf_digit == "*":
            # enter a number to be called followed by the # key
            self.__collecting_digits_to_call = not self.__collecting_digits_to_call
            if not self.__collecting_digits_to_call:
                # collection cancelled: drop any digits gathered so far
                self.__invitee_number = ""
        elif not self.__collecting_digits_to_call:
            if dtmf_digit == "1" and self.__in_talkshow_setup:
                self.program.log_program_activity(
                    "Host is ready, we are calling the station")
                self.__prompt_engine.play_prompt(
                    self.__prompt_engine.AWAIT_STATION_CONNECTION, self.__call_handler,
                    self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                self.__request_station_call()
                self.__in_talkshow_setup = False
            elif dtmf_digit == "2" and self.__in_talkshow_setup:
                # stop the music, put this live on air
                self.program.log_program_activity("Host is not ready. We will hangup Arghhh!")
                self.hangup_call()
                self.__in_talkshow_setup = False
            elif dtmf_digit == "1":
                # Wake mode, the station will wake host when someone calls in
                # and host is off air
                if self.__phone_status != PhoneStatus.WAKE:
                    self.__phone_status = PhoneStatus.WAKE
                    self.__prompt_engine.play_prompt(
                        self.__prompt_engine.ENTERING_WAKE_MODE, self.__call_handler,
                        self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                    # give the prompt time to finish before hanging up
                    time.sleep(5)
                    self.hangup_call()
                else:
                    # pressing 1 again leaves wake mode -> reject calls
                    self.__phone_status = PhoneStatus.REJECTING
                    self.__prompt_engine.play_prompt(
                        self.__prompt_engine.ENTERING_REJECT_MODE, self.__call_handler,
                        self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            elif dtmf_digit == "3":
                # put the station =in auto_answer
                if self.__phone_status != PhoneStatus.ANSWERING:
                    self.__phone_status = PhoneStatus.ANSWERING
                    self.__prompt_engine.play_prompt(
                        self.__prompt_engine.ENTERING_AUTO_ANSWER_MODE, self.__call_handler,
                        self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                else:
                    self.__phone_status = PhoneStatus.REJECTING
                    self.__prompt_engine.play_prompt(
                        self.__prompt_engine.ENTERING_REJECT_MODE, self.__call_handler,
                        self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            elif dtmf_digit == "4":
                # disable auto answer, reject and record all incoming calls
                if self.__phone_status != PhoneStatus.QUEUING:
                    self.__phone_status = PhoneStatus.QUEUING
                    self.__prompt_engine.play_prompt(
                        self.__prompt_engine.ENTERING_QUEUING_MODE, self.__call_handler,
                        self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                else:
                    self.__phone_status = PhoneStatus.REJECTING
                    self.__prompt_engine.play_prompt(
                        self.__prompt_engine.ENTERING_REJECT_MODE, self.__call_handler,
                        self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            elif dtmf_digit == "5":
                # dequeue and call from queue of calls that were queued
                for caller in self.__interested_participants:
                    result = self.__call_handler.call(self, caller, None, None,
                                                      self.duration)
                    self.program.log_program_activity(
                        "result of participant call is {0}".format(str(result)))
                    self.__community_call_UUIDs[caller] = result[1]
                    self.__call_handler.register_for_call_hangup(self, caller)
                    self.__interested_participants.discard(caller)
                    # NOTE(review): return inside the loop dials back only one
                    # queued caller per key press (and avoids mutating the set
                    # while iterating) - confirm intended
                    return
            elif dtmf_digit == "6":
                # terminate the current caller
                for community_call_UUID in self.__community_call_UUIDs:
                    self.__call_handler.hangup(
                        self.__community_call_UUIDs[community_call_UUID])
                pass
            elif dtmf_digit == "7":
                # terminate the current caller (invitee)
                for invitee_call_key in self.__invitee_call_UUIDs:
                    self.__call_handler.hangup(
                        self.__invitee_call_UUIDs[invitee_call_key])
                pass
            elif dtmf_digit == "9":
                # Take a 5 min music break
                self.__prompt_engine.play_prompt(
                    self.__prompt_engine.ENTERING_5_MIN_BREAK, self.__call_handler,
                    self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                self.program.log_program_activity("Host is taking a break")
                self.__pause_call()
        else:
            if dtmf_digit == "#":
                # Call invitee number
                if self.__invitee_number == "":
                    self.__prompt_engine.play_prompt(
                        self.__prompt_engine.ENTER_NUMBER_TO_CALL, self.__call_handler,
                        self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                else:
                    self.__prompt_engine.play_prompt(
                        self.__prompt_engine.CALLING_OUT, self.__call_handler,
                        self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                    # CARLOS - Should the phone number be passed here - prompt engine??
                    #self.__call_handler.speak('You are calling "{0}"'.format(self.__invitee_number),
                    # self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                    result = self.__call_handler.call(
                        self, self.__invitee_number,
                        self.__host.phone.raw_number, False, self.duration)
                    self.__call_handler.register_for_call_hangup(
                        self, self.__invitee_number)
                    if result[0]:
                        self.__invitee_call_UUIDs[self.__invitee_number] = result[1]
                        # Disable this mode
                        self.__phone_status = PhoneStatus.REJECTING
                    else:
                        # CARLOS TODO - Have to format this!
                        self.__prompt_engine.play_prompt(
                            self.__prompt_engine.CALL_FAILED, self.__call_handler,
                            self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                        #self.__call_handler.speak('The call to {0} failed. Please pres the hash key to try again'.format(self.__invitee_number),
                        # self.__available_calls[self.__host.phone.raw_number][
                        # 'Channel-Call-UUID'])
            else:
                # Collect digits to call
                self.__invitee_number = "{0}{1}".format(
                    self.__invitee_number, dtmf_digit)

    def notify_host_call(self, call_info):
        """Callback when the host calls in: accept briefly, hang up, then
        restart the show so the host gets called back."""
        # accept then hangup the call
        self.__call_handler.bridge_incoming_call(
            call_info['Channel-Call-UUID'], "temp")
        self.__call_handler.hangup(call_info['Channel-Call-UUID'])
        sleep(10)
        # reset program
        # self.stop()
        # restart program
        self.start()

    def notify_incoming_call(self, call_info):
        """Callback for community calls; behavior depends on the current
        phone status (auto-answer, queue, reject, or wake the host)."""
        if self.__phone_status == PhoneStatus.ANSWERING:
            # answer the phone call, join it to the conference
            if len(self.__community_call_UUIDs) == 0:
                self.__call_handler.bridge_incoming_call(
                    call_info['Channel-Call-UUID'],
                    "{0}_{1}".format(self.program.id,
                                     self.program.radio_station.id))
            self.__call_handler.register_for_call_hangup(
                self, call_info['Caller-Destination-Number'])
            self.__community_call_UUIDs[
                call_info['Caller-Destination-Number']] = call_info[
                    'Channel-Call-UUID']
            self.program.log_program_activity(
                "Call from community caller {0} was auto-answered".format(
                    call_info['Caller-Destination-Number']))
        elif self.__phone_status == PhoneStatus.QUEUING:
            # Hangup the phone, call back later
            self.__interested_participants.add(call_info['Caller-ANI'])
            self.__prompt_engine.play_prompt(
                self.__prompt_engine.INCOMING_CALL_QUEUED, self.__call_handler,
                self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            self.__prompt_engine.play_prompt(
                self.__prompt_engine.CALL_BACK_NOTIFICATION, self.__call_handler,
                call_info['Channel-Call-UUID'])
            # let the caller hear the notification before hanging up
            time.sleep(5)
            self.__call_handler.hangup(call_info['Channel-Call-UUID'])
            self.program.log_program_activity(
                "Call from community caller {0} was queued".format(
                    call_info['Caller-Destination-Number']))
        elif self.__phone_status == PhoneStatus.REJECTING:
            # Hangup the call
            self.__call_handler.hangup(call_info['Channel-Call-UUID'])
            # NOTE(review): bridging/prompting a leg that was just hung up
            # looks suspicious - confirm ordering against the call handler
            self.__call_handler.bridge_incoming_call(
                call_info['Channel-Call-UUID'],
                "{0}_temp{1}".format(self.program.id,
                                     self.program.radio_station.id))
            self.__prompt_engine.play_prompt(
                self.__prompt_engine.CALL_BACK_NOTIFICATION, self.__call_handler,
                call_info['Channel-Call-UUID'])
            self.__call_handler.register_for_call_hangup(
                self, call_info['Caller-Destination-Number'])
            self.program.log_program_activity(
                "Call from community caller {0} was rejected".format(
                    call_info['Caller-Destination-Number']))
        elif self.__phone_status == PhoneStatus.WAKE:
            # Hangup the call
            if len(self.__community_call_UUIDs) == 0:
                self.__call_handler.bridge_incoming_call(
                    call_info['Channel-Call-UUID'],
                    "{0}_{1}".format(self.program.id,
                                     self.program.radio_station.id))
            self.__call_handler.register_for_call_hangup(
                self, call_info['Caller-Destination-Number'])
            self.__community_call_UUIDs[
                call_info['Caller-Destination-Number']] = call_info[
                    'Channel-Call-UUID']
            self.program.log_program_activity(
                "Call from community caller {0} was auto-answered".format(
                    call_info['Caller-Destination-Number']))
            self.__prompt_engine.play_prompt(
                self.__prompt_engine.AWAIT_HOST_CONNECTION, self.__call_handler,
                call_info['Channel-Call-UUID'])
            # guest_triggered=True: don't kill the show if the host call fails
            self.request_host_call(True)
            #self.__call_handler.speak('Please wait while we connect you to the host', call_info['Channel-Call-UUID'])

    def __schedule_host_callback(self):
        # call the host back after a five-minute break
        time_delta = timedelta(seconds=300)
        now = datetime.now()
        callback_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'request_host_call'),
                                      callback_time)

    def __schedule_warning(self):
        # play the end-of-show warning __warning_time seconds from now
        time_delta = timedelta(seconds=self.__warning_time)
        now = datetime.utcnow()
        warning_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'warn_number'), warning_time)

    def __schedule_hangup(self):
        # hang up all legs when the show's duration elapses
        time_delta = timedelta(seconds=self.duration)
        now = datetime.utcnow()
        hangup_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'hangup_call'), hangup_time)

    def __deregister_listeners(self):
        # drop every hangup/incoming-call/DTMF registration for this action
        for available_call in self.__available_calls:
            self.__call_handler.deregister_for_call_hangup(available_call)
        self.__call_handler.deregister_for_incoming_calls(self)
        self.__call_handler.deregister_for_incoming_dtmf(
            str(self.__host.phone.raw_number))
class Bus(BotPlugin):
    """Errbot plugin that announces upcoming bus departures.

    Timetables are scraped live from nextbuses.mobi with requests +
    BeautifulSoup; reminders are queued on an APScheduler 2.x instance
    created in activate().  Times are handled as `arrow` objects.
    """

    def activate(self):
        # Plugin start-up hook: bring up the scheduler used by bus_remind.
        super(Bus, self).activate()
        self.sched = Scheduler(coalesce=True)
        self.sched.start()

    @botcmd(split_args_with=' ')
    def bus(self, mess, args):
        """Chat command: yield one line per upcoming bus.

        args[0] is the stop; args[1] is the route (display defaults to 49
        when omitted).  NOTE(review): all of *args is forwarded to
        next_buses, so a third arg would become its `time` offset.
        """
        argsLength = len(args)
        if argsLength < 2:
            route = 49
        else:
            route = args[1]
        now = arrow.now()
        t = self.next_buses(*args)
        buses = []
        if t:
            for bus in t:
                buses.append('No. %s bus leaves from %s %s' % (route, args[0], bus.humanize(now)))
        for s in buses:
            yield s

    @botcmd(split_args_with=' ')
    def bus_remind(self, mess, args):
        """Chat command: schedule a one-shot reminder 10 minutes before the
        next bus.  NOTE(review): next_bus may return False when nothing is
        found, in which case t.replace() would raise — unguarded here.
        """
        t = self.next_bus(*args)
        reminder = t.replace(minutes=-10)
        remind = partial(self.remind, mess, args)
        # .naive strips the timezone for APScheduler's date job.
        self.sched.add_date_job(remind, reminder.naive)
        return "%s: you'll be reminded %s" % (mess.getMuckNick(), reminder.humanize())

    def remind(self, mess, args):
        """Scheduler callback: re-query the timetable and message the user."""
        now = arrow.now()
        t = self.next_bus(args[0], args[1])
        if t:
            self.send(mess.getFrom(), '%s: the next no. %s bus leaves from %s %s' % (mess.getMuckNick(), args[1], args[0], t.humanize(now)), message_type=mess.getType())

    def parse_timetable(self, stop, route):
        """Scrape departure entries for (stop, route) from nextbuses.mobi.

        `stop` may be a nickname translated through the STOPS mapping.
        Returns the list of <p class="Stops"> elements of the first
        matching stop table.
        """
        if stop in STOPS:
            stop = STOPS[stop]
        url = posixpath.join("http://www.nextbuses.mobi", "WebView/BusStopSearch/BusStopSearchResults/", stop)
        res = requests.get(url, params={'searchType': 'route', 'searchFilter': route})
        soup = BeautifulSoup(res.text)
        bus_stops = soup.findAll('table', {'class': 'BusStops'})
        times = bus_stops[0].findAll('p', {'class': 'Stops'})
        # should loop instead of return one
        return times

    def next_bus(self, stop, route=49, time=0):
        """Return the first departure more than `time` minutes away, or False.

        Entries read either "... at HH:MM" or "... in N mins"; "DUE"
        entries are skipped.
        """
        times = self.parse_timetable(stop, route)
        now = arrow.now()
        then = now.replace(minutes=+int(time))
        nextbuses = []  # NOTE(review): never used
        for i in times:
            logging.info(i.text)
            if 'DUE' in i.text:
                continue
            elif ';at ' in i.text:
                # Absolute time "HH:MM" — builds today's time from it.
                t = i.text.split('at ')[-1].strip().split(':')
                next = now.replace(hour=int(t[0]), minute=int(t[1]))  # NOTE(review): shadows builtin `next`
                logging.info(next)
            else:
                # Relative time "in N mins".
                t = i.text.split('in ')[-1].strip().split()
                next = now.replace(minutes=int(t[0]))
                logging.info(next)
            if next > then:
                return next
        return False

    def next_buses(self, stop, route=49, time=0):
        """Like next_bus, but return ALL parsed departures (or False if none).

        Absolute times whose minute field contains '(' are skipped
        (site renders some entries with a parenthesised note).
        """
        times = self.parse_timetable(stop, route)
        now = arrow.now()
        then = now.replace(minutes=+int(time))  # NOTE(review): computed but never compared against
        buses = []
        for i in times:
            logging.info(i.text)
            if 'DUE' in i.text:
                continue
            elif ';at ' in i.text:
                t = i.text.split('at ')[-1].strip().split(':')
                if t[1].find('(') == -1:
                    logging.info("replacing hour with %s and minute with %s" % (t[0], t[1]))
                    next = now.replace(hour=int(t[0]), minute=int(t[1]))
                    buses.append(next)
                    logging.info("Next bus parsed is %s" % next)
            else:
                t = i.text.split('in ')[-1].strip().split()
                next = now.replace(minutes=int(t[0]))
                buses.append(next)
                logging.info(next)
        if len(buses) != 0:
            return buses;
        return False
#Function Dictionary controlOptions = { 'updatemealtime': updateMealTime, 'addeveningmeal': addEveningMeal, 'givetreat': giveTreat #, # 'feednow' : feedNow } #tweetSearch= Timer(60,searchForTweets) #tweetSearch.start() #setNextMeals() #morningMealTimer = Tim if datetime.datetime.today() < morningMealTime: morningFeed = sched.add_date_job(feedingAction, morningMealTime, ['morning']) if datetime.datetime.today() < eveningMealTime: eveningFeed = sched.add_date_job(eveningFeedingAction, eveningMealTime) if datetime.datetime.today() < midnightMealTime: midnightFeed = sched.add_date_job(feedingAction, midnightMealTime, ['midnight']) if datetime.datetime.today() < timeToSetNextMeals: setNextDayMealTimes = sched.add_date_job(setTomorrowsMeals, timeToSetNextMeals) lastTwitterStatusID = 1 #updateLastTweetID() sched.add_interval_job(searchForTweets, minutes=1) api.PostUpdate(status="The Nelly Feeder is Initalized! Meow!")
class OutcallAction:
    """Talk-show program action: dials the host, bridges the host to the
    radio station, and manages community/invitee callers via DTMF keys.

    All telephony goes through the station's call handler; one-shot jobs
    (callbacks, warnings, hangups) run on a private APScheduler instance.
    Python 2 code (`print e`, `Set`).
    """

    def __init__(self, host_id, start_time, duration, program):
        self.__host_id = host_id
        self.start_time = start_time
        self.duration = duration
        self.program = program
        self.__scheduler = Scheduler()
        # Answered calls keyed by (last 12 digits of) destination number.
        self.__available_calls = dict()
        self.__in_talkshow_setup = False
        self.__host = None
        self.__community_call_UUIDs = dict()
        self.__invitee_call_UUIDs = dict()
        self.__call_handler = self.program.radio_station.call_handler
        self.__phone_status = PhoneStatus.QUEUING
        # Callers queued for call-back while in QUEUING mode.
        self.__interested_participants = Set([])
        self.__collecting_digits_to_call = False
        self.__invitee_number = ""
        # NOTE(review): self.__warning_time is never initialized here, so
        # warn_number()/__schedule_warning() would raise AttributeError.

    def start(self):
        """Begin the action: look up the host, register telephony listeners
        and place the initial call to the host."""
        try:
            self.__in_talkshow_setup = True
            self.__host = self.__get_host(self.__host_id)
            if self.__host is None or self.__host.phone is None:
                self.stop(False)
                return
            self.program.set_running_action(self)
            self.__scheduler.start()
            self.__call_handler.register_for_incoming_calls(self)
            self.__call_handler.register_for_incoming_dtmf(self, str(self.__host.phone.raw_number))
            self.__call_handler.register_for_host_call(self, str(self.__host.phone.raw_number))
            self.request_host_call()
        except Exception as e:
            print e

    def stop(self, graceful=True, call_info=None):
        """Tear down: hang up all calls, stop the scheduler, deregister
        listeners and notify the program."""
        self.hangup_call()
        # Stop scheduler
        self.__scheduler.shutdown()
        # deregister from any triggers
        self.__call_handler.deregister_for_incoming_calls(self)
        self.__call_handler.deregister_for_incoming_dtmf(str(self.__host.phone.raw_number))
        self.program.notify_program_action_stopped(graceful, call_info)

    def __get_host(self, host_id):
        # Look up the host Person record in the station database.
        host = self.program.radio_station.db.query(Person).filter(Person.id == host_id).first()
        return host

    def request_host_call(self):
        """Dial the host; stop the action if the call could not be placed."""
        self.__in_talkshow_setup = True
        result = self.__call_handler.call(self, self.__host.phone.raw_number, None, False, 15)  # call ends in 15 mins max
        self.program.log_program_activity("result of host call is " + str(result))
        if not result[0]:
            self.stop(False)

    def __request_station_call(self):
        """Dial the station: SIP first on high-bandwidth stations, falling
        back to GoIP (GSM); stop the action if all attempts fail."""
        if self.program.radio_station.station.is_high_bandwidth:
            result = self.__call_station_via_sip()
            if result is None or not result[0]:
                # Now try calling the SIM (ideally do primary, then secondary)
                result = self.__call_station_via_goip()
        else:
            result = self.__call_station_via_goip()
        if result is None or not result[0]:
            self.stop(False)

    def __call_station_via_sip(self):
        """Place a SIP call to the station; returns the handler's result
        tuple, or None when no SIP account is configured."""
        result = None
        # Try a high bandwidth call first
        if self.program.radio_station.station.sip_username is not None:
            result = self.__call_handler.call(self, self.program.radio_station.station.sip_username, self.__host.phone.raw_number, True, self.duration)
            self.program.log_program_activity("result of station call via SIP is " + str(result))
        return result

    def __call_station_via_goip(self):
        """Place a GSM call to the station's primary line, then the
        secondary line if the primary attempt fails."""
        result = None
        if self.program.radio_station.station.primary_transmitter_phone is not None:
            result = self.__call_handler.call(self, self.program.radio_station.station.primary_transmitter_phone.raw_number, self.__host.phone.raw_number, False, self.duration)
            self.program.log_program_activity("result of station call (primary) via GoIP is " + str(result))
            if not result[0] and self.program.radio_station.station.secondary_transmitter_phone is not None:
                # Go for the secondary line of the station, if duo SIM phone
                result = self.__call_handler.call(self, self.program.radio_station.station.secondary_transmitter_phone.raw_number, self.__host.phone.raw_number, False, self.duration)
                self.program.log_program_activity("result of station call (secondary) via GoIP is " + str(result))
        return result

    def notify_call_answered(self, answer_info):
        """Call-handler callback: an outbound call was answered.

        Disambiguates host / invitee / station by which numbers are
        already in __available_calls and the pending invitee number.
        """
        if self.__host.phone.raw_number not in self.__available_calls:
            # First answer must be the host.
            self.__available_calls[answer_info['Caller-Destination-Number'][-12:]] = answer_info
            self.__inquire_host_readiness()
            self.program.log_program_activity("host call has been answered")
        elif 'Caller-Destination-Number' in answer_info and answer_info['Caller-Destination-Number'][-12:] == self.__invitee_number:
            self.__available_calls[answer_info['Caller-Destination-Number'][-12:]] = answer_info
            self.__invitee_number = "";
            self.__collecting_digits_to_call = False
        else:  # This notification is from station answering call
            self.__available_calls[answer_info['Caller-Destination-Number'][-12:]] = answer_info
            self.__call_handler.speak('You are now on air', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            # result1 = self.__schedule_warning()
            # result2 = self.__schedule_hangup()
            self.__call_handler.register_for_call_hangup(self, answer_info['Caller-Destination-Number'][-12:])

    def warn_number(self):
        """Speak a time-remaining warning into the host's call."""
        seconds = self.duration - self.__warning_time
        if self.__host.phone.raw_number in self.__available_calls and 'Channel-Call-UUID' in self.__available_calls[self.__host.phone.raw_number]:
            result = self.__call_handler.speak('Your call will end in ' + str(seconds) + 'seconds', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            self.program.log_program_activity("result of warning is " + result)

    def __pause_call(self):  # hangup and schedule to call later
        self.__schedule_host_callback()
        self.hangup_call()

    def notify_call_hangup(self, event_json):
        """Call-handler callback: some leg hung up.

        Community/invitee hangups just drop bookkeeping; a hangup by the
        host or the station terminates the whole program action.
        """
        if 'Caller-Destination-Number' in event_json:
            if event_json['Caller-Destination-Number'] in self.__community_call_UUIDs:  # a community caller is hanging up
                del self.__community_call_UUIDs[event_json['Caller-Destination-Number']]
                self.__call_handler.deregister_for_call_hangup(event_json['Caller-Destination-Number'])
            elif event_json['Caller-Destination-Number'] in self.__invitee_call_UUIDs:
                del self.__invitee_call_UUIDs[event_json['Caller-Destination-Number']]
                self.__call_handler.deregister_for_call_hangup(event_json['Caller-Destination-Number'])
            elif event_json['Caller-Destination-Number'] == self.__host.phone.raw_number or \
                    event_json['Caller-Destination-Number'] in \
                    [self.program.radio_station.station.sip_username,
                     self.program.radio_station.station.primary_transmitter_phone.raw_number,
                     self.program.radio_station.station.secondary_transmitter_phone.raw_number]:
                # It is a hangup by the station or the host
                self.program.log_program_activity(
                    "Program terminated because {0} hangup".format(event_json['Caller-Destination-Number']))
                self.stop(True)

    def __inquire_host_readiness(self):
        """Prompt the host (over the call) to press 1/2 depending on mode."""
        if self.__phone_status == PhoneStatus.WAKE:
            self.__call_handler.speak('You have a caller on the line. To connect to the station, press one, to cancel, press two', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
        else:
            self.__call_handler.speak('You are scheduled to host a talk show at this time. If you are ready, press one, if not ready, press two', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
        self.program.log_program_activity("Asking if host is ready")

    def hangup_call(self):  # hangup the ongoing call
        for available_call in self.__available_calls:
            self.__call_handler.deregister_for_call_hangup(available_call)
            self.__call_handler.hangup(self.__available_calls[available_call]['Channel-Call-UUID'])
        self.__available_calls = dict()  # empty available calls. they all are hung up

    def notify_incoming_dtmf(self, dtmf_info):
        """Call-handler callback: host pressed a key.

        '*' toggles number-collection mode; otherwise digits select
        actions (connect, wake/reject/answer/queue modes, dequeue,
        terminate caller/invitee, break) and '#' dials the collected
        invitee number.
        """
        dtmf_json = dtmf_info
        dtmf_digit = dtmf_json["DTMF-Digit"]
        if dtmf_digit == "*":  # enter a number to be called followed by the # key
            self.__collecting_digits_to_call = not self.__collecting_digits_to_call
            if not self.__collecting_digits_to_call:
                # Collection cancelled: discard any digits gathered so far.
                self.__invitee_number = ""
        elif not self.__collecting_digits_to_call:
            if dtmf_digit == "1" and self.__in_talkshow_setup:
                self.program.log_program_activity("Host is ready, we are calling the station")
                self.__call_handler.speak('Please wait while we connect you to the radio station', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                self.__request_station_call()
                self.__in_talkshow_setup = False
            elif dtmf_digit == "2" and self.__in_talkshow_setup:  # stop the music, put this live on air
                self.program.log_program_activity("Host is not ready. We will hangup Arghhh!")
                self.hangup_call()
                self.__in_talkshow_setup = False
            elif dtmf_digit == "1":  # Wake mode, the station will wake host when someone calls in and host is off air
                if self.__phone_status != PhoneStatus.WAKE:
                    self.__phone_status = PhoneStatus.WAKE
                    self.__call_handler.speak('Your call will be terminated and you will be called when someone calls into the station', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                    self.hangup_call()
                else:
                    self.__phone_status = PhoneStatus.REJECTING
                    self.__call_handler.speak('All incoming calls will be rejected', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            elif dtmf_digit == "3":  # put the station =in auto_answer
                if self.__phone_status != PhoneStatus.ANSWERING:
                    self.__phone_status = PhoneStatus.ANSWERING
                    self.__call_handler.speak('All incoming calls will be automatically answered', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                else:
                    self.__phone_status = PhoneStatus.REJECTING
                    self.__call_handler.speak('All incoming calls will be rejected', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            elif dtmf_digit == "4":  # disable auto answer, reject and record all incoming calls
                if self.__phone_status != PhoneStatus.QUEUING:
                    self.__phone_status = PhoneStatus.QUEUING
                    self.__call_handler.speak(
                        'All incoming calls will be queued for call back', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                else:
                    self.__phone_status = PhoneStatus.REJECTING
                    self.__call_handler.speak(
                        'All incoming calls will be rejected', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            elif dtmf_digit == "5":  # dequeue and call from queue of calls that were queued
                for caller in self.__interested_participants:
                    result = self.__call_handler.call(self, caller, None, None, self.duration)
                    self.program.log_program_activity("result of participant call is {0}".format(str(result)))
                    self.__community_call_UUIDs[caller] = result[1]
                    self.__call_handler.register_for_call_hangup(self, caller)
                    self.__interested_participants.discard(caller)
                    # `return` inside the loop: only one queued caller is
                    # dialled per key press.
                    return
            elif dtmf_digit == "6":  # terminate the current caller
                for community_call_UUID in self.__community_call_UUIDs:
                    self.__call_handler.hangup(self.__community_call_UUIDs[community_call_UUID])
                pass
            elif dtmf_digit == "7":  # terminate the current caller (invitee)
                for invitee_call_key in self.__invitee_call_UUIDs:
                    self.__call_handler.hangup(self.__invitee_call_UUIDs[invitee_call_key])
                pass
            elif dtmf_digit == "9":  # Take a 5 min music break
                self.__call_handler.speak('You will be called back in 5 minutes', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                self.program.log_program_activity("Host is taking a break")
                self.__pause_call()
        else:
            if dtmf_digit == "#":  # Call invitee number
                if self.__invitee_number == "":
                    self.__call_handler.speak('Please enter the number to call and press the # key to dial', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                else:
                    self.__call_handler.speak('You are calling {0}'.format(self.__invitee_number), self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
                    result = self.__call_handler.call(self, self.__invitee_number, self.__host.phone.raw_number, False, self.duration)
                    self.__call_handler.register_for_call_hangup(self, self.__invitee_number)
                    if result[0]:
                        self.__invitee_call_UUIDs[self.__invitee_number] = result[1]
                    else:
                        self.__call_handler.speak('The call to {0} failed. Please pres the hash key to try again'.format(self.__invitee_number),
                                                  self.__available_calls[self.__host.phone.raw_number][
                                                      'Channel-Call-UUID'])
            else:  # Collect digits to call
                self.__invitee_number = "{0}{1}".format(self.__invitee_number, dtmf_digit)

    def notify_host_call(self, call_info):
        """Call-handler callback for an inbound call from the host: hang it
        up and restart the action so the normal outcall flow runs."""
        # hangup the call
        self.__call_handler.hangup(call_info['Channel-Call-UUID'])
        # reset program
        # self.stop()
        # restart program
        self.start()

    def notify_incoming_call(self, call_info):
        """Call-handler callback for community callers; behavior depends on
        the current PhoneStatus mode (ANSWERING/QUEUING/REJECTING/WAKE)."""
        if self.__phone_status == PhoneStatus.ANSWERING:  # answer the phone call, join it to the conference
            if len(self.__community_call_UUIDs) == 0:  # only one community caller on air at a time
                self.__call_handler.bridge_incoming_call(call_info['Channel-Call-UUID'], "{0}_{1}".format(self.program.id, self.program.radio_station.id))
                self.__call_handler.register_for_call_hangup(self, call_info['Caller-Destination-Number'])
                self.__community_call_UUIDs[call_info['Caller-Destination-Number']] = call_info['Channel-Call-UUID']
                self.program.log_program_activity(
                    "Call from community caller {0} was auto-answered".format(call_info['Caller-Destination-Number']))
        elif self.__phone_status == PhoneStatus.QUEUING:  # Hangup the phone, call back later
            self.__interested_participants.add(call_info['Caller-ANI'])
            self.__call_handler.speak(
                'You have a new caller on the line', self.__available_calls[self.__host.phone.raw_number]['Channel-Call-UUID'])
            self.__call_handler.hangup(call_info['Channel-Call-UUID'])
            self.program.log_program_activity(
                "Call from community caller {0} was queued".format(call_info['Caller-Destination-Number']))
        elif self.__phone_status == PhoneStatus.REJECTING:  # Hangup the call
            self.__call_handler.hangup(call_info['Channel-Call-UUID'])
            self.program.log_program_activity(
                "Call from community caller {0} was rejected".format(call_info['Caller-Destination-Number']))
        elif self.__phone_status == PhoneStatus.WAKE:  # Hangup the call
            if len(self.__community_call_UUIDs) == 0:
                self.__call_handler.bridge_incoming_call(call_info['Channel-Call-UUID'],
                                                         "{0}_{1}".format(self.program.id, self.program.radio_station.id))
                self.__call_handler.register_for_call_hangup(self, call_info['Caller-Destination-Number'])
                self.__community_call_UUIDs[call_info['Caller-Destination-Number']] = call_info['Channel-Call-UUID']
                self.program.log_program_activity(
                    "Call from community caller {0} was auto-answered".format(call_info['Caller-Destination-Number']))
                # Wake the (off-air) host so they can join the caller.
                self.request_host_call()

    def __schedule_host_callback(self):
        # Call the host back after a 5-minute (300 s) break.
        time_delta = timedelta(seconds=300)
        now = datetime.now()
        callback_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'request_host_call'), callback_time)

    def __schedule_warning(self):
        # One-shot job that fires warn_number __warning_time seconds from now.
        # NOTE(review): uses utcnow() while __schedule_host_callback uses
        # now() — mixed naive clocks; confirm which the scheduler expects.
        time_delta = timedelta(seconds=self.__warning_time)
        now = datetime.utcnow()
        warning_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'warn_number'), warning_time)

    def __schedule_hangup(self):
        # One-shot job that hangs everything up when the show's duration ends.
        time_delta = timedelta(seconds=self.duration)
        now = datetime.utcnow()
        hangup_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'hangup_call'), hangup_time)

    def __deregister_listeners(self):
        # Remove every listener this action registered on the call handler.
        for available_call in self.__available_calls:
            self.__call_handler.deregister_for_call_hangup(available_call)
        self.__call_handler.deregister_for_incoming_calls(self)
        self.__call_handler.deregister_for_incoming_dtmf(str(self.__host.phone.raw_number))
class AlarmManager(object):
    """Collects calendar-style events from plugins and turns them into
    scheduled wake-up alarms on a daemonic APScheduler 2.x instance.

    Plugins are fetched lazily through `get_plugins(kind)`:
    "eventcollector" supplies events, "eventprocessor" adjusts them and
    "alarm" implements begin/play/pause/end/interval.
    """

    def __init__(self, get_plugins, read_bedsensor):
        self.get_plugins = get_plugins
        self.read_bedsensor = read_bedsensor
        self.sched = Scheduler(daemonic=True)
        self.sched.start()
        # Re-collect events every hour; also do an initial pass right away.
        self.sched.add_interval_job(self.check_events, hours=1)
        self.check_events()

    def check_events(self):
        """Collect, process and schedule upcoming events.

        Runs hourly (and once at start-up).  Only events whose alarm time
        falls before the next hourly collection are scheduled; later ones
        are left for a future pass.
        """
        logging.getLogger(__name__).info("Checking for events.")
        events = []
        for event in self.collect_events():
            events.append(event)
        if events:
            events.sort()
            now = datetime.now()
            next_event_collection = now + timedelta(hours=1)
            for event in events:
                # if alarm_time already passed skip
                if event.get_alarm_time() < now:
                    continue
                # adjust alarm_time
                # NOTE(review): the processed event returned here is
                # discarded — processors presumably mutate in place; verify.
                self.process_event(event)
                # if alarm_time is after next event collection stop scheduling
                # events till next event collection
                if event.get_alarm_time() > next_event_collection:
                    return
                self.schedule_alarm(event)

    def collect_events(self):
        """Yield events from every eventcollector plugin."""
        logging.getLogger(__name__).info("Collecting events.")
        for plugin in self.get_plugins("eventcollector"):
            for event in plugin.collect():
                yield event

    def process_event(self, event):
        """Pass an event through every eventprocessor plugin, letting each
        modify it; return the (possibly replaced) event."""
        for plugin in self.get_plugins("eventprocessor"):
            event = plugin.process(event)
        return event

    def schedule_alarm(self, event):
        """Schedule begin/end alarm jobs if the alarm time hasn't passed."""
        # if alarm_time hasn't passed schedule alarm
        if event.get_alarm_time() > datetime.now():
            alarm_time = event.get_alarm_time()
            logging.info("Sheduling alarm for {0}.".format(alarm_time))
            self.sched.add_date_job(self.begin_alarms, alarm_time, [event])
            self.sched.add_date_job(self.end_alarms, event.end_time)

    def bedsensor_changed(self, state):  # pragma: no cover
        # Hardware callback: play while the bed sensor is pressed.
        if state == Bedsensor.PRESSED:
            self.play_alarms()
        else:
            self.pause_alarms()

    def begin_alarms(self, event):
        """Scheduler callback at alarm time: start all alarm plugins, and
        begin playing immediately if someone is in bed."""
        logging.info("Begining alarms.")
        for plugin in self.get_plugins("alarm"):
            plugin.begin(event)
        if self.read_bedsensor() == Bedsensor.PRESSED:
            self.play_alarms()

    def play_alarms(self):
        """Resume all alarm plugins (bed sensor pressed)."""
        logging.info("Bedsensor is pressed, playing alarms.")
        for plugin in self.get_plugins("alarm"):
            plugin.play()

    def pause_alarms(self):
        """Pause all alarm plugins (bed sensor released)."""
        logging.info("Bedsensor is released, pausing alarms.")
        for plugin in self.get_plugins("alarm"):
            plugin.pause()

    def end_alarms(self):
        """Scheduler callback at the event's end time: stop all alarms."""
        logging.info("Ending alarms.")
        for plugin in self.get_plugins("alarm"):
            plugin.end()

    def interval_alarms(self):
        """Tick every alarm plugin's interval hook."""
        for plugin in self.get_plugins("alarm"):
            plugin.interval()

    def test_alarm(self):  # pragma: no cover
        """Manual smoke test: schedule a synthetic event 2 s from now."""
        event = Event(name="Some Event",
                      reminder=timedelta(hours=0),
                      start_time=datetime.now() + timedelta(seconds=2),
                      duration=timedelta(seconds=30))
        self.schedule_alarm(event)
        self.sched.add_date_job(self.play_alarms, event.get_alarm_time() + timedelta(seconds=1))
class MessageScheduler(object):
    """Schedules messages for future delivery over ZMQ.

    Incoming database-signal messages are routed by `schedule()`; each
    one becomes an APScheduler date job that republishes the message on
    a PUB socket connected to the forwarder.
    """

    def __init__(self, jobstore, url):
        self.socket = zmq.Context().socket(zmq.SUB)
        self.logger = setup(__name__)
        self.logger.debug("Creating MessageScheduler")
        self.logger.debug("id = {}".format(id(self)))
        config = read_env('config.cfg')
        self._scheduler = Scheduler(daemonic=True)
        # Persist jobs in the configured jobstore (class path + URL).
        config_scheduler = {'apscheduler.jobstores.file.class': 'apscheduler.jobstores%s' % jobstore,
                            'apscheduler.jobstores.file.url': url}
        self._scheduler.configure(config_scheduler)
        # Open a publishing socket to the forwarder to pass messages out
        self.broadcast_socket = zmq.Context().socket(zmq.PUB)
        self.broadcast_socket.connect(config['ZMQ_FORWARDER_SUCKS_IN'])

    def start_ap_daemon(self):
        """Start the scheduler (and its logging)."""
        self.logger.info("scheduler start")
        setup("apscheduler.scheduler")
        self._scheduler.start()

    def shutdown(self):
        """Stop the scheduler."""
        self.logger.info("scheduler shutdown")
        self._scheduler.shutdown()

    def schedule(self, topic, msg):
        """
        Takes incoming message, massages it, and dispatches to appropriate
        function.

        Delivery time is `start_time` (minus an optional `window` in
        seconds); messages without a start time are sent ~10 s from now.
        An `operation` key ('insert'/'update'/'delete') selects the action.
        """
        self.logger.debug("schedule received {}: {}".format(topic, msg))
        # Fix: default to None so messages without 'obj_id' no longer raise
        # NameError below (obj_id was only bound when the key was present).
        obj_id = msg.pop('obj_id', None)
        if 'start_time' in msg:
            if 'window' in msg:
                msg_time = msg['start_time'] - timedelta(seconds=msg['window'])
            else:
                msg_time = msg['start_time']
        else:
            offset = timedelta(seconds=10)  # needs to be a little bit in the future, so scheduler can run it
            msg_time = datetime.now() + offset
        if 'operation' in msg:
            if msg['operation'] == 'insert':
                self.schedule_message(topic, msg, msg_time, obj_id)
            elif msg['operation'] == 'update':
                # NOTE(review): reschedule_message/cancel_message are not
                # defined in this class as visible here — confirm they exist.
                self.reschedule_message(obj_id, topic, msg, msg_time)
            elif msg['operation'] == 'delete':
                self.cancel_message(obj_id)
            else:
                self.logger.debug("Scheduler has been sent unknown database signal operation.")
        else:
            self.schedule_message(topic, msg, msg_time)

    def send_to_station(self, topic, msg):
        """ Send a message on to rootio_telephony """
        # The incoming topic is replaced by the station-specific one.
        topic = "station.{}.db".format(msg['station_id'])
        # reserialize any datetime elements for zmq -- unpack again at ts
        for key, value in msg.items():
            if isinstance(value, datetime):
                msg[key] = isodate.datetime_isoformat(value)
        msg = json.dumps(msg)
        self.logger.debug("fwd %s: %s" % (topic, msg))
        self.broadcast_socket.send_multipart((topic, msg))

    def schedule_message(self, topic, message, send_at, obj_id=None):
        """Add a one-shot job that forwards `message` at `send_at`.

        Fix: `obj_id` now defaults to None — `schedule()` calls this with
        three arguments for messages that carry no operation, which
        previously raised TypeError.
        """
        self.logger.info("schedule message %s:%s at %s" % (topic, message, send_at))
        # create lambda for scheduler to call at execution time
        # and add it
        message['obj_id'] = obj_id
        try:
            job = self._scheduler.add_date_job(self.send_to_station, send_at, args=(topic, message), name=obj_id)
            self.logger.debug("scheduled job: {}".format(job))
            self.logger.debug("scheduled job_name: {}".format(job.name))
        except ValueError as e:  # fix: Py2-only "except ValueError, e" syntax
            self.logger.error(e)
class EventHandler(pyinotify.ProcessEvent): def create(self, theFile): if ListenerContainer.is_syncing: try: if self.isDir: ListenerContainer.add_watch(theFile) ListenerContainer.client.mkdir(theFile) else: ListenerContainer.client.upload(theFile) except error: print error def delete(self, theFile): if ListenerContainer.is_syncing: try: if self.isDir: ListenerContainer.rm_watch(theFile) ListenerContainer.client.delete_folder(theFile) else: ListenerContainer.client.delete_file(theFile) except error: print error def renameModification(self, fromFile, toFile): self.delete(fromFile) self.create(toFile) def __init__(self): # Initialize Time self.initialTS = datetime.datetime(2005, 7, 14, 12, 30) self.currentTS = datetime.datetime.now() # Initialize Scheduler self.schedule = Scheduler() logging.basicConfig() self.schedule.start() # Initialize History self.stack = [] self.happened = False # IMPLEMENT THIS SO THAT IF EVENT HAPPENED WE CANCEL THE SCHEDULE EVENT def setup(self, event): self.currentTS = datetime.datetime.now() self.file = event.name self.path = event.path self.full = event.pathname self.isDir = event.dir def process_IN_CREATE(self, event): self.setup(event) difference = self.currentTS - self.initialTS if (difference.total_seconds() < 1 and self.full is self.previousFull): self.stack.append("Create") else: executionTime = self.currentTS + datetime.timedelta(seconds=.25) job = self.schedule.add_date_job(lambda: self.process(), date=executionTime) self.previousFull = event.pathname self.initialTS = datetime.datetime.now() del self.stack[:] self.stack.append("Create") def process_IN_MOVED_FROM(self, event): self.setup(event) self.previous = event.pathname difference = self.currentTS - self.initialTS if (difference.total_seconds() < 1 and self.stack[0] is "Create"): self.stack.append("Moved_From") else: executionTime = self.currentTS + datetime.timedelta(seconds=.25) job = self.schedule.add_date_job(lambda: self.process(), date=executionTime) self.previousFull = 
event.pathname self.initialTS = datetime.datetime.now() del self.stack[:] self.stack.append("Moved_From") def process_IN_MOVED_TO(self, event): self.setup(event) difference = self.currentTS - self.initialTS if (difference.total_seconds() < 1 and self.stack[len(self.stack) - 1] is "Moved_From"): self.stack.append("Moved_To") else: executionTime = self.currentTS + datetime.timedelta(seconds=.25) job = self.schedule.add_date_job(lambda: self.process(), date=executionTime) self.previousFull = event.pathname self.initialTS = datetime.datetime.now() del self.stack[:] self.stack.append("Moved_To") def process(self): global o # --- Creation/Deletion --- if len(self.stack) is 1: if self.stack[0] is "Create": self.create(self.full) print "Created: %s" % self.full elif self.stack[0] is "Moved_From": self.delete(self.full) print "Deleted: %s" % self.full else: print "Error" # --- Rename --- elif len(self.stack) is 2: if self.stack[0] is "Moved_From" and self.stack[1] is "Moved_To": self.renameModification(self.previous, self.full) print "Renamed: %s" % self.full else: print "Error" # --- Modified --- elif len(self.stack) is 3: if self.stack[0] is "Create" and self.stack[ 1] is "Moved_From" and self.stack[2] is "Moved_To": self.renameModification(self.previous, self.full) print "Modified: %s" % self.full else: print "Error" self.happened = True
"""Example: persistent one-shot alarms with APScheduler's shelve jobstore.

Schedules an alarm one minute out, stored in ``example.db`` so it
survives restarts, then blocks so the (daemonic) scheduler can fire.
"""
import sys
import time
from datetime import datetime, timedelta

from apscheduler.jobstores.shelve_store import ShelveJobStore
from apscheduler.scheduler import Scheduler


def alarm(time):
    # Scheduler callback; `time` is the datetime captured when the job
    # was added, not when it fires.
    sys.stdout.write('Alarm! This alarm was scheduled at %s.\n' % time)


if __name__ == '__main__':
    scheduler = Scheduler()
    scheduler.add_jobstore(ShelveJobStore('example.db'), 'shelve')

    # One minute from now, persisted in the 'shelve' store under a fixed name.
    alarm_time = datetime.now() + timedelta(minutes=1)
    scheduler.add_date_job(alarm, alarm_time, name='alarm',
                           jobstore='shelve', args=[datetime.now()])

    sys.stdout.write('To clear the alarms, delete the example.db file.\n')
    sys.stdout.write('Press Ctrl+C to exit\n')
    scheduler.start()
    try:
        # This is here to prevent the main thread from exiting so that the
        # scheduler has time to work -- this is rarely necessary in real world
        # applications
        time.sleep(9999)
    finally:
        # Shut down the scheduler so that the job store gets closed properly
        scheduler.shutdown()
1. 指定特定时间运行某一任务,可以通过如下方式:

from datetime import datetime
from apscheduler.scheduler import Scheduler

sched = Scheduler()
sched.daemonic = False  # daemonic参数,表示执行线程是非守护的,在Scheduler的文档中推荐使用非守护线程
# 上面两行,也可以简写为: sched = Scheduler(daemonic = False)

def job_function(text):
    print text

# 指定时间运行,且只运行一次
job = sched.add_date_job(job_function, datetime(2013, 10, 30, 17, 13, 59), ['Hello World'])

sched.start()

2. 有些时候,我们需要每隔一定时间运行一下任务,即 Interval-based scheduling 的方式,如下:

from apscheduler.scheduler import Scheduler

sched = Scheduler()
sched.daemonic = False

def job_function():
    print "Hello World"

# 指定每隔多久执行一次,可以用 weeks,days, hours, minutes, seconds 来指定间隔时间(如果所有值都设为0,则默认每隔1秒执行一次; 秒允许使用小数,0.1秒也生效)
sched.add_interval_job(job_function, seconds=1)
# coding: utf-8 import datetime from apscheduler.scheduler import Scheduler from autowb.account.models import User from autowb.cron.models import WeiboContent def default_callback(user, wb_cnt): return user.update_weibo(wb_cnt) wb_cnt_unsent = WeiboContent.find({'sent': False, 'push_date': {'$gt': datetime.datetime.now()}}) # _scheduler = Scheduler(daemonic=False) _scheduler = Scheduler() # reload unsent job for wb_cnt in wb_cnt_unsent: user = User.get_by_id(wb_cnt.user_id) _scheduler.add_date_job(default_callback, date=wb_cnt.push_date, name=str(wb_cnt.id), args=[user, wb_cnt, ]) _scheduler.start() def get_scheduler(): return _scheduler
class RadioProgram:
    # A scheduled radio program composed of sequential "program actions"
    # (media, ads, news, outcalls, ...) parsed from a JSON structure.
    # Actions run one after another: each action calls back into
    # notify_program_action_stopped, which starts the next one.

    def __init__(self, program, radio_station, program_handler):
        self.__rootio_mail_message = RootIOMailMessage()
        self.__program_actions = []      # pending actions, consumed LIFO via pop()
        self.__status = False            # True once any action played successfully
        self.__call_info = None          # last call info seen, used for final hangup
        self.id = program.id
        self.name = program.id
        self.__program_handler = program_handler
        self.scheduled_program = program
        self.radio_station = radio_station
        self.__shutting_down = False
        self.__scheduler = Scheduler()
        self.__running_action = None
        return

    def start(self):
        # Parse the actions; if none could be loaded there is nothing to run.
        self.__load_program_actions()
        if len(self.__program_actions) == 0:
            return
        else:
            self.__program_handler.set_running_program(self)
            self.__run_program_action()  # will call the next one when done

    '''
    Load the definition of components of the program from a JSON definition
    '''
    def __load_program_actions(self):
        try:
            data = json.loads(self.scheduled_program.program.structure)
        except ValueError as e:
            print e
            return
        # Actions are insert(0)-ed, then later consumed with pop() from the
        # end -- so they execute in the order they appear in the JSON.
        for action in data:
            if "type" in action:
                if action['type'] == "Advertisements":
                    if "track_id" in action and "start_time" in action and "duration" in action:
                        self.__program_actions.insert(0, AdvertisementAction(action["track_id"], action["start_time"], action["duration"], self))
                        self.radio_station.logger.info(
                            "Advertisements program scheduled to start at {0} for a duration {1}".format(action["start_time"], action["duration"]))
                if action['type'] == "Media":
                    if "track_id" in action and "start_time" in action and "duration" in action:
                        self.__program_actions.insert(0, MediaAction(action["track_id"], action["start_time"], action["duration"], self))
                        self.radio_station.logger.info(
                            "Media program scheduled to start at {0} for a duration {1}".format(action["start_time"], action["duration"]))
                if action['type'] == "Community":
                    if "category_id" in action and "start_time" in action and "duration" in action:
                        self.__program_actions.insert(0, CommunityAction(action["category_id"], action["start_time"], action["duration"], self))
                        self.radio_station.logger.info(
                            "Community program scheduled to start at {0} for a duration {1}".format(action["start_time"], action["duration"]))
                if action['type'] == "Podcast":
                    if "track_id" in action and "start_time" in action and "duration" in action:
                        self.__program_actions.insert(0, PodcastAction(action["track_id"], action["start_time"], action["duration"], self))
                        self.radio_station.logger.info(
                            "Podcast program scheduled to start at {0} for a duration {1}".format(action["start_time"], action["duration"]))
                if action['type'] == "Music":
                    # NOTE(review): Music is only logged -- no action object is
                    # created or queued. Confirm this is intentional.
                    if "start_time" in action and "duration" in action:
                        self.radio_station.logger.info(
                            "Music program scheduled to start at {0} for a duration {1}".format(action["start_time"], action["duration"]))
                if action['type'] == "News":
                    if "track_id" in action and "start_time" in action and "duration" in action:
                        self.__program_actions.insert(0, NewsAction(action["track_id"], action["start_time"], action["duration"], self))
                        self.radio_station.logger.info(
                            "News program scheduled to start at {0} for a duration {1}".format(action["start_time"], action["duration"]))
                if action['type'] == "Outcall":
                    if "host_id" in action and "start_time" in action and "duration" in action:
                        self.__program_actions.insert(0, OutcallAction(action['host_id'], action["start_time"], action['duration'], self))
                        self.radio_station.logger.info(
                            "Outcall program scheduled to start at {0} for a duration {1}".format(action["start_time"], action["duration"]))
        return

    '''
    Schedule the actions of a particular program for playback within the program
    '''
    def __schedule_program_actions(self):
        # NOTE(review): this method is not called from start() in this class --
        # actions are run sequentially via __run_program_action instead.
        # Confirm whether timed scheduling is still a supported mode.
        for program_action in self.__program_actions:
            self.__scheduler.add_date_job(getattr(program_action, 'start'),
                                          self.__get_start_datetime(program_action.start_time).replace(tzinfo=None),
                                          misfire_grace_time=program_action.duration)

    def stop(self):
        # Mark shutdown so notify_program_action_stopped terminates instead of
        # chaining into the next action, then stop whatever is running now.
        self.__shutting_down = True
        if self.__running_action is not None:
            self.__running_action.stop()

    def set_running_action(self, running_action):
        #if self.__running_action is not None:
        #    self.__running_action.stop()  # clean up any stuff that is not necessary anymore
        self.__running_action = running_action

    def log_program_activity(self, program_activity):
        # Log locally and accumulate into the summary e-mail body.
        self.radio_station.logger.info(program_activity)
        self.__rootio_mail_message.append_to_body(
            '%s %s' % (datetime.now().strftime('%y-%m-%d %H:%M:%S'), program_activity))

    def __run_program_action(self):
        # Pop and start the next pending action (LIFO over the insert(0) list,
        # i.e. original JSON order).
        if self.__program_actions is not None and len(self.__program_actions) > 0:
            self.radio_station.logger.info("Popping program action from program actions: {0}".format(self.__program_actions))
            self.__program_actions.pop().start()

    def notify_program_action_stopped(self, played_successfully, call_info):
        # the next action might need the call.
        if self.__shutting_down:
            self.radio_station.logger.info("Shutting down this program...")
            self.__status = self.__status or played_successfully
            self.radio_station.call_handler.hangup(call_info['Channel-Call-UUID'])
            self.__log_program_status()
        else:
            self.__status = self.__status or played_successfully  # For program with multiple actions, if one succeeds then flagged as success!
            if call_info is not None and 'Channel-Call-UUID' in call_info:
                self.__call_info = call_info
            if len(self.__program_actions) == 0:  # all program actions have run
                self.radio_station.logger.info("Program actions array is empty. Program will terminate")
                if self.__call_info is not None:
                    self.radio_station.call_handler.hangup(self.__call_info['Channel-Call-UUID'])
                self.__log_program_status()
                self.__send_program_summary()
            else:
                self.__run_program_action()

    def __send_program_summary(self):
        # E-mail the accumulated activity log to every network user that has
        # opted in to station notifications.
        try:
            self.__rootio_mail_message.set_subject(
                '[%s] %s ' % (self.radio_station.station.name, self.scheduled_program.program.name))
            self.__rootio_mail_message.set_from('*****@*****.**')
            # This will come from DB in future
            users = self.__get_network_users()
            for user in users:
                if user.receive_station_notifications:
                    self.__rootio_mail_message.add_to_address(user.email)
            self.__rootio_mail_message.send_message()
        except Exception as e:
            self.radio_station.logger.error("Error {er} in send program summary for {prg}".format(er=str(e), prg=self.scheduled_program.program.name))

    def __log_program_status(self):
        # Persist the final success/failure flag straight to the DB via raw
        # psycopg2 (bypasses the ORM); every failure path is logged, never raised.
        try:
            conn = psycopg2.connect(DefaultConfig.SQLALCHEMY_DATABASE_URI)
            cur = conn.cursor()
            cur.execute("update radio_scheduledprogram set status = %s where id = %s", (self.__status, self.scheduled_program.id))
            conn.commit()
        except psycopg2.Error as e:
            try:
                self.radio_station.logger.error("Error(1) {err} in radio_program.__log_program_status".format(err=e.message))
            except Exception as e:
                return
        except Exception as e:
            self.radio_station.logger.error("Error(3) {err} in radio_program.__log_program_status".format(err=e.message))
        finally:
            # cur/conn may be unbound if connect() failed; the except guards that.
            try:
                cur.close()
                conn.close()
            except Exception as e:
                self.radio_station.logger.error(
                    "Error(4) {err} in radio_program.__log_program_status".format(err=e.message))

    def __get_network_users(self):
        station_users = self.radio_station.station.network.networkusers
        return station_users

    '''
    Get the time at which to schedule the program action to start
    '''
    def __get_start_datetime(self, time_part):
        # time_part is an "HH:MM:SS" offset; convert it to an absolute local
        # timestamp relative to now.
        now = datetime.now(dateutil.tz.tzlocal())
        t = datetime.strptime(time_part, "%H:%M:%S")
        time_delta = timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)
        return now + time_delta + timedelta(seconds=2)  # 2 second scheduling allowance
class RadioProgram: def __init__(self, db, program, radio_station): logging.basicConfig(filename='rootioweb.log') self.__program_actions = [] self.id = program.id self.__db = db self.__program = program self.radio_station = radio_station self.__scheduler = Scheduler() self.__running_action = None return ''' Starts a station program and does the necessary preparations ''' def start(self): self.__load_program_actions() self.__schedule_program_actions() self.__scheduler.start() return ''' Load the definition of components of the program from a JSON definition ''' def __load_program_actions(self): print self.__program.program.description data = json.loads(self.__program.program.description) for category in data: if category == "Jingle": for action in data[category]: self.__program_actions.append(JingleAction(action["argument"], action["start_time"], action["duration"], action["is_streamed"], self, action["hangup_on_complete"])) print "Jingle scheduled to start at " + str(record["start_time"]) if category == "Media": for action in data[category]: self.__program_actions.append(MediaAction(action["argument"], action["start_time"], action["duration"], action["is_streamed"], self, action["hangup_on_complete"])) print "Media Scheduled to start at " + str(action["start_time"]) if category == "Interlude": for action in data[category]: self.__program_actions.append(InterludeAction(action["argument"], action["start_time"], action["duration"], action["is_streamed"], self, action["hangup_on_complete"])) print "Interlude Scheduled to start at " + str(action["start_time"]) if category == "Stream": #self.__program_actions.add(JingleAction(j['argument'])) print "Stream would have started here" if category == "Music": #self.__program_actions.add(MediaAction(j['argument'])) print "This would have started here" if category == "Outcall": for action in data[category]: print "Call to host scheduled to start at " + str(action["start_time"]) 
self.__program_actions.append(OutcallAction(action['argument'],action["start_time"], action['duration'], action['is_streamed'], action['warning_time'],self, action["hangup_on_complete"]) ) return ''' Schedule the actions of a particular program for playback within the program ''' def __schedule_program_actions(self): for program_action in self.__program_actions: self.__scheduler.add_date_job(getattr(program_action,'start'), self.__get_start_datetime(program_action.start_time).replace(tzinfo=None)) def set_running_action(self, running_action): if not self.__running_action == None: self.__running_action.stop()#clean up any stuff that is not necessary anymore self.__running_action = running_action ''' Get the time at which to schedule the program action to start ''' def __get_start_datetime(self, time_part): t = datetime.strptime(time_part, "%H:%M:%S") time_delta = timedelta(hours=t.hour, minutes=t.minute, seconds=t.second) return self.__program.start + time_delta
import imp
from apscheduler.scheduler import Scheduler

# Command-line launcher: reads a JSON config describing scheduler jobs,
# loads each job's function from its source file, registers the jobs, and
# starts the web front-end.
# NOTE(review): argparse, json, os, sys and start() are used below but not
# imported here -- presumably imported earlier in this file; confirm.
usage = 'python -m apschedulerweb --conf=file'
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument('--conf', required=True)
args = parser.parse_args()
with open(args.conf, 'r') as f:
    conf = json.load(f)
if 'jobs' not in conf or len(conf['jobs']) == 0:
    print('List of jobs should be defined')
    sys.exit(1)
s = Scheduler()
for job in conf['jobs']:
    # Each job dict carries 'file', 'func' and 'trigger' plus the keyword
    # arguments for the matching add_*_job call; the three meta keys are
    # popped so the remainder can be splatted straight through.
    fil = job.pop('file')
    # Module name = file basename minus its extension (assumes a 3-char
    # extension such as ".py").
    name = os.path.basename(fil)[:-3]
    module = imp.load_source(name, fil)
    job['func'] = getattr(module, job['func'])
    job_trigger = job.pop('trigger')
    if job_trigger == 'interval':
        s.add_interval_job(**job)
    elif job_trigger == 'date':
        s.add_date_job(**job)
    elif job_trigger == 'cron':
        s.add_cron_job(**job)
    else:
        raise ValueError('Unknown job type')
# Optional web/bottle sections configure the monitoring front-end.
web_conf = conf.get('web', {})
bottle_conf = conf.get('bottle', None)
start(s, bottle_conf=bottle_conf, **web_conf)
class TestOfflineScheduler(object):
    """Tests for Scheduler behavior while it is NOT running (offline)."""

    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        # Adding a job store under an already-used alias must raise KeyError.
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')

    def test_add_tentative_job(self):
        # Jobs added before start() are accepted but stay pending:
        # a Job object is returned, yet get_jobs() is still empty.
        job = self.scheduler.add_date_job(lambda: None, datetime(2200, 7, 24),
                                          jobstore='dummy')
        assert isinstance(job, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_configure_jobstore(self):
        # A job store declared via configure() must be resolvable by alias.
        conf = {
            'apscheduler.jobstore.ramstore.class':
                'apscheduler.jobstores.ram_store:RAMJobStore'
        }
        self.scheduler.configure(conf)
        self.scheduler.remove_jobstore('ramstore')

    def test_shutdown_offline(self):
        # Shutting down a never-started scheduler must not raise.
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        # Options WITHOUT the 'apscheduler.' prefix are ignored by
        # configure(); the defaults (grace time 1, daemonic True) remain.
        global_options = {'misfire_grace_time': '2', 'daemonic': 'false'}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        # Properly prefixed options are applied.
        global_options = {
            'apscheduler.misfire_grace_time': 2,
            'apscheduler.daemonic': False
        }
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        # Listeners receive every notified event until removed, after which
        # further events are no longer delivered.
        val = []
        self.scheduler.add_listener(val.append)
        event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 1)
        eq_(val[0], event)
        event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)
        eq_(val[1], event)
        self.scheduler.remove_listener(val.append)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)

    def test_pending_jobs(self):
        # Tests that pending jobs are properly added to the jobs list when
        # the scheduler is started (and not before!)
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])
        self.scheduler.start()
        jobs = self.scheduler.get_jobs()
        eq_(len(jobs), 1)
from datetime import date from apscheduler.scheduler import Scheduler # 启动Scheduler sched = Scheduler() sched.start() # 定义被自动调度的函数 def my_job(text): print text # 定义任务的执行时间(2013年5月17日) exec_date = date(2013, 5, 17) # 加入到任务队列,并将其赋值给变量以方便取消等操作 job = sched.add_date_job(my_job, exec_date, ['text']) 2.周期任务: def job_function(): print "Hello World" # job_function将会每两小时执行一次 sched.add_interval_job(job_function, hours=2) # 与上面的任务相同,不过规定在2013-5-17 18:30之后才开始运行 sched.add_interval_job(job_function, hours=2, start_date='2013-5-17 18:30') 装饰器版本: @sched.interval_schedule(hours=2) def job_function():
class SLAAlarmingEngine():
    # Engine that reacts to SLA alarms: resolves the affected resources and
    # contracts, records a failure track, creates Action records and hands
    # them to the handler-plugin manager (immediately or via a delayed job).

    def __init__(self):
        self.sched = Scheduler()
        self.sched.start()

    def _record_action(self, name, data, request_id, target_resource,
                       project_id=None):
        # Create and persist a single Action; failures are logged and
        # swallowed, returning None so callers can skip the broken record.
        try:
            act = Action.from_data(name=name, data=data,
                                   request_id=request_id,
                                   target_resource=target_resource,
                                   project_id=project_id)
            act.create()
            return act
        except Exception as e:
            LOG.exception(e)
            return None

    def _process_resource_alarm(self, ctx, alarm, contract, source,
                                resource_id=None, project_id=None):
        """ For ceilometer it can be tenant based. For other alarms
        if it's tenant based it must specify the resource_id in the
        query"""
        # NOTE: `contract` arrives as a sequence; only the first entry is used.
        contract = contract[0]
        resources = []
        project = contract.project_id or project_id  # for notification alarms.
        # Resource resolution priority: explicit resource_id, then the
        # contract's resource, then (ceilometer only) whatever the alarm's
        # statistics say was affected during its evaluation window.
        if resource_id:
            resources = [resource_id]
        elif contract.resource_id:
            resources = [contract.resource_id]
        if (not resources and isinstance(alarm, cel_alarms.CeilometerAlarm)):
            time_frame = (alarm.period * alarm.evaluation_period)
            resources = alarm.affected_resources(
                period=alarm.period,
                delta_seconds=time_frame,
                result_process=filters.FormatResources)
        affected_contracts = [{'name': contract.name, 'id': contract.id}]
        id_alarm = alarm.alarm_id or alarm.alarm_track_id
        # The failure is tracked even when no resources were found.
        failure_id = self._track_failure(id_alarm, resources,
                                         contract_names=affected_contracts)
        if not resources:
            LOG.warning('No resources found on ResourceAlarm %s'
                        % alarm.alarm_id)
            return ""
        actions = []
        for x in resources:
            record = self._record_action(name=contract.action,
                                         data=contract.action_options,
                                         project_id=project,
                                         request_id=failure_id,
                                         target_resource=x)
            if record:
                actions.append(record)
        if actions:
            handler_manager().start_plugins_group(ctx, actions)
        # WARN: we may want to change state alarm now if it's tenant
        # scoped, so it get repeated?

    def _get_alarm_resources(self, alarm, resource_id):
        # Resolve which resources an alarm refers to: either the explicit
        # resource_id, or the alarm's affected resources over its evaluation
        # window; recently-processed resources are filtered out via the cache.
        if not resource_id:
            time_frame = (alarm.period * alarm.evaluation_period)
            # we use the_process_host_down_alarm cache to avoid hosts
            # processed in the last time
            # can be dne with filter periods, but we may loose
            # hosts that we failed to process for other reaons
            resources = alarm.affected_resources(
                period=alarm.period,
                delta_seconds=time_frame,
                result_process=filters.FormatResources)
        else:
            resources = [resource_id]
        if resources:
            #penalize if already in cache
            resources = [
                x for x in resources
                if not utils.get_cache_value(x, penalize=True)
            ]
            LOG.debug('Resources after cache check %s' % str(resources))
            #resources = ['ubuntu-SVT13125CLS']
        if not resources:
            LOG.warning('no affected resources associated to the alarm '
                        'in time frame seconds: %s' % time_frame)
        return resources

    def _get_actions_for_contracts(self, failure_id, vms_by_tenant, contracts):
        # Build Action records for each affected VM: project-specific
        # contracts claim their tenant's VMs first, then a generic
        # (project-less) contract, if any, covers all remaining tenants.
        # specific contracts
        # TODO: ActionData should be sent tr rpc and workers splitted
        spec_contract_actions = {}
        generic_contract = False
        for x in contracts:
            if x.project_id:
                spec_contract_actions[x.project_id] = (x.action,
                                                       x.action_options)
            else:
                generic_contract = (x.action, x.action_options)
        actions = []
        for prj, action in spec_contract_actions.iteritems():
            vms = [x for x in vms_by_tenant.get(prj, [])]
            for vm in vms:
                record = self._record_action(name=action[0], data=action[1],
                                             request_id=failure_id,
                                             target_resource=vm['id'])
                if record:
                    actions.append(record)
            # Remove the tenant so the generic pass below doesn't double-act.
            vms_by_tenant.pop(prj, None)
        # may need refactor, need to process twice
        if generic_contract:
            for prj, vms in vms_by_tenant.iteritems():
                for vm in vms:
                    record = self._record_action(name=generic_contract[0],
                                                 data=generic_contract[1],
                                                 request_id=failure_id,
                                                 target_resource=vm['id'])
                    if record:
                        actions.append(record)
        return actions

    def _process_vm_error_alarm(self, ctx, alarm, contracts, source,
                                resource_id=None):
        """Special alarm ceilometer based.

        It can be triggered by external systems too if resource_id is
        included in the query.
        """
        resources = self._get_alarm_resources(alarm, resource_id)
        if not resources:
            return
        affected_contracts = [{'name': x.name, 'id': x.id} for x in contracts]
        failure_id = self._track_failure(alarm.alarm_id, resources,
                                         contract_names=affected_contracts)
        client = utils.get_nova_client(ctx)
        vms_by_tenant = {}
        try:
            # any particular state? Running only?
            vms_by_tenant.update(utils.get_nova_vms(client, vms_id=resources))
        except Exception as e:
            LOG.exception(e)
            return
        # MAX_VM_DOWNTIME contracts act later (delayed job); all others act now.
        not_scheduled_contracts = []
        for contract in contracts:
            if contract.type == 'MAX_VM_DOWNTIME':
                self.prepare_scheduled_actions(ctx, contract, failure_id,
                                               vms_by_tenant)
            else:
                not_scheduled_contracts.append(contract)
        actions = self._get_actions_for_contracts(failure_id, vms_by_tenant,
                                                  not_scheduled_contracts)
        # Mark resources as processed so they are skipped on the next alarm.
        for x in resources:
            utils.set_cache_value(x)
        if actions:
            handler_manager().start_plugins_group(ctx, actions, block=True)

    def prepare_scheduled_actions(self, ctx, contract, failure_id,
                                  vms_by_tenant):
        # Defer the contract's actions by `contract.value` seconds using a
        # one-shot scheduler job (the allowed downtime window).
        if not isinstance(contract.value, int):
            raise ValueError('Contract value has to be int when the type is '
                             'MAX_VM_DOWNTIME')
        actions = self._get_actions_for_contracts(failure_id, vms_by_tenant,
                                                  (contract, ))
        if actions:
            self.sched.add_date_job(handler_manager().start_plugins_group,
                                    datetime.now() +
                                    timedelta(seconds=contract.value),
                                    args=[ctx, actions],
                                    kwargs=dict(block=True))

    def _process_host_down_alarm(self, ctx, alarm, contracts, source,
                                 resource_id=None):
        """Special alarm ceilometer based.

        It can be triggered by external systems too if resource_id is
        included in the query.
        """
        resources = self._get_alarm_resources(alarm, resource_id)
        if not resources:
            return
        affected_contracts = [{'name': x.name, 'id': x.id} for x in contracts]
        failure_id = self._track_failure(alarm.alarm_id, resources,
                                         contract_names=affected_contracts)
        client = utils.get_nova_client(ctx)
        vms_by_tenant = {}
        # WARN; if fails and filtered by statistics we may never act
        # again on host we think we did...
        for host in resources:
            try:
                # any particular state? Running only?
                vms_by_tenant.update(utils.get_nova_vms(client, host=host))
            except Exception as e:
                LOG.exception(e)
                continue
        actions = self._get_actions_for_contracts(failure_id, vms_by_tenant,
                                                  contracts)
        for x in resources:
            utils.set_cache_value(x)
        if actions:
            handler_manager().start_plugins_group(ctx, actions, block=True)

    def alert(self, ctx, alarm_id, source=None, contract_id=None,
              resource_id=None, project_id=None):
        # Entry point: resolve the alarm and its contracts, then dispatch to
        # the type-specific handler (host down / VM error / generic resource).
        if contract_id:
            alarm = alarm_manager.get_by_contract_id(ctx, contract_id)
            contract_ids = [contract_id]
        else:
            alarm = alarm_manager.get_by_id(ctx, alarm_id)
            contract_ids = AlarmTrack.get_contracts_by_alarm_id(alarm_id)
        contracts = []
        for x in contract_ids:
            try:
                contracts.append(sla_contract.get_by_contract_id(x))
            except exc.NotFoundException:
                pass
        if not contracts:
            raise exc.NotFoundException('No contracts or alarms found')
        if alarm.type == SLA_TYPES['HOST_DOWN']['alarm']:
            return self._process_host_down_alarm(ctx, alarm, contracts,
                                                 source,
                                                 resource_id=resource_id)
        elif alarm.type == SLA_TYPES['VM_ERROR']['alarm']:
            return self._process_vm_error_alarm(ctx, alarm, contracts,
                                                source,
                                                resource_id=resource_id)
        else:
            return self._process_resource_alarm(ctx, alarm, contracts,
                                                source,
                                                resource_id=resource_id,
                                                project_id=project_id)

    def _track_failure(self, alarm_id, data, contract_names=None):
        # Persist a failure_track row and return its id; `data` and
        # `contract_names` are stored as JSON.
        failure = failure_track()
        failure.alarm_id = alarm_id
        if data:
            failure.data = jsonutils.dumps(data)
        if contract_names:
            failure.contract_names = jsonutils.dumps(contract_names)
        failure.create()
        return failure.id

    @classmethod
    def track_failure_get_all(cls, start_date=None, end_date=None):
        # Return all tracked failures in the optional date range as dicts.
        failures = [
            failure.to_dict()
            for failure in failure_track.get_all(start_date, end_date)
        ]
        return failures

    def track_failure_create(self, ctx, track_failure_dict):
        # NOTE(review): argument order looks wrong -- _track_failure expects
        # (alarm_id, data, contract_names=...), but 'created_at' is passed in
        # the alarm_id position and 'data' as contract_names. Confirm intent.
        self._track_failure(track_failure_dict['created_at'],
                            track_failure_dict['alarm_id'],
                            track_failure_dict['data'])
class GPCAlgGlobFSM(): def __init__(self, debugFlag=False, configFile = None): #SM FSM specific initialization self._fsm = GPCAlgGlobProc_sm(self) self._fsm.setDebugFlag(debugFlag) self.MPCAlgo = {'Active':None} self.logger = logging.getLogger("GPCAlgGlobProc") tmpLogger = logging.getLoggerClass() logging.setLoggerClass(sm_Logger) self._fsm.setDebugStream(logging.getLogger("GPCAlgGlobProc.fsm")) logging.setLoggerClass(tmpLogger) self.eventDeque = deque() #APScheduler self.sched = Scheduler() self.configFile = configFile def __del__(self): self.sched.shutdown(wait=False) def start(self): self._fsm.enterStartState() self.sched.start() def initInit(self): self.doInitMemory = {"Count":0, "S0_tUpdate":{'State':None}, "MPCData":{'State':None}, "Config":{'State':None}} def doInit(self): mem = self.doInitMemory mem["Count"] += 1 # Get the GPC Config file if mem["Config"]['State'] != 'Done': conf = mem["Config"] try: if self.configJob.isConfigRead(): conf['State'] = 'Done' except AttributeError: conf['State'] = 'Running' # Get the main "S0_tUpdate" from the SCADA system. if mem["S0_tUpdate"]['State'] == None \ and self.configJob.isConfigRead(): AlgConfVars = AlgData_OPC(variables=["S0_tUpdate",], opcserver = self.config["Tree"]["Global"]["OPCServer"]) AlgConfVars.logger = self.logger mem["S0_tUpdate"]["Data"] = AlgConfVars mem["S0_tUpdate"]['State'] = 'Running' if mem["S0_tUpdate"]['State'] in ['Running']: S0 = mem["S0_tUpdate"] for i in xrange(3): sleep(0.2) S0['Data'].readOPC() if S0['Data'].opcVarsDict["S0_tUpdate"].value not in [None, 0]: self.S0_tUpdate = S0['Data'].opcVarsDict["S0_tUpdate"].value S0['State'] = 'Done' break else: # only if the for loop is not stopped with break (no usable value found) if S0['Data'].opcVarsDict["S0_tUpdate"].isProblem(): S0['Data'].opcVarsDict["S0_tUpdate"]._reset() S0['State'] = 'Running' # Initialize the MPC OPC data objects. 
if mem["MPCData"]['State'] == None \ and self.configJob.isConfigRead(): # Basic dynamic System variables needed for MPC as input variables self.MPCData = AlgData_OPC(opcserver = self.config["Tree"]["Global"]["OPCServer"]) self.MPCData.logger = self.logger # All MPC related output variables Variables = {'OPC_Group':'MPCOutVariables' } Variables.update(GPC_OutVars) self.MPCOutData = AlgData_OPC(opcserver = self.config["Tree"]["Global"]["OPCServer"], variables = Variables) self.MPCOutData.logger = self.logger self.MPCOutData.readOPC() # Need to read this variable ones because otherwise it will not be usable for writing. # All GPC/MPC State related variables Variables = {'OPC_Group':'GPCStateVariables' } Variables.update(GPC_StateVars) self.MPCStateData = AlgData_OPC(opcserver = self.config["Tree"]["Global"]["OPCServer"], variables = Variables) self.MPCStateData.logger = self.logger mem["MPCData"]['State'] = 'Done' def doInitRTrigParam(self): self.RTrig = ReadTrigger( S0_tUpdate=self.S0_tUpdate, opcserver=self.config["Tree"]["Global"]["OPCServer"], test=True) self.RTrig.setLogger(self.logger) self.RTrig.CTimeperiod = self.config["Tree"]["MPC_Opti"]["ControlTimeperiod"] def doUpdateRTrigParam(self,dt): """Update trigger detection parameter to get a more precise identification of the positive slope instance.""" self.RTrig.updateTrigParam() self.logger.debug( "Trigger: lastDT=%s,DT=%s" % (self.RTrig.lastDT,self.RTrig.DT) ) def doUpdateConfig(self,conf): self.config = dict(zip(("Tree","Valid"),conf)) MPCmode = self.config["Tree"]["MPC"]["mode"] if isinstance(self.MPCAlgo['Active'], MPCAlgos.__dict__[MPCmode]): self.MPCAlgo['Active'].updateConf(self.config['Tree']["MPC_"+MPCmode]) def doRTrigInit(self): self.RTrig.jobRuns = 0 DT = self.RTrig.DT NextRT = self.RTrig.getNextRT().replace(tzinfo=None) max_runs = self.RTrig.getMaxRuns() #Debug-GSc: test max_runs = 4 self.RTrig.job = self.sched.add_interval_job(self.jobRTrig, seconds = DT, start_date = NextRT, max_runs = max_runs, 
name = "ReadTrigger-Job") self.doRTrigMemory = {"Count":0, "TrigDone":False} def doRTrigStop(self): if self.RTrig.job.compute_next_run_time(datetime.now()): # only un-schedule if the job is still scheduled otherwise scheduler error self.sched.unschedule_job(self.RTrig.job) self.RTrig.job = None def doWTrigInit(self): #for debug reasons: OPC-values are sometimes lost. self.logger.debug( "Log QSoll before trigger:" ) self.MPCOutData.readOPC() if self.isNoOPCWriteTrigger(): self.logger.debug( "WriteTrigger is asked not to be set." ) else: self.WTrig = WriteTrigger(S0_tUpdate=self.S0_tUpdate) self.WTrig.setLogger(self.logger) self.WTrig.job = self.sched.add_interval_job(self.jobWTrig, seconds = self.WTrig.DT, start_date = datetime.now() + timedelta(seconds=0.5), max_runs = 2, name = "WriteTrigger-Job") #GSc-ToDo: start a job that sets and resets the trigger # use self.WTrig.process() # Should be called a maximum of 2x self.WTrig.maxRuns # but only until self.WTrig.state is in ('Reset' or some "Error") # "sched" seems not to be best as Setting process can take several runs (DT 1s) # and reseting the same but in between 10%S0_tUpdate needs to be waited. def doReadOPC(self): #GSc-ToDo: rework this first level checking. 
Here only completely infeasible situations should lead to "VarsError" #Get first state related information and check it evStr = self.MPCStateData.readOPC() if evStr == None: evStr = self.checkMPCData(self.MPCStateData) #If OK Get the MPC "In" information and check it if evStr == "VarsOK": evStr = self.MPCData.readOPC() if evStr == None: evStr = self.checkMPCData(self.MPCData) if isinstance(evStr, (list,tuple)): evt = dict(zip(("Type","Data"),evStr)) else: evt = {"Type":evStr} self.eventDeque.append(evt) def doUpdateParam(self): #Only update here is GPC is OPC triggered if not self.RTrig.TrigOPC: return #Do update only if changed S0_tUpdate = self.MPCStateData.opcVarsDict["S0.S0_tUpdate"] S0_tUpdateDiff = S0_tUpdate.getDiff() if S0_tUpdateDiff != None and S0_tUpdateDiff.Diff[0] != 0: self.S0_tUpdate = S0_tUpdate.value self.RTrig.updateTrigParam(S0_tUpdate=self.S0_tUpdate) def doWriteOPCInit(self): #GSc-ToDo: Init write process self.logger.debug( "Init writeOPCVars process" ) self.doWriteOPCMemory = {"Count":0,} def doWriteOPC(self): opcResult = self.MPCOutData.writeOPC(allStored=True, toOPC=True) if opcResult in [True, None, []]: self.eventHandler({"Type":"OPCWriteError", "Data":"writeOPC returns: %s" % (opcResult)}) return nbrW = len(opcResult) tfSuccess = [ri[1] == "Success" for ri in opcResult] if not all(tfSuccess): nbrErr = nbrW - sum(tfSuccess) if self.doWriteOPCMemory["Count"] > 3: self.logger.debug( "doWriteOPC after (%s) tries still %s un-successful opc-writeouts\n -> give-up" % \ (self.doWriteOPCMemory["Count"],nbrErr)) self.eventHandler({"Type":"OPCWriteError", "Data":"writeOPC returns: %s" % (opcResult)}) else: self.logger.debug( "doWriteOPC (%s): %s un-successful opc-writeouts" % \ (self.doWriteOPCMemory["Count"],nbrErr)) sleep(0.5) else: self.logger.debug( "doWriteOPC (%s): ends successful" % \ (self.doWriteOPCMemory["Count"],) ) self.doWriteOPCMemory["Count"] += 1 def doCheckSysStates(self): #ToDo-GSc: check the on/off states of the GPC MPCSimu = 
self.isMPCSimu() if MPCSimu and self.getMPCSimuMode() in ['OPCReadOnly',]: self.eventDeque.append({"Type":"MPCInactif","Data":"OPCReadOnly Mode specified"}) return # Check the life states of all configured actors # SysGPCState = getSysGPCState(self.MPCStateData.opcVarsDict)# Old Life/Autonom based approach SysGPCState = getSysGPCState_StMo(self.MPCStateData.opcVarsDict) if getattr(self, "SysGPCState", None): UpdatedBState = dict([(si,Statei) for si, Statei in SysGPCState.iteritems() if Statei != self.SysGPCState[si]]) else: UpdatedBState = {} self.SysGPCState = SysGPCState #check the life states of actor S0 (SCADA system) if self.SysGPCState['S0'] == 'offline': self.eventHandler({"Type":"MPCImpossible","Data":"S0 is %s" % (self.SysGPCState['S0'],)}) return elif self.SysGPCState['S0'] == 'maintenance': self.eventDeque.append({"Type":"MPCInactif","Data":"S0 Station is in maintenance"}) return if all([zi in ['offline','maintenance'] for si,zi in self.SysGPCState.iteritems() if si not in ['S0','S99']]): self.eventDeque.append({"Type":"MPCInactif","Data":"There is NO Station controllable"}) return else: BModeUdate = getSysBModeUpdate(self.MPCStateData.opcVarsDict) UpdatedBMode = dict([(si,bmi['Mode']) for si, bmi in BModeUdate.iteritems() if bmi['Update']]) if self.MPCAlgo['Active'] != None and (UpdatedBState !={} or UpdatedBMode != {}): updateStruct = False algo = self.MPCAlgo['Active'] if UpdatedBMode != {}: res = algo.readBConfig(toUpdate=UpdatedBMode) #ToDo: check the return value (True False) if network configuration is usable. 
            # --- fragment: tail of the enclosing state-check method (doCheckSysStates);
            # its `def` line and the `if` matching the `else:` below lie above this view.
            # NOTE(review): indentation of this fragment is reconstructed — confirm
            # against the original file.
            if res != True:
                interItem = {"Type": "MPCImpossible",
                             "Data": "Error in reading the current basin configuration: %s" % (res,)}
                self.eventHandler(interItem)
                return
            updateStruct = True
            MPCmode = self.config["Tree"]["MPC"]["mode"]
            C_Switch = algo.updateBasinConf(self.SysGPCState, updateStruct=updateStruct)
            self.handleStateSwitch(C_Switch)
        else:
            try:
                MPCmode = self.config["Tree"]["MPC"]["mode"]
                try:
                    # Instantiate the algo class named by MPCmode only if the active
                    # algo is not already an instance of it.
                    if not isinstance(self.MPCAlgo['Active'], MPCAlgos.__dict__[MPCmode]):
                        algo = MPCAlgos.__dict__[MPCmode](self.config['Tree']["MPC_" + MPCmode],
                                                          sysVars=self.MPCData.opcVarsDict,
                                                          stateVars=self.MPCStateData.opcVarsDict,
                                                          outVars=self.MPCOutData.opcVarsDict)
                        C_Switch = algo.updateBasinConf(self.SysGPCState)
                        self.handleStateSwitch(C_Switch)
                        self.MPCAlgo['Active'] = algo
                except KeyError as e:
                    # MPCmode does not name a class in MPCAlgos.
                    interItem = {"Type": "MPCImpossible",
                                 "Data": "MPC-mode related class is missing. %s" % (e,)}
                    self.eventHandler(interItem)
                    return
                except BaseException as e:
                    interItem = {"Type": "MPCImpossible",
                                 "Data": "Error during instantiation of the algo class: %s" % (e,)}
                    self.eventHandler(interItem)
                    return
            except KeyError as e:
                # config["Tree"]["MPC"]["mode"] missing.
                interItem = {"Type": "MPCImpossible",
                             "Data": "Error getting MPC-Mode specification: %s" % e}
                self.eventHandler(interItem)
                return
        #ToDo: Handle possible other control approaches that will run only as off-line control
        self.MPCAlgo['Inactive'] = []
        try:
            for im in self.config["Tree"]["MPC"]["inactiveModes"]:
                pass
        except:
            pass
        #ToDo-GSc: integrate the AlgInernalSysFSM (S4, ...)
        # - init it in doInit
        # - process it here using the self.MPCData.S4_BZ
        self.logger.debug("doCheckSysStates(): SysStates: %s; SysModes: %s"
                          % (self.SysGPCState, BModeUdate))
        #Check the SysGPCState again here as it may have changed due to Switching.
        #If here a basin is in controllable this means it is not controlled by GPC in this cycle.
        # S0/S99 are excluded: presumably pseudo-stations — TODO confirm.
        if all([zi in ['offline', 'maintenance', 'controllable']
                for si, zi in self.SysGPCState.iteritems() if si not in ['S0', 'S99']]):
            self.eventDeque.append({"Type": "MPCInactif",
                                    "Data": "There is NO Station configured for GPC control"})
        else:
            self.eventDeque.append({"Type": "MPCActive"})

    def doRunMPC(self):
        """Run one cycle of the currently active MPC algorithm; queue MPCDone
        on success, dispatch MPCImpossible on any error."""
        algo = self.MPCAlgo['Active']
        #run MPC
        try:
            algo.run(self.MPCData.opcVarsDict,
                     stateVars=self.MPCStateData.opcVarsDict,
                     outVars=self.MPCOutData.opcVarsDict)
            self.eventDeque.append({"Type": "MPCDone",})
        except BaseException as e:
            interItem = {"Type": "MPCImpossible", "Data": "General MPC-Error: %s" % e}
            self.eventHandler(interItem)
            return

    def doLogMPCResults(self):
        """Log the pending write-values of all '*QSoll' output variables."""
        res = [vi.wvalue for ki, vi in self.MPCOutData.opcVarsDict.iteritems()
               if ki.endswith('QSoll') and vi.isWReady()]
        self.logger.debug("MPC Results: %s" % (res,))

    def doResetWriteVars(self):
        """Reset every out/state variable that still has a pending write."""
        # Py2: list concatenation of dict.items() from both var dicts.
        for ki, vi in self.MPCOutData.opcVarsDict.items() + self.MPCStateData.opcVarsDict.items():
            if vi.isWReady():
                vi._reset()

    def doWarning(self, msg):
        # Placeholder: warnings are currently ignored.
        pass

    def doSetGPCOffline(self):
        """Enter Offline mode; in simulation, schedule an automatic Reset job."""
        self.logger.debug("""====== GPC is Offline ======
The GPC: is now in Offline mode. Only a 'Reset'-Event or a complete GSP-restart are possible in this System state.
============================""")
        if self.isMPCSimu():
            try:
                DT = self.S0_tUpdate - 2*self.RTrig.gitter
                DT -= self.S0_tUpdate / self.RTrig.TrigSizePct # This is the sleep time in GPCOffline mode.
            except:
                DT = 900
            #Debug-GSc: test — NOTE(review): this unconditionally overrides DT above.
            DT = 40
            self.sched.add_date_job(self.jobReset,
                                    date=datetime.now() + timedelta(seconds=DT),
                                    name="Reset-Job")
            # NOTE(review): the message says "at %s" but DT is a delay in seconds.
            self.logger.debug("""====!!  GPC auto-Reset !!====
The GPC: will be automatically reset at %s
============================""" % (DT,))

    def isInitDone(self):
        """True when every tracked init step has State == 'Done'."""
        #Check all doInitMemory entries for their "State" status
        state = [si['State'] == 'Done' for si in self.doInitMemory.itervalues()
                 if isinstance(si, dict) and si.has_key('State')]
        return all(state)

    def isNotSync(self):
        return not self.RTrig.isSync()

    def isNoOPCWrite(self):
        """True when in simulation with a mode that suppresses OPC writes."""
        if not self.isMPCSimu():
            return False
        elif self.getMPCSimuMode() in [None, 'NoOPCWrite']:
            return True
        return False

    def isNoOPCWriteTrigger(self):
        """True when in simulation with a mode that suppresses write triggers."""
        if not self.isMPCSimu():
            return False
        elif self.getMPCSimuMode() in [None, 'NoOPCWrite', 'NoOPCWriteTrigger']:
            return True
        return False

    def isMPCSimu(self):
        """True when config enables simulation; missing config defaults to True."""
        try:
            MPCSimu = self.config['Tree']['MPC']['simu']
        except:
            MPCSimu = True
        if MPCSimu:
            return True
        return False

    def isOPCWriteOK(self):
        if self.MPCOutData.isWAllIdle():
            self.logger.debug("isOPCWriteOK == True")
            return True
        self.logger.debug("isOPCWriteOK == False")
        return False

    def isOPCWriteError(self):
        if self.MPCOutData.isWAnyProblem():
            self.logger.debug("isOPCWriteError == True")
            return True
        self.logger.debug("isOPCWriteError == False")
        return False

    def logIgnored(self):
        self._fsm.getDebugStream().write("The latest asked transition was ignored by the StateMashine.")

    def jobRTrig(self):
        """Read-trigger job: record TrigOK/TrigError in doRTrigMemory."""
        j = self.RTrig.job
        self.doRTrigMemory['Count'] += 1
        if self.RTrig.getRTrigJob():
            self.doRTrigMemory['TrigDone'] = True
            self.doRTrigMemory['Event'] = {"Type": "TrigOK", "Data": self.RTrig.lastT}
        else:
            # No trigger yet — TrigError only if the job will never fire again.
            if not j.compute_next_run_time(datetime.now()):
                self.doRTrigMemory['TrigDone'] = True
                self.doRTrigMemory['Event'] = {"Type": "TrigError",
                                               "Data": "%s: no next fire time scheduled" % j.name}

    def jobWTrig(self):
        """Write-trigger job: drive the WTrig state machine one state forward."""
        if self.WTrig.isInProcess():
            cSatate = self.WTrig.state
            while self.WTrig.state == cSatate:
                self.WTrig.process()
                if self.WTrig.state == cSatate:
                    sleep(1)
        #for debug reasons: OPC-values are sometimes lost.
        if not self.WTrig.isInProcess():
            self.logger.debug("Log QSoll after trigger:")
            self.MPCOutData.readOPC()
        if self.WTrig.isJobAlife() and not self.WTrig.isInProcess():
            self.sched.unschedule_job(self.WTrig.job)
            self.WTrig.job = None
            self.logger.debug("jobWTrigError: job unscheduled due to probable Error WTrig process")
            #GSC-ToDo: this is not a correct solution because it can leave the system in an incoherent state.

    def jobReset(self):
        """Inject a Reset event into the FSM."""
        interItem = {"Type": "Reset"}
        self.eventHandler(interItem)

    def checkInitSleep(self):
        """True on every third init attempt (used to back off between retries)."""
        if self.isInitDone():
            return False
        elif self.doInitMemory["Count"] == 0:
            return False
        elif self.doInitMemory["Count"] % 3 == 0:
            return True
        else:
            return False

    def checkMPCData(self, MPCData):
        """Return 'VarsOK' or a ('VarsError', msg) tuple for the first bad var."""
        #ToDo: Check if all variables of stations that are not "offline" are usable.
        for k, v in MPCData.opcVarsDict.iteritems():
            if not v.usable:
                return ("VarsError", "%s: is not usable" % (k,))
        return "VarsOK"

    def getMPCSimuMode(self):
        """Return config simuMode, or None when absent."""
        try:
            MPCSimuMode = self.config['Tree']['MPC']['simuMode']
        except:
            return None
        return MPCSimuMode

    def getFSMState(self):
        """Return {'Trans','State','Msg'} describing the current FSM state."""
        if not self._fsm.isInTransition():
            cState = self._fsm.getState().getName()
            FSMState = "%s" % (cState,)
            ret = {'Trans': None, 'State': cState, 'Msg': FSMState}
        else:
            trans = self._fsm.getTransition()
            pState = self._fsm.getPreviousState().getName()
            FSMState = "In Transition: %s from %s" % (trans, pState)
            ret = {'Trans': trans, 'State': pState, 'Msg': FSMState}
        return ret

    def handleStateSwitch(self, C_Switch):
        """Handles the switch from Mode 1 (C-abl) -> 2 (C) or 2 (C) -> 1 (C-abl)

        Writes the new SteuerModus (6 = controlled, 5 = controllable) for each
        switched station and, unless writes are suppressed, pushes them to OPC.
        """
        for sti in C_Switch.get('C-abl -> C', []):
            vi = "%s.%s_SteuerModus" % (sti, sti)
            self.MPCStateData.opcVarsDict[vi].setWriteValue(6)
            self.SysGPCState[sti] = 'controlled'
        for sti in C_Switch.get('C -> C-abl', []):
            vi = "%s.%s_SteuerModus" % (sti, sti)
            self.MPCStateData.opcVarsDict[vi].setWriteValue(5)
            self.SysGPCState[sti] = 'controllable'
        if not self.isNoOPCWrite() and len(C_Switch) > 0:
            opcResult = self.MPCStateData.writeOPC(allStored=True, toOPC=True)
            # NOTE(review): True/None/[] are treated as error returns here —
            # confirm writeOPC's contract.
            if opcResult in [True, None, []]:
                self.logger.debug("Basin StateSwitch error: writeOPC returns: %s" % (opcResult))
            else:
                nbrW = len(opcResult)
                tfSuccess = [ri[1] == "Success" for ri in opcResult]
                if not all(tfSuccess):
                    nbrErr = nbrW - sum(tfSuccess)
                    self.logger.debug("Basin StateSwitch error: writeOPC %s un-successful opc-writeouts" % \
                                      (nbrErr,))
                else:
                    self.logger.debug("Basin StateSwitch: %s; writeOPC: %s" % (C_Switch, opcResult,))
        elif self.isNoOPCWrite() and len(C_Switch) > 0:
            self.logger.debug("Basin StateSwitch: %s but NoOPCWrite is active" % (C_Switch,))

    def eventHandler(self, evt):
        """Translate an event dict ({'Type': ..., 'Data': ...}) into the
        corresponding FSM transition call; unknown types raise ValueError."""
        evtStr = evt["Type"]
        if evt.has_key("Data"):
            evtStr = ';'.join((evtStr, str(evt['Data'])))
        self._fsm.getDebugStream().write("#%s (%s)\n" % (evtStr, datetime.now()))
        if evt['Type'] == "DoInit":
            self._fsm.InitDone()
        elif evt['Type'] == "InitError":
            self._fsm.InitError(evt['Data'])
        elif evt['Type'] == "TrigOK":
            self._fsm.TrigOK(evt['Data'])
        elif evt['Type'] == "TrigError":
            self._fsm.TrigError(evt['Data'])
        elif evt['Type'] == "VarsOK":
            self._fsm.VarsOK()
        elif evt['Type'] == "VarsError":
            self._fsm.VarsError()
        elif evt['Type'] == "MPCActive":
            self._fsm.MPCActive()
        elif evt['Type'] == "MPCInactif":
            self._fsm.MPCInactif()
        elif evt['Type'] == "MPCImpossible":
            self._fsm.MPCImpossible(evt['Data'])
        elif evt['Type'] == "MPCDone":
            self._fsm.MPCDone()
        elif evt['Type'] == "OPCWrite":
            self._fsm.OPCWrite(evt.get('Data', None))
        elif evt['Type'] == "Reset":
            self._fsm.Reset()
        elif evt['Type'] == "Stop":
            sys.exit(0)
        else:
            raise ValueError("Unhandled Event type: %s" % evt)
class OutcallAction:
    """A program action that runs a phone talk-show: calls the host, bridges
    the station, and manages community callers via DTMF commands.

    Python 2 / old-style class; uses the legacy APScheduler ``Scheduler`` and
    ``sets.Set``.
    """

    def __init__(self, argument, start_time, duration, is_streamed,
                 warning_time, program, hangup_on_complete):
        # `argument` is the host phone number; it doubles as the key into
        # __available_calls (presumably the last-10-digit normalized form —
        # TODO confirm against the call handler).
        self.__argument = argument
        self.start_time = start_time
        self.duration = duration
        self.__is_streamed = is_streamed
        self.__warning_time = warning_time
        self.program = program
        self.__scheduler = Scheduler()
        self.__available_calls = dict()
        self.__call_handler = self.program.radio_station.call_handler
        self.__phone_status = PhoneStatus.QUEUING
        self.__interested_participants = Set([])
        self.__hangup_on_complete = hangup_on_complete
        self.__in_talkshow_setup = True

    def start(self):
        """Begin the action: call the host and start the internal scheduler."""
        self.program.set_running_action(self)
        self.request_host_call()
        self.__scheduler.start()
        self.__community_call_UUIDs = dict()
        self.__call_handler.register_for_incoming_calls(self)
        self.__call_handler.register_for_incoming_dtmf(self, str(self.__argument))

    def pause(self):
        self.__hold_call()

    def stop(self):
        self.hangup_call()

    def request_host_call(self):
        """Dial the talk-show host."""
        self.__in_talkshow_setup = True
        result = self.__call_handler.call(self, self.__argument, None, None,
                                          self.duration)
        print "result of host call is " + result

    def request_station_call(self):
        #call the number specified thru plivo
        result = self.__call_handler.call(
            self, self.program.radio_station.station.transmitter_phone.number,
            'play', self.__argument, self.duration)
        print "result of host call is " + str(result)

    def notify_call_answered(self, answer_info):
        """Called by the handler when an outgoing call is answered."""
        if self.__argument not in self.__available_calls:
            # First answer: this is the host picking up.
            self.__available_calls[answer_info['Caller-Destination-Number']
                                   [-10:]] = answer_info
            #self.request_station_call()
            self.__inquire_host_readiness()
        else:
            #This notification is from answering the host call
            self.__available_calls[answer_info['Caller-Destination-Number']
                                   [-10:]] = answer_info
            result1 = self.__schedule_warning()
            result2 = self.__schedule_hangup()
            # NOTE(review): indentation reconstructed — this registration may
            # belong outside the else-branch in the original.
            self.__call_handler.register_for_call_hangup(
                self, answer_info['Caller-Destination-Number'][-10:])

    def warn_number(self):
        """Play the near-end-of-show warning to the host."""
        # NOTE(review): `seconds` is computed but never used.
        seconds = self.duration - self.__warning_time
        if self.__argument in self.__available_calls and 'Channel-Call-UUID' in self.__available_calls[
                self.__argument]:
            result = self.__call_handler.play(
                self.__available_calls[self.__argument]['Channel-Call-UUID'],
                '/home/amour/media/call_warning.mp3')
            print "result of warning is " + result

    def __pause_call(self):
        #hangup and schedule to call later
        self.__schedule_host_callback()
        self.hangup_call()

    def notify_call_hangup(self, event_json):
        """Called by the handler when any tracked call hangs up."""
        if 'Caller-Destination-Number' in event_json and event_json[
                'Caller-Destination-Number'] in self.__community_call_UUIDs:
            #a community caller is hanging up
            del self.__community_call_UUIDs[
                event_json['Caller-Destination-Number']]
            self.__call_handler.deregister_for_call_hangup(
                self, event_json['Caller-Destination-Number'])
        else:
            #It is a hangup by the station or the host
            self.hangup_call()  #clean this later

    def __inquire_host_readiness(self):
        # Ask the host (via IVR prompt) whether to go on air.
        self.__call_handler.play(
            self.__available_calls[self.__argument]['Channel-Call-UUID'],
            '/home/amour/media/inquire_host_readiness.mp3')

    def __hold_call(self):
        #put ongoing call on hold — not implemented yet
        print "We should be holding now"

    def hangup_call(self):
        """Hang up every tracked call and clear the registry."""
        for available_call in self.__available_calls:
            self.__call_handler.deregister_for_call_hangup(
                self, available_call)
            self.__call_handler.hangup(
                self.__available_calls[available_call]['Channel-Call-UUID'])
            #del self.__available_calls[available_call]
            #print "result of hangup is " + result
        self.__available_calls = dict()  #empty available calls. they all are hung up

    def notify_incoming_dtmf(self, dtmf_info):
        """DTMF command dispatcher (host keypad):
        1/2 setup, 3 auto-answer toggle, 4 queue toggle, 5 call-back one
        queued participant, 6 hang up community callers, 7 music break."""
        dtmf_json = dtmf_info
        dtmf_digit = dtmf_json["DTMF-Digit"]
        if dtmf_digit == "1" and self.__in_talkshow_setup:
            self.request_station_call()
            self.__in_talkshow_setup = False
        elif dtmf_digit == "2" and self.__in_talkshow_setup:
            #stop the music, put this live on air
            self.hangup_call()
            self.__in_talkshow_setup = False
        elif dtmf_digit == "3":
            #put the station =in auto_answer
            if self.__phone_status != PhoneStatus.ANSWERING:
                self.__phone_status = PhoneStatus.ANSWERING
                self.__call_handler.play(
                    self.__available_calls[
                        self.__argument]['Channel-Call-UUID'],
                    '/home/amour/media/incoming_auto_answer.mp3')
            else:
                self.__phone_status = PhoneStatus.REJECTING
                self.__call_handler.play(
                    self.__available_calls[
                        self.__argument]['Channel-Call-UUID'],
                    '/home/amour/media/incoming_reject.mp3')
        elif dtmf_digit == "4":
            #disable auto answer, reject and record all incoming calls
            if self.__phone_status != PhoneStatus.QUEUING:
                self.__phone_status = PhoneStatus.QUEUING
                self.__call_handler.play(
                    self.__available_calls[
                        self.__argument]['Channel-Call-UUID'],
                    '/home/amour/media/incoming_queued.mp3')
            else:
                self.__phone_status = PhoneStatus.REJECTING
                self.__call_handler.play(
                    self.__available_calls[
                        self.__argument]['Channel-Call-UUID'],
                    '/home/amour/media/incoming_reject.mp3')
        elif dtmf_digit == "5":
            #dequeue and call from queue of calls that were rejected
            # NOTE(review): returns after the first participant — one caller
            # is dialed per key-press.
            for caller in self.__interested_participants:
                result = self.__call_handler.call(self, caller, None, None,
                                                  self.duration)
                print "result of participant call is {0}".format(str(result))
                self.__community_call_UUIDs[caller] = result.split(" ")[-1]
                self.__call_handler.register_for_call_hangup(self, caller)
                self.__interested_participants.discard(caller)
                return
        elif dtmf_digit == "6":
            #terminate the current caller
            for community_call_UUID in self.__community_call_UUIDs:
                self.__call_handler.hangup(
                    self.__community_call_UUIDs[community_call_UUID])
            pass
        elif dtmf_digit == "7":
            #Take a 5 min music break
            self.__pause_call()

    def notify_incoming_call(self, call_info):
        """Handle an incoming community call according to __phone_status."""
        if self.__phone_status == PhoneStatus.ANSWERING:
            #answer the phone call, join it to the conference
            # Only one community caller can be bridged at a time.
            if len(self.__community_call_UUIDs) == 0:
                self.__call_handler.bridge_incoming_call(
                    call_info['Channel-Call-UUID'], self)
                self.__call_handler.register_for_call_hangup(
                    self, call_info['Caller-Destination-Number'])
                self.__community_call_UUIDs[
                    call_info['Caller-Destination-Number']] = call_info[
                        'Channel-Call-UUID']
        elif self.__phone_status == PhoneStatus.QUEUING:
            #Hangup the phone, call back later
            self.__interested_participants.add(call_info['Caller-ANI'])
            self.__call_handler.play(
                self.__available_calls[self.__argument]['Channel-Call-UUID'],
                '/home/amour/media/incoming_new_caller.mp3')
            print self.__interested_participants
            self.__call_handler.hangup(call_info['Channel-Call-UUID'])
        elif self.__phone_status == PhoneStatus.REJECTING:
            #Hangup the call
            self.__call_handler.hangup(call_info['Channel-Call-UUID'])

    def __schedule_host_callback(self):
        # NOTE(review): original comment said "one minutes" but 600 s is 10 min.
        time_delta = timedelta(seconds=600)
        now = datetime.utcnow()
        callback_time = now + time_delta
        #self.__scheduler.add_date_job(getattr(self,'call_host_number'), callback_time)
        self.__scheduler.add_date_job(getattr(self, 'request_host_call'),
                                      callback_time)

    def __schedule_warning(self):
        # Fire warn_number __warning_time seconds from now.
        time_delta = timedelta(seconds=self.__warning_time)
        now = datetime.utcnow()
        warning_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'warn_number'), warning_time)

    def __schedule_hangup(self):
        # Fire hangup_call when the show duration elapses.
        time_delta = timedelta(seconds=self.duration)
        now = datetime.utcnow()
        hangup_time = now + time_delta
        self.__scheduler.add_date_job(getattr(self, 'hangup_call'), hangup_time)
turn_on() else: if state == "on": state = "off" print("Off") turn_off() sleep(1) def loop(): while True: sleep(1000) if on_pi: b = sched.add_date_job(start_up, datetime.now() + timedelta(seconds=2)) else: b = sched.add_date_job(loop, datetime.now() + timedelta(seconds=2)) def event_stream(): temp = tempdata() temp /= 1000 print(temp) yield 'event: temp\n' + 'data: ' + str(temp) + '|' + str(target/1000) + '\n\n' try: @app.route('/my_event_source') def sse_request(): return Response(event_stream(), mimetype='text/event-stream')
class AlarmManager(object):
    """Collects calendar events from plugins, processes them, and schedules
    wake-up alarms on an hourly cycle via APScheduler."""

    def __init__(self, get_plugins, read_bedsensor):
        self.get_plugins = get_plugins
        self.read_bedsensor = read_bedsensor
        self.sched = Scheduler(daemonic=True)
        self.sched.start()
        # Re-collect every hour, and do one collection immediately on startup.
        self.sched.add_interval_job(self.check_events, hours=1)
        self.check_events()

    def check_events(self):
        """Gets called every hour and after an alarm ended to collect,
        process and schedule the next event.
        """
        logging.getLogger(__name__).info("Checking for events.")
        upcoming = [evt for evt in self.collect_events()]
        if not upcoming:
            return
        upcoming.sort()
        now = datetime.now()
        next_collection = now + timedelta(hours=1)
        for evt in upcoming:
            # Skip events whose alarm time has already passed.
            if evt.get_alarm_time() < now:
                continue
            # Let the processor plugins adjust the alarm time.
            self.process_event(evt)
            # Events beyond the next collection window are handled then.
            if evt.get_alarm_time() > next_collection:
                return
            self.schedule_alarm(evt)

    def collect_events(self):
        """Yield events gathered from every ``eventcollector`` plugin."""
        logging.getLogger(__name__).info("Collecting events.")
        for collector in self.get_plugins("eventcollector"):
            for collected in collector.collect():
                yield collected

    def process_event(self, event):
        """Pass *event* through every ``eventprocessor`` plugin and return
        the (possibly replaced) event."""
        for processor in self.get_plugins("eventprocessor"):
            event = processor.process(event)
        return event

    def schedule_alarm(self, event):
        """Schedule begin/end alarm jobs unless the alarm time has passed."""
        if event.get_alarm_time() > datetime.now():
            alarm_time = event.get_alarm_time()
            logging.info("Sheduling alarm for {0}.".format(alarm_time))
            self.sched.add_date_job(self.begin_alarms, alarm_time, [event])
            self.sched.add_date_job(self.end_alarms, event.end_time)

    def bedsensor_changed(self, state):  # pragma: no cover
        """Play alarms while the bed sensor is pressed, pause otherwise."""
        if state == Bedsensor.PRESSED:
            self.play_alarms()
        else:
            self.pause_alarms()

    def begin_alarms(self, event):
        """Start every alarm plugin; play immediately if the bed is occupied."""
        logging.info("Begining alarms.")
        for alarm in self.get_plugins("alarm"):
            alarm.begin(event)
        if self.read_bedsensor() == Bedsensor.PRESSED:
            self.play_alarms()

    def play_alarms(self):
        logging.info("Bedsensor is pressed, playing alarms.")
        for alarm in self.get_plugins("alarm"):
            alarm.play()

    def pause_alarms(self):
        logging.info("Bedsensor is released, pausing alarms.")
        for alarm in self.get_plugins("alarm"):
            alarm.pause()

    def end_alarms(self):
        logging.info("Ending alarms.")
        for alarm in self.get_plugins("alarm"):
            alarm.end()

    def interval_alarms(self):
        for alarm in self.get_plugins("alarm"):
            alarm.interval()

    def test_alarm(self):  # pragma: no cover
        """Schedule a throwaway 30-second event starting in two seconds."""
        demo_event = Event(
            name="Some Event",
            reminder=timedelta(hours=0),
            start_time=datetime.now() + timedelta(seconds=2),
            duration=timedelta(seconds=30))
        self.schedule_alarm(demo_event)
        self.sched.add_date_job(
            self.play_alarms, demo_event.get_alarm_time() + timedelta(seconds=1))
class HMScheduler( Base ):
    '''
    The HMScheduler is used to periodically send messages to HouseMonitor.
    The commands can be anything including:

    # Report status
    # Turn on and off devices.

    You control the scheduler by sending messages to it via pubsub.
    '''

    # The queue that is used to send messages to the rest of the system.
    __input_queue = None
    # The APScheduler instance (created in start()).
    scheduler = None
    # Map of job name -> list of scheduler tokens. Kept as a class attribute
    # for backward compatibility, but shadowed per-instance in __init__ so
    # instances do not share one mutable dict.
    jobs = defaultdict( list )
    previous_datetime = datetime.utcnow()

    def __init__( self, queue ):
        '''
        Initialize the HMScheduler: store the queue and subscribe the
        add_interval / add_cron / add_date / add_one_shot / deleteJob /
        print_jobs handlers to their pubsub topics.

        :param queue: the queue used to transmit DataEnvelopes to the system.
        '''
        super( HMScheduler, self ).__init__()
        self.__input_queue = queue
        # BUG FIX: give each instance its own job registry; the class-level
        # defaultdict was shared by every instance.
        self.jobs = defaultdict( list )
        pub.subscribe( self.add_interval, Constants.TopicNames.SchedulerAddIntervalStep )
        pub.subscribe( self.add_cron, Constants.TopicNames.SchedulerAddCronStep )
        pub.subscribe( self.add_date, Constants.TopicNames.SchedulerAddDateStep )
        pub.subscribe( self.add_one_shot, Constants.TopicNames.SchedulerAddOneShotStep )
        pub.subscribe( self.deleteJob, Constants.TopicNames.SchedulerDeleteJob )
        pub.subscribe( self.print_jobs, Constants.TopicNames.SchedulerPrintJobs )

    @property
    def scheduler_topic_name( self ):
        ''' The topic name to which this routine subscribes. '''
        return Constants.TopicNames.SchedulerStep

    @property
    def logger_name( self ):
        ''' The logger key for this component. '''
        return Constants.LogKeys.Scheduler

    def start( self ):
        ''' Start the Scheduler and register the built-in periodic jobs
        (status check every 10 minutes, uptime and pulse every 5 seconds).

        .. seealso:: http://packages.python.org/APScheduler/#starting-the-scheduler
        '''
        self.logger.debug( 'Scheduler starting' )
        self.scheduler = Scheduler()
        # self.logger.debug( 'Setting jobstore to HouseMonitor.db' )
        # self.scheduler.add_jobstore(ShelveJobStore('HouseMonitor.db'), 'shelve')
        self.scheduler.start()

        name = 'scheduled status check'
        device = 'status'
        port = 'scheduler'
        listeners = [Constants.TopicNames.Statistics,
                     Constants.TopicNames.CurrentValueStep]
        scheduler_id = str( uuid.uuid4() )
        args = name, device, port, listeners, scheduler_id
        self.scheduler.add_interval_job( self.sendCommand, minutes=10, args=args )

        name = 'uptime'
        device = 'HouseMonitor'
        port = 'uptime'
        listeners = [Constants.TopicNames.UpTime,
                     Constants.TopicNames.CurrentValueStep]
        scheduler_id = str( uuid.uuid4() )
        args = name, device, port, listeners, scheduler_id
        self.scheduler.add_interval_job( self.sendCommand, seconds=5, args=args )

        name = 'Pulse'
        device = '0x13a20040902a02'
        port = 'DIO-0'
        listeners = [Constants.TopicNames.StatusPanel_SystemCheck,
                     Constants.TopicNames.ZigBeeOutput]
        scheduler_id = str( uuid.uuid4() )
        args = name, device, port, listeners, scheduler_id
        self.scheduler.add_interval_job( self.sendCommand, seconds=5, args=args )

    def add_interval( self, weeks=0, days=0, hours=0, minutes=0, seconds=0,
                      start_date=None, args=None, kwargs=None ):
        ''' Schedule an interval at which sendCommand will be called.

        .. seealso:: http://packages.python.org/APScheduler/intervalschedule.html

        :param weeks/days/hours/minutes/seconds: interval between calls.
        :param start_date: the time and date to start the interval.
        :type start_date: datetime
        :param args: positional args for sendCommand; args[0] must be the job
            name (required — used to track the job for later deletion).
        :param kwargs: keyword args for sendCommand.
        :raises: None
        '''
        name = args[0]
        # BUG FIX: the original format string logged `hours` twice and had an
        # extra placeholder.
        self.logger.debug( 'interval ({}) add {} {} {} {} {} {}'.format(
            name, weeks, days, hours, minutes, seconds, start_date ) )
        token = self.scheduler.add_interval_job( self.sendCommand,
                                                 weeks=weeks,
                                                 days=days,
                                                 hours=hours,
                                                 minutes=minutes,
                                                 seconds=seconds,
                                                 start_date=start_date,
                                                 args=args,
                                                 kwargs=kwargs,
                                                 name=name )
        self.jobs[name].append( token )

    def add_cron( self, year=None, month=None, day=None, week=None,
                  day_of_week=None, hour=None, minute=None, second=None,
                  start_date=None, args=None, kwargs=None ):
        ''' Schedule a cron job that calls sendCommand.

        .. seealso:: http://packages.python.org/APScheduler/cronschedule.html

        :param year/month/day/week/day_of_week/hour/minute/second: cron fields.
        :param start_date: the time and date to start the schedule.
        :type start_date: datetime
        :param args: positional args for sendCommand; args[0] must be the job name.
        :param kwargs: keyword args for sendCommand.
        :raises: None
        '''
        name = args[0]
        self.logger.debug( 'set cron({}) at {}/{}/{} {}:{}:{} {} {} {}'.format(
            name, year, month, day, hour, minute, second,
            week, day_of_week, start_date ) )
        token = self.scheduler.add_cron_job( self.sendCommand,
                                             year=year,
                                             month=month,
                                             day=day,
                                             week=week,
                                             day_of_week=day_of_week,
                                             hour=hour,
                                             minute=minute,
                                             second=second,
                                             start_date=start_date,
                                             args=args,
                                             kwargs=kwargs )
        self.jobs[name].append( token )

    def add_date( self, date, args, **kwargs ):
        ''' Schedule a specific date and time at which to call sendCommand.

        .. seealso:: http://packages.python.org/APScheduler/dateschedule.html

        :param date: when to call sendCommand.
        :type date: datetime
        :param args: positional args for sendCommand; args[0] must be the job name.
        :param kwargs: keyword args for sendCommand.
        '''
        name = args[0]
        self.logger.debug( 'add date({}) at {}'.format( name, date ) )
        token = self.scheduler.add_date_job( self.sendCommand,
                                             date=date, args=args, kwargs=kwargs )
        self.jobs[name].append( token )

    def add_one_shot( self, delta, args=None, kwargs=None ):
        ''' Schedule sendCommand to be called once after a delay
        (ie. in 5 seconds or one hour).

        .. seealso:: http://docs.python.org/2/library/datetime.html#timedelta-objects

        :param delta: how long until sendCommand is called.
        :type delta: timedelta
        :param args: positional args for sendCommand; args[0] must be the job name.
        :param kwargs: keyword args for sendCommand.
        '''
        name = args[0]
        now = GetDateTime()
        dt = now.datetime()
        dt = dt + delta
        token = self.scheduler.add_date_job( self.sendCommand, date=dt,
                                             name=name, args=args, kwargs=kwargs )
        self.jobs[name].append( token )

    def deleteJob( self, name ):
        ''' Delete all scheduled jobs registered under *name*.

        :param name: the name of the job to delete.
        :type name: str
        '''
        if name in self.jobs:
            for number, token in enumerate( self.jobs[name] ):
                try:
                    self.scheduler.unschedule_job( token )
                except KeyError:
                    # Job already fired or was never scheduled — nothing to do.
                    pass
                self.logger.info( '{} "{}" removed from scheduler'.format( number, name ) )
            self.jobs[name] = []

    def shutdown( self, wait=True ):
        ''' Shut down the scheduler.

        .. seealso: http://packages.python.org/APScheduler/#shutting-down-the-scheduler

        :param wait: whether to wait for running jobs to complete.
        :type wait: boolean
        '''
        if self.scheduler is not None:
            self.scheduler.shutdown( wait=wait )
            self.scheduler = None

    def print_jobs( self ):
        ''' Print the currently scheduled jobs.

        .. seealso: http://packages.python.org/APScheduler/#getting-a-list-of-scheduled-jobs
        '''
        self.scheduler.print_jobs()

    def sendCommand( self, name, device, port, listeners=None, scheduler_id=None ):
        """ Send the command to the HouseMonitor system.

        :param name: the command name.
        :param device: the device name.
        :type device: str
        :param port: the port name.
        :type port: str
        :param listeners: topic names of the listeners this command is routed to.
        :type listeners: list of str — most can be found in Constants.TopicNames
        :param scheduler_id: unique id for this invocation; a fresh uuid4 is
            generated when omitted.
        """
        # BUG FIX: the original defaults (listeners=[] and
        # scheduler_id=str(uuid.uuid4())) were evaluated once at definition
        # time, so every default call shared one list and ONE uuid.
        if listeners is None:
            listeners = []
        if scheduler_id is None:
            scheduler_id = str( uuid.uuid4() )
        try:
            data = {
                Constants.EnvelopeContents.VALUE: 1,
                Constants.EnvelopeContents.DEVICE: device,
                Constants.EnvelopeContents.PORT: port,
                Constants.EnvelopeContents.SCHEDULER_ID: scheduler_id,
                Constants.EnvelopeContents.ARRIVAL_TIME: datetime.utcnow(),
                Constants.EnvelopeContents.STEPS: copy.copy( listeners ),
                Constants.EnvelopeContents.NAME: name,
            }
            de = DataEnvelope( Constants.EnvelopeTypes.STATUS, **data )
            self.logger.debug( 'name: {} listeners: {} scheduler_id: {}'.
                               format( name, listeners, data[Constants.EnvelopeContents.STEPS] ) )
            self.__input_queue.transmit( de, self.__input_queue.LOW_PRIORITY )
        except Exception as ex:
            self.logger.exception( "Exception in SendCommand: {}".format( ex ) )
# -*- coding:utf-8 -*- from apscheduler.scheduler import Scheduler from datetime import datetime sched = Scheduler() def some_job(): print 'hello world' job = sched.add_date_job(some_job,'2013-01-22 18:32:50') sched.start()
class RadioProgram: def __init__(self, db, program, radio_station): self.__program_actions = [] self.id = program.id self.__db = db self.name = program.id self.__program = program self.radio_station = radio_station self.__scheduler = Scheduler() self.__running_action = None return ''' Starts a station program and does the necessary preparations ''' def start(self): self.__load_program_actions() self.__schedule_program_actions() self.__scheduler.start() return ''' Load the definition of components of the program from a JSON definition ''' def __load_program_actions(self): print self.__program.program.description data = json.loads(self.__program.program.description) for category in data: if category == "Jingle": for action in data[category]: self.__program_actions.append(JingleAction(action["argument"], action["start_time"], action["duration"], action["is_streamed"], self, action["hangup_on_complete"])) print "Jingle scheduled to start at " + str(record["start_time"]) if category == "Media": for action in data[category]: self.__program_actions.append(MediaAction(action["argument"], action["start_time"], action["duration"], action["is_streamed"], self, action["hangup_on_complete"])) print "Media Scheduled to start at " + str(action["start_time"]) if category == "Interlude": for action in data[category]: self.__program_actions.append(InterludeAction(action["argument"], action["start_time"], action["duration"], action["is_streamed"], self, action["hangup_on_complete"])) print "Interlude Scheduled to start at " + str(action["start_time"]) if category == "Stream": #self.__program_actions.add(JingleAction(j['argument'])) print "Stream would have started here" if category == "Music": #self.__program_actions.add(MediaAction(j['argument'])) print "This would have started here" if category == "Outcall": for action in data[category]: print "Call to host scheduled to start at " + str(action["start_time"]) 
self.__program_actions.append(OutcallAction(action['argument'],action["start_time"], action['duration'], action['is_streamed'], action['warning_time'],self, action["hangup_on_complete"]) ) return ''' Schedule the actions of a particular program for playback within the program ''' def __schedule_program_actions(self): for program_action in self.__program_actions: self.__scheduler.add_date_job(getattr(program_action,'start'), self.__get_start_datetime(program_action.start_time).replace(tzinfo=None)) def set_running_action(self, running_action): if not self.__running_action == None: self.__running_action.stop()#clean up any stuff that is not necessary anymore self.__running_action = running_action ''' Get the time at which to schedule the program action to start ''' def __get_start_datetime(self, time_part): t = datetime.strptime(time_part, "%H:%M:%S") time_delta = timedelta(hours=t.hour, minutes=t.minute, seconds=t.second) return self.__program.start + time_delta
class DawnduskAPI:
    """dawndusk API: computes dawn/dusk and full-moon rise/set times with
    ephem and fires callbacks either through an internal APScheduler
    (use_cron == False) or an external xPL cron device."""

    def __init__(self, lgt, lat, use_cron, myxpl, log):
        """Init the dawndusk API.

        @param lgt : longitude of the observer
        @param lat : latitude of the observer
        @param use_cron : False -> internal APScheduler, True -> external xPL cron
        @param myxpl : the xPL handler
        @param log : logger
        """
        self.use_cron = use_cron
        self.log = log
        self.myxpl = myxpl
        if self.use_cron == False:
            self._scheduler = Scheduler()
            self._scheduler.start()
        else:
            self._cronquery = CronQuery(self.myxpl, self.log)
        self.mycity = ephem.Observer()
        self.mycity.lat, self.mycity.lon = lat, lgt
        # Horizon at -6 degrees: civil twilight is used for dawn/dusk.
        self.mycity.horizon = '-6'
        self.job = None
        self.job_test_dawn = None
        self.job_test_dusk = None

    def __del__(self):
        """Cancel pending jobs: halt the external cron devices or shut the
        internal scheduler down."""
        if self.use_cron == True:
            self._cronquery.halt_job("dawndusk")
            self._cronquery.halt_job("dawn-test")
            self._cronquery.halt_job("dusk-test")
        else:
            self._scheduler.shutdown()

    def sched_add(self, sdate, cb_function, label):
        """Add an event in the schedulered tasks.

        @param sdate : the date of the event
        @param cb_function : the callback function to call
        @param label : the label of the event
                       ("dawn", "dusk", "dawn-test" or "dusk-test")
        @return True on success, False if the external cron could not be armed
        """
        self.log.debug("dawndusk.schedAdd : Start ... %s" % label)
        if self.use_cron == False:
            # Internal scheduling: one date job per label; test jobs are
            # tracked separately so they can be inspected/cancelled.
            if label == "dawn" or label == "dusk":
                self.job = self._scheduler.add_date_job(cb_function, \
                    sdate, args = [label])
                self.log.debug("dawndusk.schedAdd : Use internal cron for %s" % label)
            elif label == "dawn-test":
                self.job_test_dawn = self._scheduler.add_date_job\
                    (cb_function, sdate, args = ["dawn"])
                self.log.debug("dawndusk.schedAdd : Use internal cron for %s" % "dawn")
            elif label == "dusk-test":
                self.job_test_dusk = self._scheduler.add_date_job\
                    (cb_function, sdate, args = ["dusk"])
                self.log.debug("dawndusk.schedAdd : Use internal cron for %s" % "dusk")
            # Dump the scheduler queue for debugging.
            for i in self._scheduler.get_jobs():
                self.log.debug("APScheduler : %-10s | %8s" % \
                    (str(i.trigger), i.runs))
        else:
            # External scheduling: halt any previous job on the device, then
            # arm the xPL cron with a dawndusk.basic trigger message.
            self.log.debug("dawndusk.schedAdd : Use external cron ...")
            if label == "dawn" or label == "dusk":
                device = "dawndusk"
            elif label == "dawn-test":
                device = "dawn-test"
            elif label == "dusk-test":
                device = "dusk-test"
            if self._cronquery.status_job(device, extkey = "current") \
                    != "halted":
                self._cronquery.halt_job(device)
                self.log.debug("dawndusk.schedAdd : Halt old device")
            nstmess = XplMessage()
            nstmess.set_type("xpl-trig")
            nstmess.set_schema("dawndusk.basic")
            nstmess.add_data({"type": "dawndusk"})
            # Test labels report the same status as their real counterpart.
            if label == "dawn":
                nstmess.add_data({"status": "dawn"})
            elif label == "dusk":
                nstmess.add_data({"status": "dusk"})
            elif label == "dawn-test":
                nstmess.add_data({"status": "dawn"})
            elif label == "dusk-test":
                nstmess.add_data({"status": "dusk"})
            if self._cronquery.start_date_job(device, nstmess, sdate):
                self.log.debug("dawndusk.schedAdd : External cron activated")
                self.log.debug("dawndusk.schedAdd : Done :)")
            else:
                self.log.error("dawndusk.schedAdd : Can't activate external cron")
                self.log.debug("dawndusk.schedAdd : Done :(")
                return False
        self.log.info("Add a new event of type %s at %s" % (label, sdate))
        return True

    def get_next_dawn(self):
        """Return the date and time of the next dawn.

        @return : the next dawn daytime (local time)
        """
        self.mycity.date = datetime.datetime.today()
        dawn = ephem.localtime(self.mycity.next_rising(ephem.Sun(), \
            use_center = True))
        return dawn

    def get_next_dusk(self):
        """Return the date and time of the next dusk.

        @return : the next dusk daytime (local time)
        """
        self.mycity.date = datetime.datetime.today()
        dusk = ephem.localtime(self.mycity.next_setting(ephem.Sun(), \
            use_center = True))
        return dusk

    def get_next_fullmoon_dawn(self):
        """Return the date and time of the moonrise of the next fullmoon.

        @return : the next fullmoon dawn daytime (local time)
        """
        self.mycity.date = self._get_next_fullmoon()
        dawn = ephem.localtime(self.mycity.next_rising(ephem.Moon(), \
            use_center = True))
        dusk = ephem.localtime(self.mycity.next_setting(ephem.Moon(), \
            use_center = True))
        # If the next rise is after the next set, the moon already rose:
        # use the previous rising instead.
        if dawn > dusk:
            dawn = ephem.localtime(self.mycity.previous_rising(ephem.Moon(), \
                use_center = True))
        return dawn

    def get_next_fullmoon_dusk(self):
        """Return the date and time of the moonset of the next fullmoon.

        @return : the next fullmoon dusk daytime (local time)
        """
        self.mycity.date = self._get_next_fullmoon()
        dusk = ephem.localtime(self.mycity.next_setting(ephem.Moon(), \
            use_center = True))
        return dusk

    def get_next_fullmoon(self):
        """Return the date and time of the next fullmoon.

        @return : the next full moon daytime (local time)
        """
        dusk = ephem.localtime(self._get_next_fullmoon())
        return dusk

    def _get_next_fullmoon(self):
        """Return the ephem date of the next full moon (helper for the
        public fullmoon getters).

        @return : the next full moon daytime (ephem date)
        """
        now = datetime.datetime.today()
        nextfullmoon = ephem.next_full_moon(now)
        return nextfullmoon
class LocalPrefixDiscovery:
    """Periodically queries /local/ndn/prefix over NDN and notifies
    subscribers whenever the locally-routable prefix changes.

    Subscribers register a callback(oldPrefix, newPrefix) via subscribe();
    the first subscriber starts the discovery loop and removing the last
    one stops it.
    """

    # private:
    __slots__ = [
        "_subscribers",        # tag -> callback(oldPrefix, newPrefix)
        "_currentPrefix",      # last prefix observed (Name)
        "_needStopDiscovery",  # True while the loop should be (or is) stopped
        "_scheduler",          # APScheduler driving periodic re-discovery
        "_timeouts",           # consecutive-timeout counter
        "_face",
        "_eventLoop",          # BUGFIX: was misspelled "_evenLoop", which
                               # would reject the __init__ assignment below
                               # under slot enforcement
        "_eventLoopThread",
        "_periodicity",        # seconds between discovery attempts
    ]

    # public:
    def __init__(self, periodicity=30):  # 30 seconds
        """Create the discovery helper; periodicity is in seconds."""
        self._periodicity = periodicity
        self._subscribers = {}
        self._currentPrefix = Name()
        self._needStopDiscovery = True
        self._scheduler = Scheduler()
        self._scheduler.start()
        self._timeouts = 0
        self._face = Face()
        self._eventLoop = Face.EventLoop(self._face)

    def subscribe(self, tag, callback):
        """Register callback under tag; the first subscriber starts discovery."""
        self._subscribers[tag] = callback
        if (len(self._subscribers.values()) == 1):
            self._start()

    def unsubscribe(self, tag):
        """Remove the subscriber; discovery stops when none remain."""
        del self._subscribers[tag]
        if (len(self._subscribers.values()) == 0):
            self._stop()

    def shutdown(self):
        """Stop discovery and shut the scheduler down."""
        self._stop()
        self._scheduler.shutdown()

    # private:
    def _start(self):
        # Run the NDN event loop on a background thread and schedule the
        # first prefix request one second from now.
        self._needStopDiscovery = False
        self._eventLoopThread = threading.Thread(
            target=self._pyndn_loop_ignoring_errors)
        self._eventLoopThread.start()
        nextDiscovery = datetime.datetime.now() + datetime.timedelta(seconds=1)
        self._scheduler.add_date_job(self._requestLocalPrefix, nextDiscovery)

    def _stop(self):
        self._needStopDiscovery = True
        self._eventLoop.stop()
        self._eventLoopThread.join()

    def _pyndn_loop_ignoring_errors(self):
        # Keep the event loop alive; on failure, reconnect after waiting one
        # discovery period (unless a stop was requested meanwhile).
        while not self._needStopDiscovery:
            try:
                self._eventLoop.run()
            except Exception:  # was a bare except; keep KeyboardInterrupt out
                if not self._needStopDiscovery:
                    self._face.disconnect()
                    time.sleep(self._periodicity)
                    self._face.connect()

    def _requestLocalPrefix(self):
        # Ask the local forwarder for its advertised routable prefix.
        self._timeouts = 0
        self._face.expressInterestForLatest(Name("/local/ndn/prefix"),
                                            self._onLocalPrefix,
                                            self._onTimeout)

    def _onLocalPrefix(self, baseName, interest, data, kind):
        """Data callback: parse the prefix, notify subscribers on change,
        and schedule the next discovery round."""
        try:
            name = Name(str(data.content).strip(' \t\n\r'))
        except Exception:
            # BUGFIX: `name` used to stay unbound when parsing failed,
            # raising NameError below. Treat bad data as "prefix unchanged".
            name = self._currentPrefix
        if (name != self._currentPrefix):
            for subscriber in self._subscribers.values():
                subscriber(self._currentPrefix, name)
            self._currentPrefix = name
        if not self._needStopDiscovery:
            nextDiscovery = datetime.datetime.now() + datetime.timedelta(
                seconds=self._periodicity)
            self._scheduler.add_date_job(self._requestLocalPrefix,
                                         nextDiscovery)
        return Closure.RESULT_OK

    def _onTimeout(self, baseName, interest):
        """Timeout callback: re-express up to three times, then fall back
        to an empty prefix and keep the discovery loop going."""
        if self._timeouts < 3:
            self._timeouts = self._timeouts + 1
            return Closure.RESULT_REEXPRESS
        else:
            name = Name()
            if (name != self._currentPrefix):
                for subscriber in self._subscribers.values():
                    subscriber(self._currentPrefix, name)
                self._currentPrefix = name
            if not self._needStopDiscovery:
                nextDiscovery = datetime.datetime.now() + datetime.timedelta(
                    seconds=self._periodicity)
                self._scheduler.add_date_job(self._requestLocalPrefix,
                                             nextDiscovery)
            return Closure.RESULT_OK
class HypeScheduler(object):
    """Thin convenience layer over an APScheduler 2.x Scheduler."""

    def __init__(self, local_tz: str = None):
        """Constructor.

        Args:
          local_tz: The local timezone the scheduler is running in.
        """
        self._scheduler = Scheduler()
        self._local_tz = local_tz
        self.StartScheduler()

    def StartScheduler(self):
        """Start the wrapped scheduler unless it is already running."""
        sched = self._scheduler
        if sched and not sched.running:
            sched.start()

    def InSeconds(self, seconds: int, fn: Callable, *args, **kwargs) -> Job:
        """Run fn once, `seconds` seconds from now.

        Args:
          seconds: Delay before the call.
          fn: Function to call.
          *args: Positional arguments forwarded to fn.
          **kwargs: Keyword arguments forwarded to fn.

        Returns:
          APScheduler Job.
        """
        when = arrow.now().shift(seconds=seconds)
        # APScheduler 2.1.2 doesn't understand timezones, so hand it the
        # naive datetime.
        return self._scheduler.add_date_job(
            fn, when.naive, args=args, kwargs=kwargs)

    def DailyCallback(self, schedule_time: arrow.Arrow, fn: Callable,
                      *args, **kwargs) -> Job:
        """Run fn once a day at schedule_time (randomly jittered).

        The start is perturbed by a random number of seconds drawn from
        [-_jitter, +_jitter]; pass `_jitter=0`/False to disable. The
        default window is 30 seconds.

        Args:
          schedule_time: An Arrow object specifying when to run fn.
          fn: The function to be run.
          *args: Positional arguments forwarded to fn.
          **kwargs: Keyword arguments forwarded to fn ('_jitter' is consumed
            here and not forwarded).

        Returns:
          APScheduler Job.
        """
        if self._local_tz:
            schedule_time = schedule_time.to(self._local_tz)
        window = kwargs.get('_jitter', 30)
        if window:
            schedule_time = schedule_time.shift(
                seconds=random.randint(-window, window))
        kwargs.pop('_jitter', None)
        # APScheduler 2.1.2 doesn't understand timezones.
        return self._scheduler.add_interval_job(
            fn, args=args, kwargs=kwargs,
            start_date=schedule_time.naive, days=1)

    def FixedRate(self, initial_delay: int, period: int, fn: Callable,
                  *args, **kwargs) -> Job:
        """Run fn every `period` seconds, starting after `initial_delay`.

        Args:
          initial_delay: Seconds to wait before the first run.
          period: Seconds between subsequent runs.
          fn: The function to run.
          *args: Positional arguments forwarded to fn.
          **kwargs: Keyword arguments forwarded to fn.

        Returns:
          APScheduler Job.
        """
        first_run = arrow.now().shift(seconds=initial_delay)
        # APScheduler 2.1.2 doesn't understand timezones.
        return self._scheduler.add_interval_job(
            fn, args=args, kwargs=kwargs,
            start_date=first_run.naive, seconds=period)

    def UnscheduleJob(self, job: Job) -> None:
        """Best-effort unschedule; logs when the job wasn't scheduled.

        Args:
          job: Job to unschedule.
        """
        try:
            self._scheduler.unschedule_job(job)
        except KeyError:
            logging.info('Job %s not scheduled.', job)
args=['biometeo']) schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10::00', args=['engagement']) schedule_job(sched, send_message_p, 1800, '2014-09-20 8:10::00', args=['proverbial']) #FIXED job_I1 = sched.add_date_job(send_message, '2014-09-20 8:01:00', args=['identity', 'I1']) job_I2 = sched.add_date_job(send_message, '2014-09-20 9:01:00', args=['identity', 'I2']) job_I2 = sched.add_date_job(send_message, '2014-09-20 11:00:00', args=['identity', 'I3']) job_aqualta = sched.add_date_job(send_message, '2014-09-20 9:05:00', args=['acqualta', 'AQ1']) while True: pass ##############################################################
stime = datetime.strptime(lesson['stime'], '%H:%M') etime = datetime.strptime(lesson['etime'], '%H:%M') except ValueError: print("Incorrect time format for: " + lesson['name']) continue if now.time() > stime.time() and now.time() < etime.time(): print(lesson['name'], end=" | ") print(lesson['stime'] + " - " + lesson['etime']) loading_process = threading.Thread( target=user.openClass, args=[lesson['name_in_website']]) loading_process.start() print() login = loading.simple(loading_process, "Getting " + lesson['name'] + " URL... ", False) loading_process.join() if user.status[0] == 404: cprint(" Error: Can't find " + lesson['name'], 'red') else: if stime.time() > now.time(): datetime = now.replace(hour=stime.hour, minute=stime.minute, second=stime.second) sched.add_date_job(user.openClass, datetime, [lesson['name_in_website']]) cprint(lesson['name'], 'white', end=" | ") print(lesson['stime'] + " - " + lesson['etime']) input() if input("Enter 'q' to quit: ") == "q": break os.system("cls")
class Bus(BotPlugin): def activate(self): super(Bus, self).activate() self.sched = Scheduler(coalesce=True) self.sched.start() @botcmd(split_args_with=' ') def bus(self, mess, args): now = arrow.now() t = self.next_bus(*args) if t: return 'The next no. %s bus leaves from %s %s' % ( args[1], args[0], t.humanize(now) ) @botcmd(split_args_with=' ') def bus_remind(self, mess, args): t = self.next_bus(*args) reminder = t.replace(minutes=-10) remind = partial(self.remind, mess, args) self.sched.add_date_job(remind, reminder.naive) return "%s: you'll be reminded %s" % ( mess.getMuckNick(), reminder.humanize() ) def remind(self, mess, args): now = arrow.now() t = self.next_bus(args[0], args[1]) if t: self.send( mess.getFrom(), '%s: the next no. %s bus leaves from %s %s' % ( mess.getMuckNick(), args[1], args[0], t.humanize(now) ), message_type=mess.getType() ) def parse_timetable(self, stop, route): if stop in STOPS: stop = STOPS[stop] url = posixpath.join( "http://www.nextbuses.mobi", "WebView/BusStopSearch/BusStopSearchResults/", stop ) res = requests.get( url, params={'searchType': 'route', 'searchFilter': route} ) soup = BeautifulSoup(res.text) bus_stops = soup.findAll('table', {'class': 'BusStops'}) times = bus_stops[0].findAll('p', {'class': 'Stops'}) return times def next_bus(self, stop, route, time=0): times = self.parse_timetable(stop, route) now = arrow.now() then = now.replace(minutes=+int(time)) for i in times: logging.info(i.text) if 'DUE' in i.text: continue elif ';at ' in i.text: t = i.text.split('at ')[-1].strip().split(':') next = now.replace(hour=int(t[0]), minute=int(t[1])) else: t = i.text.split('in ')[-1].strip().split() next = now.replace(minutes=int(t[0])) if next > then: return next return False
class ModHandler(pyinotify.ProcessEvent): global o def create(self, theFile): if self.isDir: o.mkdir(theFile) else: o.upload(theFile) print "Created: %s " % theFile def delete(self, theFile): if self.isDir: o.delete_folder(theFile) else: o.delete_file(theFile) print "Deleted: %s " % theFile def renameModification(self, fromFile, toFile): self.create(toFile) self.delete(fromFile) def __init__(self): self.initialTS = datetime.datetime(2005, 7, 14, 12, 30) self.currentTS = datetime.datetime.now() self.schedule = Scheduler() logging.basicConfig() self.schedule.start() self.stack = [] self.previousFull= "" self.happened=False self.isDir=False self.file="" self.path="" self.full="" def process(self): self.post_processing() def process_IN_CREATE(self, event): self.currentTS = datetime.datetime.now() self.file = event.name self.path = event.path self.full = event.pathname self.isDir = event.dir difference = self.currentTS-self.initialTS if (difference.total_seconds() < 1 and self.full is self.previousFull): self.stack.append("Create") else: executionTime = self.currentTS+datetime.timedelta(seconds=.25) job = self.schedule.add_date_job(lambda: self.process(), date=executionTime) self.previousFull = event.pathname self.initialTS=datetime.datetime.now() del self.stack[:] self.stack.append("Create") def process_IN_MOVED_FROM(self, event): self.previous=event.pathname self.currentTS = datetime.datetime.now() self.file = event.name self.path = event.path self.full = event.pathname self.isDir = event.dir difference = self.currentTS-self.initialTS if (difference.total_seconds() < 1 and self.stack[0] is "Create"): self.stack.append("Moved_From") else: executionTime = self.currentTS+datetime.timedelta(seconds=.25) job = self.schedule.add_date_job(lambda: self.process(), date=executionTime) self.previousFull = event.pathname self.initialTS=datetime.datetime.now() del self.stack[:] self.stack.append("Moved_From") def process_IN_MOVED_TO(self, event): self.currentTS = 
datetime.datetime.now() self.file = event.name self.path = event.path self.full = event.pathname self.isDir = event.dir difference = self.currentTS-self.initialTS if (difference.total_seconds() < 1 and self.stack[len(self.stack)-1] is "Moved_From"): self.stack.append("Moved_To") else: executionTime = self.currentTS+datetime.timedelta(seconds=.25) job = self.schedule.add_date_job(lambda: self.process(), date=executionTime) self.previousFull = event.pathname self.initialTS=datetime.datetime.now() del self.stack[:] self.stack.append("Moved_To") def post_processing(self): global o # --- Creation/Deletion --- if len(self.stack) is 1: if self.stack[0] is "Create": self.create(self.full) elif self.stack[0] is "Moved_From": self.delete(self.full) else: print "Error" # --- Rename --- elif len(self.stack) is 2: if self.stack[0] is "Moved_From" and self.stack[1] is "Moved_To": self.renameModification(self.previous, self.full) print "Renamed: %s" % self.full else: print "Error" # --- Modified --- elif len(self.stack) is 3: if self.stack[0] is "Create" and self.stack[1] is "Moved_From" and self.stack[2] is "Moved_To": self.renameModification(self.previous, self.full) print "Modified: %s" % self.full else: print "Error" self.happened=True
print sched.print_jobs() now = datetime.datetime.now() start_time = now + datetime.timedelta(seconds=3) later = now + datetime.timedelta(seconds=10) print "====> now is " + str(now) print "====> start_time is " + str(start_time) print "====> later is " + str(later) reservation_id = 1 job_name = "print_reservation_id_" + str(reservation_id) print "====> adding start_time job" start_instance_job = sched.add_date_job(print_reservation_id, start_time, \ name=job_name + '_start', args=[reservation_id]) print "====> adding later job" start_instance_job = sched.add_date_job(print_reservation_id, later, \ name=job_name + '_later', args=[reservation_id]) print "====> Reservation id should print in 3 seconds, sleeping..." sleep(3) print "====> Printing jobs..." print sched.print_jobs()
print "Ticker firing: %s" % request_id tasks.validate_message(request_id) if __name__ == '__main__': scheduler = Scheduler() scheduler.start() print "Scheduler started" db = create_engine(SQLALCHEMY_DATABASE_URI) metadata = MetaData(db) entries = Table('entries', metadata, autoload=True) time.sleep(2) while True: esel = entries.select(entries.c.status==True) cursor = esel.execute() for each in cursor: time_delta = each.fire_dt - datetime.now() if time_delta > timedelta(0,0,0,0,1,0,0): print "Adding %s to queue." % each.request_id scheduler.add_date_job(fire_job, each.fire_dt, [each.request_id]) else: print "Rejecting %s from queue." % each.request_id eup = entries.update(entries.c.id==each.id) eup.execute(status=False) time.sleep(10)
class Scheduler(object):
    """
    Manages a list of actions that should be performed at specific times.

    Note that this class *intentionally* contains hardly any error checking.
    The correct behaviour of the Scheduler depends on the parent code doing
    "the right thing". In particular, it is crucial that the reached() method
    be called with the next time step at which an event is expected to happen,
    which can be obtained using the next() method. Thus a typical (correct)
    usage is as follows:

        s = Scheduler()
        s.add(...)    # schedule some item(s)
        t = s.next()  # get next time step at which something should happen
        # [do some stuff based on the time step just obtained]
        s.reached(t)

    """
    def __init__(self):
        """
        Create a Scheduler.

        """
        self.items = []
        self.realtime_items = {}
        self.realtime_jobs = []  # while the scheduler is running, the job
                                 # associated with each realtime_item will be
                                 # stored in this list (otherwise it is empty)
        self.last = None

    def __iter__(self):
        return self

    def add(self, func, args=None, kwargs=None, at=None, at_end=False,
            every=None, after=None, realtime=False):
        """
        Register a function with the scheduler.

        Returns the scheduled item, which can be removed again by calling
        Scheduler._remove(item). Note that this may change in the future,
        so use with care.

        """
        if not hasattr(func, "__call__"):
            raise TypeError(
                "The function must be callable but object '%s' is of type '%s'"
                % (str(func), type(func)))
        assert at or every or at_end or (
            after and realtime
        ), "Use either `at`, `every` or `at_end` if not in real time mode."
        assert not (
            at is not None and every is not None
        ), "Cannot mix `at` with `every`. Please schedule separately."
        assert not (at is not None and after is not None), "Delays don't mix with `at`."

        args = args or []
        kwargs = kwargs or {}
        # Freeze the call so scheduled items can trigger it argument-free.
        callback = functools.partial(func, *args, **kwargs)

        # NOTE(review): in realtime mode only the `at_end` case appears to be
        # handled here; realtime registration itself goes through
        # _add_realtime(). Confirm the intended control flow.
        if realtime:
            if at_end:
                at_end_item = SingleTimeEvent(None, True, callback)
                self._add(at_end_item)
                return at_end_item

        if at or (at_end and not every):
            at_item = SingleTimeEvent(at, at_end, callback)
            self._add(at_item)
            return at_item

        if every:
            every_item = RepeatingTimeEvent(every, after, at_end, callback)
            self._add(every_item)
            return every_item

    def _add(self, item):
        self.items.append(item)

    def _remove(self, item):
        self.items.remove(item)

    def _add_realtime(self, func, at=None, every=None, after=None):
        """
        Register a realtime job.

        The (at, every, after) spec is stored in self.realtime_items; the
        corresponding APScheduler job is created by start_realtime_jobs().

        """
        if not hasattr(self, "apscheduler"):
            # Import lazily so APScheduler is only required when realtime
            # events are actually used.
            try:
                from apscheduler.scheduler import Scheduler as APScheduler
            except ImportError:
                log.error(
                    "Need APScheduler package to schedule realtime events.\n"
                    "Please install from http://pypi.python.org/pypi/APScheduler."
                )
                raise
            self.apscheduler = APScheduler()
            atexit.register(lambda: self.apscheduler.shutdown(wait=False))
            self.apscheduler.start()
        if after and isinstance(after, Number):
            # `after` can be either a delay in seconds, or a date/datetime.
            # Since the APScheduler API expects a date/datetime convert it.
            after = datetime.now() + timedelta(seconds=after)
        # Register the job so that it can be started/stopped as needed.
        self.realtime_items[func] = (at, every, after)

    def start_realtime_jobs(self):
        # Hand every registered realtime item to APScheduler.
        for (func, (at, every, after)) in self.realtime_items.items():
            if at:
                job = self.apscheduler.add_date_job(func, at)
            elif every:
                if after:
                    job = self.apscheduler.add_interval_job(
                        func, seconds=every, start_date=after)
                else:
                    job = self.apscheduler.add_interval_job(func, seconds=every)
            elif after:
                job = self.apscheduler.add_date_job(func, after)
            else:
                raise ValueError(
                    "Assertion violated. Use either `at`, `every` of `after`.")
            self.realtime_jobs.append(job)

    def stop_realtime_jobs(self):
        for job in self.realtime_jobs:
            self.apscheduler.unschedule_job(job)
        self.realtime_jobs = []

    def next(self):
        """
        Returns the time for the next action to be performed.

        Automatically called upon iteration of scheduler instance.

        """
        next_step = None
        # This flag determines whether or not iteration should be
        # stopped after all items are checked.
        stop = False

        for item in self.items:
            if item.next_time is not None and (next_step is None
                                               or next_step > item.next_time):
                next_step = item.next_time
            if item.state == EV_REQUESTS_STOP_INTEGRATION:
                # NOTE(review): this mutates self.items while iterating over
                # it -- confirm that at most one item can be in this state.
                self._remove(item)
                stop = True

        if next_step is None:
            stop = True

        if stop is True:
            raise StopIteration

        if next_step < self.last:
            log.error(
                "Scheduler computed the next time step should be t = {:.2g} s, but the last one was already t = {:.2g} s."
                .format(next_step, self.last))
            raise ValueError(
                "Scheduler is corrupted. Requested a time step in the past: dt = {:.2g}."
                .format(next_step - self.last))
        return next_step

    def reached(self, time):
        """
        Notify the Scheduler that a certain point in time has been reached.

        It will perform the action(s) that were defined to happen at that
        time.

        """
        for item in self.items:
            if same_time(item.next_time, time):
                item.check_and_trigger(time)
                if item.state == EV_DONE:
                    self._remove(item)
        self.last = time

    def finalise(self, time):
        """
        Trigger all events that need to happen at the end of time integration.

        """
        for item in self.items:
            if item.trigger_on_stop:
                item.check_and_trigger(time, is_stop=True)

    def reset(self, time):
        """
        Override schedule so that internal time is now `time` and modify
        scheduled items accordingly.

        """
        self.last = None
        for item in self.items:
            item.reset(time)

    def _print_realtime_item(self, item, func_print=log.info):
        # NOTE(review): self.realtime_items is a dict, so iterating it (see
        # print_scheduled_items) yields bare keys; the tuple unpacking below
        # would raise on such a key. Confirm the expected shape of `item`.
        (f, (at, every, after)) = item
        func_print("'{}': <at={}, every={}, after={}>".format(
            item.callback.f.__name__, at, every, after))

    def print_scheduled_items(self, func_print=log.info):
        for item in self.items:
            # this will call __str__ on the item, which should be defined for
            # all events
            print item
        for item in self.realtime_items:
            self._print_realtime_item(item, func_print)

    def clear(self):
        log.debug("Removing scheduled items:")
        self.print_scheduled_items(func_print=log.debug)
        self.items = []
        self.stop_realtime_jobs()
        self.realtime_items = {}

    def run(self, integrator, callbacks_at_scheduler_events=[]):
        """
        Integrate until an exit condition in the schedule has been met.

        The optional argument `callbacks_at_scheduler_events` should be a
        list of functions which are called whenever the time integration
        reaches a "checkpoint" where some event is scheduled. Each such
        function should expect the timestep t at which the event occurs as
        its single argument. Note that these functions are called just
        *before* the scheduled events are triggered. This is used, for
        example, to keep time-dependent fields up to date with the
        simulation time.

        """
        self.start_realtime_jobs()

        for t in self:
            assert (t >= integrator.cur_t)  # sanity check

            # If new items were scheduled after a previous time
            # integration finished, we can have t == integrator.cur_t.
            # However, this confuses the integrators so we don't integrate
            # in this case.
            if t != integrator.cur_t:
                integrator.advance_time(t)

            for f in callbacks_at_scheduler_events:
                f(t)
            self.reached(t)

        self.finalise(t)
        self.stop_realtime_jobs()
class TrainScheduler(object):
    """Polls passenger/freight/auto train feeds for configured stations and
    schedules per-train check/display jobs via APScheduler."""

    def __init__(self):
        logging.basicConfig(level=logging.DEBUG, filename="debug.log",
                            format='%(asctime)s %(levelname)-8s %(message)s',
                            datefmt="%d.%m.%Y %H:%M:%S")
        self.scheduler = Scheduler()
        # De-duplicate jobs as they are added to the job store.
        self.scheduler.add_listener(self.checkForDuplicates,
                                    apscheduler.events.EVENT_JOBSTORE_JOB_ADDED)
        self.scheduler.start()
        if len(self.scheduler.get_jobs()) == 0:
            self.createInitSchedule()
        self.log("Initial tasks completed. Waiting for next event..")
        # Keep the main thread alive; Ctrl-C shuts the scheduler down.
        while True:
            try:
                time.sleep(10)
                #self.scheduler.print_jobs()
            except KeyboardInterrupt:
                self.log("Shutting down..")
                self.scheduler.shutdown()
                quit()

    def createInitSchedule(self):
        """Run every query once, then register the recurring cron jobs."""
        self.log("Perform initial query for passenger trains..")
        self.processPassenger()
        self.log("Perform initial query for freight trains..")
        self.processFreight()
        self.log("Perform initial query for auto trains..")
        self.processAutotrain()
        self.log("Creating initial train schedule..")
        # request passenger trains every hour
        self.scheduler.add_cron_job(self.processPassenger, hour="*/1",
                                    minute="0", day="*", month="*", year="*")
        # request freight trains every day
        self.scheduler.add_cron_job(self.processFreight, hour="0", minute="2",
                                    day="*", month="*", year="*")
        # request auto trains every month
        self.scheduler.add_cron_job(self.processAutotrain, hour="0",
                                    minute="5", day="1", month="*", year="*")

    def processPassenger(self):
        """Schedule a pre-departure check (CHECKBEFORE minutes early) for
        each passenger train; fall back to a plain display job, then give
        up, when the respective time already lies in the past."""
        # return trains for station in question
        tReq = passenger.PassengerTrainRequest(PASSENGER_STATION_ID)
        for train in tReq.getTrainList():
            trainTime = train.actualTime if (train.actualTime) else train.scheduledTime
            trainTimeCheck = trainTime - datetime.timedelta(minutes=CHECKBEFORE)
            # add_date_job raises ValueError for dates in the past -- that is
            # what drives the check-job -> display-job -> give-up cascade.
            try:
                self.scheduler.add_date_job(self.checkIfOnTime, trainTimeCheck,
                                            args=[train], name=train.name)
                self.log("Schedule passenger train '%s' to be checked on %s."
                         % (train.name, trainTimeCheck))
            except ValueError:
                try:
                    self.scheduler.add_date_job(self.output, trainTime,
                                                args=[train], name=train.name)
                    self.log("Schedule passenger train '%s' to be displayed on %s."
                             % (train.name, trainTime))
                except ValueError:
                    self.log("Passenger train '%s' (%s) already passed by."
                             % (train.name, trainTime))

    def checkIfOnTime(self, remTrain):
        """Re-query shortly before departure and schedule the final display
        job with the (possibly updated) actual time."""
        # return trains for station in question
        tReq = passenger.PassengerTrainRequest(PASSENGER_STATION_ID)
        for train in tReq.getTrainList():
            if remTrain.name == train.name:
                trainTime = train.actualTime if (train.actualTime) else train.scheduledTime
                try:
                    self.scheduler.add_date_job(self.output, trainTime,
                                                args=[train], name=train.name)
                    self.log("Schedule passenger train '%s' to be displayed on %s."
                             % (train.name, trainTime))
                except ValueError:
                    self.log("Passenger train '%s' (%s) already passed by."
                             % (train.name, trainTime))
                break

    def processFreight(self):
        # return trains for station in question
        freightTrains = freight.FreightTrainRequest(FREIGHT_STATION_ID)
        for train in freightTrains.getTrainList():
            # FIXME: only arrival atm
            if train.arrival > datetime.datetime.now():
                self.log("Schedule freight train '%s' to be displayed on %s."
                         % (train.name, train.arrival))
                self.scheduler.add_date_job(self.output, train.arrival,
                                            args=[train], name=train.name)
            else:
                self.log("Freight train '%s' (%s) already passed."
                         % (train.name, train.arrival))

    def processAutotrain(self):
        # return trains for station in question
        freightTrains = autotrain.AutoTrainRequest(AUTO_TRAIN_STATION_NAME)
        for train in freightTrains.getTrainList():
            if train.arrival > datetime.datetime.now():
                self.log("Schedule auto train '%s' to be displayed on %s."
                         % (train.name, train.arrival))
                self.scheduler.add_date_job(self.output, train.arrival,
                                            args=[train], name=train.name)
            else:
                self.log("Auto train '%s' (%s) already passed."
                         % (train.name, train.arrival))

    def checkForDuplicates(self, event):
        """Jobstore listener: drop a newly added job when an identical one
        (same name and next run time) is already scheduled."""
        jobs = self.scheduler.get_jobs()
        if jobs:
            # events with the same name (train name) and the next "next run time" are duplicates
            dups = [job for job in jobs
                    if job.name == event.job.name
                    and job.next_run_time == event.job.next_run_time]
            if len(dups) > 1:
                self.log("Unscheduling %s." % event.job)
                self.scheduler.unschedule_job(event.job)

    def output(self, train):
        # Append the train to the output file consumed by the display.
        self.log("OUTPUT: %s" % train)
        f = open(OUTPUT_FILE, "a")
        f.write("%s\n" % train)
        f.close()

    def log(self, message):
        logging.info("* %s" % message)
class TestJobExecution(object):
    """Exercises Scheduler._process_jobs() directly, with a fake thread pool
    and (where needed) a fake clock, so no real threads or sleeps occur."""

    def setup(self):
        self.scheduler = Scheduler(threadpool=FakeThreadPool())
        self.scheduler.add_jobstore(RAMJobStore(), 'default')

        # Make the scheduler think it's running
        self.scheduler._stopped = False
        self.scheduler._thread = FakeThread()

        # Capture ERROR-level records emitted by the scheduler's logger.
        self.logstream = StringIO()
        self.loghandler = StreamHandler(self.logstream)
        self.loghandler.setLevel(ERROR)
        scheduler.logger.addHandler(self.loghandler)

    def teardown(self):
        scheduler.logger.removeHandler(self.loghandler)
        # Undo datetime monkeypatching done by the coalesce/next_run tests.
        if scheduler.datetime == FakeDateTime:
            scheduler.datetime = datetime
        FakeDateTime._now = original_now

    @raises(TypeError)
    def test_noncallable(self):
        # Scheduling a non-callable must be rejected.
        date = datetime.now() + timedelta(days=1)
        self.scheduler.add_date_job('wontwork', date)

    def test_job_name(self):
        def my_job():
            pass

        job = self.scheduler.add_interval_job(my_job,
                                              start_date=datetime(2010, 5, 19))
        eq_(repr(job),
            '<Job (name=my_job, '
            'trigger=<IntervalTrigger (interval=datetime.timedelta(0, 1), '
            'start_date=datetime.datetime(2010, 5, 19, 0, 0))>)>')

    def test_schedule_object(self):
        # Tests that any callable object is accepted (and not just functions)
        class A:
            def __init__(self):
                self.val = 0

            def __call__(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_schedule_method(self):
        # Tests that bound methods can be scheduled (at least with RAMJobStore)
        class A:
            def __init__(self):
                self.val = 0

            def method(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a.method, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_unschedule_job(self):
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_cron_job(increment)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)
        self.scheduler.unschedule_job(job)
        # After unscheduling, processing the same time must not fire the job.
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)

    def test_unschedule_func(self):
        def increment():
            vals[0] += 1

        def increment2():
            vals[0] += 1

        vals = [0]
        job1 = self.scheduler.add_cron_job(increment)
        job2 = self.scheduler.add_cron_job(increment2)
        job3 = self.scheduler.add_cron_job(increment)
        eq_(self.scheduler.get_jobs(), [job1, job2, job3])
        # unschedule_func removes *all* jobs running the given function.
        self.scheduler.unschedule_func(increment)
        eq_(self.scheduler.get_jobs(), [job2])

    @raises(KeyError)
    def test_unschedule_func_notfound(self):
        self.scheduler.unschedule_func(copy)

    def test_job_finished(self):
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_interval_job(increment, max_runs=1)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [1])
        # max_runs reached: the job must have removed itself.
        assert job not in self.scheduler.get_jobs()

    def test_job_exception(self):
        def failure():
            raise DummyException

        job = self.scheduler.add_date_job(failure, datetime(9999, 9, 9))
        self.scheduler._process_jobs(job.next_run_time)
        # The exception must be logged, not propagated.
        assert 'DummyException' in self.logstream.getvalue()

    def test_misfire_grace_time(self):
        # Scheduler-level default applies unless overridden per job.
        self.scheduler.misfire_grace_time = 3
        job = self.scheduler.add_interval_job(lambda: None, seconds=1)
        eq_(job.misfire_grace_time, 3)

        job = self.scheduler.add_interval_job(lambda: None, seconds=1,
                                              misfire_grace_time=2)
        eq_(job.misfire_grace_time, 2)

    def test_coalesce_on(self):
        # Makes sure that the job is only executed once when it is scheduled
        # to be executed twice in a row
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(increment, seconds=1,
                                              start_date=FakeDateTime.now(),
                                              coalesce=True,
                                              misfire_grace_time=2)

        # Turn the clock 2 seconds forward (the old comment said 14, which
        # did not match the code)
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 1)
        eq_(len(events), 1)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(vals, [1])

    def test_coalesce_off(self):
        # Makes sure that every scheduled run for the job is executed even
        # when they are in the past (but still within misfire_grace_time)
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(increment, seconds=1,
                                              start_date=FakeDateTime.now(),
                                              coalesce=False,
                                              misfire_grace_time=2)

        # Turn the clock 2 seconds forward
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 3)
        eq_(len(events), 3)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(events[1].code, EVENT_JOB_EXECUTED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(vals, [3])

    def test_interval(self):
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_interval_job(increment, seconds=1, args=[2])
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [4, 2])

    def test_interval_schedule(self):
        # Decorator form of add_interval_job.
        @self.scheduler.interval_schedule(seconds=1)
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [2])

    def test_cron(self):
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_cron_job(increment, args=[3])
        start = job.next_run_time
        self.scheduler._process_jobs(start)
        eq_(vals, [3, 1])
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [6, 2])
        self.scheduler._process_jobs(start + timedelta(seconds=2))
        eq_(vals, [9, 3])

    def test_cron_schedule_1(self):
        @self.scheduler.cron_schedule()
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals[0], 2)

    def test_cron_schedule_2(self):
        @self.scheduler.cron_schedule(minute='*')
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        next_run = start + timedelta(seconds=60)
        # get_run_times must report both the start and the next minute mark.
        eq_(increment.job.get_run_times(next_run), [start, next_run])
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(next_run)
        eq_(vals[0], 2)

    def test_date(self):
        def append_val(value):
            vals.append(value)

        vals = []
        date = datetime.now() + timedelta(seconds=1)
        self.scheduler.add_date_job(append_val, date, kwargs={'value': 'test'})
        self.scheduler._process_jobs(date)
        eq_(vals, ['test'])

    def test_print_jobs(self):
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = 'Jobstore default:%s'\
                   ' No scheduled jobs%s' % (os.linesep, os.linesep)
        eq_(out.getvalue(), expected)

        self.scheduler.add_date_job(copy, datetime(2200, 5, 19))
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = 'Jobstore default:%s '\
                   'copy (trigger: date[2200-05-19 00:00:00], '\
                   'next run at: 2200-05-19 00:00:00)%s' % (os.linesep, os.linesep)
        eq_(out.getvalue(), expected)

    def test_jobstore(self):
        # Jobs in a removed jobstore must disappear from get_jobs().
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
        job = self.scheduler.add_date_job(lambda: None, datetime(2200, 7, 24),
                                          jobstore='dummy')
        eq_(self.scheduler.get_jobs(), [job])
        self.scheduler.remove_jobstore('dummy')
        eq_(self.scheduler.get_jobs(), [])

    @raises(KeyError)
    def test_remove_nonexistent_jobstore(self):
        self.scheduler.remove_jobstore('dummy2')

    def test_job_next_run_time(self):
        # Tests against bug #5
        def increment():
            vars[0] += 1

        # NOTE(review): `vars` shadows the builtin within this test.
        vars = [0]
        scheduler.datetime = FakeDateTime
        job = self.scheduler.add_interval_job(increment, seconds=1,
                                              misfire_grace_time=3,
                                              start_date=FakeDateTime.now())
        start = job.next_run_time

        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        # Same time again: within grace, but already run -- no second firing.
        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vars, [2])
class Scheduler(object):
    """Compatibility wrapper exposing an APScheduler 3.x-style job API on
    top of whichever APScheduler major version is importable (3.x
    BackgroundScheduler, or the legacy 2.x Scheduler)."""

    # Underlying APScheduler instance (set in __init__).
    schedulr = None
    # True when the APScheduler 3.x API is in use.
    aps3 = True

    def __init__(self):
        #####
        # ApScheduler version detection
        try:
            # APScheduler 3.x implementation
            from apscheduler.schedulers.background import BackgroundScheduler
            self.schedulr = BackgroundScheduler()
            self.aps3 = True
        except ImportError:
            # APScheduler 2.x implementation
            from apscheduler.scheduler import Scheduler
            self.schedulr = Scheduler()
            self.aps3 = False

    def start(self):
        """Start the underlying scheduler."""
        return self.schedulr.start()

    def get_job(self, name):
        """Return the job registered under *name*, or None if not found."""
        if self.aps3:
            return self.schedulr.get_job(name)
        # APScheduler 2.x has no lookup-by-id; scan the job list by name.
        for job in self.schedulr.get_jobs():
            if job.name == name:
                return job
        return None

    def add_job(self, func, trigger, args=None, kwargs=None, id=None,
                **trigger_args):
        """Schedule *func* with a 3.x-style (trigger, **trigger_args) call.

        On 2.x the call is translated to the matching add_*_job method
        (only 'date', 'interval' and 'cron' are supported, and only
        partially). Returns the created job.

        :raises KeyError: on 2.x 'date' trigger without a run_date
        :raises NotImplementedError: for unsupported triggers on 2.x
        """
        if self.aps3:
            return self.schedulr.add_job(func, trigger, id=id,
                                         replace_existing=True,
                                         args=args, kwargs=kwargs,
                                         **trigger_args)

        # Bug fix: compare trigger names with '==', not 'is' -- identity
        # comparison against a string literal relies on CPython interning
        # and emits a SyntaxWarning on modern interpreters.
        if trigger == 'date':
            # pop() raises KeyError by intention if run_date was not set!
            run_date = trigger_args.pop('run_date')
            return self.schedulr.add_date_job(func, run_date, name=id,
                                              args=args, kwargs=kwargs)
        elif trigger == 'interval':
            # only partially implemented!!
            seconds = trigger_args.pop('seconds', 0)
            minutes = trigger_args.pop('minutes', 0)
            hours = trigger_args.pop('hours', 0)
            return self.schedulr.add_interval_job(func, name=id,
                                                  hours=hours,
                                                  minutes=minutes,
                                                  seconds=seconds,
                                                  args=args, kwargs=kwargs)
        elif trigger == 'cron':
            # only partially implemented!!
            second = trigger_args.pop('second', 0)
            minute = trigger_args.pop('minute', 0)
            hour = trigger_args.pop('hour', 0)
            # Bug fix: forward args/kwargs like the other 2.x branches do;
            # the original silently dropped them for cron jobs.
            return self.schedulr.add_cron_job(func, name=id,
                                              hour=hour, minute=minute,
                                              second=second,
                                              args=args, kwargs=kwargs)
        else:
            raise NotImplementedError

    def shutdown(self):
        """Shut down the underlying scheduler."""
        return self.schedulr.shutdown()

    # https://github.com/ralphwetzel/theonionbox/issues/19#issuecomment-263110953
    def check_tz(self):
        """Return True if the local timezone is acceptable to APScheduler,
        False if it cannot be resolved to a named pytz timezone."""
        from tzlocal import get_localzone

        try:
            # APScheduler 3.x
            from apscheduler.util import astimezone
        except ImportError:
            # https://github.com/ralphwetzel/theonionbox/issues/31
            # APScheduler 2.x
            from pytz import timezone, utc
            from datetime import tzinfo

            # Robustness fix: 'unicode' only exists on Python 2; fall back
            # to plain str on Python 3 instead of raising NameError.
            try:
                string_types = (str, unicode)  # noqa: F821 (py2)
            except NameError:
                string_types = (str,)

            # copied here from apscheduler/util.py (version 3.4)
            # copyright Alex Grönholm
            # https://github.com/agronholm/apscheduler
            def astimezone(obj):
                """
                Interprets an object as a timezone.

                :rtype: tzinfo
                """
                if isinstance(obj, string_types):
                    return timezone(obj)
                if isinstance(obj, tzinfo):
                    if not hasattr(obj, 'localize') or not hasattr(
                            obj, 'normalize'):
                        raise TypeError(
                            'Only timezones from the pytz library are supported'
                        )
                    if obj.zone == 'local':
                        raise ValueError(
                            'Unable to determine the name of the local timezone -- you must explicitly '
                            'specify the name of the local timezone. Please refrain from using timezones like '
                            'EST to prevent problems with daylight saving time. Instead, use a locale based '
                            'timezone name (such as Europe/Helsinki).')
                    return obj
                if obj is not None:
                    raise TypeError('Expected tzinfo, got %s instead' %
                                    obj.__class__.__name__)

        tz = get_localzone()
        try:
            astimezone(tz)
        except ValueError:
            return False
        return True
class TestJobExecution(object):
    """Tests that exercise job execution through Scheduler._process_jobs,
    driving the scheduler manually (no background thread) via a fake
    thread pool and, where needed, a fake clock (FakeDateTime)."""

    def setup(self):
        self.scheduler = Scheduler(threadpool=FakeThreadPool())
        self.scheduler.add_jobstore(RAMJobStore(), "default")

        # Make the scheduler think it's running
        self.scheduler._thread = FakeThread()

        # Capture scheduler ERROR-level log output for assertions.
        self.logstream = StringIO()
        self.loghandler = StreamHandler(self.logstream)
        self.loghandler.setLevel(ERROR)
        scheduler.logger.addHandler(self.loghandler)

    def teardown(self):
        scheduler.logger.removeHandler(self.loghandler)
        # Restore the real clock if a test swapped in FakeDateTime.
        if scheduler.datetime == FakeDateTime:
            scheduler.datetime = datetime
        FakeDateTime._now = original_now

    def test_job_name(self):
        # Job repr includes the function name and trigger details.
        def my_job():
            pass

        job = self.scheduler.add_interval_job(my_job,
                                              start_date=datetime(2010, 5, 19))
        # NOTE(review): this pins the pre-3.7 repr of timedelta
        # ("datetime.timedelta(0, 1)") -- Python-version sensitive.
        eq_(
            repr(job),
            "<Job (name=my_job, trigger=<IntervalTrigger (interval=datetime.timedelta(0, 1), "
            "start_date=datetime.datetime(2010, 5, 19, 0, 0))>)>",
        )

    def test_schedule_object(self):
        # Tests that any callable object is accepted (and not just functions)
        class A:
            def __init__(self):
                self.val = 0

            def __call__(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_schedule_method(self):
        # Tests that bound methods can be scheduled (at least with RAMJobStore)
        class A:
            def __init__(self):
                self.val = 0

            def method(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a.method, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_unschedule_job(self):
        # After unschedule_job, further processing must not run the job.
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_cron_job(increment)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)
        self.scheduler.unschedule_job(job)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)

    def test_unschedule_func(self):
        # unschedule_func removes every job bound to that callable.
        def increment():
            vals[0] += 1

        def increment2():
            vals[0] += 1

        vals = [0]
        job1 = self.scheduler.add_cron_job(increment)
        job2 = self.scheduler.add_cron_job(increment2)
        job3 = self.scheduler.add_cron_job(increment)
        eq_(self.scheduler.get_jobs(), [job1, job2, job3])

        self.scheduler.unschedule_func(increment)
        eq_(self.scheduler.get_jobs(), [job2])

    @raises(KeyError)
    def test_unschedule_func_notfound(self):
        # Unscheduling a never-scheduled callable raises KeyError.
        self.scheduler.unschedule_func(copy)

    def test_job_finished(self):
        # A job that reached max_runs is removed from the job list.
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_interval_job(increment, max_runs=1)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [1])
        assert job not in self.scheduler.get_jobs()

    def test_job_exception(self):
        # An exception raised by a job is logged, not propagated.
        def failure():
            raise DummyException

        job = self.scheduler.add_date_job(failure, datetime(9999, 9, 9))
        self.scheduler._process_jobs(job.next_run_time)
        assert "DummyException" in self.logstream.getvalue()

    def test_misfire_grace_time(self):
        # Scheduler-level default applies unless overridden per job.
        self.scheduler.misfire_grace_time = 3
        job = self.scheduler.add_interval_job(lambda: None, seconds=1)
        eq_(job.misfire_grace_time, 3)

        job = self.scheduler.add_interval_job(lambda: None, seconds=1,
                                              misfire_grace_time=2)
        eq_(job.misfire_grace_time, 2)

    def test_coalesce_on(self):
        # Makes sure that the job is only executed once when it is scheduled
        # to be executed twice in a row
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(
            increment, seconds=1, start_date=FakeDateTime.now(),
            coalesce=True, misfire_grace_time=2
        )

        # Turn the clock 14 seconds forward
        # NOTE(review): comment says 14 but the code advances 2 seconds.
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 1)
        eq_(len(events), 1)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(vals, [1])

    def test_coalesce_off(self):
        # Makes sure that every scheduled run for the job is executed even
        # when they are in the past (but still within misfire_grace_time)
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(
            increment, seconds=1, start_date=FakeDateTime.now(),
            coalesce=False, misfire_grace_time=2
        )

        # Turn the clock 2 seconds forward
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 3)
        eq_(len(events), 3)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(events[1].code, EVENT_JOB_EXECUTED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(vals, [3])

    def test_interval(self):
        # Interval job runs once per processing pass, args forwarded.
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_interval_job(increment, seconds=1, args=[2])
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [4, 2])

    def test_interval_schedule(self):
        # Decorator form of add_interval_job.
        @self.scheduler.interval_schedule(seconds=1)
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [2])

    def test_cron(self):
        # Default cron trigger fires every second.
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_cron_job(increment, args=[3])
        start = job.next_run_time
        self.scheduler._process_jobs(start)
        eq_(vals, [3, 1])
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [6, 2])
        self.scheduler._process_jobs(start + timedelta(seconds=2))
        eq_(vals, [9, 3])

    def test_cron_schedule_1(self):
        # Decorator form of add_cron_job with defaults.
        @self.scheduler.cron_schedule()
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals[0], 2)

    def test_cron_schedule_2(self):
        # minute="*" fires once per minute; verify computed run times too.
        @self.scheduler.cron_schedule(minute="*")
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        next_run = start + timedelta(seconds=60)
        eq_(increment.job.get_run_times(next_run), [start, next_run])

        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(next_run)
        eq_(vals[0], 2)

    def test_date(self):
        # A date job runs exactly once with the given kwargs.
        def append_val(value):
            vals.append(value)

        vals = []
        date = datetime.now() + timedelta(seconds=1)
        self.scheduler.add_date_job(append_val, date,
                                    kwargs={"value": "test"})

        self.scheduler._process_jobs(date)
        eq_(vals, ["test"])

    def test_print_jobs(self):
        # print_jobs writes a human-readable listing to the given stream.
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = "Jobstore default:%s" \
            " No scheduled jobs%s" % (os.linesep, os.linesep)
        eq_(out.getvalue(), expected)

        self.scheduler.add_date_job(copy, datetime(2200, 5, 19))
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = (
            "Jobstore default:%s "
            "copy (trigger: date[2200-05-19 00:00:00], "
            "next run at: 2200-05-19 00:00:00)%s" % (os.linesep, os.linesep)
        )
        eq_(out.getvalue(), expected)

    def test_jobstore(self):
        # Removing a jobstore also removes the jobs it holds.
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")
        job = self.scheduler.add_date_job(lambda: None, datetime(2200, 7, 24),
                                          jobstore="dummy")
        eq_(self.scheduler.get_jobs(), [job])
        self.scheduler.remove_jobstore("dummy")
        eq_(self.scheduler.get_jobs(), [])

    @raises(KeyError)
    def test_remove_nonexistent_jobstore(self):
        # Removing an unknown jobstore alias raises KeyError.
        self.scheduler.remove_jobstore("dummy2")

    def test_job_next_run_time(self):
        # Tests against bug #5
        def increment():
            vars[0] += 1

        vars = [0]
        scheduler.datetime = FakeDateTime
        job = self.scheduler.add_interval_job(increment,
                                              seconds=1,
                                              misfire_grace_time=3,
                                              start_date=FakeDateTime.now())
        start = job.next_run_time

        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        # Re-processing at the same instant must not run the job again.
        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vars, [2])
mC(auth['account_sid'],auth['auth_token'],targetNumber,ngrokURL) print "Call Queued Up" time.sleep(60) # Twilio default call timeout is 60 sec #kill = raw_input("Kill proceses? (y/n):") #while kill in ['n']: # kill = raw_input("Kill proceses? (y/n):") print "Killing processes" ngrok.kill() # kill ngrok process os.system('killall -KILL Python') # kill web server and this thread # Start the scheduler sched = Scheduler() sched.start() # Convert UTC target to local time localTarget = utc2local(datetime.strptime(targetTimeUTC,'%Y-%m-%d %H:%M:%S')) job = sched.add_date_job(main, localTarget, [filename,targetNumber,ngrokURL]) sched.print_jobs() print 'Current time is %s' % datetime.now() # Keep scheduler alive until you hit Ctrl+C! while True: time.sleep(1) sched.shutdown()
class TestOfflineScheduler(object):
    """Tests for Scheduler behavior before start() has been called
    ('offline' state): tentative jobs, configuration, and listeners."""

    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        # Adding two jobstores under the same alias raises KeyError.
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")

    def test_add_tentative_job(self):
        # Before start(), jobs for a not-yet-added jobstore are held
        # tentatively and do not appear in get_jobs().
        job = self.scheduler.add_date_job(lambda: None,
                                          datetime(2200, 7, 24),
                                          jobstore="dummy")
        assert isinstance(job, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_add_job_by_reference(self):
        # A "module:attribute" string is resolved to the actual callable.
        job = self.scheduler.add_date_job("copy:copy", datetime(2200, 7, 24))
        eq_(job.func, copy)
        eq_(job.func_ref, "copy:copy")

    def test_configure_jobstore(self):
        # Jobstores can be created from dotted-path configuration options.
        conf = {"apscheduler.jobstore.ramstore.class":
                "apscheduler.jobstores.ram_store:RAMJobStore"}
        self.scheduler.configure(conf)
        self.scheduler.remove_jobstore("ramstore")

    def test_shutdown_offline(self):
        # Shutting down a never-started scheduler must not raise.
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        # Options WITHOUT the "apscheduler." prefix are ignored, so the
        # defaults (misfire_grace_time=1, daemonic=True) remain in force.
        global_options = {"misfire_grace_time": "2", "daemonic": "false"}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        # Prefixed options are applied.
        global_options = {"apscheduler.misfire_grace_time": 2,
                          "apscheduler.daemonic": False}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        # Listeners receive notified events until removed.
        val = []
        self.scheduler.add_listener(val.append)

        event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 1)
        eq_(val[0], event)

        event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)
        eq_(val[1], event)

        self.scheduler.remove_listener(val.append)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)

    def test_pending_jobs(self):
        # Tests that pending jobs are properly added to the jobs list when
        # the scheduler is started (and not before!)
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])

        self.scheduler.start()
        jobs = self.scheduler.get_jobs()
        eq_(len(jobs), 1)
''' Created on 18-Feb-2014 @author: Abhimanyu ''' from datetime import datetime from apscheduler.scheduler import Scheduler # Start the scheduler sched = Scheduler(standalone=True) # Define the function that is to be executed def my_job(text): print text # The job will be executed on November 6th, 2009 exec_date = datetime.strptime("2014-05-06 18:17:00", "%Y-%m-%d %H:%M:%S") # Store the job in a variable in case we want to cancel it job = sched.add_date_job(my_job, exec_date, ['sdasda']) sched.start()
class ProgramHandler:
    """Loads the day's scheduled programs for one radio station, schedules
    each as an APScheduler date job, and listens on a TCP socket for
    add/update/delete/sync events that modify the schedule at runtime.

    NOTE(review): Python 2 code throughout (e.message, str/bytes socket I/O).
    """

    def __init__(self, radio_station):
        # The station this handler serves; also provides logger and db session.
        self.__radio_station = radio_station
        self.__scheduler = None
        self.__scheduled_jobs = None  # dict: ScheduledProgram.id -> APScheduler job
        # Spawns the socket-listener thread (daemonless; runs for process life).
        self.__start_listeners()
        self.__is_starting_up = True
        self.__running_program = None
        self.__interval_hours = 3  # Time after which to schedule again
        self.__radio_station.logger.info(
            "Done initialising ProgramHandler for {0}".format(
                radio_station.station.name))

    def run(self):
        """Build and start the current schedule, then leave startup mode."""
        self.run_current_schedule()
        self.__is_starting_up = False

    def __prepare_schedule(self):
        # Fresh scheduler + job map each time the schedule is (re)built.
        self.__load_programs()
        self.__scheduler = Scheduler(timezone=pytz.utc)
        self.__scheduled_jobs = dict()

    def run_current_schedule(self):
        """Load today's programs and schedule them on a new scheduler."""
        self.__prepare_schedule()
        self.__scheduler.start()
        self.__schedule_programs()
        #self.__schedule_next_schedule()

    def set_running_program(self, running_program):
        """Stop whatever is on air and record the new running program."""
        self.__stop_program()
        self.__running_program = running_program

    def stop(self):
        """Stop the running program."""
        self.__stop_program()
        # any clean up goes here
        # unschedule stuff

    def __schedule_next_schedule(self):
        # Re-run schedule building after __interval_hours (currently unused --
        # the call site above is commented out).
        base_date = datetime.now()
        # timedelta positional args: (days, seconds, us, ms, minutes, hours)
        next_schedule_date = base_date + timedelta(
            0, 0, 0, 0, 0, self.__interval_hours)  # 3 hours
        self.__scheduler.add_date_job(getattr(self, 'run_current_schedule'),
                                      next_schedule_date)

    def __schedule_programs(self):
        # Schedule every loaded program that has not already finished.
        for scheduled_program in self.__scheduled_programs:
            if not self.__is_program_expired(scheduled_program):
                self.__add_scheduled_job(scheduled_program)
                self.__radio_station.logger.info(
                    "Scheduled program {0} for station {1} starting at {2}".
                    format(scheduled_program.program.name,
                           self.__radio_station.station.name,
                           scheduled_program.start))

    def __add_scheduled_job(self, scheduled_program):
        # APScheduler expects a naive datetime here, hence tzinfo=None.
        start_time = self.__get_program_start_time(scheduled_program).replace(
            tzinfo=None)
        program = RadioProgram(scheduled_program, self.__radio_station, self)
        try:
            scheduled_job = self.__scheduler.add_date_job(
                getattr(program, 'start'), start_time)
            self.__scheduled_jobs[scheduled_program.id] = scheduled_job
        except Exception as e:
            # NOTE(review): e.message is Python-2 only.
            self.__radio_station.logger.error(
                "Error {err} in __add_scheduled_job".format(err=e.message))

    def __delete_scheduled_job(self, index):
        """Unschedule and forget the job keyed by ScheduledProgram id."""
        if not self.__scheduled_jobs:
            self.__radio_station.logger.warning(
                "Failed to delete job (no jobs are scheduled)")
            return
        if index in self.__scheduled_jobs:
            try:
                self.__scheduler.unschedule_job(self.__scheduled_jobs[index])
            except:
                # The job probably ran already
                self.__radio_station.logger.warning(
                    "Failed to remove unscheduled job #{}".format(index))
            del self.__scheduled_jobs[index]

    def __stop_program(self):
        # Best-effort stop; any error is swallowed by design.
        try:
            if self.__running_program is not None:
                self.__running_program.stop()
                sleep(2)
        except:
            return

    def __run_program(self):
        # self.__running_program.run()
        return

    def __load_programs(self):
        """Query today's (or currently airing) undeleted programs into
        self.__scheduled_programs."""
        timezone = self.__radio_station.station.timezone
        #if self.__is_starting_up:
        date_filter = "((date(start) = date(now())) or (start < now() and radio_scheduledprogram.end > now()))"
        #else:
        #    date_filter = "(start >= now() at time zone '{tz}' and start < now() at time zone '{tz}' + interval '{interval} hour')".format(
        #        tz=timezone, interval=self.__interval_hours)
        query = self.__radio_station.db.query(ScheduledProgram).filter(
            ScheduledProgram.station_id ==
            self.__radio_station.station.id).filter(
                text(date_filter)).filter(ScheduledProgram.deleted == False)
        self.__scheduled_programs = query.all()
        self.__radio_station.logger.info("Loaded {1} programs for {0}".format(
            self.__radio_station.station.name,
            len(self.__scheduled_programs)))

    def __load_program(self, program_id):
        # Fetch one ScheduledProgram row by primary key (or None).
        return self.__radio_station.db.query(ScheduledProgram).filter(
            ScheduledProgram.id == program_id).first()

    def __start_listeners(self):
        # Background thread that receives schedule-change events over TCP.
        t = threading.Thread(target=self.__listen_for_scheduling_changes,
                             args=(DefaultConfig.SCHEDULE_EVENTS_SERVER_IP,
                                   DefaultConfig.SCHEDULE_EVENTS_SERVER_PORT))
        t.start()

    def __listen_for_scheduling_changes(self, ip, port):
        """Connect to the schedule-events server, register this station,
        then process JSON events (delete/add/update/sync) forever."""
        sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        addr = (ip, port)

        # It may not be possible to connect after restart, TIME_WAIT could
        # come into play etc. Anyway, keep trying
        connected = False
        while not connected:
            try:
                sck.connect(addr)
                connected = True
            except:
                self.__radio_station.logger.warning(
                    "[Station #{}] Could not connect to server, retrying in 30..."
                    .format(self.__radio_station.id))
                sleep(30)

        # Register this station with the events server.
        sck.send(
            json.dumps({
                'station': self.__radio_station.station.id,
                'action': 'register'
            }))

        while True:
            incomplete_data = False
            data = sck.recv(10240000)
            # First parse attempt; failure means the payload arrived in
            # chunks and must be reassembled below.
            try:
                event = json.loads(data)
            except:
                incomplete_data = True
                total_data = []
                total_data.append(data)

            self.__radio_station.logger.debug(data)
            # Keep reading until the accumulated buffer parses as JSON.
            while incomplete_data:
                self.__radio_station.logger.debug('getting chunks...')
                try:
                    partial_data = sck.recv(10240000)
                except Exception as e:
                    self.__radio_station.logger.error(
                        'Failed to get all data chunks... {}'.format(
                            e.message))
                if partial_data:
                    total_data.append(partial_data)
                    try:
                        json.loads(''.join(total_data))
                        incomplete_data = False
                    except:
                        pass
            # NOTE(review): indentation reconstructed from a collapsed
            # source -- this final reparse is taken to run after the chunk
            # loop completes; confirm against upstream history.
                data = ''.join(total_data)
                try:
                    event = json.loads(data)
                except Exception as e:
                    self.__radio_station.logger.error(
                        "Error 1 {err} in ProgramHandler.__listen_for_scheduling_changes"
                        .format(err=e.message))
                    return

            # self.__radio_station.logger.error('Processing JSON data for station {}:\n{}'.format(self.__radio_station.station.id, event))
            try:
                if "action" in event and "id" in event:
                    if event["action"] == "delete":
                        self.__delete_scheduled_job(event["id"])
                        self.__radio_station.logger.info(
                            "Scheduled program with id {0} has been deleted".
                            format(event["id"]))
                    elif event["action"] == "add":
                        scheduled_program = self.__load_program(event["id"])
                        if not self.__is_program_expired(scheduled_program):
                            self.__add_scheduled_job(scheduled_program)
                            self.__radio_station.logger.info(
                                "Scheduled program with id {0} has been added at time {1}"
                                .format(event["id"], scheduled_program.start))
                    elif event["action"] == "update":
                        # Update = delete then re-add with the fresh row.
                        self.__delete_scheduled_job(event["id"])
                        scheduled_program = self.__load_program(event["id"])
                        if not self.__is_program_expired(scheduled_program):
                            self.__add_scheduled_job(scheduled_program)
                            self.__radio_station.logger.info(
                                "Scheduled program with id {0} has been moved to start at time {1}"
                                .format(event["id"],
                                        scheduled_program.start))
                    elif event["action"] == "sync":
                        # self.__radio_station.logger.info("Syncing music for station {0}".format(event["id"]))
                        t = threading.Thread(target=self.__process_music_data,
                                             args=(event["id"],
                                                   event["music_data"]))
                        t.start()
            except Exception as e:
                self.__radio_station.logger.error(
                    "Error 2 {err} in ProgramHandler.__listen_for_scheduling_changes"
                    .format(err=e.message))

    def __get_dict_from_rows(self, rows):
        # Index ORM rows by their title for quick existence checks.
        result = dict()
        for row in rows:
            result[row.title] = row
        return result

    def __process_music_data(self, station_id, json_string):
        """Merge a synced artist/album/song catalog (JSON) into the DB,
        committing each new row individually and rolling back on errors."""
        songs_in_db = self.__get_dict_from_rows(
            self.__radio_station.db.query(ContentMusic).filter(
                ContentMusic.station_id == station_id).all())
        artists_in_db = self.__get_dict_from_rows(
            self.__radio_station.db.query(ContentMusicArtist).filter(
                ContentMusicArtist.station_id == station_id).all())
        albums_in_db = self.__get_dict_from_rows(
            self.__radio_station.db.query(ContentMusicAlbum).filter(
                ContentMusicAlbum.station_id == station_id).all())

        data = json.loads(json_string)
        for artist in data:
            if artist in artists_in_db:
                music_artist = artists_in_db[artist]
            else:  # persist the artist
                music_artist = ContentMusicArtist(**{
                    'title': artist,
                    'station_id': station_id
                })
                artists_in_db[artist] = music_artist
                self.__radio_station.db.add(music_artist)
                try:
                    self.__radio_station.db._model_changes = {}
                    self.__radio_station.db.commit()
                except DatabaseError:
                    self.__radio_station.db.rollback()
                    continue
                except:
                    continue

            for album in data[artist]:
                if album in albums_in_db:
                    music_album = albums_in_db[album]
                else:  # persist the album
                    music_album = ContentMusicAlbum(**{
                        'title': album,
                        'station_id': station_id
                    })
                    albums_in_db[album] = music_album
                    self.__radio_station.db.add(music_album)
                    try:
                        self.__radio_station.db._model_changes = {}
                        self.__radio_station.db.commit()
                    except DatabaseError:
                        self.__radio_station.db.rollback()
                        continue
                    except:
                        continue

                for song in data[artist][album]['songs']:
                    if song['title'] in songs_in_db:
                        music_song = songs_in_db[song['title']]
                    else:
                        music_song = ContentMusic(
                            **{
                                'title': song['title'],
                                'duration': song['duration'],
                                'station_id': station_id,
                                'album_id': music_album.id,
                                'artist_id': music_artist.id
                            })
                        songs_in_db[song['title']] = music_song
                        self.__radio_station.db.add(music_song)
                        try:
                            self.__radio_station.db._model_changes = {}
                            self.__radio_station.db.commit()
                        except DatabaseError:
                            self.__radio_station.db.rollback()
                            continue
                        except:
                            continue

    """
    Gets the program to run from the current list of programs that are lined up for the day
    """

    def __get_current_program(self):
        for program in self.__scheduled_programs:
            if not self.__is_program_expired(program):
                return program

    """
    Returns whether or not the time for a particular program has passed
    """

    def __is_program_expired(self, scheduled_program):
        # A program is expired once its end time is within a minute of now.
        now = arrow.utcnow()
        return (scheduled_program.start_utc +
                scheduled_program.program.duration) < (now +
                                                       timedelta(minutes=1))

    def __get_program_start_time(self, scheduled_program):
        now = arrow.utcnow().datetime
        if scheduled_program.start_utc < now:
            # Time at which program begins is already past
            return now + timedelta(seconds=5)  # 5 second scheduling allowance
        else:
            return scheduled_program.start_utc + timedelta(
                seconds=5)  # 5 second scheduling allowance