Example #1
def main(config_file):
    info = load_config(config_file)

    day_of_week = info['days']
    hour = info['hour']
    minute = info['minute']

    config = {
        'apscheduler.jobstores.file.class': info['scheduler-config']['class'],
        'apscheduler.jobstores.file.path': info['scheduler-config']['path']
    }
    sched = Scheduler(config)

    sched.add_cron_job(run_scraper, day_of_week=day_of_week, hour=hour, minute=minute)
    sched.add_cron_job(check_archive, day='first')

    sched.start()

    print('Press Ctrl+{0} to exit'.format('C'))

    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        sched.shutdown()  # Not strictly necessary if daemonic mode is enabled, but should be done if possible.
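
This example assumes several helpers that are not shown. A minimal sketch of the missing context, assuming load_config reads a JSON file and with run_scraper/check_archive as hypothetical stand-ins for the scheduled jobs:

# Hedged sketch of the context Example #1 assumes (all names hypothetical).
import json
import time

from apscheduler.scheduler import Scheduler  # legacy APScheduler 2.x import

def load_config(config_file):
    # Assumed: a JSON config with 'days', 'hour', 'minute' and a
    # 'scheduler-config' section holding the jobstore class and path.
    with open(config_file) as f:
        return json.load(f)

def run_scraper():
    print("running scraper")

def check_archive():
    print("checking archive")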
Example #2
def add_items(request):
    seller = request.user
    if seller:
        if request.method == "POST":
            item_name = request.POST['name']
            #find way to pass null to image_url
            item = {
                "item_name": item_name,
                "seller": seller,
                "status": "Available",
                "date_added": datetime.now(),
                "min_bid": request.POST['min_bid']
            }
            res = add_doc(index='item', type=Item, id=item_name, doc=item)

            from apscheduler.scheduler import Scheduler

            sched = Scheduler()
            sched.start()
            exec_time = datetime.now() + timedelta(seconds=5)
            job1 = sched.add_date_job(sell_items, exec_time, [item_name])

            return HttpResponse("Added Item: {}".format(item_name), status=200)

    else:
        return HttpResponse("Please log in")  #redirect to login
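
A caveat on this view: it creates and starts a new Scheduler on every POST, leaking one scheduler thread per request. A sketch of one alternative, keeping a single module-level instance (sell_items is the same job the view imports; the helper name is hypothetical):

from datetime import datetime, timedelta

from apscheduler.scheduler import Scheduler

sched = Scheduler(daemonic=True)
sched.start()

def schedule_sale(item_name, delay_seconds=5):
    # Reuse one scheduler instead of starting a new one per request.
    exec_time = datetime.now() + timedelta(seconds=delay_seconds)
    return sched.add_date_job(sell_items, exec_time, [item_name])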
Example #3
def update_celery_tasks_status():
    scheduler = Scheduler(daemonic=False)

    @scheduler.cron_schedule(second='*/30', max_instances=1)
    def update_job():
        infos = RedisHelper.get_all_celery_tasks_info()
        res_ids = get_tasks_uuid('start_spider')
        for res_id in infos:
            is_complete = False
            info = infos[res_id]
            spider_name = RedisStrHelper.split(info)[1]
            if res_id in res_ids:
                res = app.AsyncResult(res_id)
                if res.state == 'SUCCESS':
                    is_complete = True
                else:
                    if res.state == 'FAILURE':
                        print res.traceback  # AsyncResult.traceback is a property, not a method
                        ## TODO: warning
                        pass
                    RedisHelper.update_celery_task_status(res_id, res.state)
            else:
                is_complete = True
            if is_complete:
                if spider_name in source_home_spiders:
                    # time.sleep(1 * 60)
                    call(spider_name)
                RedisHelper.del_celery_task_status(res_id)

    scheduler.start()
Example #4
def start_task_scheduling():
    """
    Start scheduling tasks.
    """
    sched = Scheduler()
    sched.start()

    interval = int(Config.get('task_interval_pull_bambino'))
    sched.add_interval_job(pull_bambino_data, seconds=interval)

    cheeseprism_interval = int(Config.get('task_interval_pull_cheesprism_data'))
    sched.add_interval_job(pull_cheeseprism_data, seconds=cheeseprism_interval)

    git_interval = int(Config.get('task_interval_pull_github_data'))
    sched.add_interval_job(pull_github_data, seconds=git_interval)

    interval = int(Config.get('task_interval_pull_releases_for_all_services'))
    sched.add_interval_job(pull_releases_for_all_services, seconds=interval)

    interval = int(Config.get('tast_interval_pull_service_configs'))
    sched.add_interval_job(pull_service_configs, seconds=interval)

    cleanup_interval = int(Config.get('task_interval_cleanup_queue'))
    sched.add_interval_job(cleanup_queue, seconds=cleanup_interval)

    interval = int(Config.get('tast_interval_add_webhook_callbacks'))
    sched.add_interval_job(add_webhook_callbacks, seconds=interval)
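
The repeated Config.get/add_interval_job pairs could be collapsed into a table-driven loop; a sketch with identical behavior, reusing the example's config keys verbatim:

# Table-driven job registration; same behavior as the calls above.
JOBS = (
    ('task_interval_pull_bambino', pull_bambino_data),
    ('task_interval_pull_cheesprism_data', pull_cheeseprism_data),
    ('task_interval_pull_github_data', pull_github_data),
    ('task_interval_pull_releases_for_all_services', pull_releases_for_all_services),
    ('tast_interval_pull_service_configs', pull_service_configs),
    ('task_interval_cleanup_queue', cleanup_queue),
    ('tast_interval_add_webhook_callbacks', add_webhook_callbacks),
)

for key, func in JOBS:
    sched.add_interval_job(func, seconds=int(Config.get(key)))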
Example #5
def main(config_file):
    info = load_config(config_file)

    day_of_week = info['days']
    hour = info['hour']
    minute = info['minute']

    config = {
        'apscheduler.jobstores.file.class': info['scheduler-config']['class'],
        'apscheduler.jobstores.file.path': info['scheduler-config']['path']
    }
    sched = Scheduler(config)

    sched.add_cron_job(run_scraper,
                       day_of_week=day_of_week,
                       hour=hour,
                       minute=minute)
    sched.add_cron_job(check_archive, day='first')

    sched.start()

    print('Press Ctrl+{0} to exit'.format('C'))

    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        sched.shutdown()  # Not strictly necessary if daemonic mode is enabled, but should be done if possible.
Example #6
def main():
    parser = OptionParser(usage="usage: python %prog [options] filename",
                          version="order_cherry server v%s" % version)
    parser.add_option("-p",
                      "--port",
                      action="store",
                      type="int",
                      dest="port",
                      default=8060,
                      help="Listen Port")
    parser.add_option("-f",
                      "--logfile",
                      action="store",
                      type="string",
                      dest="logfile",
                      default='./logs/run.log',
                      help="LogFile Path and Name. default=./run.log")
    parser.add_option("-n",
                      "--backupCount",
                      action="store",
                      type="int",
                      dest="backupCount",
                      default=10,
                      help="LogFile BackUp Number")
    parser.add_option("-m",
                      "--master",
                      action="store_true",
                      dest="master",
                      default=False,
                      help="master process")
    parser.add_option("-d",
                      "--debug",
                      action="store_true",
                      dest="debug",
                      default=False,
                      help="debug mode")
    (options, args) = parser.parse_args()
    initLog(options)

    sched = Scheduler()
    # Scheduled tasks:
    # sched.add_cron_job(order_cherry.close_order, minute='*/2')
    sched.start()

    cherrypy.config.update({
        'server.socket_host': '0.0.0.0',
        'server.socket_port': 8050,
        'server.socket_queue_size': 300,
        'server.max_request_header_size': 10 * 1024 * 1024,
        'server.thread_pool': 400,
        'response.headers.Content-Type': 'application/json; charset=UTF-8',
        'tools.encode.encoding': 'utf-8',
        'engine.autoreload.on': True,
    })
    cherrypy.tree.mount(urls.HellowWord(), '/v1/2')
    cherrypy.tree.mount(urls.Test(), '/api')
    cherrypy.tree.mount(HelloWordTest(), '/hello')
    cherrypy.engine.start()
    cherrypy.engine.block()
Example #7
class ClientScheduler(Observable):
    def __init__(self):
        global DeviceClientInstance
        
        self.scheduler = Scheduler()
        self.scheduler.start()
        
        self.wifi_job = WifiActiveCampaignCron.run
        self.bt_job = BluetoothActiveCampaignCron.run
        self.log_job = LogRotation.run
        self.status_job = StatusAPI.run
        
        # Schedule Status API
        self.addSecondSchedule(self.status_job, newschedule_secs=15)
        
        # Schedule Wi-Fi Job
        self.addMinuteSchedule(self.wifi_job, newschedule_mins=1)
        
        # Schedule BT Job
        self.addMinuteSchedule(self.bt_job, newschedule_mins=1)
        
        # Schedule Log Rotation Job, rotation doesn't always happen when this job runs
        self.addMinuteSchedule(self.log_job, newschedule_mins=1)
    
    def addMinuteSchedule(self, job, newschedule_mins):
        self.scheduler.add_interval_job(job, minutes=newschedule_mins)
    
    def addSecondSchedule(self, job, newschedule_secs):
        self.scheduler.add_interval_job(job, seconds=newschedule_secs)
Example #8
def watchlist(request):
    #Handle file upload
    global location
    if 'import' in request.POST:
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            newdoc = Document(docfile=request.FILES['docfile'])
            newdoc.save()
            location = newdoc.path()
            # Redirect to the document list after POST
            return HttpResponseRedirect(reverse('scraper.views.watchlist'))
    else:
        form = DocumentForm()  # An empty, unbound form

    if 'match' in request.POST:
        call_command('readfile', location)

        sched = Scheduler(standalone=True)

        def match_sched():
            call_command('readfile', location)

        sched.add_interval_job(match_sched, seconds=20, max_instances=1000)
        sched.start()

    # Load documents for the list page
    documents = Document.objects.all()

    # Render list page with the documents and the form
    return render_to_response('scraper/index2.0.html', {
        'documents': documents,
        'form': form
    },
                              context_instance=RequestContext(request))
Example #9
def main():
    
    log.info("BLI Monitor starting...")

    #check_pid()

    bli = BLIMonitor()

    spot = SpotMonitor()

    sched = Scheduler(daemonic=False)

    #sched.add_listener(err_listener, events.EVENT_ALL)

    sched.add_interval_job(lambda: bli.check(), seconds=3)

    sched.add_interval_job(lambda: spot.check(), seconds=3)

    sched.add_listener(
        err_listener, events.EVENT_JOB_ERROR | events.EVENT_JOB_EXECUTED
        | events.EVENT_JOB_MISSED)

    sched.start()

    log.info("started")
    """
    while 1:
        time.sleep(2)

        monitor.check()
    """
    pass
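
The err_listener these calls register is not shown; a minimal sketch of a compatible listener, assuming `log` is the same logger the example uses (APScheduler 2.x delivers JobEvent objects carrying a `code` and, on failure, a `traceback`):

# Hedged sketch of the missing err_listener (names assumed from context).
from apscheduler import events

def err_listener(event):
    if event.code == events.EVENT_JOB_ERROR:
        log.error("job %s failed: %s" % (event.job, event.traceback))
    elif event.code == events.EVENT_JOB_MISSED:
        log.warning("job %s missed its scheduled run time" % event.job)
    else:
        log.info("job %s executed" % event.job)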
Example #10
class ProgramHandler:
    def __init__(self, db, radio_station):
        self.__db = db
        self.__radio_station = radio_station
        self.__load_programs()
        self.__scheduler = Scheduler()
        self.__radio_station.logger.info(
            "Done initing ProgramHandler for {0}".format(radio_station.name))

    def run(self):
        self.__schedule_programs()
        self.__scheduler.start()

    def stop(self):
        self.__stop_program()
        #any clean up goes here

    def __schedule_programs(self):
        for scheduled_program in self.__scheduled_programs:  #throw all the jobs into AP scheduler and have it rain down alerts
            if not self.__is_program_expired(scheduled_program):
                try:
                    program = RadioProgram(self.__db, scheduled_program,
                                           self.__radio_station)
                    self.__scheduler.add_date_job(
                        getattr(program, 'start'),
                        scheduled_program.start.replace(tzinfo=None))
                    self.__radio_station.logger.info(
                        "Scheduled program {0} for station {1} starting at {2}"
                        .format(scheduled_program.program.name,
                                self.__radio_station.name,
                                scheduled_program.start))
                except Exception, e:
                    self.__radio_station.logger.info(str(e))
        return
Example #11
class TrackerManager(object):
    '''Manages process information collection for multiple processes'''
    LOG = logging.getLogger('pt.tracker_manager')

    def __init__(self, interval):
        TrackerManager.LOG.debug(
            "Initializing TrackerManager with interval = %s",
            interval)
        self.listeners = []
        self.probes = []
        self.scheduler = Scheduler()
        self.scheduler.add_interval_job(self.tracking_job, seconds=interval)
        self.scheduler.start()

    def add_listener(self, listener):
        '''Add listener that will receive metrics'''
        self.listeners.append(listener)

    def add_probes(self, probes):
        '''Add probe that will collect metrics'''
        self.probes.extend(probes)

    def tracking_job(self):
        '''a job that monitors'''
        results = []
        for probe in self.probes:
            results.extend(probe())
        self.submit(results)

    def submit(self, results):
        '''publish results to listeners'''
        for listener in self.listeners:
            listener.submit(results)
Example #12
class IsItWar(Plugin):
    def __init__(self, skype):
        super(IsItWar, self).__init__(skype)
        self.falseMessages = [
            "The world is still safe, Russia has not declared war yet",
            "http://suptg.thisisnotatrueending.com/archive/29138254/images/1388285593271.jpg",
            "http://www.meh.ro/original/2010_03/meh.ro3771.jpg",
            "http://d24w6bsrhbeh9d.cloudfront.net/photo/arpBmWp_700b_v1.jpg",
            "http://d24w6bsrhbeh9d.cloudfront.net/photo/aXb2VAv_700b.jpg",
            "http://d24w6bsrhbeh9d.cloudfront.net/photo/aLKXd6v_700b.jpg",
        ]
        self.sched = Scheduler()
        self.sched.start()
        self.sched.add_interval_job(self.is_it_war, minutes=10)
        self.command = "isitwaryet"

    def message_received(self, args, status, msg):

        res = urllib.urlopen("http://www.bbc.co.uk/news")
        text = res.read()
        if "declares war" in text.lower():
            msg.Chat.SendMessage("Brace your selves, mother Russia is coming")
        else:
            msg.Chat.SendMessage(choice(self.falseMessages))

    def is_it_war(self):
        print "checking if war"
        res = urllib.urlopen("http://www.bbc.co.uk")
        text = res.read()
        if "declares war" in text.lower():
            chat = self.skype.Chat("#stigrk85/$jvlomax;b43a0c90a2592b9b")
            chat.SendMessage("Brace yourself, Mother Russia is coming")
        


    def help(self, msg):
        msg.Chat.SendMessage("usage: @isitwaryet\nWill tell you if Russia has declared war")
Example #13
def main(standalone=True):
    
    env.configure("conf.ini")
    
    '''
    Transfer the history details the first time
    the program is executed.
    '''
    if env.config.get("transfer_hist_detail_on_loading") == "1":
        transferHistDetails(env.config.get("DB_FILE"), env.config.get("STOREID"), 
                         env.config.get("FTP_HOME"), env.config.get("FTP_HOST"),
                         env.config.get("FTP_PORT"), env.config.get("FTP_USERNAME"),
                         env.config.get("FTP_PASSWORD"))
     
    '''
    Create the scheduler for two jobs:
    1. check that the client is alive
    2. transfer details to the server
    '''
    scheduler = Scheduler(standalone=standalone)
      
    scheduler.add_interval_job(sync, 
                               minutes=int(env.config.get("sync_interval")), 
                               args=(env.config.get("DB_FILE"), env.config.get("STOREID"),
                                     env.config.get("PROGNAME"), env.config.get("PROG"),
                                     env.config.get("FTP_HOME"), env.config.get("FTP_HOST"),env.config.get("FTP_PORT"), 
                                     env.config.get("FTP_USERNAME"),env.config.get("FTP_PASSWORD")))
       
    scheduler.add_cron_job(transferFluxDetailsByStatus, 
                           day_of_week=env.config.get("upload_day_of_week"), hour=env.config.get("upload_hour"), minute=env.config.get("upload_minute"), 
                           args=(env.config.get("DB_FILE"), env.config.get("STOREID"),
                                 env.config.get("FTP_HOME"), env.config.get("FTP_HOST"),env.config.get("FTP_PORT"), 
                                 env.config.get("FTP_USERNAME"),env.config.get("FTP_PASSWORD"), 
                                 0))
    scheduler.start()
Example #14
class Timer:
    def __init__(self, fieldCanvas, field):
        #self.sched = BackgroundScheduler()
        self.sched = Scheduler()
        self.sched.start()
        self.timerValue = 0
        self.fieldCanvas = fieldCanvas
        self.field = field


    def updateTimerValue(self):
        self.timerValue += 1
        minutes = "0" + str(int(self.timerValue/60)) if int(self.timerValue/60) < 9 else str(int(self.timerValue/60))
        secondes = "0" + str(self.timerValue%60) if self.timerValue%60 < 9 else str(self.timerValue%60) 
        timerString = minutes + ":" + secondes
        self.fieldCanvas.itemconfigure(self.field ,text=timerString)

    def getTimer(self):
        return self.timerValue

    def startTimer(self):
        #job = self.sched.add_job(self.updateTimerValue, 'interval', seconds=1)
        self.sched.add_interval_job(self.updateTimerValue, seconds=1)
        
    def stopTimer(self):
        try:
            model.user.setScore(self.getTimer())
            self.sched.shutdown(wait=False)
        except Exception as e:
            #print(e)
            pass
Example #15
def main():

    log.info("BLI Monitor starting...")

    #check_pid()

    bli = BLIMonitor()

    spot = SpotMonitor()

    sched = Scheduler(daemonic=False)

    #sched.add_listener(err_listener, events.EVENT_ALL)

    sched.add_interval_job(lambda: bli.check(), seconds=3)

    sched.add_interval_job(lambda: spot.check(), seconds=3)

    sched.add_listener(
        err_listener, events.EVENT_JOB_ERROR | events.EVENT_JOB_EXECUTED
        | events.EVENT_JOB_MISSED)

    sched.start()

    log.info("started")
    """
    while 1:
        time.sleep(2)
        
        monitor.check()
    """
    pass
Example #16
class IntegrationTestBase(object):
    def setup(self):
        self.jobstore = self.make_jobstore()
        self.scheduler = Scheduler()
        self.scheduler.add_jobstore(self.jobstore, 'persistent')
        self.scheduler.start()

    def test_overlapping_runs(self):
        # Makes sure that "increment" is only run once, since it will still be
        # running when the next appointed time hits.

        vals = [0]
        self.scheduler.add_interval_job(increment, jobstore='persistent', seconds=1, args=[vals, 2])
        sleep(2.5)
        eq_(vals, [1])

    def test_max_instances(self):
        vals = [0]
        events = []
        self.scheduler.add_listener(events.append, EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        self.scheduler.add_interval_job(increment, jobstore='persistent', seconds=0.3, max_instances=2, max_runs=4,
                                        args=[vals, 1])
        sleep(2.4)
        eq_(vals, [2])
        eq_(len(events), 4)
        eq_(events[0].code, EVENT_JOB_MISSED)
        eq_(events[1].code, EVENT_JOB_MISSED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(events[3].code, EVENT_JOB_EXECUTED)
Example #17
def app(global_config, **settings):
    """ This function returns a WSGI application.
    
    It is usually called by the PasteDeploy framework during 
    ``paster serve``.
    """
    zcml_file = settings.get('configure_zcml', 'configure.zcml')
    db_string = settings.get('db_string')
    if db_string is None:
        raise ValueError("No 'db_string' value in application configuration.")
    initialize_sql(db_string)
    config = Configurator(root_factory=get_root, settings=settings)
    config.begin()
    config.load_zcml(zcml_file)
    config.end()
    # Ugly hack to configure the MapperExtension with the settings.
    removal_extension.path = settings.get('upload_directory')

    scheduler = Scheduler()
    # Send out queued mails
    from eportfolio.utilities.mail_delivery import trigger_queued_delivery
    scheduler.add_interval_job(trigger_queued_delivery, seconds=30)
    scheduler.start()

    return config.make_wsgi_app()
Example #18
class IntegrationTestBase(object):
    def setup(self):
        self.jobstore = self.make_jobstore()
        self.scheduler = Scheduler()
        self.scheduler.add_jobstore(self.jobstore, 'persistent')
        self.scheduler.start()

    def test_overlapping_runs(self):
        # Makes sure that "increment" is only run once, since it will still be
        # running when the next appointed time hits.

        vals = [0]
        self.scheduler.add_interval_job(increment, jobstore='persistent',
                                        seconds=1, args=[vals, 2])
        sleep(2.5)
        eq_(vals, [1])

    def test_max_instances(self):
        vals = [0]
        events = []
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        self.scheduler.add_interval_job(increment, jobstore='persistent',
            seconds=0.3, max_instances=2, max_runs=4, args=[vals, 1])
        sleep(2.4)
        eq_(vals, [2])
        eq_(len(events), 4)
        eq_(events[0].code, EVENT_JOB_MISSED)
        eq_(events[1].code, EVENT_JOB_MISSED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(events[3].code, EVENT_JOB_EXECUTED)
Example #19
def schedule():
    logging.info("[+]shivascheduler.py: INSIDE SCHEDULER")
    sched = Scheduler()
    duration = server.shivaconf.getint('analyzer', 'schedulertime')
    sched.add_interval_job(resetcounter, minutes=duration)
    sched.start()
    logging.info("Shiva scheduler, which dumps data into maindb, resets global counter and sends data on hpfeeds, started at %s and would execute every %d minutes " % (datetime.datetime.now(), duration))
Example #20
def start_scheduler(self):
    global schedulerObj
    schedulerObj = Scheduler()
    schedulerObj.start()
    schedulerObj.add_listener(self.my_listener,
                              EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    print 'Scheduler is started.'
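
my_listener is not shown; a minimal sketch of a listener compatible with the EVENT_JOB_EXECUTED | EVENT_JOB_ERROR mask, assuming the APScheduler 2.x event attributes (`exception`, `traceback`, `retval`):

def my_listener(self, event):
    # event.exception is only set for EVENT_JOB_ERROR events.
    if event.exception:
        print 'job %s crashed: %s' % (event.job, event.traceback)
    else:
        print 'job %s executed, retval=%r' % (event.job, event.retval)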
Example #21
def __init__(self):
    logging.basicConfig(filename=str(app.config['CONFIG']['logfile']),
                        level=logging.INFO)
    sched = Scheduler(daemon=True)
    sched.start()
    sched.add_interval_job(
        lambda: self.fetch_report_by_interval(),
        hours=int(app.config['CONFIG']['scheduler']['hourly_interval']))
    while True:
        pass  # busy-wait keeps the process alive for the scheduler thread
Example #22
def schedule(gmclient, opts):
    sched = Scheduler()
    sched.daemonic = False

    @sched.cron_schedule(hour="2-23", minute=00)
    def movie_task():
        task = MovieTask(opts)
        gmclient.submit_job(task)

    @sched.cron_schedule(hour="2-23", minute=10)
    def series_task():
        task = SeriesTask(opts)
        gmclient.submit_job(task)

    @sched.cron_schedule(hour="2-23", minute=30)
    def useries_task():
        task = UpdatingSeriesTask(opts)
        gmclient.submit_job(task)

    @sched.cron_schedule(hour="2-23", minute=00)
    def error_episode_task():
        task = DownloadErrorEpisodeTask(opts)
        gmclient.submit_job(task)

    sched.start()
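
The decorator form used above registers each job at function-definition time; it is equivalent to defining the function first and calling add_cron_job explicitly. A hedged sketch (it would live inside schedule(), where sched, gmclient and opts are in scope; the extra task name is hypothetical):

def extra_error_check():
    gmclient.submit_job(DownloadErrorEpisodeTask(opts))

sched.add_cron_job(extra_error_check, hour="2-23", minute=45)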
Example #23
def main():
    sched = Scheduler()
    sched.start()

    c_date = datetime.today()
    date_ = '2013-11-30'
    delay = timedelta(minutes=2)

    all_portfolios = combine_lists(PROGRESS_PORTFOLIOS,FIS_GROUP_PORTFOLIOS)

    # end of week jobs
    #-----------------
    if c_date.weekday() == 4:  # weekday is a method; 4 is Friday
        # runs at 6pm Friday evening
        sched_date = datetime(c_date.year, c_date.month, c_date.day, 18, 0, 0)
        sched.add_date_job(
            axys_job,
            sched_date,
            [MODEL_PORTFOLIOS,all_portfolios,date_])

    # monthly jobs
    #-------------
    if c_date.day == 1:
        # runs at 10 am
        sched_date = datetime(c_date.year, c_date.month, c_date.day, 10, 0, 0)
        sched.add_date_job(
            axys_job,
            sched_date,
            [MODEL_PORTFOLIOS,all_portfolios,date_])
        sched_date = sched_date + delay

    # keep script 'running' in order to allow the scheduler to stay open and run
    # the jobs added
    time.sleep(60)
    sched.shutdown()
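
One caveat: the script sleeps only 60 seconds before shutting down, so date jobs scheduled for 6 pm or 10 am will usually never fire. A sketch of waiting until the scheduler's job list drains instead (date jobs are removed once they run; assumes the same `sched` object):

# Hedged alternative to the fixed sleep: wait for pending jobs to drain.
while sched.get_jobs():
    time.sleep(60)
sched.shutdown()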
Example #24
def start_scheduler(event):
    sched = Scheduler()
    sched.start()

    sched.add_interval_job(manage_queue,
                           kwargs={'settings': event.app.registry.settings},
                           seconds=2)
Example #25
def callback():
	# Start the scheduler
	sched = Scheduler()
	sched.start()
	alarmMonth = userentermonth.get()
	print len(alarmMonth) #DEBUG
	print alarmMonth #DEBUG
	alarmDay = userenterday.get()
	print len(alarmDay) #DEBUG
	print alarmDay #DEBUG
	alarmYear = userenteryear.get()
	print len(alarmYear) #DEBUG
	print alarmYear #DEBUG
	alarmHour = userenterhour.get()
	print len(alarmHour) #DEBUG
	print alarmHour #DEBUG
	alarmMinute = userenterminute.get()
	print len(alarmMinute) #DEBUG
	print alarmMinute #DEBUG
# 	alarmMonth = 5
# 	alarmDay = 1
# 	alarmHour = 5
# 	alarmMinute = 30
	# add values in here to set lighting variables so that they remain static
	# Schedules job_function for specific date and time
	sched.add_cron_job(AlarmOn, year=alarmYear, month=alarmMonth, day=alarmDay, hour=alarmHour, minute=alarmMinute)
	lbl.configure(text="Alarm Set!")
Example #26
class Scheduler(object):
    """ The Zato's job scheduler. All of the operations assume the data's being
    first validated and sanitized by relevant Zato public API services.
    """
    def __init__(self, singleton=None, init=False):
        self.singleton = singleton
        self.broker_token = None
        self.zmq_context = None
        self.client_push_broker_pull = None
        
        if init:
            self._init()
            
    def _init(self):
        self._sched = APScheduler()
        self._sched.start()
        
    def wait_for_init(self):
        """ Sleeps till the background APScheduler's thread is up and running.
        """
        self._init()
        while not self._sched.running:
            time.sleep(0.01)
        
    def _parse_cron(self, def_):
        minute, hour, day_of_month, month, day_of_week = [elem.strip() for elem in def_.split()]
        return minute, hour, day_of_month, month, day_of_week
        
    def _on_job_execution(self, name, service, extra, broker_msg_type):
        """ Invoked by the underlying APScheduler when a job is executed. Sends
        the actual execution request to the broker so it can be picked up by
        one of the parallel server's broker clients.
        """
        msg = {'action': SCHEDULER.JOB_EXECUTED, 'name':name, 'service': service, 'payload':extra, 'cid':new_cid()}
        self.singleton.broker_client.send(msg)
        
        if logger.isEnabledFor(logging.DEBUG):
            msg = 'Sent a job execution request, name [{0}], service [{1}], extra [{2}]'.format(
                name, service, extra)
            logger.debug(msg)

    def create_edit(self, action, job_data, broker_msg_type=MESSAGE_TYPE.TO_PARALLEL_ANY):
        """ Invokes a handler appropriate for the given action and job_data.job_type.
        """
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(job_data)
            
        if not job_data.is_active:
            msg = 'Job [{0}] is not active, not scheduling it'.format(job_data.name)
            logger.info(msg)
            return
        
        handler = '{0}_{1}'.format(action, job_data.job_type)
        handler = getattr(self, handler)
        
        try:
            handler(job_data, broker_msg_type)
        except Exception, e:
            msg = 'Caught exception [{0}]'.format(format_exc(e))
            logger.error(msg)
Example #27
def run_cron_cleanup(settings):
    '''
    Read cron scheduling entries and schedule
    '''
    cron_time = {}
    year = settings.get("extract.cleanup.schedule.cron.year")
    month = settings.get("extract.cleanup.schedule.cron.month")
    day = settings.get("extract.cleanup.schedule.cron.day")
    week = settings.get("extract.cleanup.schedule.cron.week")
    day_of_week = settings.get("extract.cleanup.schedule.cron.day_of_week")
    hour = settings.get("extract.cleanup.schedule.cron.hour")
    minute = settings.get("extract.cleanup.schedule.cron.minute")
    second = settings.get("extract.cleanup.schedule.cron.second")

    if year is not None:
        cron_time['year'] = year
    if month is not None:
        cron_time['month'] = month
    if day is not None:
        cron_time['day'] = day
    if week is not None:
        cron_time['week'] = week
    if day_of_week is not None:
        cron_time['day_of_week'] = day_of_week
    if hour is not None:
        cron_time['hour'] = hour
    if minute is not None:
        cron_time['minute'] = minute
    if second is not None:
        cron_time['second'] = second

    if len(cron_time) > 0:
        sched = Scheduler()
        sched.start()
        sched.add_cron_job(delete_stats, **cron_time)
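
The field-by-field None checks above could be collapsed into a loop over the cron field names; a sketch with identical behavior, using the same settings keys:

CRON_FIELDS = ('year', 'month', 'day', 'week', 'day_of_week',
               'hour', 'minute', 'second')
cron_time = {}
for field in CRON_FIELDS:
    value = settings.get("extract.cleanup.schedule.cron." + field)
    if value is not None:
        cron_time[field] = value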
Example #28
class Wikipedia(Plugin):
    def __init__(self, skype):
        super(Wikipedia, self).__init__(skype)
        self.daily_channels = ["#stigrk85/$jvlomax;b43a0c90a2592b9b"]
        self.sched = Scheduler()
        self.sched.start()
        self.command = "wikipedia"
        self.sched.add_cron_job(self.dailyWikipedia, hour=18, minute=0, day_of_week="mon-sun")

    def message_received(self, args, status, msg):
        if (len(args) == 1 and args[0] == "random") or not args:
            url = self.fetch_randWiki()
            msg.Chat.SendMessage(url)
        else:
            try:
                page = wiki.wikipedia.page(" ".join(args))
                if page.url:
                    msg.Chat.SendMessage(urllib.unquote(page.url))
                else:
                    msg.Chat.SendMessage("Could not find any results for {}".format(" ".join(args)))
            except wiki.exceptions.DisambiguationError:
                msg.Chat.SendMessage("Your search is ambiguous")
            except wiki.exceptions.PageError:
                msg.Chat.SendMessage("Could not find any results for {}".format(" ".join(args)))

    def fetch_randWiki(self):
        r = requests.get("http://en.wikipedia.org/wiki/Special:Random")
        return r.url

    def dailyWikipedia(self):
        for channel in self.daily_channels:
            chat = self.skype.Chat(channel)
            chat.SendMessage("Dagens random wikipedia: " + self.fetch_randWiki())
Example #29
    def handle(self, *args, **options):
        sched = Scheduler()
        sched.start()

        from twilio.rest import TwilioRestClient
        account_sid = "AC6fe90756ae4096c5bf790984038a3f32"
        auth_token  = "97e8833ee3553bc4d9d16e86f1865d32"
        client = TwilioRestClient(account_sid, auth_token)

        for user in user_list:
            user_schedule = Schedule.objects.all().filter(user=user)
            for schedule in user_schedule:
                day_of_week = schedule.day_of_week
                hour = schedule.hour
                minute = schedule.minute
                user_message = schedule.message
                print 'BEFORE:' + str(user_message)
                
                def timed_job(msg):
                    print 'AFTER' + str(msg)
                # Bind user_message now: a bare lambda would capture the loop
                # variable and every job would print the last message.
                sched.add_cron_job(lambda msg=user_message: timed_job(msg),
                                   second='0-59')

        #sched.start()
        print 'test'
        while True:
            pass  # keep the main thread alive so the cron jobs can fire
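
Why the default-argument binding matters in the loop above: a lambda closes over the variable, not its value, so without `msg=user_message` every scheduled job would see the last message from the loop. A minimal demonstration:

# Late binding: all three closures see the final value of i.
funcs = [lambda: i for i in range(3)]
print [f() for f in funcs]          # prints [2, 2, 2]

# Default arguments bind at definition time instead.
funcs = [lambda i=i: i for i in range(3)]
print [f() for f in funcs]          # prints [0, 1, 2]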
Example #30
class Scheduler(Plugin):

    crons = {}
    intervals = {}
    started = False

    def __init__(self):

        addEvent("schedule.cron", self.cron)
        addEvent("schedule.interval", self.interval)
        addEvent("schedule.remove", self.remove)

        self.sched = Sched(misfire_grace_time=60)
        self.sched.start()
        self.started = True

    def remove(self, identifier):
        for cron_type in ["intervals", "crons"]:
            try:
                self.sched.unschedule_job(getattr(self, cron_type)[identifier]["job"])
                log.debug("%s unscheduled %s", (cron_type.capitalize(), identifier))
            except:
                pass

    def doShutdown(self):
        self.stop()
        return super(Scheduler, self).doShutdown()

    def stop(self):
        if self.started:
            log.debug("Stopping scheduler")
            self.sched.shutdown()
            log.debug("Scheduler stopped")
        self.started = False

    def cron(self, identifier="", handle=None, day="*", hour="*", minute="*"):
        log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, hour, minute))

        self.remove(identifier)
        self.crons[identifier] = {
            "handle": handle,
            "day": day,
            "hour": hour,
            "minute": minute,
            "job": self.sched.add_cron_job(handle, day=day, hour=hour, minute=minute),
        }

    def interval(self, identifier="", handle=None, hours=0, minutes=0, seconds=0):
        log.info(
            "Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s", (identifier, hours, minutes, seconds)
        )

        self.remove(identifier)
        self.intervals[identifier] = {
            "handle": handle,
            "hours": hours,
            "minutes": minutes,
            "seconds": seconds,
            "job": self.sched.add_interval_job(handle, hours=hours, minutes=minutes, seconds=seconds),
        }
Example #31
class SayulitaMain(threading.Thread):

	def __init__(self):
		threading.Thread.__init__(self)

		self.speaker = Synthetizer("festival", "spanish")
		self.scheduler = Scheduler()
		self.scheduler.start()
	
	def initialization(self):
		self.speaker.speechit("Aqui proyecto Sayulita operado por x e uno gol yanqui quebec")
		self.speaker.speechit("Estacion experimental de texto a voz")

	def features(self):
		self.clock = Clock()
		self.news = FeedParserRss()

	def logging(self, command):
		if command == 'start':
			logging.basicConfig(filename='sayulita.log', filemode='w', level=logging.INFO)
			logging.basicConfig(format='%(asctime)s %(message)s')
			logging.info('Started')

	def scheduling(self):
		#self.scheduler.interval_schedule(seconds=1)
		#self.clock.clockget()
		self.news.getitems()
		self.scheduler.add_cron_job(self.clock.clockget, month='*', day='*', hour='*', minute='*', second='0')
		self.scheduler.add_cron_job(self.initialization, month='*', day='*', hour='*', minute='*', second='15')
		self.scheduler.add_cron_job(self.news.getitems, month='*', day='*', hour='*', minute='15,30,45', second='15')

		self.scheduler.print_jobs()
Example #32
def schedule(file_name, n_jobs, frequency):
    '''Schedule the scraper to execute every `frequency` minutes and shut it
       down after a certain number of jobs have been run'''

    # Create a default logger
    basicConfig()

    # Run the first job
    scrape(file_name)

    # Instantiate the scheduler
    sched = Scheduler()

    # Start it
    sched.start()

    # Schedule the function
    sched.add_interval_job(scrape,
                           args=[file_name],
                           minutes=frequency,
                           misfire_grace_time=60)

    # Wait to run n_jobs (assuming 1 job per hour, which is 3600 seconds)
    sleep(n_jobs * 3600)

    # Shutdown the scheduler
    sched.shutdown()
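
Note that the wait hard-codes one run per hour even though the job fires every `frequency` minutes; deriving the wait from the interval keeps the two consistent. A sketch:

# Hedged alternative: wait long enough for n_jobs runs at `frequency`
# minutes per run, instead of assuming hourly runs.
sleep(n_jobs * frequency * 60)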
Example #33
def app(global_config, **settings):
    """ This function returns a WSGI application.
    
    It is usually called by the PasteDeploy framework during 
    ``paster serve``.
    """
    zcml_file = settings.get('configure_zcml', 'configure.zcml')
    db_string = settings.get('db_string')
    if db_string is None:
        raise ValueError("No 'db_string' value in application configuration.")
    initialize_sql(db_string)
    config = Configurator(root_factory=get_root, settings=settings)
    config.begin()
    config.load_zcml(zcml_file)
    config.end()
    # Ugly hack to configure the MapperExtension with the settings.
    removal_extension.path = settings.get('upload_directory')
    
    scheduler = Scheduler()
    # Send out queued mails
    from eportfolio.utilities.mail_delivery import trigger_queued_delivery
    scheduler.add_interval_job(trigger_queued_delivery, seconds=30)
    scheduler.start()
    
    return config.make_wsgi_app()
Example #34
def load(self):
    sched = Scheduler()
    sched.daemonic = False
    # Schedules job_function to run every 3 seconds.
    sched.add_cron_job(self.job_function, second='*/3')
    sched.start()
Example #35
class BlueprintHandler:
    setter_blacklist = []
    getter_blacklist = []

    def __init__(self, blueprint, testing=False, testing_count=10):
        self.blueprint = blueprint
        self.testing = testing
        self.testing_count = testing_count
        self.scheduler = Scheduler()

    def do_step(self):
        print "stepping"
        try:
            # Fetch any outstanding events from the engine process and execute in simulator
            while not self.local_queue.empty():
                action = self.local_queue.get()
                try:
                    self.blueprint.interface.set(action[0], float(action[1]))
                    print "Received action:", action
                except exceptions.ValueError:
                    print "Value '" + str(action[1]) + "' is not convertable to float"

            points = self.blueprint.interface.get_getters()

            self.blueprint.step(stepcount=int(1 / 0.1))

            g = {}
            for point in points:
                if point in BlueprintHandler.getter_blacklist:
                    continue
                g[point] = self.blueprint.interface.get(point)

            for k in g.keys():
                m = Measurement()
                m.bid = self.blueprint.building.buildingID
                m.timestamp = datetime.utcnow().replace(tzinfo=utc)
                m.uuid = k
                m.val = g[k]
                m.save()
        except:
            # print 'error: ', sys.exc_info()
            print "trace: ", traceback.print_exc()

    def init_scheduler(self):
        schedule_store = RAMJobStore()

        # Write data every 15 seconds.
        job_second = self.scheduler.add_interval_job(self.do_step, seconds=15)

        schedule_store.add_job(job_second)

        self.scheduler.add_jobstore(schedule_store, "Simulator scheduler", quiet=False)

    def start(self, queue=None):
        self.local_queue = queue
        self.init_scheduler()
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
Example #36
class SimpleScheduler:
    def __init__(self):
        self._sched = Scheduler()
        self._sched.start()
        self._jobs = {}

    def schedule(self, job):
        if job.name in self._jobs:
            logger.warn("Already have job with name: %s" % job.name)
            return False

        try:
            self._sched.add_cron_job(job._execute_and_store, **job.schedule)
        except TypeError:
            logger.error("Invalid schedule for job with name: %s" % job.name +
                         " schedule: %s" % job.schedule)
            return False
        self._jobs[job.name] = job
        return True

    def schedules(self):
        return {job.name: job.schedule for job in self._jobs.values()}

    def execute(self, name):
        return self._sched.add_date_job(self._jobs[name]._execute_and_store,
                                        datetime.now() + timedelta(seconds=1))
Example #37
def watchlist(request):
	#Handle file upload
	global location
	if 'import' in request.POST:
		form = DocumentForm(request.POST, request.FILES)
		if form.is_valid():
			newdoc = Document(docfile = request.FILES['docfile'])
			newdoc.save()
			location = newdoc.path()
			# Redirect to the document list after POST
			return HttpResponseRedirect(reverse('scraper.views.watchlist'))
	else:
		form = DocumentForm() # An empty, unbound form

	if 'match' in request.POST:
		call_command('readfile', location)

		sched = Scheduler(standalone = True)
		def match_sched():
			call_command('readfile', location)
		sched.add_interval_job(match_sched, seconds = 20, max_instances = 1000)
		sched.start()
		

	# Load documents for the list page
	documents = Document.objects.all()

	# Render list page with the documents and the form
	return render_to_response(
		'scraper/index2.0.html',
		{'documents': documents, 'form' : form},
		context_instance = RequestContext(request)
	)
Example #38
def scheduleNotification(username, password, receivers, subject, message, attachments, timestring):
    logging.basicConfig()
    scheduler = Scheduler()
    scheduler.start()
    sentOn = datetime.datetime.strptime(timestring, "%Y-%m-%dT%H:%M")
    scheduler.add_date_job(emailUser, sentOn,
                           [username, password, receivers.split(","),
                            subject, message, attachments])
    atexit.register(lambda: scheduler.shutdown(wait=False))
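
A hypothetical invocation, assuming emailUser accepts the credentials, recipient list, subject, message and attachments in that order:

# All values hypothetical; the timestring must match "%Y-%m-%dT%H:%M".
scheduleNotification("sender@example.com", "app-password",
                     "a@example.com,b@example.com",
                     "Reminder", "Meeting at 15:00", [],
                     "2014-05-01T14:30")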
Example #39
def main():

	config_file = "config.xml"
	xml = ET.parse(config_file)
	HOST_NAME = xml.find('host_name').text
	DB_NAME = xml.find('db_name').text
	DISPLAY_ADDR = xml.find("display_addr").text
	DISPLAY_PORT = int(xml.find('display_port').text)
	ENDPOINT = xml.find("endpoint").text
	SECONDS = int(xml.find("seconds").text)
	USERNAME = xml.find("username").text
	PASSWORD = xml.find("password").text

	HOST = 'http://' + USERNAME + ':' + PASSWORD + '@' + HOST_NAME

	print "starting ..."
	
	#print HOST, DB_NAME, DISPLAY_ADDR, DISPLAY_PORT
	#readData(HOST, DB_NAME, DISPLAY_ADDR, DISPLAY_PORT)
	display = LedDisplay(HOST, DB_NAME, DISPLAY_ADDR, DISPLAY_PORT, ENDPOINT)
	# Start the scheduler
	sched = Scheduler()

	sched.add_interval_job(display.query, seconds=SECONDS)
	sched.start()

	try: 
		while True:
			time.sleep(0.1)

	except KeyboardInterrupt:
		print "terminating"
		sched.shutdown()
Example #40
def schedule(self):
    sched = Scheduler()
    sched.add_interval_job(self.resetcounter, minutes=self.duration)
    sched.start()
    logging.info(
        "Artemis scheduler, which resets the global counter and sends data on "
        "hpfeeds, started at %s, executes every %d minutes"
        % (datetime.datetime.now(), self.duration))
Example #41
def pre_eva_start(self, conf):
    # Load all jobs
    self.invoke('pre_scheduler_load_jobs')
    sched = APScheduler()
    conf['scheduler']['scheduler'] = sched
    for job_name in conf['scheduler']['jobs']:
        job = conf['scheduler']['jobs'][job_name]
        if job.get('type') == 'date':
            # datetime is a datetime string in this case
            # ie: '2012-11-06 14:25:10.8880'
            sched.add_date_job(job['func'], job['datetime'], args=[conf])
        elif job.get('type') == 'interval':
            sched.add_interval_job(job['func'],
                                   seconds=job['interval'].get('seconds', 0),
                                   minutes=job['interval'].get('minutes', 0),
                                   hours=job['interval'].get('hours', 0),
                                   days=job['interval'].get('days', 0),
                                   weeks=job['interval'].get('weeks', 0),
                                   start_date=job['interval'].get('start_date'),
                                   args=[conf])
        elif job.get('type') == 'cron':
            sched.add_cron_job(job['func'],
                               second=job['interval'].get('second'),
                               minute=job['interval'].get('minute'),
                               hour=job['interval'].get('hour'),
                               day=job['interval'].get('day'),
                               week=job['interval'].get('week'),
                               month=job['interval'].get('month'),
                               year=job['interval'].get('year'),
                               day_of_week=job['interval'].get('day_of_week'),
                               args=[conf])
    sched.start()
    self.invoke('post_scheduler_load_jobs')
Example #42
    def add_job(self, command, hour, minute, sec=0):

        logger.info("2. scheduler adding job command: %s at %s:%s:%s" % (
            command, hour, minute, sec
        ))
        sched = Scheduler(standalone=True)

        #make a db file
        db_path = os.path.join(os.path.dirname(__file__), 'example.db')
        shelve.open(db_path)
        sched.add_jobstore(ShelveJobStore(db_path), 'shelve')

        exec_time = datetime(
            date.today().year,
            date.today().month,
            date.today().day,
            int(hour),
            int(minute),
            int(sec)
        )
        #test
        #exec_time = datetime.now() + timedelta(seconds=5)

        sched.add_date_job(
            job,
            exec_time,
            name='alarm',
            jobstore='shelve',
            args=[command]
        )
        sched.start()
Example #43
class FreshPots(BotPlugin):

    pots = [
        'http://i.imgur.com/Q2b54vc.jpg',
        'http://i.imgur.com/SYsdsew.jpg',
        'http://i.imgur.com/caIbQMh.png',
        'http://i.imgur.com/MCwiikl.jpg',
        'http://i.imgur.com/g4sFHwz.jpg',
        'http://i.imgur.com/vnuJQ4S.gif',
        'http://i.imgur.com/cm3Y6jX.jpg',
        'http://i.imgur.com/ZcKZTFU.jpg',
        'http://i.imgur.com/4mEaNIp.jpg',
        'http://i.imgur.com/gDukRFu.png',
        'http://i.imgur.com/1MDO9fV.png',
        'http://i.imgur.com/U5cFX3M.jpg'
    ]

    def activate(self):
        super(FreshPots, self).activate()
        self.sched = Scheduler(coalesce=True)
        self.sched.start()
        self.sched.add_cron_job(
            self.fresh_pots,
            kwargs={'message': 'fresh pots time'},
            day_of_week='mon-fri',
            hour=11)
        self.sched.add_cron_job(
            self.fresh_pots,
            kwargs={'message': 'fresh pots time'},
            day_of_week='mon-fri',
            hour=15)
        logging.info(self.sched.get_jobs())

    def callback_message(self, conn, mess):
        body = mess.getBody().lower()
        if body.find('coffee') != -1 or body.find('fresh pots') != -1:
            self.fresh_pots(mess.getFrom())

    def fresh_pots(self, channel='#cloudant-bristol', message=None):
        if message:
            self.send(
                channel,
                message,
                message_type='groupchat'
            )

        self.send(
            channel,
            choice(self.pots),
            message_type='groupchat'
        )
        self.check()

    def check(self):
        for job in self.sched.get_jobs():
            delta = job.next_run_time - datetime.now()
            hour_delta = timedelta(seconds=3600)
            if delta < hour_delta:
                job.compute_next_run_time(datetime.now() + hour_delta)
Example #44
def create_app(configFile):
    crowdsourcingWebApp = CrowdsourcingWeb(configFile)

    apschedulerObj = Scheduler()
    apschedulerObj.start()
    #apschedulerObj.add_cron_job(clean_expire_function,year="*", month="*", day="*", hour="*",minute="*",second="*",args=[crowdsourcingWebApp]);

    return crowdsourcingWebApp
Example #45
def run_scheduler(app):
    scheduler = Scheduler()

    expire_wrapper = lambda: expire_sessions(app)
    scheduler.add_interval_job(expire_wrapper, seconds=EXPIRE_SESSION_EVERY)

    scheduler.start()
    print "APScheduler started successfully"
Example #46
def schedule():
    sched = Scheduler()
    duration = server.shivaconf.getint('analyzer', 'schedulertime')
    sched.add_interval_job(resetcounter, minutes=duration)
    sched.start()
    logging.info(
        "Shiva scheduler, which dumps data into maindb, resets the global counter "
        "and sends data on hpfeeds, started at %s and will execute every %d minutes"
        % (datetime.datetime.now(), duration))
Example #47
def initialize():
    apsched = Scheduler()
    apsched.start()

    apsched.add_interval_job(syncFacebookEvents, seconds=10)
    print "added job 1"
    apsched.add_interval_job(syncSharedEvents, seconds=10)
    print "added job 2"
Example #48
def main():
    fpid = os.fork()
    if fpid != 0:
        # Running as daemon now. PID is fpid
        sys.exit(0)
    sched = Scheduler()
    sched.add_cron_job(report, minute='*/10')
    sched.start()
    signal.pause()
Example #49
def start_schedules(config):
    sched = Scheduler()
    config.scheduler = sched
    sched.start()
    for repo in config.repositories:
        for branch in repo.branches:
            if branch.schedule:
                sched.add_cron_job(tester(config, repo, branch),
                                   **branch.schedule)
Example #50
def daemon():
    worker_minutes = cfg.get("job.switch_cfg_persistence", "run_on_minutes")
    logger.info("Starting persistence-control daemon...")
    logger.info("The persistence-control worker will run on minutes '%s'..." %
                worker_minutes)
    sched = Scheduler()
    sched.add_cron_job(worker, minute=worker_minutes)
    sched.start()
    signal.pause()
Example #51
def last_tag_update():
    cron = Scheduler(daemon=True)
    cron.start()

    @cron.interval_schedule(minutes=2)
    def job_function():
        last_tag_get_update()

    atexit.register(lambda: cron.shutdown(wait=False))
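
The decorator form is equivalent to registering the wrapped function directly; a sketch assuming the same helper:

# Same schedule without the decorator.
cron.add_interval_job(last_tag_get_update, minutes=2)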
Example #52
def bootstrap(self):
    """Bootstraps the static inventory writer background job"""
    self.write_static_inventory()
    logging.basicConfig()
    interval = self.config['interval']
    sched = Scheduler()
    sched.start()
    sched.add_interval_job(self.write_static_inventory,
                           seconds=interval)
Example #53
def bootstrap(self):
    """Bootstraps the static inventory writer background job"""
    self.rm_sitemap_files(Source.STATIC_FILE_PATH)
    self.write_static_inventory()
    logging.basicConfig()
    interval = self.config['interval']
    sched = Scheduler()
    sched.start()
    sched.add_interval_job(self.write_static_inventory, seconds=interval)
Example #54
def wrap(**options):
    sched = Scheduler()
    sched.start()
    seconds = 5
    interval = timedelta(seconds=seconds)
    #trigger = RandomizedIntervalTrigger(interval, randomize = lambda : uniform(seconds / 8, seconds / 4))
    trigger = RandomizedIntervalTrigger(interval)

    # add_job(trigger, func, args, kwargs) is the low-level APScheduler 2.x entry point.
    sched.add_job(trigger, func, None, None)
Example #55
def create_app(object_name):
    app = Flask(__name__)
    app.config.from_object(object_name)
    cors = CORS(app,
                resources={r"/*": {
                    "origins": "*"
                }},
                allow_headers=['Authorization', 'Content-Type'])
    db.init_app(app)

    cron = Scheduler(daemon=True)
    cron.start()

    @app.route('/')
    def index():
        return render_template("index.html")

    # Importing Blueprints
    from controllers.cas import blueprint_cas

    # Registering blueprints
    app.register_blueprint(blueprint_cas)

    # Custom HTTP error handlers
    @app.errorhandler(400)
    def custom_400(error):
        print error
        return jsonify(message=error.description['message']), 400

    @app.errorhandler(401)
    def custom_401(error):
        return jsonify(message=error.description['message']), 401

    @app.errorhandler(403)
    def custom_403(error):
        return jsonify(message=error.description['message']), 403

    @app.errorhandler(404)
    def custom_404(error):
        return jsonify(message="Item or resource not found"), 404

    @app.errorhandler(405)
    def custom_405(error):
        return jsonify(message="Not allowed"), 405

    @app.errorhandler(500)
    def custom_500(error):
        return jsonify(message=error.description['message']), 500

    #@app.errorhandler(Exception)
    #def unhandled_exception(e):
    #    return jsonify(message=str(e)),500

    # Shutdown your cron thread if the web process is stopped
    atexit.register(lambda: cron.shutdown(wait=False))

    return app