Example #1
class IntegrationTestBase(object):
    def setup(self):
        self.jobstore = self.make_jobstore()
        self.scheduler = Scheduler()
        self.scheduler.add_jobstore(self.jobstore, 'persistent')
        self.scheduler.start()

    def test_overlapping_runs(self):
        # Make sure that "increment" is only run once, since it will still be
        # running when the next appointed time hits.

        vals = [0]
        self.scheduler.add_interval_job(increment, jobstore='persistent',
                                        seconds=1, args=[vals, 2])
        sleep(2.5)
        eq_(vals, [1])

    def test_max_instances(self):
        vals = [0]
        events = []
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        self.scheduler.add_interval_job(increment, jobstore='persistent',
            seconds=0.3, max_instances=2, max_runs=4, args=[vals, 1])
        sleep(2.4)
        eq_(vals, [2])
        eq_(len(events), 4)
        eq_(events[0].code, EVENT_JOB_MISSED)
        eq_(events[1].code, EVENT_JOB_MISSED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(events[3].code, EVENT_JOB_EXECUTED)
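
The tests above rely on two helpers the snippet omits: nose's eq_ assertion and an
increment function that bumps a counter and then sleeps, so that overlapping runs can
be provoked. A minimal sketch of the assumed helpers (the sleep argument is inferred
from how the tests call it):

from time import sleep
from nose.tools import eq_  # assertion helper used throughout these tests

def increment(vals, sleeptime):
    # Assumed helper: bump the counter, then block long enough for the
    # next scheduled run to start while this one is still executing.
    vals[0] += 1
    sleep(sleeptime)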
Example #2
class BlueprintHandler:
    setter_blacklist = []
    getter_blacklist = []

    def __init__(self, blueprint, testing=False, testing_count=10):
        self.blueprint = blueprint
        self.testing = testing
        self.testing_count = testing_count
        self.scheduler = Scheduler()

    def do_step(self):
        print "stepping"
        try:
            # Fetch any outstanding events from the engine process and execute in simulator
            while not self.local_queue.empty():
                action = self.local_queue.get()
                try:
                    self.blueprint.interface.set(action[0], float(action[1]))
                    print "Received action:", action
                except ValueError:
                    print "Value '" + str(action[1]) + "' is not convertible to float"

            points = self.blueprint.interface.get_getters()

            self.blueprint.step(stepcount=int(1 / 0.1))
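            # The call above advances int(1 / 0.1) == 10 sub-steps, presumably
            # a 0.1 s simulation resolution per 1 s of wall time.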

            g = {}
            for point in points:
                if point in BlueprintHandler.getter_blacklist:
                    continue
                g[point] = self.blueprint.interface.get(point)

            for k in g.keys():
                m = Measurement()
                m.bid = self.blueprint.building.buildingID
                m.timestamp = datetime.utcnow().replace(tzinfo=utc)
                m.uuid = k
                m.val = g[k]
                m.save()
        except Exception:
            # print 'error: ', sys.exc_info()
            traceback.print_exc()

    def init_scheduler(self):
        schedule_store = RAMJobStore()

        # Write data every 15 seconds.
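        # add_interval_job's positional args after func are (weeks, days, hours, minutes, seconds).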
        job_second = self.scheduler.add_interval_job(self.do_step, 0, 0, 0, 0, 15)

        schedule_store.add_job(job_second)

        self.scheduler.add_jobstore(schedule_store, "Simulator scheduler", quiet=False)

    def start(self, queue=None):
        self.local_queue = queue
        self.init_scheduler()
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
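
A minimal driver sketch for the handler above; the blueprint object and the queue
wiring are assumptions based on how start() and do_step() use them:

from multiprocessing import Queue

handler = BlueprintHandler(blueprint)  # 'blueprint' is supplied by the host application
handler.start(queue=Queue())           # the scheduler now calls do_step every 15 seconds
# ... later ...
handler.stop()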
Example #3
    def add_job(self, command, hour, minute, sec=0):

        logger.info("2. scheduler adding job command: %s at %s:%s:%s" % (
            command, hour, minute, sec
        ))
        sched = Scheduler(standalone=True)

        # Make a db file for the job store, and hand the same path to the
        # store itself (the original passed a CWD-relative name here).
        db_path = os.path.join(
            os.path.dirname(__file__),
            'example.db'
        )
        shelve.open(db_path)
        sched.add_jobstore(ShelveJobStore(db_path), 'shelve')

        exec_time = datetime(
            date.today().year,
            date.today().month,
            date.today().day,
            int(hour),
            int(minute),
            int(sec)
        )
        #test
        #exec_time = datetime.now() + timedelta(seconds=5)

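        # 'job' below is assumed to be a callable defined elsewhere in this module.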
        sched.add_date_job(
            job,
            exec_time,
            name='alarm',
            jobstore='shelve',
            args=[command]
        )
        sched.start()
Example #5
class Sheduler:
    def __init__(self):
        self.scheduler = Scheduler(standalone=True)
        self.scheduler.add_jobstore(MongoDBJobStore('mesjobs'),
                                    'MongoDB')

    def start(self):
        print('Starting the scheduler')
        try:
            self.scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            pass
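
Because the scheduler is constructed with standalone=True, start() blocks the calling
thread. A minimal usage sketch (class name kept as in the source):

s = Sheduler()
s.start()  # blocks until KeyboardInterrupt or SystemExit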
Example #6
def start(config={}):

    # params init
    mongo_host = config.get('host', SETTING['host'])
    mongo_port = config.get('port', SETTING['port'])

    db = pymongo.Connection(mongo_host, mongo_port)
    store = MongoDBJobStore(connection=db)

    # create scheduler and run
    scheduler = Scheduler(daemonic=False)
    scheduler.start()
    scheduler.add_jobstore(store, 'mongo')

    # add cron jobs
    scheduler.add_cron_job(monitor_cron_job, hour='0-23', minute="0", second="0", jobstore='mongo')
Example #7
def restart_file_schedule():

    scheduler = Scheduler(daemonic=False)

    scheduler.print_jobs()

    #scheduler.remove_jobstore('file',close=True)

    #scheduler.shutdown(wait=False)

    scheduler.add_jobstore(ShelveJobStore('/tmp/db_schedule'), 'file')

    scheduler.start()

    print 'success!'

    scheduler.print_jobs()
Example #8
def main():
    sched = Scheduler()
    # mysql_engine = create_engine('mysql://root:@localhost:3306/fengine?charset=utf8',encoding = "utf-8",echo =True)
    mysql_engine = get_db_engine()
    sched.daemonic = False
    print "Starting index engine......"
    job_store = SQLAlchemyJobStore(engine=mysql_engine)
    sched.add_jobstore(job_store, 'default')

    list_spider_job(sched)  # add the spider jobs to the queue
    scan_engine_job(sched)  # add the main index service to the job queue

    # sched.add_cron_job(scan_loan_items_job,hour='*', minute='*', second='5')
    # Add the index job to the scheduler, fired every 5 minutes
    # engine_name = 'engine.py'
    # python_loc = os.path.join(os.getcwd(), engine_name)
    # sched.add_interval_job(python_job_func, seconds =5, name = engine_name, args = [python_loc])
    # list_spider_job(sched)
    sched.start()
Example #11
class MyScheduler:

    EVENTS = {
        '1': 'EVENT_SCHEDULER_START',
        '2': 'EVENT_SCHEDULER_SHUTDOWN',
        '4': 'EVENT_JOBSTORE_ADDED',
        '8': 'EVENT_JOBSTORE_REMOVED',
        '16': 'EVENT_JOBSTORE_JOB_ADDED',
        '32': 'EVENT_JOBSTORE_JOB_REMOVED',
        '64': 'EVENT_JOB_EXECUTED',
        '128': 'EVENT_JOB_ERROR',
        '256': 'EVENT_JOB_MISSED'
    }

    def __init__(self, db_path='sqlite:///scheduler.db'):
        self.scheduler = Scheduler()
        self.scheduler.add_jobstore(SQLAlchemyJobStore(url=db_path), 'default')

    def start(self):
        self.scheduler.start()

    def add_job(self, job, date, args):
        job = self.scheduler.add_date_job(job, date, args)
        print job

    def jobs(self):
        return self.scheduler.get_jobs()

    def remove_job(self, notification_id):
        jobs = self.jobs()
        for job in jobs:
            if int(job.args[0]) == int(notification_id):
                self.scheduler.unschedule_job(job)
                return True
        return False

    def shutdown(self):
        self.scheduler.shutdown()
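
A brief usage sketch for MyScheduler; the notify function and its argument are
illustrative, not part of the original:

from datetime import datetime, timedelta

def notify(notification_id):
    print "notifying", notification_id

ms = MyScheduler()
ms.start()
ms.add_job(notify, datetime.now() + timedelta(minutes=5), args=[42])
ms.remove_job(42)  # removal matches on the job's first argument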
Example #12
def start_schedule():

#if __name__ == '__main__':

    scheduler_pl = Scheduler(daemonic=False)

    scheduler_pl.print_jobs()

    scheduler_pl.shutdown()

    scheduler_pl.add_jobstore(ShelveJobStore('/tmp/db_pl_schedule'), 'file')

    v_current_jobs = scheduler_pl.get_jobs()

    print v_current_jobs

    if v_current_jobs:  # if the job already exists, unschedule it first
        scheduler_pl.unschedule_func(upload_processlist)

    scheduler_pl.add_interval_job(upload_processlist, minutes=1)

    scheduler_pl.start()

    print 'success!'

    scheduler_pl.print_jobs()
Example #13
from apscheduler.util import convert_to_datetime, timedelta_seconds
import sys
from datetime import *
import time
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.sqlalchemy_store import SQLAlchemyJobStore
import subprocess, os
from cron.models import Job_Info
from django.db import transaction
from django.shortcuts import render_to_response
from django.views.generic import ListView, View
from tool.database import SQLITE_ADDRESS


scheduler = Scheduler()
scheduler.daemonic = False
scheduler.add_jobstore(SQLAlchemyJobStore(SQLITE_ADDRESS), 'schedulerjobs')
scheduler.start()

class IndexView(View):
    #template_name = 'cron_index.html'
    def get(self,request,*args,**kwargs):
        return render_to_response('cron_index.html')
indexView = IndexView.as_view()

class ListJobView(ListView):
    paginate_by = 5
    context_object_name = 'instances'
    queryset = Job_Info.objects.all()
    template_name = 'job_list.html'
listjobView = ListJobView.as_view()
Example #14
logging.basicConfig()


class EventScheduler():
    """Class to schedule regular events in a similar manner to cron."""
    __mysql_url = 'mysql+pymysql://powermonitor:%s@localhost/powermonitor' \
                  % str(base64.b64decode(bytes('cDB3M3JtMG4xdDBy')))
    '''This determines the number of seconds after the designated run time that the job is still allowed to be run.
    If jobs are not being run, try increasing this in increments of 1.'''
    __GRACE_PERIOD = 31536000  # Amazing grace! Time in seconds before the job is considered misfired. Currently a year
    __COALESCE = True  # Force the job to only run once instead of retrying multiple times
    '''If there is a problem with thread concurrency, play around with these values. You'd think with all these threads
    in the pool that the filter would get clogged up!'''
    __threadpool_corethreads = 0  # Maximum number of persistent threads in the pool
    __threadpool_maxthreads = 20  # Maximum number of total threads in the pool
    __threadpool_keepalive = 1  # Seconds to keep non-core worker threads in the pool

    def __init__(self, start=True):
        try:
            config = {
                'apscheduler.daemon': True,
                'apscheduler.standalone': False,
                'apscheduler.threadpool.core_threads':
                self.__threadpool_corethreads,
                'apscheduler.threadpool.max_threads':
                self.__threadpool_maxthreads,
                'apscheduler.threadpool.keepalive':
                self.__threadpool_keepalive,
                'apscheduler.coalesce': self.__COALESCE
            }
            self.__sched = Scheduler(config)
            '''Add the SQLAlchemy job store as the default. This was surprisingly far less tedious than getting the
            shelve job store working.'''
            self.__sched.add_jobstore(
                SQLAlchemyJobStore(url=self.__mysql_url, tablename='SCHEDULE'),
                'default')
            atexit.register(lambda: self.__sched.shutdown(wait=False)
                            )  # Stop the scheduler when the program exits
            if start:
                self.__sched.start()
        except KeyError:
            logging.warning('An error occurred starting the scheduler.')

    def start_scheduler(self):
        self.__sched.start()

    def add_cron_event(self,
                       func,
                       name,
                       year=None,
                       month=None,
                       week=None,
                       day=None,
                       day_of_week=None,
                       hour=None,
                       minute=None,
                       second=None,
                       start_date=None,
                       *args,
                       **kwargs):
        """Add a cron like event to the schedule. Each job must be given a name in case it needs to be removed.
        The following expressions can be used in each field:
        Expression  Field   Description
        *           any     Fire on every value
        */a         any     Fire on every 'a' values, starting from the minimum
        a-b         any     Fire on any value in the 'a-b' range (a must be smaller than b)
        a-b/c       any     Fire every 'c' values within the 'a-b' range
        xth y       day     Fire on the x-th occurrence of weekday y within the month
        last x      day     Fire on the last occurrence of weekday 'x' within the month
        last        day     Fire on the last day within the month
        x,y,z       any     Fire on any matching expression; can combine any number of any of the above expressions

        If you want to add **options to the event, use kwargs (keyword arguments dictionary)"""
        if self.__sched is not None:
            event_exists = False
            if self.__find_event(name) is not None:
                event_exists = True
            if not event_exists:
                self.__sched.add_cron_job(
                    func=func,
                    name=name,
                    year=year,
                    month=month,
                    day=day,
                    week=week,
                    day_of_week=day_of_week,
                    hour=hour,
                    minute=minute,
                    second=second,
                    start_date=start_date,
                    args=args,
                    kwargs=kwargs,
                    misfire_grace_time=self.__GRACE_PERIOD)
                logging.info('New cron event added')
            else:
                '''Every event needs a unique name so we can keep track of the little bastards. And please use
                descriptive names so that they can be properly identified in the job schedule.'''
                logging.warning('add_cron_event: Event already exists')
                raise EventExistsError('A job with name %s already exists' %
                                       name)
        else:
            raise SchedulerNotFoundError(
                'add_cron_event: Scheduler does not exist. It may have not started.'
            )

    def __find_event(self, event_name):
        if self.__sched is not None:
            events = self.__sched.get_jobs()
            for event in events:
                if event.name == event_name:
                    return event
            return None
        else:
            logging.warning(
                '__find_event: Scheduler does not exist. It may have not started.'
            )
            raise SchedulerNotFoundError(
                'Scheduler does not exist. It may have not started.')

    def add_onceoff_event(self, func, name, date, args=None):
        """Add a once off event to the schedule. The job is executed once at the specified date and time.
        Date/time format: YYYY-MM-DD HH:MM:SS"""
        if self.__sched is not None:
            try:
                if args is None:  # If there are no arguments to be passed to the function
                    self.__sched.add_date_job(
                        func=func,
                        name=name,
                        date=date,
                        misfire_grace_time=self.__GRACE_PERIOD)
                else:  # If there are arguments to be passed to the function
                    self.__sched.add_date_job(
                        func=func,
                        name=name,
                        date=date,
                        args=args,
                        misfire_grace_time=self.__GRACE_PERIOD)
            except ValueError:
                '''If the event is in the past, it will not run. This program is not capable of manipulating
                space and time. Try import __time_travel__'''
                raise EventWontRunError(
                    'The event will not run: Event time has expired.')
            logging.info('New once off event added')
        else:
            logging.warning(
                'add_onceoff_event: Scheduler does not exist. It may have not started.'
            )
            raise SchedulerNotFoundError(
                'Scheduler does not exist. It may have not started.')

    def remove_event(self, event_name):
        """Remove the event 'event_name' from the schedule."""
        if self.__sched is not None:
            removed = False
            event = self.__find_event(event_name=event_name)
            if event is not None:  # If the event exists, remove it
                self.__sched.unschedule_job(event)
                removed = True
            if not removed:
                '''Raise an error so that it can be handled correctly'''
                logging.warning('remove_event: Event not found for removal.')
                raise EventNotFoundError('Event not found for removal: %s' %
                                         event_name)
        else:
            raise SchedulerNotFoundError(
                'remove_event: Scheduler does not exist. It may have not started.'
            )

    def get_jobs(self):
        """Get the list of events currently in the job store."""
        if self.__sched is not None:
            return self.__sched.get_jobs()
        else:
            raise SchedulerNotFoundError(
                'get_events: Scheduler does not exist. It may have not started.'
            )

    def get_job_names(self):
        """
        Get the names of all the jobs in the job store
        :return: list
        """
        jobs = self.get_jobs()
        job_list = []
        if jobs:
            for job in jobs:
                job_list.append(job.name)
        return job_list

    def get_scheduler(self):
        """Returns the Scheduler object. Rather add functionality to this class than call this method."""
        if self.__sched is not None:
            return self.__sched
        else:
            raise SchedulerNotFoundError(
                'get_scheduler: Scheduler does not exist. It may have not started.'
            )
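
A brief usage sketch for EventScheduler above; poll_sensors is an illustrative
placeholder, not part of the original:

def poll_sensors():
    pass  # illustrative job body

es = EventScheduler(start=True)
es.add_cron_event(poll_sensors, name='poll_sensors', minute='*/5')  # fire every 5 minutes
es.remove_event('poll_sensors')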
Example #15
from datetime import datetime, timedelta

from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.sqlalchemy_store import SQLAlchemyJobStore
from sqlalchemy import create_engine
import logging
logging.basicConfig()
def alarm(time):
    print('Alarm! This alarm was scheduled at %s.' % time)


if __name__ == '__main__':
    engine = create_engine('sqlite:///example3.db')
    scheduler = Scheduler(standalone=True)
    scheduler.add_jobstore(SQLAlchemyJobStore(engine=engine), 'shelve')
    alarm_time = datetime.now() + timedelta(seconds=100)
    scheduler.add_cron_job(alarm, jobstore='shelve', second="*", minute="1",
                           args=[datetime.now()])
    print('To clear the alarms, delete the example3.db file.')
    print('Press Ctrl+C to exit')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
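
Note that the cron schedule above fires once per second during minute 1 of every hour;
for a one-shot alarm at alarm_time, a date job (as in the neighbouring examples) is the
closer fit:

    scheduler.add_date_job(alarm, alarm_time, name='alarm',
                           jobstore='shelve', args=[datetime.now()])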
Example #16
"""
This example demonstrates the use of persistent job stores.
On each run, it adds a new alarm that fires after ten seconds.
You can exit the program, restart it and observe that any previous alarms that have not fired yet are still active.
"""

from datetime import datetime, timedelta

from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.shelve_store import ShelveJobStore


def alarm(time):
    print('Alarm! This alarm was scheduled at %s.' % time)


if __name__ == '__main__':
    scheduler = Scheduler(standalone=True)
    scheduler.add_jobstore(ShelveJobStore('example.db'), 'shelve')
    alarm_time = datetime.now() + timedelta(seconds=10)
    scheduler.add_date_job(alarm,
                           alarm_time,
                           name='alarm',
                           jobstore='shelve',
                           args=[datetime.now()])
    print('To clear the alarms, delete the example.db file.')
    print('Press Ctrl+C to exit')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #18
def start_schedule():

#if __name__ == '__main__':
    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    scheduler = Scheduler(daemonic=False)

    scheduler.print_jobs()

    #scheduler.remove_jobstore('file',close=True)

    #scheduler.shutdown(wait=False)

    

    scheduler.shutdown()

    #scheduler.unschedule_func(backup)

    scheduler.add_jobstore(ShelveJobStore('/tmp/db_schedule'), 'file')

    v_current_jobs = scheduler.get_jobs()

    print v_current_jobs

    if v_current_jobs:  # if the job already exists, unschedule it first

     

        scheduler.unschedule_func(backup)

    

    #scheduler = Scheduler(standalone=True)
    #scheduler = Scheduler(daemon=True)
    # Connect to the configuration DB to get backup intervals and related settings
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id """

    print v_sql

    bak_server_list = db.query(v_sql)

    if bak_server_list: # there are servers to configure

        i=0

        # Mark scheduled jobs that never started as manually ended (backup_result_type=4)
        v_manual_end_sql = 'update mysql_ins_bak_log set backup_result_type=4 where backup_result_type=0'

        db.execute(v_manual_end_sql)

        for bak_server in bak_server_list:

            instance_id = bak_server['instance_id']

            from_host = bak_server['ip']

            #print from_host

            mysql_port = bak_server['port']

            backup_interval_type = bak_server['backup_interval_type']

            backup_start_time = bak_server['backup_start_time']

            str_start_date = time.strftime("%Y-%m-%d") + ' ' + backup_start_time

            print str_start_date 

            if backup_interval_type == 1: # every day

                # in-memory jobstore:
                #scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

                # file jobstore (jobstore='file'):
                scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')

                #scheduler.add_interval_job(backup, days=1, start_date='2014-07-18 18:17:01', args=[from_host, mysql_port, os_user, os_password])

            elif backup_interval_type == 2: # every week

                scheduler.add_interval_job(backup, weeks=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            elif backup_interval_type == 3: # every hour

                scheduler.add_interval_job(backup, hours=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            # Record the scheduled backup in the DB; 0 = job scheduled, actual backup not started yet

            v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
            values(%d,0)""" % (instance_id)

            db.execute(v_sche_start_sql)

            i=i+1


    db.close()


    if bak_server_list: # there are servers to configure

        scheduler.start()

        print 'success!'

        scheduler.print_jobs()

Example #20
        jobs = 0
        threads = []

        last_check = 0

        try:
            last_check = int(pydis.get("cron_last_run"))
        except TypeError:
            pass

        pydis.set("cron_last_run", int(time()))

        for job in db.jobqueue.select().where(db.jobqueue.ts >= last_check):
            jobs += 1
            t = threading.Thread(target=self.run, args=(job, ))
            t.start()
            threads.append(t)

        [t.join() for t in threads]

        # print "Started %d jobs" % jobs


cjob = Jobs()

sched = Scheduler(daemonic=False)
sched.add_jobstore(ShelveJobStore('./cron.jobs'), 'file')
sched.add_interval_job(cjob.start, seconds=10)
sched.start()

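# Record the first job's next fire time; assumes 'pydis' is a Redis-like client
# and that at least one job has been scheduled.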
pydis.set("cron_next_run", sched.get_jobs()[0].next_run_time)
Example #22
def my_listener(event):
    if (event.job.name != 'process_events' and event.job.name != 'remove_events'):
        if (event.retval == True):
            set_status(event.job.name, 5)
            logger.info("Event #" + str(event.job.name) + " completed successfully")
        elif (event.retval == False):
            set_status(event.job.name, 4)
            logger.error("Event #" + str(event.job.name) + " had an error")
        if event.exception:
            print event.exception
            logger.fatal("Event #" + str(event.job.name) + ' - job crashed :(')
    

#start the scheduler
sched = Scheduler()
sched.add_jobstore(WriteBackShelveJobStore('jobstore.db'), 'shelve')
sched.start()

sched.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

process_events()

# Process events list every 10 seconds
sched.add_interval_job(process_events, seconds=10)

# Remove completed events from db every minute
sched.add_interval_job(remove_events, minutes=1)

print "Dispatcher started..."

Example #23
# Set default values
for key in DEFAULT_CONFIG.keys():
    config_set_default(key, DEFAULT_CONFIG[key])

# Executor
from executor_raspberry import ExecutorRaspberry
executor = ExecutorRaspberry(panServoId=config_get("pan_servo"),
                             tiltServoId=config_get("tilt_servo"))

# Scheduler
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.shelve_store import ShelveJobStore
import atexit
sched = Scheduler(daemonic=True)  # 2.x option name is 'daemonic', not 'daemon'
sched.add_jobstore(ShelveJobStore('sched.db'), 'file')
atexit.register(lambda: sched.shutdown(wait=False))
sched.start()

# Logging
import logging
import sys

ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)

root = logging.getLogger()
root.addHandler(ch)
Example #24
# pull out the delimiter
delimiter = app.config["MESSAGING"]["incoming_delimiter"]

# mongoengine connection
connect(app.config["MONGO"]["db_name"], host=app.config["MONGO"]["host"], port=int(app.config["MONGO"]["port"]))

# start the logger
logging.basicConfig(
    filename=app.config["LOG_FILE"], level=logging.DEBUG, format="%(levelname)s[%(asctime)s]: %(message)s"
)

# start the job scheduler with persistent store
scheduler = Scheduler()
scheduler.add_jobstore(
    MongoDBJobStore(
        database=app.config["MONGO"]["db_name"], host=app.config["MONGO"]["host"], port=int(app.config["MONGO"]["port"])
    ),
    "default",  # APScheduler 2.x requires an alias for the job store
)
scheduler.start()


""" twilio endpoints
"""


@app.route("/api/1/sms/incoming", methods=["POST"])
def incoming_sms():
    """ twilio posts data here when a text message is received
    see http://www.twilio.com/docs/api/twiml/sms/twilio_request
    """
    if flask.request.form["AccountSid"] != app.config["TWILIO"]["account_sid"]:
Example #25
from datetime import datetime, timedelta
import sys
import time

from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.shelve_store import ShelveJobStore


def alarm(time):
    sys.stdout.write('Alarm! This alarm was scheduled at %s.\n' % time)


if __name__ == '__main__':
    scheduler = Scheduler()
    scheduler.add_jobstore(ShelveJobStore('example.db'), 'shelve')
    alarm_time = datetime.now() + timedelta(minutes=1)
    scheduler.add_date_job(alarm, alarm_time, name='alarm',
                           jobstore='shelve', args=[datetime.now()])
    sys.stdout.write('To clear the alarms, delete the example.db file.\n')
    sys.stdout.write('Press Ctrl+C to exit\n')
    scheduler.start()

    try:
        # This is here to prevent the main thread from exiting so that the
        # scheduler has time to work -- this is rarely necessary in real world
        # applications
        time.sleep(9999)
    finally:
        # Shut down the scheduler so that the job store gets closed properly
        scheduler.shutdown()
Example #26
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.sqlalchemy_store import SQLAlchemyJobStore
import datetime
from time import sleep

JOBS_DATABASE = "postgresql://*****:*****@localhost/test_jobs"

# Start the scheduler
sched = Scheduler()
sched.add_jobstore(
    SQLAlchemyJobStore(url=JOBS_DATABASE, tablename='apscheduler_jobs'),
    'default')
sched.start()


def print_reservation_id(reservation_id):
    print "====> Reservation id is " + str(reservation_id)


if __name__ == '__main__':

    print "====> Printing jobs..."
    print sched.print_jobs()

    now = datetime.datetime.now()
    start_time = now + datetime.timedelta(seconds=3)
    later = now + datetime.timedelta(seconds=10)

    print "====> now is " + str(now)
    print "====> start_time is " + str(start_time)
    print "====> later is " + str(later)
Example #28
class Application(tornado.web.Application):

    def __init__(self, debug):
        handlers = [
            (r"/?", RootHandler),
            (r"/login/?", LoginHandler),
            (r"/signup/?", CreateUserHandler),
            (r"/logout/?", LogoutHandler),
            (r"/ws/?", WebsocketHandler),
            (r"/test/?", testHandler),
            (r"/developer", DeveloperRegistrationHandler),
            (r"/submitForm", FormHandler),
            (r"/adminForm", AdminHandler),

            #### oAuth 2.0 Handlers
            (r"/login/oauth/authorize/?", AuthorizeHandler),
            (r"/login/oauth/access_token", AccessTokenHandler),

            #### API Handlers
            (r"/api/email/config/create?", CreateEmailConfigHandler),
            (r"/api/email/config/list?", GetEmailConfigHandler),
            (r"/api/osData/?", OsHandler),
            (r"/api/networkData/?", NetworkHandler),
            (r"/api/summaryData/?", SummaryHandler),
            (r"/api/global/graphs/severity?", GetPackageSeverityOverTimeHandler),
            #(r"/api/patchData/?", PatchHandler),
            (r"/api/graphData/?", GraphHandler),
            (r"/api/logger/modifyLogging?", LoggingModifyerHandler),
            (r"/api/logger/getParams?", LoggingListerHandler),
            (r"/api/nodes.json/?", NodesHandler),
            (r"/api/tags.json/?", TagsHandler),
            (r"/api/patches.json/?", PatchesHandler),
            (r"/api/severity.json/?", SeverityHandler),
            (r"/api/scheduler/list.json/?", SchedulerListerHandler),
            (r"/api/scheduler/add?", SchedulerAddHandler),
            (r"/api/scheduler/recurrent/add?", SchedulerAddRecurrentJobHandler),
            (r"/api/scheduler/remove?", SchedulerRemoveHandler),
            (r"/api/timeblocker/list.json/?", TimeBlockerListerHandler),
            (r"/api/timeblocker/add?", TimeBlockerAddHandler),
            (r"/api/timeblocker/remove?", TimeBlockerRemoverHandler),
            (r"/api/timeblocker/toggler?", TimeBlockerTogglerHandler),
            (r"/api/tagging/listByTag.json/?", TagListerByTagHandler),
            (r"/api/tagging/listByNode.json/?", TagListerByNodeHandler),
            (r"/api/tagging/addTag?", TagAddHandler),
            (r"/api/tagging/addTagPerNode?", TagAddPerNodeHandler),
            (r"/api/tagging/removeTagPerNode?", TagRemovePerNodeHandler),
            (r"/api/tagging/removeTag?", TagRemoveHandler),
            (r"/api/tagging/tagStats?", GetTagStatsHandler),
            (r"/api/tagging/graphs/severity?", GetTagPackageSeverityOverTimeHandler),
            (r"/api/transactions/getTransactions?", GetTransactionsHandler),
            (r"/api/transactions/search?", SearchTransactionsHandler),
            (r"/api/package/getDependecies?", GetDependenciesHandler),
            (r"/api/package/searchByPatch?", SearchPatchHandler),
            (r"/api/package/getTagsByTpId?", GetTagsPerTpIdHandler),
            (r"/api/node/modifyDisplayName?", ModifyDisplayNameHandler),
            (r"/api/node/modifyHostName?", ModifyHostNameHandler),
            (r"/api/node/delete?", NodeRemoverHandler),
            (r"/api/node/cleanData?", NodeCleanerHandler),
            (r"/api/node/wol?", NodeWolHandler),
            (r"/api/node/graphs/severity?", GetNodePackageSeverityOverTimeHandler),
            (r"/api/ssl/nodeToggler?", NodeTogglerHandler),
            (r"/api/ssl/list.json/?", SslHandler),
            (r"/api/acl/create?", AclCreateHandler),
            (r"/api/acl/modify?", AclModifyHandler),
            (r"/api/acl/delete?", AclDeleteHandler),
            (r"/api/user/?", UserHandler),
            (r"/api/users/list?", ListUserHandler),
            (r"/api/users/create?", CreateUserHandler),
            (r"/api/users/delete?", DeleteUserHandler),
            (r"/api/users/toggleGroup?", ModifyUserFromGroupHandler),
            (r"/api/groups/list?", ListGroupHandler),
            (r"/api/groups/create?", CreateGroupHandler),
            (r"/api/groups/delete?", DeleteGroupHandler),
            (r"/api/virtual/node/snapshots/list?", GetNodeSnapshotsHandler),
            (r"/api/virtual/node/snapshots/create?", CreateSnapshotHandler),
            (r"/api/virtual/node/snapshots/revert?", RevertSnapshotHandler),
            (r"/api/virtual/node/snapshots/remove?", RemoveSnapshotHandler),
            (r"/api/virtual/node/snapshots/removeAll?", RemoveAllSnapshotsHandler),
            (r"/api/vmware/config/create?", CreateVmwareConfigHandler),
            (r"/api/vmware/config/list?", GetVmwareConfigHandler),
            (r"/api/virtual/node/info?", GetNodeVmInfoHandler),
            (r"/api/virtual/node/poweron?", PowerOnVmHandler),
            (r"/api/virtual/node/shutdown?", ShutdownVmHandler),
            (r"/api/virtual/node/reboot?", RebootVmHandler),
            (r"/api/vendors/?", ApiHandler),                # Returns all vendors
            (r"/api/vendors/?(\w+)/?", ApiHandler),         # Returns vendor with products and respected vulnerabilities.
            (r"/api/vendors/?(\w+)/?(\w+)/?", ApiHandler),  # Returns specific product from respected vendor with vulnerabilities.

            #### File system access whitelist
            (r"/css/(.*?)", tornado.web.StaticFileHandler, {"path": "wwwstatic/css"}),
            (r"/font/(.*?)", tornado.web.StaticFileHandler, {"path": "wwwstatic/font"}),
            (r"/img/(.*?)", tornado.web.StaticFileHandler, {"path": "wwwstatic/img"}),
            (r"/js/(.*?)", tornado.web.StaticFileHandler, {"path": "wwwstatic/js"})
        ]

        template_path = "/opt/TopPatch/tp/templates"
        static_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "wwwstatic")
        #ui_modules = { 'Header', HeaderModule }

        settings = {
            "cookie_secret": base64.b64encode(uuid.uuid4().bytes + uuid.uuid4().bytes),
            "login_url": "/login",
        }

        self.db = init_engine()
        Session = create_session(self.db)
        self.session = Session
        self.scheduler = Scheduler()
        self.scheduler.add_jobstore(SQLAlchemyJobStore(engine=self.db, tablename="tp_scheduler"), "toppatch")
        self.scheduler.start()
        self.session = validate_session(self.session)
        self.account_manager = AccountManager(self.session)
        self.tokens = TokenManager(self.session)

        tornado.web.Application.__init__(self, handlers, template_path=template_path, static_path=static_path, debug=debug, **settings)

    def log_request(self, handler):
        logging.config.fileConfig('/opt/TopPatch/conf/logging.config')
        log = logging.getLogger('rvweb')
        log_method = log.debug
        if handler.get_status() <= 299:
            log_method = log.info
        elif handler.get_status() <= 399 and \
                handler.get_status() >= 300:
            log_method = log.warn
        elif handler.get_status() <= 499 and \
                handler.get_status() >= 400:
            log_method = log.error
        elif handler.get_status() <= 599 and \
                handler.get_status() >= 500:
            log_method = log.error
        request_time = 1000.0 * handler.request.request_time()
        log_message = '%d %s %.2fms' % (handler.get_status(), handler._request_summary(), request_time)
        log_method(log_message)
Example #29
class TestOfflineScheduler(object):
    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')

    def test_add_tentative_job(self):
        job = self.scheduler.add_date_job(lambda: None,
                                          datetime(2200, 7, 24),
                                          jobstore='dummy')
        assert isinstance(job, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_configure_jobstore(self):
        conf = {
            'apscheduler.jobstore.ramstore.class':
            'apscheduler.jobstores.ram_store:RAMJobStore'
        }
        self.scheduler.configure(conf)
        self.scheduler.remove_jobstore('ramstore')

    def test_shutdown_offline(self):
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        global_options = {'misfire_grace_time': '2', 'daemonic': 'false'}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        global_options = {
            'apscheduler.misfire_grace_time': 2,
            'apscheduler.daemonic': False
        }
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        val = []
        self.scheduler.add_listener(val.append)

        event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 1)
        eq_(val[0], event)

        event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)
        eq_(val[1], event)

        self.scheduler.remove_listener(val.append)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)

    def test_pending_jobs(self):
        # Tests that pending jobs are properly added to the jobs list when
        # the scheduler is started (and not before!)
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])

        self.scheduler.start()
        jobs = self.scheduler.get_jobs()
        eq_(len(jobs), 1)
Example #30
from datetime import datetime, timedelta

from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.sqlalchemy_store import SQLAlchemyJobStore


def alarm(time):
    print('Alarm! This alarm was scheduled at %s.' % time)


if __name__ == '__main__':
    scheduler = Scheduler(standalone=True)
    scheduler.add_jobstore(SQLAlchemyJobStore('sqlite:///cron.db'), 'job123')
    alarm_time = datetime.now() + timedelta(seconds=10)
    scheduler.add_date_job(alarm, alarm_time, name='alarm',
                           jobstore='job123', args=[datetime.now()])
    print('To clear the alarms, delete the cron.db file.')
    print('Press Ctrl+C to exit')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #31
class TestJobExecution(object):
    def setup(self):
        self.scheduler = Scheduler(threadpool=FakeThreadPool())
        self.scheduler.add_jobstore(RAMJobStore(), 'default')

        # Make the scheduler think it's running
        self.scheduler._thread = FakeThread()

        self.logstream = StringIO()
        self.loghandler = StreamHandler(self.logstream)
        self.loghandler.setLevel(ERROR)
        scheduler.logger.addHandler(self.loghandler)

    def teardown(self):
        scheduler.logger.removeHandler(self.loghandler)
        if scheduler.datetime == FakeDateTime:
            scheduler.datetime = datetime
        FakeDateTime._now = original_now

    @raises(TypeError)
    def test_noncallable(self):
        date = datetime.now() + timedelta(days=1)
        self.scheduler.add_date_job('wontwork', date)

    def test_job_name(self):
        def my_job():
            pass

        job = self.scheduler.add_interval_job(my_job,
                                              start_date=datetime(2010, 5, 19))
        eq_(
            repr(job), '<Job (name=my_job, '
            'trigger=<IntervalTrigger (interval=datetime.timedelta(0, 1), '
            'start_date=datetime.datetime(2010, 5, 19, 0, 0))>)>')

    def test_schedule_object(self):
        # Tests that any callable object is accepted (and not just functions)
        class A:
            def __init__(self):
                self.val = 0

            def __call__(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_schedule_method(self):
        # Tests that bound methods can be scheduled (at least with RAMJobStore)
        class A:
            def __init__(self):
                self.val = 0

            def method(self):
                self.val += 1

        a = A()
        job = self.scheduler.add_interval_job(a.method, seconds=1)
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(a.val, 2)

    def test_unschedule_job(self):
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_cron_job(increment)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)
        self.scheduler.unschedule_job(job)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals[0], 1)

    def test_unschedule_func(self):
        def increment():
            vals[0] += 1

        def increment2():
            vals[0] += 1

        vals = [0]
        job1 = self.scheduler.add_cron_job(increment)
        job2 = self.scheduler.add_cron_job(increment2)
        job3 = self.scheduler.add_cron_job(increment)
        eq_(self.scheduler.get_jobs(), [job1, job2, job3])

        self.scheduler.unschedule_func(increment)
        eq_(self.scheduler.get_jobs(), [job2])

    @raises(KeyError)
    def test_unschedule_func_notfound(self):
        self.scheduler.unschedule_func(copy)

    def test_job_finished(self):
        def increment():
            vals[0] += 1

        vals = [0]
        job = self.scheduler.add_interval_job(increment, max_runs=1)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [1])
        assert job not in self.scheduler.get_jobs()

    def test_job_exception(self):
        def failure():
            raise DummyException

        job = self.scheduler.add_date_job(failure, datetime(9999, 9, 9))
        self.scheduler._process_jobs(job.next_run_time)
        assert 'DummyException' in self.logstream.getvalue()

    def test_misfire_grace_time(self):
        self.scheduler.misfire_grace_time = 3
        job = self.scheduler.add_interval_job(lambda: None, seconds=1)
        eq_(job.misfire_grace_time, 3)

        job = self.scheduler.add_interval_job(lambda: None,
                                              seconds=1,
                                              misfire_grace_time=2)
        eq_(job.misfire_grace_time, 2)

    def test_coalesce_on(self):
        # Makes sure that the job is only executed once when it is scheduled
        # to be executed twice in a row
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(increment,
                                              seconds=1,
                                              start_date=FakeDateTime.now(),
                                              coalesce=True,
                                              misfire_grace_time=2)

        # Turn the clock 2 seconds forward
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 1)
        eq_(len(events), 1)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(vals, [1])

    def test_coalesce_off(self):
        # Makes sure that every scheduled run for the job is executed even
        # when they are in the past (but still within misfire_grace_time)
        def increment():
            vals[0] += 1

        vals = [0]
        events = []
        scheduler.datetime = FakeDateTime
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        job = self.scheduler.add_interval_job(increment,
                                              seconds=1,
                                              start_date=FakeDateTime.now(),
                                              coalesce=False,
                                              misfire_grace_time=2)

        # Turn the clock 2 seconds forward
        FakeDateTime._now += timedelta(seconds=2)

        self.scheduler._process_jobs(FakeDateTime.now())
        eq_(job.runs, 3)
        eq_(len(events), 3)
        eq_(events[0].code, EVENT_JOB_EXECUTED)
        eq_(events[1].code, EVENT_JOB_EXECUTED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(vals, [3])

    def test_interval(self):
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_interval_job(increment, seconds=1, args=[2])
        self.scheduler._process_jobs(job.next_run_time)
        self.scheduler._process_jobs(job.next_run_time)
        eq_(vals, [4, 2])

    def test_interval_schedule(self):
        @self.scheduler.interval_schedule(seconds=1)
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [2])

    def test_cron(self):
        def increment(amount):
            vals[0] += amount
            vals[1] += 1

        vals = [0, 0]
        job = self.scheduler.add_cron_job(increment, args=[3])
        start = job.next_run_time
        self.scheduler._process_jobs(start)
        eq_(vals, [3, 1])
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals, [6, 2])
        self.scheduler._process_jobs(start + timedelta(seconds=2))
        eq_(vals, [9, 3])

    def test_cron_schedule_1(self):
        @self.scheduler.cron_schedule()
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vals[0], 2)

    def test_cron_schedule_2(self):
        @self.scheduler.cron_schedule(minute='*')
        def increment():
            vals[0] += 1

        vals = [0]
        start = increment.job.next_run_time
        next_run = start + timedelta(seconds=60)
        eq_(increment.job.get_run_times(next_run), [start, next_run])
        self.scheduler._process_jobs(start)
        self.scheduler._process_jobs(next_run)
        eq_(vals[0], 2)

    def test_date(self):
        def append_val(value):
            vals.append(value)

        vals = []
        date = datetime.now() + timedelta(seconds=1)
        self.scheduler.add_date_job(append_val, date, kwargs={'value': 'test'})
        self.scheduler._process_jobs(date)
        eq_(vals, ['test'])

    def test_print_jobs(self):
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = 'Jobstore default:%s'\
                   '    No scheduled jobs%s' % (os.linesep, os.linesep)
        eq_(out.getvalue(), expected)

        self.scheduler.add_date_job(copy, datetime(2200, 5, 19))
        out = StringIO()
        self.scheduler.print_jobs(out)
        expected = 'Jobstore default:%s    '\
            'copy (trigger: date[2200-05-19 00:00:00], '\
            'next run at: 2200-05-19 00:00:00)%s' % (os.linesep, os.linesep)
        eq_(out.getvalue(), expected)

    def test_jobstore(self):
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
        job = self.scheduler.add_date_job(lambda: None,
                                          datetime(2200, 7, 24),
                                          jobstore='dummy')
        eq_(self.scheduler.get_jobs(), [job])
        self.scheduler.remove_jobstore('dummy')
        eq_(self.scheduler.get_jobs(), [])

    @raises(KeyError)
    def test_remove_nonexistent_jobstore(self):
        self.scheduler.remove_jobstore('dummy2')

    def test_job_next_run_time(self):
        # Tests against bug #5
        def increment():
            vars[0] += 1

        vars = [0]
        scheduler.datetime = FakeDateTime
        job = self.scheduler.add_interval_job(increment,
                                              seconds=1,
                                              misfire_grace_time=3,
                                              start_date=FakeDateTime.now())
        start = job.next_run_time

        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        self.scheduler._process_jobs(start)
        eq_(vars, [1])

        self.scheduler._process_jobs(start + timedelta(seconds=1))
        eq_(vars, [2])
Exemple #32
0
The cron job above is defined with a decorator; it could also be added by calling scheduler.add_cron_job, but the decorator form is more convenient. The daemonic argument passed to the Scheduler constructor makes the executor thread non-daemonic, and the Scheduler documentation recommends using non-daemonic threads.
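
For comparison, a minimal sketch of the two equivalent registration styles (the tick/tock job functions and the second='*/5' schedule are illustrative assumptions, not from the original post):

from apscheduler.scheduler import Scheduler

# daemonic=False keeps the executor thread non-daemonic, as recommended above
sched = Scheduler(daemonic=False)

# Style 1: register via decorator
@sched.cron_schedule(second='*/5')
def tick():
    print 'tick (decorator)'

# Style 2: register the same schedule with an explicit call
def tock():
    print 'tock (explicit call)'

sched.add_cron_job(tock, second='*/5')

sched.start()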

Another important parameter when adding a job is max_instances, which sets how many instances of the same job may run concurrently; the default is 1. By default, if a job comes due while its previous instance is still running, the new run fails. Raising max_instances changes this behavior.
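
A minimal sketch of the effect, assuming a deliberately slow job (the 2-second interval and 5-second sleep are illustrative values):

import time
from apscheduler.scheduler import Scheduler

sched = Scheduler(daemonic=False)

def slow_job():
    # Sleeps longer than the interval, so runs inevitably overlap
    time.sleep(5)

# With the default max_instances=1, the run that comes due while the
# previous one is still sleeping would fail; allowing two concurrent
# instances lets it proceed instead.
sched.add_interval_job(slow_job, seconds=2, max_instances=2)
sched.start()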

APScheduler provides job stores for persisting job state. The default is RAMJobStore; SQLAlchemyJobStore, ShelveJobStore and MongoDBJobStore are also available. Several job stores can be used at the same time, distinguished by alias: when adding a job you name the target store's alias, and if you omit it the store aliased 'default' (the RAMJobStore) is used. The following example uses MongoDBJobStore.
import pymongo
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.mongodb_store import MongoDBJobStore
import time

sched = Scheduler(daemonic=False)

mongo = pymongo.Connection(host='127.0.0.1', port=27017)
store = MongoDBJobStore(connection=mongo)
sched.add_jobstore(store, 'mongo')  # the jobstore's alias is 'mongo'

# Add the job to the jobstore aliased 'mongo'
@sched.cron_schedule(second='*', day_of_week='0-4', hour='9-12,13-15',
                     jobstore='mongo')
def job():
    print 'a job'
    time.sleep(1)

sched.start()
Note that start() must be called after the jobs have been added, otherwise an error is raised. By default, job state is saved in the jobs collection of the apscheduler database:
> db.jobs.findOne()
{
        "_id" : ObjectId("502202d1443c1557fa8b8d66"),
        "runs" : 20,
        "name" : "job",
        "misfire_grace_time" : 1,

class TestJobExecution(object):
    def setup(self):
        self.scheduler = Scheduler(threadpool=FakeThreadPool())
        self.scheduler.add_jobstore(RAMJobStore(), "default")

        # Make the scheduler think it's running
        self.scheduler._thread = FakeThread()

        self.logstream = StringIO()
        self.loghandler = StreamHandler(self.logstream)
        self.loghandler.setLevel(ERROR)
        scheduler.logger.addHandler(self.loghandler)

    def teardown(self):
        scheduler.logger.removeHandler(self.loghandler)
        if scheduler.datetime == FakeDateTime:
            scheduler.datetime = datetime
        FakeDateTime._now = original_now

class TestOfflineScheduler(object):
    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")

    def test_add_tentative_job(self):
        job = self.scheduler.add_date_job(lambda: None, datetime(2200, 7, 24), jobstore="dummy")
        assert isinstance(job, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_add_job_by_reference(self):
        job = self.scheduler.add_date_job("copy:copy", datetime(2200, 7, 24))
        eq_(job.func, copy)
        eq_(job.func_ref, "copy:copy")

    def test_configure_jobstore(self):
        conf = {"apscheduler.jobstore.ramstore.class": "apscheduler.jobstores.ram_store:RAMJobStore"}
        self.scheduler.configure(conf)
        self.scheduler.remove_jobstore("ramstore")

    def test_shutdown_offline(self):
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        # Options without the "apscheduler." prefix are ignored, so the
        # defaults (misfire_grace_time=1, daemonic=True) remain in effect
        global_options = {"misfire_grace_time": "2", "daemonic": "false"}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        global_options = {"apscheduler.misfire_grace_time": 2, "apscheduler.daemonic": False}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        val = []
        self.scheduler.add_listener(val.append)

        event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 1)
        eq_(val[0], event)

        event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)
        eq_(val[1], event)

        self.scheduler.remove_listener(val.append)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)

    def test_pending_jobs(self):
        # Tests that pending jobs are properly added to the jobs list when
        # the scheduler is started (and not before!)
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])

        self.scheduler.start()
        jobs = self.scheduler.get_jobs()
        eq_(len(jobs), 1)
Exemple #35
0
from boto.provider import Provider
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.shelve_store import ShelveJobStore
from shove import Shove
import logging
SHOVE_BUCKET = 'my-bucket'


class ShoveJobStore(ShelveJobStore):
    # Bypasses ShelveJobStore.__init__ so the store is backed by Shove
    # (which understands URI store paths such as s3://) instead of shelve
    def __init__(self, path):
        self.jobs = []
        self.path = path
        self.store = Shove(path, optimize=False)


class S3JobStore(ShoveJobStore):
    # Persists jobs to S3 by handing Shove an s3:// URI built from the
    # AWS credentials and bucket name
    def __init__(self, access_key, secret_key, bucket, prefix='job_'):
        self.prefix = prefix if prefix[-1] == '/' else (prefix + '/')
        path = 's3://{}:{}@{}'.format(access_key, secret_key, bucket)
        super(S3JobStore, self).__init__(path)


logging.basicConfig()
PROVIDER = Provider('aws')
JOB_STORE = S3JobStore(PROVIDER.get_access_key(), PROVIDER.get_secret_key(),
                       SHOVE_BUCKET)
SCHEDULER = Scheduler(misfire_grace_time=1000)
SCHEDULER.add_jobstore(JOB_STORE, 's3')
Exemple #36
0
webackup = Flask('modules',
                 static_folder="static",
                 template_folder='templates')
webackup.config.from_object(__name__)
Bootstrap(webackup)

# instantiate the db wrapper
db = Database(webackup)

# instantiate the login manager
login_manager = LoginManager()
login_manager.setup_app(webackup)

# initialize and start the scheduler
sched = Scheduler()
sched.add_jobstore(ShelveJobStore('data/scheduler.db'), 'default')
sched.start()
""" Registering the blueprint controller """
dirs = os.listdir(APP_DIR)
for module in dirs:
    """ Blueprints """
    try:
        if module.startswith('__'):
            continue
        webackup.register_blueprint(
            import_string(APP_DIR + '.' + module + '.controller.register'))
    except ImportError:
        pass

dirs = os.listdir(APP_DIR + '/source')
for module in dirs:
    """ Blueprints """