Example #1
 def close(self):
     for job in self.jobs:
         logger.info(' + Unload %s' % job.id)
         self.collection.update({'_id': job.id},
                                {"$set": {
                                    'loaded': False
                                }}, True)
     MongoDBJobStore.close(self)
Example #2
 def __init__(self,
              database='canopsis',
              collection='objects',
              connection=None,
              pickle_protocol=pickle.HIGHEST_PROTOCOL,
              **connect_args):
     MongoDBJobStore.__init__(self,
                              database=database,
                              collection=collection,
                              connection=connection,
                              pickle_protocol=pickle_protocol)
Example #3
def start(config={}):

    # params init
    mongo_host = config.get('host', SETTING['host'])
    mongo_port = config.get('port', SETTING['port'])

    db = pymongo.Connection(mongo_host, mongo_port)
    store = MongoDBJobStore(connection=db)

    # create scheduler and run it
    scheduler = Scheduler(daemonic=False)
    scheduler.start()
    scheduler.add_jobstore(store, 'mongo')

    # add cron jobs
    scheduler.add_cron_job(monitor_cron_job,
                           hour='0-23',
                           minute="0",
                           second="0",
                           jobstore='mongo')
Example #4
    def setup_class(cls):
        if not MongoDBJobStore:
            raise SkipTest

        cls.jobstore = MongoDBJobStore(database='apscheduler_unittest')
Example #5
    def __init__(self, database='canopsis', collection='objects', connection=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        MongoDBJobStore.__init__(self, database=database, collection=collection,
                                 connection=connection, pickle_protocol=pickle_protocol)
Example #6
    def close(self):
        for job in self.jobs:
            logger.info(' + Unload %s' % job.id)
            self.collection.update({'_id': job.id}, {"$set": {'loaded': False}}, True)
        MongoDBJobStore.close(self)
Example #7
scheduler.start()
The cron job above is defined with a decorator; it could also be registered with the scheduler.add_cron_job function, but the decorator is more convenient. The daemonic argument passed to the Scheduler constructor makes the worker thread non-daemonic, which is what the Scheduler documentation recommends.
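
The decorator form and an explicit call to scheduler.add_cron_job register the same kind of cron job. A minimal sketch of the explicit form, assuming the APScheduler 2.x API used throughout these examples and a placeholder function named tick:

from apscheduler.scheduler import Scheduler

sched = Scheduler(daemonic=False)    # non-daemonic worker thread, as recommended

def tick():
    print 'tick'    # placeholder job body

# equivalent to decorating tick with @sched.cron_schedule(hour='9-12', minute='0')
sched.add_cron_job(tick, hour='9-12', minute='0')
sched.start()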

Another important parameter when adding a job is max_instances, which sets how many instances of the same job may run concurrently; the default is 1. With the default, if a job is due to run while its previous instance has not yet finished, the new run fails; raising max_instances changes this behaviour.
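
For instance, a job whose run may outlast its trigger interval can be allowed to overlap by passing max_instances when it is added. A minimal sketch, assuming the sched object from the sketch above and a hypothetical slow_job function:

def slow_job():
    pass    # stands in for work that can take longer than a minute

# allow up to 3 concurrent runs of slow_job; with the default max_instances=1,
# a run still in progress would cause the next scheduled run to fail
sched.add_cron_job(slow_job, minute='*', max_instances=3)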

APScheduler provides job stores for persisting job execution state. The default is RAMJobStore; SQLAlchemyJobStore, ShelveJobStore and MongoDBJobStore are also available. Several job stores can be used at the same time, distinguished by alias: when adding a job, specify the alias of the job store it should go to, otherwise the store aliased default (RAMJobStore) is used. The following example uses MongoDBJobStore.
import pymongo
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.mongodb_store import MongoDBJobStore
import time

sched = Scheduler(daemonic=False)

mongo = pymongo.Connection(host='127.0.0.1', port=27017)
store = MongoDBJobStore(connection=mongo)
sched.add_jobstore(store, 'mongo')        # the alias is 'mongo'

@sched.cron_schedule(second='*', day_of_week='0-4', hour='9-12,13-15', jobstore='mongo')        # add the job to the jobstore aliased 'mongo'
def job():
    print 'a job'
    time.sleep(1)

sched.start()
Note that start must be called after the jobs have been added, otherwise an error is raised. By default the job information is stored in the jobs collection of the apscheduler database:
> db.jobs.findOne()  
{  
        "_id" : ObjectId("502202d1443c1557fa8b8d66"),  
        "runs" : 20,  
        "name" : "job",  
Example #8
    def make_jobstore():
        if not MongoDBJobStore:
            raise SkipTest

        return MongoDBJobStore(database='apscheduler_unittest')