def run(self):
    self.logger.info('Running Scheduler')
    poll_interval = 5
    try:
        while 1:
            next_event = now() + timedelta(1)
            for rule in self.rules:
                n = now()
                rrule = rrulestr(rule['rule'], dtstart=rule['created'])
                items = rrule.between(rule['checked'], n)
                if len(items) > 1:
                    self.logger.warn("Scheduler missed %i tasks! Enqueuing latest" % (len(items)))
                if items:
                    if self.check_rule(rule, n):
                        self.logger.info("Enqueueing task %r (%s)" % (rule['task'], items[0].ctime()))
                        self.enqueue_from_rule(rule)
                    else:
                        self.logger.warn("Another scheduler has already run this task. Moving on")
                next_event = min(next_event, rrule.after(n))

            self.logger.debug("Next event %s" % next_event.ctime())
            next_event = (next_event - now()).total_seconds()
            self.logger.debug("Next event in %i seconds" % next_event)
            sleep = max(1, min(next_event, poll_interval))
            self.logger.debug("Sleeping for %i seconds" % sleep)
            time.sleep(sleep)
    except KeyboardInterrupt:
        self.logger.exception('Exiting main loop')
def add_job(self, rule, task, queue, tags=(), timeout=None):
    collection = self.factory.schedule_collection
    return collection.insert({'rule': rule,
                              'task': task,
                              'queue': queue,
                              'tags': tags,
                              'paused': False,
                              'active': True,
                              'modified': now(),
                              'created': now(),
                              'checked': now(),
                              'timeout': timeout,
                              })
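# Hedged usage sketch: add a job that runs hourly. Assumes `scheduler` is an
# instance of the class defining add_job above and that 'rule' is a dateutil
# rrule string, since run() parses it with rrulestr. The task name, queue, and
# tag shown here are hypothetical.
job_id = scheduler.add_job(rule='FREQ=HOURLY;INTERVAL=1',
                           task='cleanup_temp_files',
                           queue='default',
                           tags=('maintenance',),
                           timeout=300)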
def max_age(arg):
    if arg.lower().endswith('s'):
        return now() - timedelta(0, int(arg[:-1]))
    elif arg.lower().endswith('m'):
        return now() - timedelta(0, int(arg[:-1]) * 60)
    elif arg.lower().endswith('h'):
        return now() - timedelta(0, int(arg[:-1]) * 60 * 60)
    elif arg.lower().endswith('d'):
        return now() - timedelta(int(arg[:-1]))
    else:
        return now() - timedelta(0, int(arg))
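# Hedged usage sketch for max_age. Assumes the module-level now() helper used
# throughout these snippets returns the current datetime.
from datetime import timedelta

cutoff = max_age('90s')                        # a datetime 90 seconds in the past
assert now() - cutoff >= timedelta(seconds=90)

# Suffixes: 's' seconds, 'm' minutes, 'h' hours, 'd' days; a bare number is seconds.
print(max_age('15m'), max_age('2h'), max_age('7d'), max_age('3600'))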
def init_rule(self):
    doc = self.doc
    self.rrule = rrulestr(doc['rule'], dtstart=now())
    self.irule = iter(self.rrule)
    self.queue = self.factory.queue(doc['queue'], tags=doc['tags'])
    if self.timer is not None:
        self.set()
def write(self, message):
    doc = self.doc.copy()
    doc.update(message=message, time=now())
    self.collection.insert(doc)
    if self.stream and not self.silence:
        self.stream.write(message)
    return len(message)
def check_in(self):
    query = {'_id': self.worker_id}
    update = {'$set': {'check-in': now(), 'working': True}}
    worker_info = self.collection.find_and_modify(query, update)
    if worker_info:
        should_exit = worker_info.get('terminate', False)
        status = worker_info.get('terminate_status', 0)
        return should_exit, status
    return False, 0
def test_is_empty(self):
    """Detecting empty queues."""
    q = self.factory.queue('example')
    self.assertEqual(q.is_empty(), True)

    self.factory.queue_collection.insert({'qname': 'example',
                                          'process_after': now(),
                                          'priority': 0,
                                          'processed': False})
    self.assertEqual(q.is_empty(), False)
def finish(conn, args):
    coll = conn.queue_collection
    query = {'finished': False}
    if args.id:
        query['_id'] = args.id

    print('Flagging %i jobs as finished' % coll.find(query).count())

    n = now()
    coll.update(query,
                {'$set': {'finished': True,
                          'finished_at': n,
                          'finished_at_': mktime(n.timetuple())}},
                multi=True)
def reltime(dt):
    delta = now() - dt
    s = delta.total_seconds()

    hours = s / 3600.
    if hours > 1.5:
        return '%.1f hours ago' % hours

    minutes = s / 60.0
    if minutes > 1.5:
        return '%.1f minutes ago' % minutes

    return '%.1f seconds ago' % s
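# Hedged sketch of reltime output, assuming now() returns the current datetime
# as in the surrounding snippets; the printed values are approximate.
from datetime import timedelta

print(reltime(now() - timedelta(seconds=45)))   # ~ '45.0 seconds ago'
print(reltime(now() - timedelta(minutes=10)))   # ~ '10.0 minutes ago'
print(reltime(now() - timedelta(hours=3)))      # ~ '3.0 hours ago'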
def register(self):
    '''
    Internal context manager; registers the birth and death of this worker.

    eg::

        with worker.register():
            # Work
    '''
    self.collection = self.factory.worker_collection
    self.worker_id = self.collection.insert({'name': self.name,
                                             'host': platform.node(),
                                             'system': platform.system(),
                                             'pid': os.getpid(),
                                             'user': getpass.getuser(),
                                             'started': now(),
                                             'finished': datetime.fromtimestamp(0),
                                             'check-in': datetime.fromtimestamp(0),
                                             'working': True,
                                             'queues': self.queues,
                                             'tags': self.tags,
                                             'log_output': bool(self._log_worker_output),
                                             'terminate': False,
                                             'terminate_status': 0,
                                             })
    if self._log_worker_output:
        hdlr = MongoHandler(self.factory.logging_collection, {'worker_id': self.worker_id})
        self.logger.addHandler(hdlr)

    try:
        yield self.worker_id
    finally:
        if self._log_worker_output:
            self.logger.removeHandler(hdlr)
        query = {'_id': self.worker_id}
        update = {'$set': {'finished': now(), 'working': False}}
        self.collection.update(query, update)
def update_job(self, _id, rule=None, task=None, queue=None, tags=None):
    collection = self.factory.schedule_collection
    query = {'_id': _id}
    doc = {'$set': {'modified': now()}}
    if rule is not None:
        doc['$set']['rule'] = rule
    if task is not None:
        doc['$set']['task'] = task
    if queue is not None:
        doc['$set']['queue'] = queue
    if tags is not None:
        doc['$set']['tags'] = tags
    return collection.update(query, doc)
def register(self):
    '''
    Internal context manager; registers the birth and death of this worker.

    eg::

        with worker.register():
            # Work
    '''
    self.collection = self.factory.worker_collection
    self.worker_id = self.collection.insert({'name': self.name,
                                             'host': os.uname()[1],
                                             'pid': os.getpid(),
                                             'user': os.getlogin(),
                                             'started': now(),
                                             'finished': datetime.fromtimestamp(0),
                                             'check-in': datetime.fromtimestamp(0),
                                             'working': True,
                                             'queues': self.queues,
                                             'tags': self.tags,
                                             'log_output': bool(self._log_worker_output),
                                             'terminate': False,
                                             'terminate_status': 0,
                                             })
    if self._log_worker_output:
        hdlr = MongoHandler(self.factory.logging_collection, {'worker_id': self.worker_id})
        self.logger.addHandler(hdlr)

    try:
        yield self.worker_id
    finally:
        if self._log_worker_output:
            self.logger.removeHandler(hdlr)
        query = {'_id': self.worker_id}
        update = {'$set': {'finished': now(), 'working': False}}
        self.collection.update(query, update)
def pop_item(self, worker_id, queues, tags, priority=0):
    'Pop an item from the queue'
    n = now()
    update = {'$set': {'processed': True,
                       'started_at': n,
                       'started_at_': mktime(n.timetuple()),
                       'worker_id': worker_id}
              }
    query = self.make_query(queues, tags, priority)
    doc = self.queue_collection.find_and_modify(query, update)
    if doc is None:
        return None
    else:
        return mtq.Job(self, doc)
def pop_item(self, worker_id, queues, tags, priority=0, failed=False):
    'Pop an item from the queue'
    n = now()
    update = {'$set': {'processed': True,
                       'started_at': n,
                       'started_at_': mktime(n.timetuple()),
                       'worker_id': worker_id}
              }
    query = self.make_query(queues, tags, priority, failed)
    doc = self.queue_collection.find_and_modify(query, update)
    if doc is None:
        return None
    else:
        return mtq.Job(self, doc)
def set_finished(self, failed=False):
    '''
    Mark this job as finished.

    :param failed: if true, this was a failed job
    '''
    n = now()
    update = {'$set': {'processed': True,
                       'failed': failed,
                       'finished': True,
                       'finished_at': n,
                       'finished_at_': mktime(n.timetuple())
                       }
              }
    self.factory.queue_collection.update({'_id': self.id}, update)
def make_query(self, queues, tags, priority=0, processed=False, **query):
    '''
    Return a MongoDB query dict to get the next task in the queue.
    '''
    query.update({
        'priority': {'$gte': priority},
        'process_after': {'$lte': now()},
    })
    if processed is not None:
        query['processed'] = processed

    if queues:
        if len(queues) == 1:
            query['qname'] = queues[0]
        else:
            query['qname'] = {'$in': queues}

    query.update(self.make_tag_query(tags))
    return query
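# Hedged illustration of the document make_query builds for queues=['default', 'emails'],
# tags=(), priority=0 -- assuming make_tag_query contributes no keys for an empty
# tag list. Illustrative only; the exact tag clause depends on make_tag_query.
example_query = {
    'priority': {'$gte': 0},
    'process_after': {'$lte': now()},
    'processed': False,
    'qname': {'$in': ['default', 'emails']},
}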
def set(self):
    if self.timer is not None:
        self.timer.cancel()
        self.timer = None

    nw = now()
    self.nxt = nxt = next(self.irule, None)

    if nxt is None:
        self.logger.info('No more jobs to process, exiting')
        return

    if nxt < nw:
        timeout = 0
    else:
        timeout = (nxt - nw).seconds

    self.logger.info('Scheduling task "%s" to enqueue in %i seconds (%s)' % (self.task, timeout, nxt.ctime()))
    self.timer = t = Timer(timeout, self.execute_task)
    t.start()
def new(cls, name, tags, priority, execute, timeout):
    n = now()
    no = mktime(n.timetuple())
    return {
        'qname': name,
        'tags': tags,
        'process_after': n,
        'priority': priority,
        'execute': execute,
        'enqueued_at': n,
        'enqueued_at_': no,
        'started_at': nulltime(),
        'started_at_': 0.0,
        'finished_at': nulltime(),
        'finished_at_': 0.0,
        'processed': False,
        'failed': False,
        'finished': False,
        'timeout': timeout,
        'worker_id': ObjectId('000000000000000000000000'),
    }
def set_finished(self, failed=False):
    '''
    Mark this job as finished.

    :param failed: if true, this was a failed job
    '''
    n = now()
    update = {'$set': {'processed': True,
                       'failed': failed,
                       'finished': True,
                       'finished_at': n,
                       'finished_at_': mktime(n.timetuple())
                       }
              }
    self.factory.queue_collection.update({'_id': self.id}, update)

    if not failed:
        data = self.factory.queue_collection.find_one({'_id': self.id})
        if data:
            self.factory.queue_collection.remove({'_id': self.id})
            self.factory.finished_jobs_collection.insert(data)
def add_job(self, rule, task, queue, tags=()):
    collection = self.factory.schedule_collection
    return collection.insert({'rule': rule,
                              'task': task,
                              'queue': queue,
                              'tags': tags,
                              'paused': False,
                              'active': True,
                              'modified': now()})