def unlock(alloc_timeout=120, postponed_delay=30):
    """
    Unlock jobs that have been allocated to some worker way too long ago.

    Jobs stuck in ALLOC for more than ``alloc_timeout`` seconds are reset
    to PENDING and detached from the workers holding them.  POSTPONED jobs
    older than ``postponed_delay`` seconds are re-activated, or marked
    FAILED once they have been pushed back ``MAX_RETRIES`` times.

    Both timeouts default to the historic hard-coded values (120s / 30s).
    """
    db = init_db('jobs')

    now = datetime.datetime.utcnow()

    # Reset long-allocated jobs and free the workers that held them.
    for job in db.query(Job).filter(
            Job.timestamp <= (now - datetime.timedelta(0, alloc_timeout)),
            Job.state == b'ALLOC'
    ).all():

        log.debug("Unlocking %s job %s" % (job.job_type, job.uuid), level=7)
        job.state = b'PENDING'

        for worker in db.query(Worker).filter_by(job=job.id).all():
            worker.job = None

    # process postponed jobs
    for job in db.query(Job).filter(
            Job.timestamp <= (now - datetime.timedelta(0, postponed_delay)),
            Job.state == b'POSTPONED'
    ).all():

        if job.pushbacks >= MAX_RETRIES:
            log.error("Too many pushbacks for job %s" % (job.uuid))
            job.state = b'FAILED'
        else:
            log.debug("Re-activating postponed job %s" % (job.uuid), level=7)
            job.state = b'PENDING'
            job.pushbacks += 1

    db.commit()
def set_state(identity, state, interests=None):
    """
    Record collector *identity* as being in *state* and register the
    commands it is interested in.

    Creates the collector record on first sight.  A READY collector is
    detached from any job it may still reference.  ``interests`` is a
    list of command names; unknown commands get an Interest row created
    on the fly.
    """
    db = init_db('collectors')

    # Avoid the shared-mutable-default pitfall of ``interests=[]``.
    if interests is None:
        interests = []

    collector = db.query(Collector).filter_by(identity=identity).first()

    if collector is None:
        db.add(Collector(identity, state))
        db.commit()
        # Re-select so we hold the persisted row below.
        collector = db.query(Collector).filter_by(identity=identity).first()
    else:
        collector.state = state
        collector.timestamp = datetime.datetime.utcnow()
        db.commit()

    if state == b'READY':
        collector.job = None

    for cmd in interests:
        interest = db.query(Interest).filter_by(cmd=cmd).first()

        if interest is None:
            db.add(Interest(cmd))
            db.commit()
            interest = db.query(Interest).filter_by(cmd=cmd).first()

        collector.interests.append(interest)

    db.commit()
def select_for_worker(identity):
    """
    Allocate the next pending worker job to the READY, idle worker
    identified by *identity*.

    Returns the allocated Job, or None when the worker is unknown/busy
    or when no PENDING job of type 'worker' exists.
    """
    db = init_db('jobs')

    # NOTE: ``Worker.job == None`` is a SQLAlchemy column expression and
    # must stay ``==`` (it compiles to IS NULL); only Python-level None
    # checks below use ``is None``.
    worker = db.query(Worker).filter(
            Worker.identity == identity,
            Worker.job == None,
            Worker.state == b'READY'
        ).first()

    if worker is None:
        return

    # This is influenced by .update(), which resets the .timestamp to
    # .utcnow(), effectively pushing all updated jobs to the back of the
    # queue.
    #
    # Practical result is a massive amount of metadata gathering
    # followed by a sudden surge of jobs getting DONE.
    #job = db.query(Job).filter_by(job_type='worker', state=b'PENDING').order_by(Job.timestamp).first()

    # This would result in "most recent first, work your way backwards."
    #job = db.query(Job).filter_by(job_type='worker', state=b'PENDING').order_by(Job.timestamp.desc()).first()

    # This results in following the storage order and is by far the
    # fastest methodology.
    job = db.query(Job).filter_by(job_type='worker', state=b'PENDING').order_by(Job.id).first()

    if job is None:
        return

    job.state = b'ALLOC'
    worker.job = job.id
    worker.state = b'BUSY'

    db.commit()

    return job
def expire():
    """
    Handle workers that have stopped responding.

    Workers that have already been STALE for 90+ seconds are purged;
    workers whose last timestamp is 90+ seconds old are marked STALE.
    In both cases any job the worker was holding is pushed back to
    PENDING so it can be re-allocated.
    """
    db = init_db('workers')

    def _requeue_job(job_id):
        # Return the worker's job (if any) to the pending queue.
        if job_id is None:
            return
        _job = db.query(Job).filter_by(id=job_id).first()
        if _job is not None:
            _job.state = b'PENDING'
            _job.timestamp = datetime.datetime.utcnow()

    # Already STALE and still silent: purge.
    for worker in db.query(Worker).filter(
            Worker.timestamp <= (datetime.datetime.utcnow() - datetime.timedelta(0, 90)),
            Worker.state == b'STALE').all():

        log.debug("Purging worker %s as very stale" % (worker.identity), level=7)
        _requeue_job(worker.job)
        db.delete(worker)

    db.commit()

    # Just timed out: flag as STALE (purged on the next pass if silent).
    for worker in db.query(Worker).filter(
            Worker.timestamp <= (datetime.datetime.utcnow() - datetime.timedelta(0, 90)),
            Worker.state != b'STALE').all():

        log.debug("Marking worker %s as stale" % (worker.identity), level=7)
        _requeue_job(worker.job)
        worker.state = b'STALE'
        worker.timestamp = datetime.datetime.utcnow()

    db.commit()
def set_job(identity, job_uuid):
    """
    Link worker *identity* and the job identified by *job_uuid* to one
    another.  Silently does nothing when either side cannot be found.
    """
    db = init_db('workers')

    job = db.query(Job).filter_by(uuid=job_uuid).first()

    if job is None:
        return

    worker = db.query(Worker).filter_by(identity=identity).first()

    # Guard against unknown workers instead of raising AttributeError.
    if worker is None:
        return

    worker.job = job.id
    job.worker = worker.id

    db.commit()
def set_state(identity, state):
    """
    Set the state of worker *identity*, creating the worker record on
    first sight, and refresh its timestamp.
    """
    db = init_db('workers')

    worker = db.query(Worker).filter_by(identity=identity).first()

    if worker is None:
        db.add(Worker(identity, state))
        db.commit()
    else:
        worker.state = state
        worker.timestamp = datetime.datetime.utcnow()
        db.commit()
def update(job_uuid, **kw):
    """
    Apply attribute updates *kw* to the job identified by *job_uuid*
    and refresh its timestamp.  Unknown job UUIDs are silently ignored.
    """
    db = init_db('jobs')

    job = db.query(Job).filter_by(uuid=job_uuid).first()

    if job is None:
        return

    # .items() instead of the Python 2-only .iteritems().
    for attr, value in kw.items():
        setattr(job, attr, value)

    job.timestamp = datetime.datetime.utcnow()

    db.commit()
def update(identity, **kw):
    """
    Apply attribute updates *kw* to the worker identified by *identity*,
    creating a READY worker record when none exists yet, and refresh the
    worker's timestamp.
    """
    # NOTE(review): every other worker routine passes 'workers' to
    # init_db(); 'identity' here looks like a typo -- confirm against
    # init_db() before changing it.
    db = init_db('identity')

    worker = db.query(Worker).filter_by(identity=identity).first()

    if worker is None:
        # NOTE(review): a freshly created worker does not get *kw*
        # applied -- confirm whether that is intentional.
        db.add(Worker(identity, b'READY'))
    else:
        # .items() instead of the Python 2-only .iteritems().
        for attr, value in kw.items():
            setattr(worker, attr, value)

        worker.timestamp = datetime.datetime.utcnow()

    db.commit()
def expire():
    """
    Purge DONE jobs older than the configured retention period.

    The retention period (in seconds) is read from the ``job_retention``
    setting in the ``broker`` section, defaulting to 300.
    """
    db = init_db('jobs')

    # int(...) rather than the C-style cast spelling (int)(...).
    job_retention = int(conf.get("broker", "job_retention", 300))

    cutoff = datetime.datetime.utcnow() - datetime.timedelta(0, job_retention)

    for job in db.query(Job).filter(
            Job.timestamp <= cutoff,
            Job.state == b'DONE').all():

        log.debug("Purging job %s" % (job.uuid), level=7)
        db.delete(job)

    db.commit()
def update(identity, **kw):
    """
    Apply attribute updates *kw* to the collector identified by
    *identity*, creating the collector record on first sight.
    """
    db = init_db('collectors')

    collector = db.query(Collector).filter_by(identity=identity).first()

    if collector is None:
        db.add(Collector(identity))
        db.commit()
        # Re-select so the attribute updates land on the persisted row.
        collector = db.query(Collector).filter_by(identity=identity).first()

    # .items() instead of the Python 2-only .iteritems().
    for attr, value in kw.items():
        setattr(collector, attr, value)

    db.commit()
def add(identity, state=b'READY', interests=None):
    """
    Register a new collector *identity* in *state* and attach its
    command interests, creating Interest rows for unknown commands.
    """
    db = init_db('collectors')

    # Avoid the shared-mutable-default pitfall of ``interests=[]``.
    if interests is None:
        interests = []

    db.add(Collector(identity, state))
    db.commit()

    collector = db.query(Collector).filter_by(identity=identity).first()

    for cmd in interests:
        interest = db.query(Interest).filter_by(cmd=cmd).first()

        if interest is None:
            db.add(Interest(cmd))
            db.commit()
            interest = db.query(Interest).filter_by(cmd=cmd).first()

        collector.interests.append(interest)

    db.commit()
def add(dealer, notification, job_type='worker'):
    """
    Add a new job.

    Returns the newly created Job, or None when the database is
    unavailable or the insert violates an integrity constraint.
    """
    db = init_db('jobs')

    if db is None:
        return None

    try:
        job = Job(dealer, notification, job_type)
        db.add(job)
        db.commit()
    # 'as' syntax is valid on Python 2.6+ and 3, unlike 'except X, e'.
    except IntegrityError as errmsg:
        log.error("SQLAlchemy Integrity Error: %r" % (errmsg))
        db.rollback()
        return None

    # Hand the created job back to the caller (the successful path used
    # to fall through and implicitly return None).
    return job
def expire():
    """
    Handle collectors that have stopped responding.

    Collectors already STALE for 90+ seconds are deleted; collectors
    whose last timestamp is 90+ seconds old are flagged STALE.  Jobs
    assigned to an affected collector are pushed back to PENDING.
    """
    db = init_db('collectors')

    def _requeue_jobs(collector):
        # Return every job held by this collector to the pending queue.
        for _job in db.query(Job).filter_by(collector=collector.identity).all():
            _job.state = b'PENDING'
            _job.timestamp = datetime.datetime.utcnow()

    cutoff = datetime.datetime.utcnow() - datetime.timedelta(0, 90)

    # Already STALE and still silent: purge.
    for collector in db.query(Collector).filter(
            Collector.timestamp <= cutoff,
            Collector.state == b'STALE').all():

        log.debug("Purging collector %s as very stale" % (collector.identity), level=7)
        _requeue_jobs(collector)
        db.delete(collector)

    db.commit()

    cutoff = datetime.datetime.utcnow() - datetime.timedelta(0, 90)

    # Just timed out: flag as STALE.
    for collector in db.query(Collector).filter(
            Collector.timestamp <= cutoff,
            Collector.state != b'STALE').all():

        log.debug("Marking collector %s as stale" % (collector.identity), level=7)
        _requeue_jobs(collector)
        collector.state = b'STALE'
        collector.timestamp = datetime.datetime.utcnow()

    db.commit()
def run(self):
    """
    Bring up the broker's ZMQ router sockets, mark the broker as
    running, and perform a best-effort VACUUM of the broker database.
    """
    self.create_router(
            'collector',
            'tcp://*:5571',
            recv_multipart=self._cb_cr_recv_multipart
        )

    self.create_router_process(
            'dealer',
            'tcp://*:5570',
            self._cb_dr_on_recv_stream
        )

    self.create_router(
            'worker',
            'tcp://*:5573',
            recv_multipart=self._cb_wr_recv_multipart
        )

    self.create_router(
            'worker_controller',
            'tcp://*:5572',
            recv_multipart=self._cb_wcr_recv_multipart
        )

    self.running = True

    # Timers for the periodic housekeeping passes.
    last_expire = time.time()
    last_state = time.time()
    last_vacuum = time.time()

    try:
        db = init_db('broker')
        db.execute("VACUUM")
        db.commit()
    # 'except Exception:' replaces the Python 2-only 'except X, e' form;
    # the VACUUM is best-effort housekeeping, so failure is deliberately
    # ignored.
    except Exception:
        pass
def set_state(uuid, state):
    """
    Set the state of every job matching *uuid* and bump its timestamp.
    """
    db = init_db('jobs')

    matches = db.query(Job).filter_by(uuid=uuid).all()

    for job in matches:
        job.state = state
        job.timestamp = datetime.datetime.utcnow()

    db.commit()
def select_by_state(state):
    """Return the list of all workers currently in *state*."""
    return init_db('workers').query(Worker).filter_by(state=state).all()
def select(identity):
    """Look up a single worker by *identity*; None when unknown."""
    return init_db('workers').query(Worker).filter_by(identity=identity).first()
def count():
    """Return the total number of worker records."""
    return init_db('workers').query(Worker).count()
def count_by_state(state):
    """Return how many workers are currently in *state*."""
    return init_db('workers').query(Worker).filter_by(state=state).count()
def select_by_state(state):
    """Return the list of all jobs currently in *state*."""
    return init_db('jobs').query(Job).filter_by(state=state).all()
def count_by_state(state):
    """Return how many jobs are currently in *state*."""
    return init_db('jobs').query(Job).filter_by(state=state).count()
def select(job_uuid):
    """Look up a single job by *job_uuid*; None when unknown."""
    return init_db('jobs').query(Job).filter_by(uuid=job_uuid).first()
def select_all():
    """Return every job record, regardless of state or type."""
    return init_db('jobs').query(Job).all()
def first():
    """
    Return the first job (in storage order) that is neither DONE nor
    FAILED, or None when no such job exists.
    """
    unfinished = init_db('jobs').query(Job).filter(
            Job.state != b'DONE',
            Job.state != b'FAILED'
        )
    return unfinished.order_by(Job.id).first()
def count_by_type_and_state(job_type, state):
    """Return how many jobs of *job_type* are currently in *state*."""
    return init_db('jobs').query(Job).filter_by(job_type=job_type, state=state).count()
def set_job_type(uuid, job_type):
    """
    Change the type of the job identified by *uuid* and refresh its
    timestamp.  Unknown UUIDs are silently ignored, consistent with
    update() and set_job().
    """
    db = init_db('jobs')

    job = db.query(Job).filter_by(uuid=uuid).first()

    # Guard against unknown UUIDs instead of raising AttributeError.
    if job is None:
        return

    job.job_type = job_type
    job.timestamp = datetime.datetime.utcnow()

    db.commit()
def select_by_state(state):
    """Return the list of all collectors currently in *state*."""
    return init_db('collectors').query(Collector).filter_by(state=state).all()
def add(identity, state=b'READY'):
    """Insert a new worker record for *identity*, READY by default."""
    session = init_db('workers')
    session.add(Worker(identity, state))
    session.commit()
def count_by_type(job_type):
    """
    Return the number of jobs of *job_type*.
    """
    db = init_db('jobs')
    # .count() rather than .all(): the function name promises a count
    # (cf. count_by_type_and_state()), and the original fetched every
    # matching row only to hand the caller a list.
    return db.query(Job).filter_by(job_type=job_type).count()
def count():
    """Return the total number of job records."""
    return init_db('jobs').query(Job).count()