import logging

# db is the project's own database helper module (open_conn, etc.).
import db


def process_all(pool, config):
    logging.info("Processing all...")
    conn = None
    curs = None
    try:
        logging.info("Opening connection to db...")
        conn = db.open_conn(config)
        curs = conn.cursor()
        logging.info("Done opening db connection.")
        # We don't worry about race conditions on starting jobs, since
        # the locking in the single processor will make sure that only
        # one job at a time is actually processing.
        logging.info("Finding pending jobs...")
        pending_job_ids = _find_all_pending(curs)
        logging.info("Done, found %d pending jobs: %s.",
                     len(pending_job_ids), pending_job_ids)
        return pool.map_async(process_one,
                              [(job_id, config) for job_id in pending_job_ids])
    finally:
        if curs:
            curs.close()
        if conn:
            conn.close()
        logging.info("Done processing all.")
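
# A minimal driver sketch, not part of the original code: it assumes the
# module is run directly and borrows the _read_default_db_ini() helper seen
# in the tests for the config; the pool size is arbitrary. AsyncResult.get()
# blocks until every process_one call has returned.
if __name__ == "__main__":
    import multiprocessing

    logging.basicConfig(level=logging.INFO)
    pool = multiprocessing.Pool(processes=4)
    try:
        results = process_all(pool, _read_default_db_ini()).get()
        logging.info("Collected %d job results.", len(results))
    finally:
        pool.close()
        pool.join()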
def process_one(job_id_and_config):
    job_id, config = job_id_and_config
    conn = None
    curs = None
    try:
        conn = db.open_conn(config)
        curs = conn.cursor()
        if not _lock_job(curs, job_id):
            _log_to_db(curs, job_id, "Could not acquire lock on job")
            return None
        job_info = _find_job(curs, job_id)
        if not job_info:
            _log_to_db(curs, job_id, "Could not find job")
            return None
        if not _is_workable(job_info):
            _log_to_db(curs, job_id, "Job not workable, skipping")
            return None
        result = _call_job(curs, job_info, config)
        _maybe_update_job(curs, job_id, result)
        if result.is_success():
            _log_success(curs, job_id, result)
        else:
            _log_failure(curs, job_id, result)
        return result
    finally:
        # This will automatically release the lock, if one was acquired.
        if curs:
            curs.close()
        if conn:
            conn.close()
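
# A hedged sketch of what _lock_job could look like, assuming PostgreSQL
# with a psycopg2-style driver: pg_try_advisory_lock returns true only if
# no other session holds the lock, and a session-level advisory lock is
# released when the connection closes, which matches the cleanup in the
# finally block above. The function name and keying scheme are assumptions,
# not the original implementation.
def _lock_job_sketch(curs, job_id):
    curs.execute("SELECT pg_try_advisory_lock(%s)", (job_id,))
    return curs.fetchone()[0]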
def setUp(self):
    db.start_test()
    self.conn = db.open_conn(_read_default_db_ini())
    self._clean_all_tables()
    self.server = None
    self.thread = None
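
# A sketch of the matching tearDown, assuming db exposes a stop_test()
# counterpart to start_test(); the server.shutdown() and thread.join()
# calls are hypothetical, guarded because setUp leaves both as None.
def tearDown(self):
    if self.thread is not None:
        self.thread.join()
    if self.server is not None:
        self.server.shutdown()
    self.conn.close()
    db.stop_test()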