def run_query(query_run_id):
    """Execute the SQL for a QueryRun against the replica DB and persist the outcome.

    Loads the :class:`QueryRun` identified by *query_run_id*, marks it RUNNING,
    validates its SQL via ``check_sql``, executes it on the replica connection,
    and writes a result object (success / error / killed) via ``.output()``,
    updating the run's status accordingly.

    :param query_run_id: primary key of the QueryRun to execute.
    :returns: None; all outcomes are communicated through the saved QueryRun
              status and the emitted Query*Result output.
    """
    # Sentinel: the cursor may never be created (e.g. check_sql rejects the
    # query before we reach the replica), so start as None and guard close().
    cur = None
    # FIX: was time.clock(), which is removed in Python 3.8 and measured CPU
    # time (not elapsed wall time) on Unix — wrong for an I/O-bound DB query.
    # time.monotonic() gives elapsed time immune to system clock adjustments.
    start_time = time.monotonic()
    try:
        celery_log.info("Starting run for qrun:%s", query_run_id)
        qrun = QueryRun.get_by_id(query_run_id)
        qrun.status = QueryRun.STATUS_RUNNING
        qrun.save()

        # Static validation before touching the replica; a failure is routed
        # through the same DatabaseError path as a real execution error.
        check_result = check_sql(qrun.augmented_sql)
        if check_result is not True:
            celery_log.info("Check result for qrun:%s failed, with message: %s",
                            qrun.id, check_result[0])
            raise pymysql.DatabaseError(0, check_result[1])

        cur = g.conn.replica.cursor()
        cur.execute(qrun.augmented_sql)
        # Collect one result set per statement; nextset() advances through
        # multi-statement results until exhausted.
        result = [make_result(cur)]
        while cur.nextset():
            result.append(make_result(cur))

        total_time = time.monotonic() - start_time
        qresult = QuerySuccessResult(qrun, total_time, result,
                                     celery.conf.OUTPUT_PATH_TEMPLATE)
        qrun.status = QueryRun.STATUS_COMPLETE
        celery_log.info("Completed run for qrun:%s successfully", qrun.id)
        qresult.output()
        qrun.save()
    except pymysql.DatabaseError as e:
        total_time = time.monotonic() - start_time
        # NOTE(review): e.args[1] assumes a (code, message) two-tuple. The
        # explicit raise above guarantees that shape; driver-raised errors
        # presumably match it too — confirm against pymysql error classes.
        qresult = QueryErrorResult(qrun, total_time,
                                   celery.conf.OUTPUT_PATH_TEMPLATE, e.args[1])
        qrun.status = QueryRun.STATUS_FAILED
        qresult.output()
        qrun.save()
        celery_log.info("Completed run for qrun:%s with failure: %s",
                        qrun.id, e.args[1])
    except SoftTimeLimitExceeded:
        # Celery's soft time limit fired: ask a separate task to kill the
        # server-side query thread, then record the run as killed.
        celery_log.info(
            "Time limit exceeded for qrun:%s, thread:%s attempting to kill",
            qrun.id, g.conn.replica.thread_id()
        )
        total_time = time.monotonic() - start_time
        kill_query.delay(g.conn.replica.thread_id())
        # BUG FIX: was `qrun.state = ...`, which set a nonexistent attribute
        # and left the persisted status stale; every other branch writes
        # `qrun.status`.
        qrun.status = QueryRun.STATUS_KILLED
        qrun.save()
        qresult = QueryKilledResult(qrun, total_time,
                                    celery.conf.OUTPUT_PATH_TEMPLATE)
        qresult.output()
    finally:
        # It is possible the cursor was never created,
        # so check before we try to close it.
        if cur is not None:
            cur.close()