def job_listener(event):
    """Record the outcome of a finished scheduler job and post its result."""
    job_id = event.job.args[0]

    if event.code == events.EVENT_JOB_MISSED:
        db.mark_job_as_missed(job_id)
    elif event.exception:
        # A JobError carries a structured payload; any other exception is
        # flattened into a traceback string.
        if isinstance(event.exception, util.JobError):
            error_object = event.exception.as_dict()
        else:
            tb_lines = traceback.format_tb(event.traceback)
            tb_lines.append(repr(event.exception))
            error_object = "\n".join(tb_lines)
        db.mark_job_as_errored(job_id, error_object)
    else:
        db.mark_job_as_completed(job_id, event.retval)

    # Push the stored outcome to the job's result URL, recording any failure.
    api_key = db.get_job(job_id)["api_key"]
    if not send_result(job_id, api_key):
        db.mark_job_as_failed_to_post_result(job_id)

    # Optionally notify tests that job_listener() has finished.
    if "_TEST_CALLBACK_URL" in app.config:
        requests.get(app.config["_TEST_CALLBACK_URL"])
def job_listener(event):
    """Persist a completed, errored or missed job and send its result."""
    job_id = event.job.args[0]

    if event.code == events.EVENT_JOB_MISSED:
        db.mark_job_as_missed(job_id)
    elif not event.exception:
        db.mark_job_as_completed(job_id, event.retval)
    else:
        # Structured error for JobError, raw traceback text otherwise.
        if isinstance(event.exception, util.JobError):
            error_object = event.exception.as_dict()
        else:
            error_object = "\n".join(
                traceback.format_tb(event.traceback)
                + [repr(event.exception)])
        db.mark_job_as_errored(job_id, error_object)

    api_key = db.get_job(job_id)["api_key"]
    result_ok = send_result(job_id, api_key)
    if not result_ok:
        db.mark_job_as_failed_to_post_result(job_id)

    # Optionally notify tests that job_listener() has finished.
    if "_TEST_CALLBACK_URL" in app.config:
        requests.get(app.config["_TEST_CALLBACK_URL"])
def xloader_data_into_datastore(input):
    '''This is the func that is queued. It is a wrapper for
    xloader_data_into_datastore_, and makes sure it finishes by calling
    xloader_hook to update the task_status with the result.

    Errors are stored in task_status and the job log and this method
    returns 'error' to let RQ know too. Should saving the task_status
    fail, then we also return 'error'.
    '''
    # First flag that this task is running, to indicate the job is not
    # stillborn, for when xloader_submit is deciding whether another job would
    # be a duplicate or not
    job_dict = dict(metadata=input['metadata'],
                    status='running')
    callback_xloader_hook(result_url=input['result_url'],
                          api_key=input['api_key'],
                          job_dict=job_dict)

    job_id = get_current_job().id
    errored = False
    # Hoisted out of the except blocks: same logger either way.
    log = logging.getLogger(__name__)
    try:
        xloader_data_into_datastore_(input, job_dict)
        job_dict['status'] = 'complete'
        db.mark_job_as_completed(job_id, job_dict)
    except JobError as e:
        db.mark_job_as_errored(job_id, str(e))
        job_dict['status'] = 'error'
        job_dict['error'] = str(e)
        log.error('xloader error: {0}, {1}'.format(e, traceback.format_exc()))
        errored = True
    except Exception as e:
        # sys.exc_traceback was a Python-2-only alias removed in Python 3;
        # sys.exc_info()[2] is the portable way to get the active traceback.
        db.mark_job_as_errored(
            job_id, traceback.format_tb(sys.exc_info()[2])[-1] + repr(e))
        job_dict['status'] = 'error'
        job_dict['error'] = str(e)
        log.error('xloader error: {0}, {1}'.format(e, traceback.format_exc()))
        errored = True
    finally:
        # job_dict is defined in xloader_hook's docstring
        is_saved_ok = callback_xloader_hook(result_url=input['result_url'],
                                            api_key=input['api_key'],
                                            job_dict=job_dict)
        errored = errored or not is_saved_ok

    return 'error' if errored else None
asynchronous_job = async_types.get(job_type) return run_asynchronous_job(asynchronous_job, job_id, job_key, input) def run_synchronous_job(job, job_id, job_key, input): try: db.add_pending_job(job_id, job_key, **input) except sa.exc.IntegrityError, e: error_string = 'job_id {} already exists'.format(job_id) return json.dumps({"error": error_string}), 409, headers try: result = job(job_id, input) if hasattr(result, "__call__"): db.mark_job_as_completed(job_id) return flask.Response(result(), mimetype='application/json') else: db.mark_job_as_completed(job_id, result) except util.JobError, e: db.mark_job_as_errored(job_id, e.as_dict()) except Exception, e: db.mark_job_as_errored( job_id, traceback.format_tb(sys.exc_traceback)[-1] + repr(e)) api_key = db.get_job(job_id)['api_key'] result_ok = send_result(job_id, api_key) if not result_ok:
asynchronous_job = async_types.get(job_type) return run_asynchronous_job(asynchronous_job, job_id, job_key, input) def run_synchronous_job(job, job_id, job_key, input): try: db.add_pending_job(job_id, job_key, **input) except sa.exc.IntegrityError, e: error_string = 'job_id {} already exists'.format(job_id) return json.dumps({"error": error_string}), 409, headers try: result = job(job_id, input) if hasattr(result, "__call__"): db.mark_job_as_completed(job_id) return flask.Response(result(), mimetype='application/json') else: db.mark_job_as_completed(job_id, result) except util.JobError, e: db.mark_job_as_errored(job_id, e.as_dict()) except Exception, e: db.mark_job_as_errored( job_id, traceback.format_tb(sys.exc_traceback)[-1] + repr(e)) api_key = db.get_job(job_id)['api_key'] result_ok = send_result(job_id, api_key) if not result_ok: db.mark_job_as_failed_to_post_result(job_id)