def get_job_execution(task_id, job_id, execution_id):
    """Return the details of a single execution of a job.

    Responds with 404 when either the task/job pair or the execution
    itself cannot be found.
    """
    logger = g.logger.bind(
        operation="get_job_execution",
        task_id=task_id,
        job_id=job_id,
        execution_id=execution_id,
    )

    logger.debug("Getting job...")
    job = Job.get_by_id(task_id=task_id, job_id=job_id)

    if job is None:
        return return_error(
            f"Task ({task_id}) or Job ({job_id}) not found.",
            "get_job_execution",
            status=404,
            logger=logger,
        )

    execution = job.get_execution_by_id(execution_id)
    if execution is None:
        return return_error(
            f"Job Execution ({execution_id}) not found in job ({job_id}).",
            "get_job_execution",
            status=404,
            logger=logger,
        )

    logger.debug("Job execution retrieved successfully...")
    return format_execution_details(job.task, job, execution)
def retrieve_execution_details(task_id, job_id, execution_id=None, get_data_fn=None):
    """Build a raw-text HTTP response from one execution of a job.

    When ``execution_id`` is None the job's last execution is used.
    ``get_data_fn`` extracts the response body from the execution; it
    defaults to returning ``execution.log``.

    Returns a 200 Response carrying the data and a ``Fastlane-Exit-Code``
    header, or an error response (404 for missing job, 400 for missing
    executions).
    """
    if get_data_fn is None:
        # PEP 8 (E731): use a named function instead of assigning a lambda;
        # the original even carried a `# noqa: E731` for this.
        def get_data_fn(execution):
            return execution.log

    # NOTE(review): operation label "get_response" differs from the function
    # name — kept as-is so existing log queries keep matching; confirm before
    # renaming.
    logger = g.logger.bind(operation="get_response", task_id=task_id, job_id=job_id)
    logger.debug("Getting job...")
    job = Job.get_by_id(task_id=task_id, job_id=job_id)

    if job is None:
        msg = f"Task ({task_id}) or Job ({job_id}) not found."
        return return_error(msg, "retrieve_execution_details", status=404, logger=logger)

    if not job.executions:
        msg = f"No executions found in job ({job_id})."
        return return_error(msg, "retrieve_execution_details", status=400, logger=logger)

    if execution_id is None:
        execution = job.get_last_execution()
    else:
        execution = job.get_execution_by_id(execution_id)

    if not execution:
        msg = "No executions found in job with specified arguments."
        return return_error(msg, "retrieve_execution_details", status=400, logger=logger)

    headers = {"Fastlane-Exit-Code": str(execution.exit_code)}

    # No data is returned while the execution has not actually run yet.
    if execution.status in [JobExecution.Status.running, JobExecution.Status.enqueued]:
        logs = ""
    else:
        logs = get_data_fn(execution)

    return Response(headers=headers, response=logs, status=200)
def retry_job(task_id, job_id):
    """Retry a job by enqueuing a fresh execution of its last image/command."""
    logger = g.logger.bind(operation="retry", task_id=task_id, job_id=job_id)

    logger.debug("Getting job...")
    job = Job.get_by_id(task_id=task_id, job_id=job_id)
    if job is None:
        return return_error("Job not found in task.", "retry_job", status=404, logger=logger)

    last_execution = job.get_last_execution()
    if last_execution is None:
        return return_error("No execution yet to retry.", "retry_job", status=400, logger=logger)

    # A job that is still sitting in the scheduler cannot be retried.
    if "enqueued_id" in job.metadata:
        if current_app.jobs_queue.is_scheduled(job.metadata["enqueued_id"]):
            return return_error(
                "Can't retry a scheduled job.", "retry_job", status=400, logger=logger
            )

    # A currently-running execution is stopped and marked failed first.
    if last_execution.status == JobExecution.Status.running:
        logger.debug("Stopping current execution...")
        current_app.executor.stop_job(job.task, job, last_execution)
        logger.debug("Current execution stopped.")
        last_execution.status = JobExecution.Status.failed
        job.save()

    new_execution = job.create_execution(last_execution.image, last_execution.command)
    new_execution.status = JobExecution.Status.enqueued

    logger.debug("Enqueuing job execution...")
    enqueue_args = [
        task_id,
        job_id,
        new_execution.execution_id,
        last_execution.image,
        last_execution.command,
    ]
    result = current_app.jobs_queue.enqueue(Categories.Job, *enqueue_args)
    job.metadata["enqueued_id"] = result.id
    job.save()

    logger.info("Job execution enqueued successfully.")
    return get_job_summary(task_id, job_id)
def get_job(task_id, job_id):
    """Return full details for a job, including all of its executions.

    Each execution in the payload gets a ``url`` pointing at its detail
    endpoint; the payload also links back to the owning task. Responds
    404 when the job does not exist.
    """
    logger = g.logger.bind(operation="get_job", task_id=task_id, job_id=job_id)
    logger.debug("Getting job...")
    job = Job.get_by_id(task_id=task_id, job_id=job_id)

    if job is None:
        # Fixed: the operation label previously said "get_task", mislabeling
        # errors raised from this endpoint; every sibling handler passes its
        # own name here.
        return return_error(
            "Job not found in task.", "get_job", status=404, logger=logger
        )

    logger.debug("Job retrieved successfully...")
    details = job.to_dict(
        include_log=True,
        include_error=True,
        # Environment keys matching these words are scrubbed from the output.
        blacklist=current_app.config["ENV_BLACKLISTED_WORDS"].lower().split(","),
    )

    for execution in details["executions"]:
        execution["url"] = url_for(
            "execution.get_job_execution",
            task_id=task_id,
            job_id=job_id,
            execution_id=execution["executionId"],
            _external=True,
        )

    task_url = url_for("task.get_task", task_id=task_id, _external=True)
    return jsonify({"task": {"id": task_id, "url": task_url}, "job": details})
def stop_job(task_id, job_id):
    """Stop a job's last execution and cancel any scheduled re-run."""
    logger = g.logger.bind(operation="stop_job", task_id=task_id, job_id=job_id)

    logger.debug("Getting job...")
    job = Job.get_by_id(task_id=task_id, job_id=job_id)
    if job is None:
        return return_error("Job not found in task.", "stop_job", status=404, logger=logger)

    last_execution = job.get_last_execution()
    _, error_response = perform_stop_job_execution(
        job, execution=last_execution, logger=logger, stop_schedule=True
    )
    if error_response is not None:
        return error_response

    return get_job_summary(task_id, job_id)
def get_task(task_id):
    """List a task's jobs, each with a link to its detail endpoint."""
    logger = g.logger.bind(operation="get_task", task_id=task_id)

    logger.debug("Getting job...")
    task = Task.get_by_task_id(task_id)
    if task is None:
        return return_error("Task not found.", "get_task", status=404, logger=logger)

    logger.debug("Task retrieved successfully...")
    job_ids = [str(ref.id) for ref in task.jobs]

    jobs = []
    for job in Job.objects(id__in=job_ids):
        job_url = url_for(
            "task.get_job", task_id=task_id, job_id=str(job.job_id), _external=True
        )
        jobs.append(
            {
                "id": str(job.job_id),
                "createdAt": job.created_at.isoformat(),
                "url": job_url,
            }
        )

    return jsonify({"taskId": task_id, "jobs": jobs})
def search_tasks():
    """Paginated task search; requires a ``query`` request argument."""
    logger = g.logger.bind(operation="search_tasks")

    query = request.args.get("query")
    if not query:
        return return_error(
            "The query param is required.", "search_tasks", status=400, logger=logger
        )

    page, error = get_current_page(logger)
    if error:
        return error

    per_page = current_app.config["PAGINATION_PER_PAGE"]
    logger.debug(f"Getting tasks page={page} per_page={per_page}...")
    paginator = Task.search_tasks(query=query, page=page, per_page=per_page)
    logger.debug("Tasks retrieved successfully...")

    def page_url(page_number):
        # External URL for a given page of the same search.
        return url_for("task.search_tasks", query=query, page=page_number, _external=True)

    next_url = page_url(paginator.next_num) if paginator.has_next else None
    prev_url = page_url(paginator.prev_num) if paginator.has_prev else None

    return jsonify(
        {
            "items": [task.to_dict() for task in paginator.items],
            "total": paginator.total,
            "page": paginator.page,
            "pages": paginator.pages,
            "perPage": paginator.per_page,
            "hasNext": paginator.has_next,
            "hasPrev": paginator.has_prev,
            "nextUrl": next_url,
            "prevUrl": prev_url,
        }
    )
def get_current_page(logger):
    """Parse the ``page`` request arg.

    Returns ``(page, None)`` for a positive integer, otherwise
    ``(None, <400 error response>)``.
    """
    raw_page = request.args.get("page", 1)
    try:
        page = int(raw_page)
    except ValueError:
        page = 0  # non-numeric input takes the error path below

    if page <= 0:
        msg = "Tasks pagination page param should be a positive integer."
        return None, return_error(msg, "get_tasks", status=400, logger=logger)

    return page, None
def stop_job_execution(task_id, job_id, execution_id):
    """Stop one specific execution of a job without touching its schedule."""
    logger = g.logger.bind(
        operation="stop_job_execution",
        task_id=task_id,
        job_id=job_id,
        execution_id=execution_id,
    )

    logger.debug("Getting job...")
    job = Job.get_by_id(task_id=task_id, job_id=job_id)
    if job is None:
        return return_error(
            f"Task ({task_id}) or Job ({job_id}) not found.",
            "stop_job_execution",
            status=404,
            logger=logger,
        )

    execution = job.get_execution_by_id(execution_id)
    if execution is None:
        return return_error(
            f"Job Execution ({execution_id}) not found in Job ({job_id}).",
            "stop_job_execution",
            status=404,
            logger=logger,
        )

    _, error_response = perform_stop_job_execution(
        job, execution=execution, logger=logger, stop_schedule=False
    )
    if error_response is not None:
        return error_response

    return format_execution_details(job.task, job, execution, shallow=True)
def perform_stop_job_execution(job, execution, logger, stop_schedule=True):
    """Stop a job execution and mark it as failed.

    Returns a ``(stopped, error_response)`` tuple: ``(True, None)`` on
    success, ``(False, <400 error response>)`` when the job has no
    executions to stop.
    """
    if execution is None:
        if not job.executions:
            msg = "No executions found in job."
            return (
                False,
                return_error(msg, "stop_job_execution", status=400, logger=logger),
            )
        # Fall back to the job's most recent execution.
        execution = job.get_last_execution()

    # Only an actively running execution needs to be stopped in the executor.
    if execution is not None and execution.status == JobExecution.Status.running:
        logger.debug("Stopping current execution...")
        executor = current_app.executor
        executor.stop_job(job.task, job, execution)
        logger.debug("Current execution stopped.")

    # Bump retry_count past the configured retries — presumably so the retry
    # machinery does not re-enqueue this job; confirm against the worker's
    # retry logic.
    if "retries" in job.metadata:
        job.metadata["retry_count"] = job.metadata["retries"] + 1
        job.save()

    scheduler = Scheduler("jobs", connection=current_app.redis)

    # Cancel any pending scheduled run, unless the caller asked to keep it.
    if (stop_schedule and "enqueued_id" in job.metadata
            and job.metadata["enqueued_id"] in scheduler):
        scheduler.cancel(job.metadata["enqueued_id"])
        job.scheduled = False

    # NOTE(review): if get_last_execution() returned None above, the lines
    # below would raise AttributeError — verify that cannot happen when
    # job.executions is non-empty.
    if execution.error is None:
        execution.error = ""
    execution.error += "\nUser stopped job execution manually."
    execution.status = JobExecution.Status.failed
    job.save()
    logger.debug("Job stopped.")
    return True, None