def get_pipeline_schedules():
    """Endpoint for getting the pipeline schedules.

    Returns a JSON list of schedule dicts, each enriched with the state of
    its most recent job run (error/running flags, timestamps, trigger) and
    whether any run has ever succeeded.
    """
    project = Project.find()
    schedule_service = ScheduleService(project)
    schedules = [dict(s) for s in schedule_service.schedules()]

    for schedule in schedules:
        finder = JobFinder(schedule["name"])
        latest_job = finder.latest(db.session)

        if latest_job:
            schedule["has_error"] = latest_job.has_error()
            schedule["is_running"] = latest_job.is_running()
            schedule["started_at"] = latest_job.started_at
            schedule["ended_at"] = latest_job.ended_at
            schedule["trigger"] = latest_job.trigger
        else:
            schedule["has_error"] = False
            schedule["is_running"] = False
            schedule["started_at"] = None
            schedule["ended_at"] = None
            schedule["trigger"] = None

        # The schedule name doubles as the job identifier.
        schedule["job_id"] = schedule["name"]

        latest_success = finder.latest_success(db.session)
        schedule["has_ever_succeeded"] = (
            latest_success.is_success() if latest_success else None
        )

        start_date = schedule["start_date"]
        schedule["start_date"] = (
            start_date.date().isoformat() if start_date else None
        )

    return jsonify(schedules)
def job_log(job_id) -> Response:
    """Endpoint for getting the most recent log generated by a job with job_id.

    Returns a JSON payload with the log text (or None if it exceeded the size
    threshold), an oversize flag, and the latest job's state fields.
    """
    project = Project.find()
    log_service = JobLoggingService(project)

    # Keep the try body minimal: only the log fetch can raise the
    # size-threshold exception; the flag default lives outside it.
    has_log_exceeded_max_size = False
    try:
        log = log_service.get_latest_log(job_id)
    except SizeThresholdJobLogException:
        # Log is too large to return inline; signal that to the client.
        log = None
        has_log_exceeded_max_size = True

    finder = JobFinder(job_id)
    state_job = finder.latest(db.session)
    state_job_success = finder.latest_success(db.session)

    return jsonify(
        {
            "job_id": job_id,
            "log": log,
            "has_log_exceeded_max_size": has_log_exceeded_max_size,
            "has_error": state_job.has_error() if state_job else False,
            "started_at": state_job.started_at if state_job else None,
            "ended_at": state_job.ended_at if state_job else None,
            "trigger": state_job.trigger if state_job else None,
            "has_ever_succeeded": state_job_success.is_success()
            if state_job_success
            else None,
        }
    )
def job_state() -> Response:
    """Endpoint for getting the status of N jobs.

    Reads a JSON payload containing "job_ids" and returns the state of each
    job that has a record; ids with no job yet are omitted from the response.
    """
    project = Project.find()
    job_ids = request.get_json()["job_ids"]

    jobs = []
    for job_id in job_ids:
        finder = JobFinder(job_id)
        state_job = finder.latest(db.session)

        # A job may not be queued yet (e.g. it is waiting on another async
        # prerequisite such as a dbt installation), so skip missing records.
        if not state_job:
            continue

        success_job = finder.latest_success(db.session)
        jobs.append(
            {
                "job_id": job_id,
                "is_complete": state_job.is_complete(),
                "has_error": state_job.has_error(),
                "started_at": state_job.started_at,
                "ended_at": state_job.ended_at,
                "has_ever_succeeded": success_job.is_success()
                if success_job
                else None,
            }
        )

    return jsonify({"jobs": jobs})
def get_pipeline_schedules():
    """Endpoint for getting the pipeline schedules.

    Returns a JSON list of schedule dicts, each annotated with the latest
    job's error/running state and its job_id (None when no job exists yet).
    """
    project = Project.find()
    schedule_service = ScheduleService(project)

    schedules = []
    for entry in schedule_service.schedules():
        schedule = entry._asdict()
        latest_job = JobFinder(schedule["name"]).latest(db.session)

        if latest_job:
            schedule["has_error"] = latest_job.has_error()
            schedule["is_running"] = latest_job.is_running()
            schedule["job_id"] = latest_job.job_id
        else:
            schedule["has_error"] = False
            schedule["is_running"] = False
            schedule["job_id"] = None

        schedules.append(schedule)

    return jsonify(schedules)
def job_log(job_id) -> Response:
    """Endpoint for getting the most recent log generated by a job with job_id.

    Returns a JSON payload with the latest log text and whether the latest
    job run errored.

    NOTE(review): unlike the sibling job_log variant in this file, this one
    does not guard get_latest_log against an oversized log — confirm whether
    that is intentional for this version.
    """
    project = Project.find()
    log = JobLoggingService(project).get_latest_log(job_id)
    state_job = JobFinder(job_id).latest(db.session)

    payload = {
        "job_id": job_id,
        "log": log,
        "has_error": state_job.has_error() if state_job else False,
    }
    return jsonify(payload)