def update(self, status):
    """
    Invoked when the status of a task has changed (e.g., a slave is lost and so the task is lost, a task
    finishes and an executor sends a status update saying so, etc.) Note that returning from this callback
    acknowledges receipt of this status update. If for whatever reason the scheduler aborts during this callback
    (or the process exits) another status update will be delivered. Note, however, that this is currently not
    true if the slave sending the status update is lost or fails during that time.
    """

    started = now()

    model = utils.create_task_update_model(status)
    mesos_status = model.status
    task_update = TaskStatusUpdate(model, utils.get_status_agent_id(status), utils.get_status_data(status))
    task_id = task_update.task_id
    was_task_finished = task_update.status in TaskStatusUpdate.TERMINAL_STATUSES
    was_job_finished = False

    if mesos_status == 'TASK_ERROR':
        logger.error('Status update for task %s: %s', task_id, mesos_status)
    elif mesos_status == 'TASK_LOST':
        logger.warning('Status update for task %s: %s', task_id, mesos_status)
    else:
        logger.info('Status update for task %s: %s', task_id, mesos_status)

    # Since we have a status update for this task, remove it from the reconciliation set
    recon_mgr.remove_task_id(task_id)

    # Hand off task update to be saved in the database
    if task_id.startswith(JOB_TASK_ID_PREFIX):
        # Grab job execution ID from manager
        cluster_id = JobExecution.parse_cluster_id(task_id)
        job_exe = job_exe_mgr.get_running_job_exe(cluster_id)
        if job_exe:
            model.job_exe_id = job_exe.id
    task_update_mgr.add_task_update(model)

    # Update task with latest status
    # This should happen before the job execution or node manager are updated, since they will assume that the
    # task has already been updated
    task_mgr.handle_task_update(task_update)

    if task_id.startswith(JOB_TASK_ID_PREFIX):
        # Job task, so update the job execution
        try:
            job_exe = job_exe_mgr.handle_task_update(task_update)
            if job_exe and job_exe.is_finished():
                logger.info('job_exe with job id %s and node id %s is finished', job_exe.job_id, job_exe.node_id)
                was_job_finished = True
                cleanup_mgr.add_job_execution(job_exe)
                GPUManager.release_gpus(job_exe.node_id, job_exe.job_id)
        except Exception:
            cluster_id = JobExecution.parse_cluster_id(task_id)
            logger.exception('Error handling status update for job execution: %s', cluster_id)
            # Error handling status update, add task so it can be reconciled
            task = task_mgr.get_task(task_id)
            if task:
                recon_mgr.add_tasks([task])
    else:
        # Not a job task, so must be either a node or system task
        node_mgr.handle_task_update(task_update)
        system_task_mgr.handle_task_update(task_update)

    scheduler_mgr.add_task_update_counts(was_task_finished, was_job_finished)

    duration = now() - started
    msg = 'Scheduler statusUpdate() took %.3f seconds'
    if duration > ScaleScheduler.NORMAL_WARN_THRESHOLD:
        logger.warning(msg, duration.total_seconds())
    else:
        logger.debug(msg, duration.total_seconds())
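# The routing above hinges on the task ID prefix: job tasks (IDs starting with
# JOB_TASK_ID_PREFIX) are handed to the job execution manager, everything else falls
# through to the node and system task managers, and the task manager is always updated
# first. A minimal, self-contained sketch of that dispatch order follows; TaskUpdate,
# route_task_update, the handler names, and the prefix value are hypothetical stand-ins
# for illustration, not Scale's actual API.
from dataclasses import dataclass
from typing import Callable, Dict


@dataclass
class TaskUpdate:
    """Hypothetical stand-in for the real TaskStatusUpdate object."""
    task_id: str
    status: str


def route_task_update(update: TaskUpdate,
                      task_handler: Callable[[TaskUpdate], None],
                      job_exe_handler: Callable[[TaskUpdate], None],
                      other_handlers: Dict[str, Callable[[TaskUpdate], None]],
                      job_task_prefix: str = 'scale_job') -> None:  # prefix value assumed
    # The task itself is always updated first, so the handlers below can assume the
    # task state already reflects this update
    task_handler(update)
    if update.task_id.startswith(job_task_prefix):
        # Job task: only the job execution handler sees it
        job_exe_handler(update)
    else:
        # Not a job task: node and system handlers both get a look
        for handler in other_handlers.values():
            handler(update)


# Example usage of the sketch
route_task_update(
    TaskUpdate(task_id='scale_job_1234_cluster', status='TASK_FINISHED'),
    task_handler=lambda u: print('task manager saw', u.task_id),
    job_exe_handler=lambda u: print('job execution manager saw', u.task_id),
    other_handlers={'node': lambda u: print('node manager saw', u.task_id),
                    'system': lambda u: print('system task manager saw', u.task_id)},
)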
def statusUpdate(self, driver, status):
    """
    Invoked when the status of a task has changed (e.g., a slave is lost and so the task is lost, a task
    finishes and an executor sends a status update saying so, etc.) Note that returning from this callback
    acknowledges receipt of this status update. If for whatever reason the scheduler aborts during this callback
    (or the process exits) another status update will be delivered. Note, however, that this is currently not
    true if the slave sending the status update is lost or fails during that time.

    See documentation for :meth:`mesos_api.mesos.Scheduler.statusUpdate`.
    """

    started = now()

    model = utils.create_task_update_model(status)
    mesos_status = model.status
    task_update = TaskStatusUpdate(model, utils.get_status_agent_id(status))
    task_id = task_update.task_id
    if mesos_status == 'TASK_LOST':
        logger.warning('Status update for task %s: %s', task_id, mesos_status)
    else:
        logger.info('Status update for task %s: %s', task_id, mesos_status)

    # Since we have a status update for this task, remove it from the reconciliation set
    recon_mgr.remove_task_id(task_id)

    # Hand off task update to be saved in the database
    task_update_mgr.add_task_update(model)

    if task_id.startswith(CLEANUP_TASK_ID_PREFIX):
        cleanup_mgr.handle_task_update(task_update)
    else:
        job_exe_id = JobExecution.get_job_exe_id(task_id)
        try:
            running_job_exe = running_job_mgr.get_job_exe(job_exe_id)
            if running_job_exe:
                running_job_exe.task_update(task_update)

                # Remove finished job execution
                if running_job_exe.is_finished():
                    running_job_mgr.remove_job_exe(job_exe_id)
                    cleanup_mgr.add_job_execution(running_job_exe)
            else:
                # Scheduler doesn't have any knowledge of this job execution
                Queue.objects.handle_job_failure(job_exe_id, now(), [],
                                                 Error.objects.get_builtin_error('scheduler-lost'))
        except Exception:
            logger.exception('Error handling status update for job execution: %s', job_exe_id)
            # Error handling status update, add task so it can be reconciled
            recon_mgr.add_task_ids([task_id])

    duration = now() - started
    msg = 'Scheduler statusUpdate() took %.3f seconds'
    if duration > ScaleScheduler.DATABASE_WARN_THRESHOLD:
        logger.warning(msg, duration.total_seconds())
    else:
        logger.debug(msg, duration.total_seconds())
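# Both versions of this callback share the same safety net: if handling a status update
# raises, the task ID goes back to the reconciliation manager so its true state can be
# recovered from Mesos later instead of the update being dropped. A minimal sketch of
# that pattern, assuming made-up names (reconciliation_queue, handle_update) rather
# than Scale's real managers:
import logging

sketch_logger = logging.getLogger(__name__)

reconciliation_queue = set()  # task IDs whose state will be re-queried from Mesos


def handle_update(task_id, apply_update):
    """Apply a status update; on any error, queue the task for reconciliation."""
    # A status update means Mesos just told us about this task, so stop reconciling it
    reconciliation_queue.discard(task_id)
    try:
        apply_update(task_id)
    except Exception:
        sketch_logger.exception('Error handling status update for task: %s', task_id)
        # Re-add the task so a later reconciliation pass can recover its true state
        reconciliation_queue.add(task_id)


# Example: a handler that fails puts the task back into the reconciliation set
def failing_handler(task_id):
    raise RuntimeError('boom')


handle_update('scale_job_1234', failing_handler)
assert 'scale_job_1234' in reconciliation_queue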