def handle_task_update(self, task_update):
    """Handles the given task update and returns the associated job execution if it has finished

    :param task_update: The task update
    :type task_update: :class:`job.tasks.update.TaskStatusUpdate`
    :returns: The job execution if it has finished, None otherwise
    :rtype: :class:`job.execution.job_exe.RunningJobExecution`
    """

    finished_job_exe = None
    if task_update.task_id.startswith(JOB_TASK_ID_PREFIX):
        cluster_id = JobExecution.parse_cluster_id(task_update.task_id)
        with self._lock:
            if cluster_id in self._running_job_exes:
                job_exe = self._running_job_exes[cluster_id]
                job_exe.task_update(task_update)
                if job_exe.is_finished():
                    self._handle_finished_job_exe(job_exe)
                    finished_job_exe = job_exe
                    # return job_exe

    # TODO: this can be removed once database operations move to messaging backend
    if finished_job_exe:
        self._handle_finished_job_exe_in_database(finished_job_exe)
        return finished_job_exe

    return None
def handle_task_timeout(self, task, when):
    """Handles the timeout of the given task

    :param task: The task
    :type task: :class:`job.tasks.base_task.Task`
    :param when: The time that the timeout occurred
    :type when: :class:`datetime.datetime`
    """

    if task.id.startswith(JOB_TASK_ID_PREFIX):
        cluster_id = JobExecution.parse_cluster_id(task.id)
        with self._lock:
            if cluster_id in self._running_job_exes:
                job_exe = self._running_job_exes[cluster_id]
                # We do not remove the failed job execution at this point. We wait for the status
                # update of the killed task to come back so that job execution cleanup occurs
                # after the task is dead.
                job_exe.execution_timed_out(task, when)
def handle_task_update(self, task_update):
    """Handles the given task update and returns the associated job execution if it has finished

    :param task_update: The task update
    :type task_update: :class:`job.tasks.update.TaskStatusUpdate`
    :returns: The job execution if it has finished, None otherwise
    :rtype: :class:`job.execution.job_exe.RunningJobExecution`
    """

    if task_update.task_id.startswith(JOB_TASK_ID_PREFIX):
        cluster_id = JobExecution.parse_cluster_id(task_update.task_id)
        with self._lock:
            if cluster_id in self._running_job_exes:
                job_exe = self._running_job_exes[cluster_id]
                job_exe.task_update(task_update)
                if job_exe.is_finished():
                    self._handle_finished_job_exe(job_exe)
                    return job_exe

    return None
def update(self, status):
    """
    Invoked when the status of a task has changed (e.g., a slave is lost and so the task is
    lost, a task finishes and an executor sends a status update saying so, etc.) Note that
    returning from this callback acknowledges receipt of this status update. If for whatever
    reason the scheduler aborts during this callback (or the process exits) another status
    update will be delivered. Note, however, that this is currently not true if the slave
    sending the status update is lost or fails during that time.
    """

    started = now()

    model = utils.create_task_update_model(status)
    mesos_status = model.status
    task_update = TaskStatusUpdate(model, utils.get_status_agent_id(status), utils.get_status_data(status))
    task_id = task_update.task_id
    was_task_finished = task_update.status in TaskStatusUpdate.TERMINAL_STATUSES
    was_job_finished = False

    if mesos_status == 'TASK_ERROR':
        logger.error('Status update for task %s: %s', task_id, mesos_status)
    elif mesos_status == 'TASK_LOST':
        logger.warning('Status update for task %s: %s', task_id, mesos_status)
    else:
        logger.info('Status update for task %s: %s', task_id, mesos_status)

    # Since we have a status update for this task, remove it from reconciliation set
    recon_mgr.remove_task_id(task_id)

    # Hand off task update to be saved in the database
    if task_id.startswith(JOB_TASK_ID_PREFIX):
        # Grab job execution ID from manager
        cluster_id = JobExecution.parse_cluster_id(task_id)
        job_exe = job_exe_mgr.get_running_job_exe(cluster_id)
        if job_exe:
            model.job_exe_id = job_exe.id
    task_update_mgr.add_task_update(model)

    # Update task with latest status
    # This should happen before the job execution or node manager are updated, since they will
    # assume that the task has already been updated
    task_mgr.handle_task_update(task_update)

    if task_id.startswith(JOB_TASK_ID_PREFIX):
        # Job task, so update the job execution
        try:
            job_exe = job_exe_mgr.handle_task_update(task_update)
            if job_exe and job_exe.is_finished():
                logger.info('job_exe with job id %s and node id %s is finished', job_exe.job_id, job_exe.node_id)
                was_job_finished = True
                cleanup_mgr.add_job_execution(job_exe)
                GPUManager.release_gpus(job_exe.node_id, job_exe.job_id)
        except Exception:
            cluster_id = JobExecution.parse_cluster_id(task_id)
            logger.exception('Error handling status update for job execution: %s', cluster_id)
            # Error handling status update, add task so it can be reconciled
            task = task_mgr.get_task(task_id)
            if task:
                recon_mgr.add_tasks([task])
    else:
        # Not a job task, so must be either a node or system task
        node_mgr.handle_task_update(task_update)
        system_task_mgr.handle_task_update(task_update)

    scheduler_mgr.add_task_update_counts(was_task_finished, was_job_finished)

    duration = now() - started
    msg = 'Scheduler statusUpdate() took %.3f seconds'
    if duration > ScaleScheduler.NORMAL_WARN_THRESHOLD:
        logger.warning(msg, duration.total_seconds())
    else:
        logger.debug(msg, duration.total_seconds())
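
# Illustration only, not part of the scheduler source: a minimal sketch of the routing
# decision that update() above applies to each incoming task update. Task IDs carrying the
# JOB_TASK_ID_PREFIX belong to job tasks and are resolved to a cluster ID for the job
# execution manager; everything else is handled by the node and system task managers.
# The helper name below is hypothetical.
def _route_task_update_sketch(task_id):
    if task_id.startswith(JOB_TASK_ID_PREFIX):
        # Job task: resolve the cluster ID that keys the running job execution
        return 'job', JobExecution.parse_cluster_id(task_id)
    # Node or system task: no cluster ID applies
    return 'node_or_system', None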