def default(self, job_id):
    """Render the full job-browser HTML page for the job with ID *job_id*.

    Shows identity, state, output ref, per-state task counts, and
    per-task-type duration statistics. Raises HTTPError(404) when the
    job ID is unknown to the pool.
    """
    try:
        job = self.job_pool.get_job_by_id(job_id)
    except KeyError:
        raise HTTPError(404)
    # Collect fragments in a list and join once: repeated += on a string
    # is quadratic in the number of rows.
    parts = ["<html><head><title>Job Browser</title></head>",
             "<body><table>",
             table_row("ID", job.id),
             table_row("Root task", task_link(job, job.root_task)),
             table_row("State", JOB_STATE_NAMES[job.state]),
             table_row("Output ref", ref_id_link(job, job.root_task.expected_outputs[0])),
             span_row("Task states")]
    for name, state in TASK_STATES.items():
        # A state the job has never entered may be absent from the counts
        # map; report it as zero (replaces the per-row try/except KeyError).
        parts.append(table_row("Tasks " + name, job.task_state_counts.get(state, 0)))
    parts.append(span_row("Task type/duration", 5))
    parts.append(table_row("*",
                           str(job.all_tasks.get()),
                           str(job.all_tasks.min),
                           str(job.all_tasks.max),
                           str(job.all_tasks.count)))
    # 'task_type' rather than 'type': don't shadow the builtin.
    for task_type, avg in job.all_tasks_by_type.items():
        parts.append(table_row(task_type,
                               str(avg.get()),
                               str(avg.min),
                               str(avg.max),
                               str(avg.count)))
    parts.append("</table></body></html>")
    return "".join(parts)
def __init__(self, id, root_task, job_dir, state, job_pool, job_options, journal=True):
    """Initialise a journalling job: identity, queues, per-state counters,
    duration statistics, scheduling policy and task graph.

    The 'journal' keyword default may be overridden by a 'journal' entry
    in job_options; journalling starts immediately so the root task is
    captured in the journal file.
    """
    self.id = id
    self.root_task = root_task
    self.job_dir = job_dir
    self.job_pool = job_pool
    self.history = []
    self.state = state
    self.runnable_queue = Queue.Queue()
    self.global_queues = {}
    self.result_ref = None
    self.task_journal_fp = None
    self.job_options = job_options
    # Plain dict lookup with a fallback: replaces try/except KeyError: pass.
    self.journal = self.job_options.get('journal', journal)
    # Start journalling immediately to capture the root task.
    # (The original also tested task_journal_fp is None, but it was set to
    # None a few lines above, so that condition was always true.)
    if self.journal and self.job_dir is not None:
        self.task_journal_fp = open(os.path.join(self.job_dir, 'task_journal'), 'wb')
    self._lock = Lock()
    self._condition = Condition(self._lock)
    # One zeroed counter per task state. A fresh loop name ('task_state')
    # avoids rebinding the 'state' parameter as the original loop did.
    self.task_state_counts = dict((task_state, 0) for task_state in TASK_STATES.values())
    self.all_tasks = RunningAverage()
    self.all_tasks_by_type = {}
    # Kept as broad EAFP on purpose: get_scheduling_policy may itself raise
    # KeyError for an unknown policy name, and the original semantics fall
    # back to locality-aware scheduling in either case.
    try:
        self.scheduling_policy = get_scheduling_policy(self.job_options['scheduler'])
    except KeyError:
        self.scheduling_policy = LocalitySchedulingPolicy()
    # None means "no sync buffering configured".
    self.journal_sync_buffer = self.job_options.get('journal_sync_buffer')
    self.journal_sync_counter = 0
    self.task_graph = JobTaskGraph(self, self.runnable_queue)
    self.workers = {}
def as_descriptor(self):
    """Return a plain-dict summary of this job (ID, state, root task,
    expected outputs, result ref, and per-state task counts)."""
    root = self.root_task
    counts = {}
    descriptor = {'job_id': self.id,
                  'task_counts': counts,
                  'state': JOB_STATE_NAMES[self.state],
                  'root_task': None if root is None else root.task_id,
                  'expected_outputs': None if root is None else root.expected_outputs,
                  'result_ref': self.result_ref}
    # Snapshot the per-state counters under the lock so they are
    # mutually consistent.
    with self._lock:
        for state_name, state_index in TASK_STATES.items():
            counts[state_name] = self.task_state_counts[state_index]
    return descriptor
def __init__(self, id, root_task, job_dir=None):
    """Minimal job record: identity, root task, active state, and
    zeroed per-state task counters."""
    self.id = id
    self.root_task = root_task
    self.job_dir = job_dir
    self.state = JOB_ACTIVE
    self.result_ref = None
    self.task_journal_fp = None
    # One zeroed counter per task state.
    self.task_state_counts = dict.fromkeys(TASK_STATES.values(), 0)
    self._lock = Lock()
    self._condition = Condition(self._lock)
def __init__(self, id, root_task, job_dir, state, job_pool, job_options):
    """Set up a job's identity, queues, counters, duration statistics,
    scheduling policy and task graph, then tell it about current workers."""
    self.id = id
    self.root_task = root_task
    self.job_dir = job_dir
    self.job_pool = job_pool
    self.history = []
    self.state = state
    self.runnable_queue = Queue.Queue()
    self.global_queues = {}
    self.result_ref = None
    self.task_journal_fp = None
    self.job_options = job_options

    self._lock = Lock()
    self._condition = Condition(self._lock)

    # One zeroed counter per task state.
    self.task_state_counts = dict.fromkeys(TASK_STATES.values(), 0)

    self.all_tasks = RunningAverage()
    self.all_tasks_by_type = {}

    # Use the scheduler named in job_options; fall back to locality-aware
    # scheduling if none (or an unknown one) is configured.
    try:
        self.scheduling_policy = get_scheduling_policy(self.job_options['scheduler'])
    except KeyError:
        self.scheduling_policy = LocalitySchedulingPolicy()

    self.task_graph = JobTaskGraph(self, self.runnable_queue)
    self.workers = {}
    self.job_pool.worker_pool.notify_job_about_current_workers(self)
def default(self, job_id):
    """Render a minimal HTML status page for job *job_id*.

    Raises HTTPError(404) when the job ID is unknown.
    """
    try:
        job = self.job_pool.get_job_by_id(job_id)
    except KeyError:
        raise HTTPError(404)
    rows = ['<html><head><title>Job Browser</title></head>',
            '<body><table>',
            table_row('ID', job.id),
            table_row('Root task', task_link(job.root_task)),
            table_row('State', JOB_STATE_NAMES[job.state]),
            table_row('Output ref', ref_id_link(job.root_task.expected_outputs[0])),
            span_row('Task states')]
    for state_name, state_index in TASK_STATES.items():
        try:
            count = job.task_state_counts[state_index]
        except KeyError:
            count = 0
        rows.append(table_row('Tasks ' + state_name, count))
    rows.append('</table></body></html>')
    return ''.join(rows)
def __init__(self, id, root_task, job_dir, state, job_pool, job_options):
    """Initialise a job: identity, run queue, per-state counters, duration
    statistics, scheduling policy and task graph; finally asks the worker
    pool to notify this job about already-connected workers."""
    self.id = id
    self.root_task = root_task
    self.job_dir = job_dir
    self.job_pool = job_pool
    self.history = []
    self.state = state
    # Queue of runnable tasks; shared with the task graph constructed below.
    self.runnable_queue = Queue.Queue()
    self.global_queues = {}
    self.result_ref = None
    # File handle for the task journal; not opened here.
    self.task_journal_fp = None
    self.job_options = job_options
    self._lock = Lock()
    self._condition = Condition(self._lock)
    # Counters for each task state.
    self.task_state_counts = {}
    # NOTE(review): this loop rebinds the 'state' parameter; harmless here
    # because the parameter was already stored in self.state above.
    for state in TASK_STATES.values():
        self.task_state_counts[state] = 0
    self.all_tasks = RunningAverage()
    self.all_tasks_by_type = {}
    # Use the scheduler named in job_options; fall back to locality-aware
    # scheduling when the option is absent (or the lookup raises KeyError).
    try:
        self.scheduling_policy = get_scheduling_policy(self.job_options['scheduler'])
    except KeyError:
        self.scheduling_policy = LocalitySchedulingPolicy()
    self.task_graph = JobTaskGraph(self, self.runnable_queue)
    self.workers = {}
    # Learn about workers that joined the pool before this job was created.
    self.job_pool.worker_pool.notify_job_about_current_workers(self)
def as_descriptor(self):
    """Summarise this job as a plain dict suitable for serialisation."""
    has_root = self.root_task is not None
    task_counts = {}
    summary = {
        'job_id': self.id,
        'task_counts': task_counts,
        'state': JOB_STATE_NAMES[self.state],
        'root_task': self.root_task.task_id if has_root else None,
        'expected_outputs': self.root_task.expected_outputs if has_root else None,
        'result_ref': self.result_ref,
    }
    # Copy the counters while holding the lock so the snapshot is consistent.
    with self._lock:
        for label, index in TASK_STATES.items():
            task_counts[label] = self.task_state_counts[index]
    return summary
def default(self, job_id):
    """Render the job-browser page (via header()) for job *job_id*.

    Shows identity, state, output ref, per-state task counts, and
    per-task-type duration statistics. Raises HTTPError(404) when the
    job ID is unknown.
    """
    try:
        job = self.job_pool.get_job_by_id(job_id)
    except KeyError:
        raise HTTPError(404)
    # Accumulate fragments and join once instead of quadratic += growth.
    parts = [header('Job Browser', job_id),
             '<table>',
             table_row('ID', job.id),
             table_row('Root task', task_link(job, job.root_task)),
             table_row('State', JOB_STATE_NAMES[job.state]),
             table_row('Output ref', ref_id_link(job, job.root_task.expected_outputs[0])),
             span_row('Task states')]
    for name, state in TASK_STATES.items():
        # States the job never entered are reported as zero
        # (replaces the per-row try/except KeyError).
        parts.append(table_row('Tasks ' + name, job.task_state_counts.get(state, 0)))
    parts.append(span_row('Task type/duration', 5))
    parts.append(table_row('*',
                           str(job.all_tasks.get()),
                           str(job.all_tasks.min),
                           str(job.all_tasks.max),
                           str(job.all_tasks.count)))
    # 'task_type' rather than 'type': don't shadow the builtin.
    for task_type, avg in job.all_tasks_by_type.items():
        parts.append(table_row(task_type,
                               str(avg.get()),
                               str(avg.min),
                               str(avg.max),
                               str(avg.count)))
    parts.append('</table></body></html>')
    return ''.join(parts)