def __init__(self, id, root_task, job_dir, state, job_pool, job_options, journal=True):
    """Initialise full job bookkeeping: queues, counters, journalling and scheduling.

    Args:
        id: Unique job identifier (shadows the builtin by project convention).
        root_task: The task at the root of this job's task graph.
        job_dir: Directory for job artefacts; journal file lives here. May be None.
        state: Initial job state value.
        job_pool: Owning pool object.
        job_options: Dict of per-job options; may override `journal`, and may
            supply 'scheduler' and 'journal_sync_buffer'.
        journal: Default journalling flag, used when job_options has no 'journal'.
    """
    self.id = id
    self.root_task = root_task
    self.job_dir = job_dir
    self.job_pool = job_pool
    self.history = []
    self.state = state
    self.runnable_queue = Queue.Queue()
    self.global_queues = {}
    self.result_ref = None
    self.task_journal_fp = None
    self.job_options = job_options
    # Per-job option overrides the constructor default when present.
    self.journal = self.job_options.get('journal', journal)
    # Start journalling immediately to capture the root task.
    if self.journal and self.task_journal_fp is None and self.job_dir is not None:
        self.task_journal_fp = open(os.path.join(self.job_dir, 'task_journal'), 'wb')
    self._lock = Lock()
    self._condition = Condition(self._lock)
    # Counters for each task state. (Loop variable renamed so it no longer
    # shadows the `state` parameter.)
    self.task_state_counts = {}
    for task_state in TASK_STATES.values():
        self.task_state_counts[task_state] = 0
    self.all_tasks = RunningAverage()
    self.all_tasks_by_type = {}
    try:
        # KeyError may come from the missing 'scheduler' option OR from
        # get_scheduling_policy rejecting an unknown name — fall back either way,
        # so this must stay a try/except rather than a .get().
        self.scheduling_policy = get_scheduling_policy(self.job_options['scheduler'])
    except KeyError:
        self.scheduling_policy = LocalitySchedulingPolicy()
    self.journal_sync_buffer = self.job_options.get('journal_sync_buffer')
    self.journal_sync_counter = 0
    self.task_graph = JobTaskGraph(self, self.runnable_queue)
    self.workers = {}
def __init__(self, id, root_task, job_dir=None):
    """Create a minimal job record, starting life in the JOB_ACTIVE state.

    Args:
        id: Unique job identifier.
        root_task: The task at the root of this job's task graph.
        job_dir: Optional directory for job artefacts; defaults to None.
    """
    self.id = id
    self.root_task = root_task
    self.job_dir = job_dir
    self.state = JOB_ACTIVE
    self.result_ref = None
    self.task_journal_fp = None
    # One zeroed counter per known task state.
    self.task_state_counts = dict((s, 0) for s in TASK_STATES.values())
    self._lock = Lock()
    self._condition = Condition(self._lock)
def __init__(self, id, root_task, job_dir, state, job_pool, job_options):
    """Set up job state, statistics and scheduling, then announce the job
    to the pool's current workers.

    Args:
        id: Unique job identifier.
        root_task: The task at the root of this job's task graph.
        job_dir: Directory for job artefacts (may be None).
        state: Initial job state value.
        job_pool: Owning pool; its worker pool is notified at the end.
        job_options: Dict of per-job options (may supply 'scheduler').
    """
    # Identity and ownership.
    self.id = id
    self.root_task = root_task
    self.job_dir = job_dir
    self.job_pool = job_pool
    self.job_options = job_options
    self.state = state
    # Execution bookkeeping.
    self.history = []
    self.runnable_queue = Queue.Queue()
    self.global_queues = {}
    self.result_ref = None
    self.task_journal_fp = None
    self._lock = Lock()
    self._condition = Condition(self._lock)
    # Statistics: one zeroed counter per task state, plus running averages.
    self.task_state_counts = dict.fromkeys(TASK_STATES.values(), 0)
    self.all_tasks = RunningAverage()
    self.all_tasks_by_type = {}
    # Scheduling policy: fall back to locality scheduling if the option is
    # absent or the named policy cannot be resolved (both raise KeyError).
    try:
        policy_name = self.job_options['scheduler']
        self.scheduling_policy = get_scheduling_policy(policy_name)
    except KeyError:
        self.scheduling_policy = LocalitySchedulingPolicy()
    self.task_graph = JobTaskGraph(self, self.runnable_queue)
    self.workers = {}
    self.job_pool.worker_pool.notify_job_about_current_workers(self)
def __init__(self, id, root_task, job_dir, state, job_pool, job_options):
    """Initialise this job's queues, counters and scheduler, then let the
    pool's existing workers know about it.

    Args:
        id: Unique job identifier.
        root_task: Root task of the job's task graph.
        job_dir: Directory for job artefacts (may be None).
        state: Initial job state value.
        job_pool: Owning pool object.
        job_options: Dict of per-job options (may supply 'scheduler').
    """
    self.id = id
    self.root_task = root_task
    self.job_dir = job_dir
    self.job_pool = job_pool
    self.history = []
    self.state = state
    self.runnable_queue = Queue.Queue()
    self.global_queues = {}
    self.result_ref = None
    self.task_journal_fp = None
    self.job_options = job_options
    self._lock = Lock()
    self._condition = Condition(self._lock)
    # Zero a counter for every known task state.
    self.task_state_counts = {}
    for task_state in TASK_STATES.values():
        self.task_state_counts[task_state] = 0
    self.all_tasks = RunningAverage()
    self.all_tasks_by_type = {}
    # Resolve the requested scheduler; a KeyError (missing option or unknown
    # policy name) selects the locality-aware default instead.
    try:
        self.scheduling_policy = get_scheduling_policy(self.job_options['scheduler'])
    except KeyError:
        self.scheduling_policy = LocalitySchedulingPolicy()
    self.task_graph = JobTaskGraph(self, self.runnable_queue)
    self.workers = {}
    self.job_pool.worker_pool.notify_job_about_current_workers(self)