def _add_prerequisites(self, point, tdef):
    """Add task prerequisites for this instance at ``point``.

    Args:
        point: cycle point of this task instance.
        tdef: task definition supplying dependencies and sequences.
    """
    # Reset cached satisfaction results; they must be recomputed after
    # the prerequisite lists change.
    self._is_satisfied = None
    self._suicide_is_satisfied = None
    # (Removed an unused local "identity = TaskID.get(...)" here - it
    # was computed but never referenced.)
    # Triggers for sequence_i only used if my cycle point is a
    # valid member of sequence_i's sequence of cycle points.
    for sequence, dependencies in tdef.dependencies.items():
        if not sequence.is_valid(point):
            continue
        for dependency in dependencies:
            cpre = dependency.get_prerequisite(point, tdef)
            if dependency.suicide:
                self.suicide_prerequisites.append(cpre)
            else:
                self.prerequisites.append(cpre)

    if tdef.sequential:
        # Add a previous-instance succeeded prerequisite.
        p_prev = None
        adjusted = []
        for seq in tdef.sequences:
            prv = seq.get_nearest_prev_point(point)
            if prv:
                # None if out of sequence bounds.
                adjusted.append(prv)
        if adjusted:
            p_prev = max(adjusted)
        # NOTE(review): if no sequence yields a previous point, p_prev
        # stays None and "p_prev < tdef.start_point" relies on None
        # comparing low (Python 2 semantics) -- TODO confirm intent.
        cpre = Prerequisite(point, tdef.start_point)
        prereq = "%s %s" % (TaskID.get(tdef.name, p_prev),
                            TASK_STATUS_SUCCEEDED)
        cpre.add(prereq, p_prev < tdef.start_point)
        cpre.set_condition(tdef.name)
        self.prerequisites.append(cpre)
def get_prereq(self, point):
    """Return a prerequisite string for this trigger at ``point``."""
    if self.message:
        # Message trigger.
        preq = self.message
        msg_point = point
        if self.cycle_point:
            # A fixed cycle point overrides any offsets.
            point = self.cycle_point
            msg_point = self.cycle_point
        else:
            if self.message_offset:
                msg_point = point + self.message_offset
            if self.graph_offset_string:
                msg_point = get_point_relative(
                    self.graph_offset_string, msg_point)
                point = get_point_relative(self.graph_offset_string, point)
        # Raw string: '\[' is an invalid escape in a plain string
        # literal (deprecated since Python 3.6).
        preq = "%s %s" % (
            TaskID.get(self.task_name, point),
            re.sub(r'\[.*\]', str(msg_point), preq))
    else:
        # Built-in trigger (e.g. succeeded).
        if self.cycle_point:
            point = self.cycle_point
        elif self.graph_offset_string:
            point = get_point_relative(self.graph_offset_string, point)
        preq = TaskID.get(self.task_name, point) + ' ' + self.builtin
    return preq
def get_prereq(self, point):
    """Return a prerequisite string for this trigger at ``point``."""
    if self.message:
        # Message trigger.
        preq = self.message
        msg_point = point
        if self.cycle_point:
            # An explicit cycle point takes precedence over offsets.
            point = self.cycle_point
            msg_point = self.cycle_point
        else:
            if self.message_offset:
                msg_point = point + self.message_offset
            if self.graph_offset_string:
                msg_point = get_point_relative(
                    self.graph_offset_string, msg_point)
                point = get_point_relative(self.graph_offset_string, point)
        # Raw string fixes the invalid '\[' escape sequence
        # (deprecated since Python 3.6).
        preq = "%s %s" % (
            TaskID.get(self.task_name, point),
            re.sub(r'\[.*\]', str(msg_point), preq))
    else:
        # Built-in trigger.
        if self.cycle_point:
            point = self.cycle_point
        elif self.graph_offset_string:
            point = get_point_relative(
                self.graph_offset_string, point)
        preq = TaskID.get(self.task_name, point) + ' ' + self.builtin
    return preq
def on_query_tooltip(self, widget, x, y, kbd_ctx, tooltip):
    """Handle a tooltip creation request."""
    # Returns True when a tooltip is set, False to suppress it.
    tip_context = self.led_treeview.get_tooltip_context(x, y, kbd_ctx)
    if tip_context is None:
        # Pointer is not over the tree view content.
        self._prev_tooltip_task_id = None
        return False
    x, y = self.led_treeview.convert_widget_to_bin_window_coords(x, y)
    path, column = self.led_treeview.get_path_at_pos(x, y)[0:2]
    col_index = self.led_treeview.get_columns().index(column)
    if not self.is_transposed:
        # Regular mode: rows are task names, columns are cycle points.
        iter_ = self.led_treeview.get_model().get_iter(path)
        name = self.led_treeview.get_model().get_value(iter_, 0)
        try:
            point_string = self.led_headings[col_index]
        except IndexError:
            # This can occur for a tooltip while switching from transposed.
            return False
        if col_index == 0:
            # Name column: the id is just the task name.
            task_id = name
        else:
            task_id = TaskID.get(name, point_string)
    else:
        # Transposed mode: rows are cycle points, columns are tasks.
        try:
            point_string = self.point_strings[path[0]]
        except IndexError:
            return False
        if col_index == 0:
            # First column holds the cycle point itself.
            task_id = point_string
        else:
            try:
                name = self.led_headings[col_index]
            except IndexError:
                return False
            task_id = TaskID.get(name, point_string)
    if task_id != self._prev_tooltip_task_id:
        # Crossed a cell boundary: clear and wait for the next query.
        self._prev_tooltip_task_id = task_id
        tooltip.set_text(None)
        return False
    if col_index == 0:
        tooltip.set_text(task_id)
        return True
    text = get_id_summary(
        task_id, self.state_summary, self.fam_state_summary,
        self.descendants)
    if text == task_id:
        # No extra information available beyond the id itself.
        return False
    tooltip.set_text(text)
    return True
def __init__(self, tdef, point, status, hold_swap):
    """Initialise the state of one task instance."""
    self.identity = TaskID.get(tdef.name, str(point))
    self.status = status
    self.hold_swap = hold_swap
    self.time_updated = None

    # Cached prerequisite satisfaction; None means "recompute".
    self._is_satisfied = None
    self._suicide_is_satisfied = None

    # Prerequisites.
    self.prerequisites = []
    self.suicide_prerequisites = []
    self._add_prerequisites(point, tdef)

    # External triggers, all initially unsatisfied.  Cycle-point-specific
    # external triggers are supported via substitution - GitHub #1893.
    self.external_triggers = {
        ext.replace('$CYLC_TASK_CYCLE_POINT', str(point)): False
        for ext in tdef.external_triggers}

    # xtriggers (represented by labels) satisfied or not
    self.xtriggers = dict.fromkeys(tdef.xtrig_labels, False)
    self.xclock = (
        (tdef.xclock_label, False) if tdef.xclock_label else None)

    # Message outputs.
    self.outputs = TaskOutputs(tdef)
    self.kill_failed = False
def on_query_tooltip(self, widget, x, y, kbd_ctx, tooltip):
    """Handle a tooltip creation request."""
    # Returns True when a tooltip is set, False to suppress it.
    tip_context = self.ttreeview.get_tooltip_context(x, y, kbd_ctx)
    if tip_context is None:
        # Pointer is not over the tree view content.
        self._prev_tooltip_task_id = None
        return False
    x, y = self.ttreeview.convert_widget_to_bin_window_coords(x, y)
    path = self.ttreeview.get_path_at_pos(x, y)[0]
    if not path:
        return False
    model = self.ttreeview.get_model()
    # Column 0 holds the cycle point, column 1 the task/family name.
    point_string = model.get_value(model.get_iter(path), 0)
    name = model.get_value(model.get_iter(path), 1)
    if point_string == name:
        # We are hovering over a cycle point row.
        task_id = point_string
    else:
        # We are hovering over a task or family row.
        task_id = TaskID.get(name, point_string)
    if task_id != self._prev_tooltip_task_id:
        # Clear tooltip when crossing row boundaries.
        self._prev_tooltip_task_id = task_id
        tooltip.set_text(None)
        return False
    text = get_id_summary(
        task_id, self.state_summary, self.fam_state_summary,
        self.descendants)
    if text == task_id:
        # No extra information to show.
        return False
    tooltip.set_text(text)
    return True
def __init__(self, tdef, point, status, hold_swap):
    """Initialise the state of one task instance."""
    self.identity = TaskID.get(tdef.name, str(point))
    self.status = status
    self.hold_swap = hold_swap
    self.time_updated = None
    self._is_satisfied = None
    self._suicide_is_satisfied = None

    # Prerequisites.
    self.prerequisites = []
    self.suicide_prerequisites = []
    self._add_prerequisites(point, tdef)

    # External triggers start unsatisfied.  Cycle-point-specific
    # triggers are allowed via substitution - GitHub #1893.
    self.external_triggers = {
        ext.replace('$CYLC_TASK_CYCLE_POINT', str(point)): False
        for ext in tdef.external_triggers}

    # Message outputs, plus the three standard outputs.
    self.outputs = TaskOutputs(tdef, point)
    for output in (TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_STARTED,
                   TASK_OUTPUT_SUCCEEDED):
        self.outputs.add(output)
    self.kill_failed = False
def __init__(self, tdef, point, status, hold_swap):
    """Initialise the state of one task instance."""
    self.identity = TaskID.get(tdef.name, str(point))
    self.status = status
    self.hold_swap = hold_swap
    self.time_updated = None
    self._is_satisfied = None
    self._suicide_is_satisfied = None

    # Prerequisites.
    self.prerequisites = []
    self.suicide_prerequisites = []
    self._add_prerequisites(point, tdef)

    # External triggers start unsatisfied.  Cycle-point-specific
    # triggers are allowed via substitution - GitHub #1893.
    self.external_triggers = {
        ext.replace('$CYLC_TASK_CYCLE_POINT', str(point)): False
        for ext in tdef.external_triggers}

    # Message outputs, plus the three standard outputs.
    self.outputs = TaskOutputs(tdef)
    for output in (TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_STARTED,
                   TASK_OUTPUT_SUCCEEDED):
        self.outputs.add(output)
    self.kill_failed = False
    self.confirming_with_poll = False
def _add_prerequisites(self, point, identity, tdef):
    """Add task prerequisites for this instance at ``point``.

    self.triggers[sequence] = [triggers for sequence]
    Triggers for sequence_i only used if my cycle point is a
    valid member of sequence_i's sequence of cycle points.
    """
    self._is_satisfied = None
    self._suicide_is_satisfied = None
    for sequence, exps in tdef.triggers.items():
        for ctrig, exp in exps:
            # list(ctrig)[0] works on Python 2 and 3; the original
            # ctrig.keys()[0] fails on Python 3 (views don't index).
            key = list(ctrig)[0]
            if not sequence.is_valid(point):
                # This trigger is not valid for current cycle (see NOTE
                # just above)
                continue
            cpre = Prerequisite(identity, point, tdef.start_point)
            for label in ctrig:
                trig = ctrig[label]
                if trig.graph_offset_string is not None:
                    prereq_offset_point = get_point_relative(
                        trig.graph_offset_string, point)
                    if prereq_offset_point > point:
                        # Track the widest future offset for the pool.
                        prereq_offset = prereq_offset_point - point
                        if (tdef.max_future_prereq_offset is None or
                                prereq_offset >
                                tdef.max_future_prereq_offset):
                            tdef.max_future_prereq_offset = prereq_offset
                    # Logical "and" replaces the bitwise "&" of the
                    # original (identical on bools, clearer intent).
                    cpre.add(trig.get_prereq(point), label,
                             (prereq_offset_point < tdef.start_point and
                              point >= tdef.start_point))
                else:
                    cpre.add(trig.get_prereq(point), label)
            cpre.set_condition(exp)
            if ctrig[key].suicide:
                self.suicide_prerequisites.append(cpre)
            else:
                self.prerequisites.append(cpre)

    if tdef.sequential:
        # Add a previous-instance succeeded prerequisite.
        p_prev = None
        adjusted = []
        for seq in tdef.sequences:
            prv = seq.get_nearest_prev_point(point)
            if prv:
                # None if out of sequence bounds.
                adjusted.append(prv)
        if adjusted:
            p_prev = max(adjusted)
        cpre = Prerequisite(identity, point, tdef.start_point)
        prereq = "%s %s" % (TaskID.get(tdef.name, p_prev),
                            TASK_STATUS_SUCCEEDED)
        label = tdef.name
        cpre.add(prereq, label, p_prev < tdef.start_point)
        cpre.set_condition(label)
        self.prerequisites.append(cpre)
def _update_gui_regular(self, tasks_by_name, state_summary):
    """Logic for updating the gui in regular mode."""
    # One row per task/family, one "dot" cell per cycle point.
    children = []
    to_unfold = []
    parent_iter = None
    for name in self.task_list:
        point_strings_for_tasks = tasks_by_name.get(name, [])
        if not point_strings_for_tasks:
            continue
        # Build the row of dot icons for this task across all points.
        state_list = []
        for point_string in self.point_strings:
            if point_string in point_strings_for_tasks:
                task_id = TaskID.get(name, point_string)
                state = state_summary[task_id]['state']
                if task_id in self.fam_state_summary:
                    dot_type = 'family'
                else:
                    dot_type = 'task'
                state_list.append(self.dots[dot_type][state])
            else:
                # Task does not exist at this cycle point.
                state_list.append(self.dots['task']['empty'])
        try:
            if name in self.family_tree:
                # Task is a family.
                self.led_treestore.append(
                    None, row=[name] + state_list)
                children = self.family_tree[name]
                # Get iter for this family's entry.
                iter_ = self.led_treestore.get_iter_first()
                temp = self.led_treestore.get_value(
                    iter_, 0)
                while temp != name:
                    # Linear scan of top-level rows for the family row.
                    iter_ = self.led_treestore.iter_next(iter_)
                    temp = self.led_treestore.get_value(iter_, 0)
                parent_iter = iter_
                # Unfold if family was folded before update
                if name in self.expanded_rows:
                    to_unfold.append(
                        self.led_treestore.get_path(iter_))
            elif name in children:
                # Task belongs to a family.
                self.led_treestore.append(
                    parent_iter, row=[name] + state_list)
            else:
                # Task does not belong to a family.
                self.led_treestore.append(
                    None, row=[name] + state_list)
        except ValueError:
            # A very laggy store can change the columns and raise this.
            return False
    # Unfold any rows that were unfolded before the update.
    for path in to_unfold:
        self.led_treeview.expand_row(path, True)
def _add_prerequisites(self, point, identity, tdef):
    """Add task prerequisites for this instance at ``point``.

    self.triggers[sequence] = [triggers for sequence]
    Triggers for sequence_i only used if my cycle point is a
    valid member of sequence_i's sequence of cycle points.
    """
    # Force satisfaction state to be recalculated.
    self._recalc_satisfied = True
    for sequence, exps in tdef.triggers.items():
        for ctrig, exp in exps:
            # list(ctrig)[0] is Python 2/3 compatible; the original
            # ctrig.keys()[0] breaks on Python 3 (views don't index).
            key = list(ctrig)[0]
            if not sequence.is_valid(point):
                # This trigger is not valid for current cycle (see NOTE
                # just above)
                continue
            cpre = Prerequisite(identity, point, tdef.start_point)
            for label in ctrig:
                trig = ctrig[label]
                if trig.graph_offset_string is not None:
                    prereq_offset_point = get_point_relative(
                        trig.graph_offset_string, point)
                    if prereq_offset_point > point:
                        # Track the widest future offset for the pool.
                        prereq_offset = prereq_offset_point - point
                        if (tdef.max_future_prereq_offset is None or
                                prereq_offset >
                                tdef.max_future_prereq_offset):
                            tdef.max_future_prereq_offset = prereq_offset
                    # Logical "and" replaces the bitwise "&" of the
                    # original (identical on bools, clearer intent).
                    cpre.add(trig.get_prereq(point), label,
                             (prereq_offset_point < tdef.start_point and
                              point >= tdef.start_point))
                else:
                    cpre.add(trig.get_prereq(point), label)
            cpre.set_condition(exp)
            if ctrig[key].suicide:
                self.suicide_prerequisites.append(cpre)
            else:
                self.prerequisites.append(cpre)

    if tdef.sequential:
        # Add a previous-instance succeeded prerequisite.
        p_prev = None
        adjusted = []
        for seq in tdef.sequences:
            prv = seq.get_nearest_prev_point(point)
            if prv:
                # None if out of sequence bounds.
                adjusted.append(prv)
        if adjusted:
            p_prev = max(adjusted)
        cpre = Prerequisite(identity, point, tdef.start_point)
        prereq = "%s %s" % (TaskID.get(tdef.name, p_prev),
                            TASK_STATUS_SUCCEEDED)
        label = tdef.name
        cpre.add(prereq, label, p_prev < tdef.start_point)
        cpre.set_condition(label)
        self.prerequisites.append(cpre)
def get_right(self, inpoint, start_point):
    """Return the task ID of the right side at ``inpoint``.

    Returns None if there is no right side.
    """
    if self.right is None:
        return None
    # Strip off special outputs (e.g. ":succeed").  Use a local rather
    # than assigning back to self.right: the original mutated the
    # attribute as a side effect of this getter.  Raw string fixes the
    # invalid '\w' escape.
    right = re.sub(r':\w+', '', self.right)
    return TaskID.get(right, str(inpoint))
def job_poll(self, st_file_path):
    """Poll status of the job specified in the "st_file_path".

    Return a status string that can be recognised by the suite.
    """
    # SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status
    st_file_path_strs = st_file_path.rsplit(os.sep, 6)
    task_id = TaskID.get(st_file_path_strs[4], st_file_path_strs[3])
    self.configure_suite_run_dir(st_file_path_strs[0])

    # Parse KEY=VALUE lines from the job status file.  "with" ensures
    # the handle is closed (the original leaked it).
    statuses = {}
    try:
        with open(st_file_path) as handle:
            for line in handle:
                key, value = line.strip().split("=", 1)
                statuses[key] = value
    except IOError:
        return "polled %s submission failed\n" % (task_id)

    if (statuses.get("CYLC_JOB_EXIT_TIME") and
            statuses.get("CYLC_JOB_EXIT") == "SUCCEEDED"):
        return "polled %s succeeded at %s\n" % (
            task_id, statuses["CYLC_JOB_EXIT_TIME"])
    if (statuses.get("CYLC_JOB_EXIT_TIME") and
            statuses.get("CYLC_JOB_EXIT")):
        return "polled %s failed at %s\n" % (
            task_id, statuses["CYLC_JOB_EXIT_TIME"])
    if (self.CYLC_BATCH_SYS_NAME not in statuses or
            self.CYLC_BATCH_SYS_JOB_ID not in statuses):
        return "polled %s submission failed\n" % (task_id)

    # Ask batch system if job is still alive or not
    batch_sys = self.get_inst(statuses[self.CYLC_BATCH_SYS_NAME])
    job_id = statuses[self.CYLC_BATCH_SYS_JOB_ID]
    proc = Popen(
        shlex.split(batch_sys.POLL_CMD_TMPL % {"job_id": job_id}),
        stdout=PIPE)
    is_in_batch_sys = (proc.wait() == 0)
    if is_in_batch_sys and hasattr(batch_sys, "filter_poll_output"):
        is_in_batch_sys = batch_sys.filter_poll_output(
            proc.communicate()[0], job_id)

    if is_in_batch_sys and "CYLC_JOB_INIT_TIME" in statuses:
        return "polled %s started at %s\n" % (
            task_id, statuses["CYLC_JOB_INIT_TIME"])
    if is_in_batch_sys:
        return "polled %s submitted\n" % (task_id)
    if "CYLC_JOB_INIT_TIME" in statuses:
        return "polled %s failed at unknown-time\n" % (task_id)
    # Submitted but disappeared
    return "polled %s submission failed\n" % (task_id)
def job_poll(self, st_file_path):
    """Poll status of the job specified in the "st_file_path".

    Return a status string that can be recognised by the suite.
    """
    # SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status
    st_file_path_strs = st_file_path.rsplit(os.sep, 6)
    task_id = TaskID.get(st_file_path_strs[4], st_file_path_strs[3])
    self.configure_suite_run_dir(st_file_path_strs[0])

    # Parse KEY=VALUE lines from the job status file.  "with" ensures
    # the handle is closed (the original leaked it).
    statuses = {}
    try:
        with open(st_file_path) as handle:
            for line in handle:
                key, value = line.strip().split("=", 1)
                statuses[key] = value
    except IOError:
        return "polled %s submission failed\n" % (task_id)

    if (statuses.get(TaskMessage.CYLC_JOB_EXIT_TIME) and
            statuses.get(TaskMessage.CYLC_JOB_EXIT) == "SUCCEEDED"):
        return "polled %s succeeded at %s\n" % (
            task_id, statuses[TaskMessage.CYLC_JOB_EXIT_TIME])
    if (statuses.get(TaskMessage.CYLC_JOB_EXIT_TIME) and
            statuses.get(TaskMessage.CYLC_JOB_EXIT)):
        return "polled %s failed at %s\n" % (
            task_id, statuses[TaskMessage.CYLC_JOB_EXIT_TIME])
    if (self.CYLC_BATCH_SYS_NAME not in statuses or
            self.CYLC_BATCH_SYS_JOB_ID not in statuses):
        return "polled %s submission failed\n" % (task_id)

    # Ask batch system if job is still alive or not
    batch_sys = self.get_inst(statuses[self.CYLC_BATCH_SYS_NAME])
    job_id = statuses[self.CYLC_BATCH_SYS_JOB_ID]
    command = shlex.split(batch_sys.POLL_CMD_TMPL % {"job_id": job_id})
    try:
        proc = Popen(command, stdout=PIPE)
    except OSError as exc:
        # subprocess.Popen has a bad habit of not setting the filename
        # of the executable when it raises an OSError.
        if not exc.filename:
            exc.filename = command[0]
        raise
    is_in_batch_sys = proc.wait() == 0
    if is_in_batch_sys and hasattr(batch_sys, "filter_poll_output"):
        is_in_batch_sys = batch_sys.filter_poll_output(
            proc.communicate()[0], job_id)

    if is_in_batch_sys and TaskMessage.CYLC_JOB_INIT_TIME in statuses:
        return "polled %s started at %s\n" % (
            task_id, statuses[TaskMessage.CYLC_JOB_INIT_TIME])
    if is_in_batch_sys:
        return "polled %s submitted\n" % (task_id)
    if TaskMessage.CYLC_JOB_INIT_TIME in statuses:
        return "polled %s failed at unknown-time\n" % (task_id)
    # Submitted but disappeared
    return "polled %s submission failed\n" % (task_id)
def filter_families(self, families):
    """Remove family summaries if no members are present."""
    fam_states = {}
    for fam_id, summary in families.items():
        name, point_string = TaskID.split(fam_id)
        # Keep this family only if any member task appears in the
        # current state summary.
        members_present = any(
            TaskID.get(mem, point_string) in self.state_summary
            for mem in self.descendants[name])
        if members_present:
            fam_states[fam_id] = summary
    return fam_states
def on_treeview_button_pressed(self, treeview, event):
    """Pop up the right-click menu for the clicked task or family row."""
    # DISPLAY MENU ONLY ON RIGHT CLICK ONLY
    if event.button != 3:
        return False
    # the following sets selection to the position at which the
    # right click was done (otherwise selection lags behind the
    # right click):
    x = int(event.x)
    y = int(event.y)
    pth = treeview.get_path_at_pos(x, y)
    if pth is None:
        return False
    treeview.grab_focus()
    path, col, cellx, celly = pth
    treeview.set_cursor(path, col, 0)
    selection = treeview.get_selection()
    # "iter_" avoids shadowing the builtin iter(); also dropped the
    # unused local "time" which shadowed the stdlib module name.
    treemodel, iter_ = selection.get_selected()
    point_string = treemodel.get_value(iter_, 0)
    name = treemodel.get_value(iter_, 1)
    if point_string == name:
        # must have clicked on the top level point_string
        return
    task_id = TaskID.get(name, point_string)
    is_fam = (name in self.t.descendants)
    menu = self.get_right_click_menu(task_id, task_is_family=is_fam)
    sep = gtk.SeparatorMenuItem()
    sep.show()
    menu.append(sep)
    group_item = gtk.CheckMenuItem('Toggle Family Grouping')
    group_item.set_active(self.t.should_group_families)
    menu.append(group_item)
    group_item.connect('toggled', self.toggle_grouping)
    group_item.show()
    menu.popup(None, None, None, event.button, event.time)
    # TODO - popup menus are not automatically destroyed and can be
    # reused if saved; however, we need to reconstruct or at least
    # alter ours dynamically => should destroy after each use to
    # prevent a memory leak? But I'm not sure how to do this as yet.)
    return True
def get_id_summary(id_, task_state_summary, fam_state_summary, id_family_map):
    """Return some state information about a task or family id.

    Args:
        id_: task or family id string.
        task_state_summary: map of task id to state-info dicts.
        fam_state_summary: map of family id to state-info dicts.
        id_family_map: map of family name to child member names.

    Returns:
        A multi-line summary string, or ``id_`` itself if there is
        nothing to report.
    """
    prefix_text = ""
    meta_text = ""
    sub_text = ""
    sub_states = {}
    stack = [(id_, 0)]
    done_ids = []
    # Collect title/description metadata for the id itself, if any.
    for summary in [task_state_summary, fam_state_summary]:
        if id_ in summary:
            title = summary[id_].get('title')
            if title:
                meta_text += "\n" + title.strip()
            description = summary[id_].get('description')
            if description:
                meta_text += "\n" + description.strip()
    # Depth-first walk of the family tree rooted at id_.
    while stack:
        this_id, depth = stack.pop(0)
        if this_id in done_ids:
            # family dive down will give duplicates
            continue
        done_ids.append(this_id)
        prefix = "\n" + " " * 4 * depth + this_id
        if this_id in task_state_summary:
            submit_num = task_state_summary[this_id].get('submit_num')
            if submit_num:
                prefix += "(%02d)" % submit_num
            state = task_state_summary[this_id]['state']
            sub_text += prefix + " " + state
            sub_states.setdefault(state, 0)
            sub_states[state] += 1
        elif this_id in fam_state_summary:
            name, point_string = TaskID.split(this_id)
            sub_text += prefix + " " + fam_state_summary[this_id]['state']
            for child in reversed(sorted(id_family_map[name])):
                child_id = TaskID.get(child, point_string)
                stack.insert(0, (child_id, depth + 1))
        if not prefix_text:
            prefix_text = sub_text.strip()
            sub_text = ""
    if len(sub_text.splitlines()) > 10:
        # Too long: replace member lines with per-state counts, most
        # frequent state first (ties broken alphabetically).  The
        # key-based sort replaces the Python-2-only list.sort(cmp)
        # idiom of the original.
        state_items = sorted(
            sub_states.items(), key=lambda item: (-item[1], item[0]))
        sub_text = ""
        for state, number in state_items:
            sub_text += "\n {0} tasks {1}".format(number, state)
    if sub_text and meta_text:
        sub_text = "\n" + sub_text
    text = prefix_text + meta_text + sub_text
    if not text:
        return id_
    return text
def filter_families(self, families):
    """Remove family summaries if no members are present."""
    # TODO - IS THERE ANY NEED TO DO THIS?
    fam_states = {}
    for fam_id, summary in families.items():
        name, point_string = TaskID.split(fam_id)
        # Retain the family only when at least one member task is in
        # the state summary.
        if any(TaskID.get(mem, point_string) in self.state_summary
               for mem in self.descendants[name]):
            fam_states[fam_id] = summary
    return fam_states
def get_left(self, inpoint, start_point, base_interval):
    """Return the task ID of the left side at the appropriate point."""
    # strip off special outputs; raw string fixes the invalid '\w'
    # escape sequence (deprecated since Python 3.6).
    left = re.sub(r':[\w-]+', '', self.left)
    left_graphnode = graphnode(left, base_interval=base_interval)
    if left_graphnode.offset_is_from_ict:
        # Offset is relative to the initial cycle time.
        point = get_point_relative(
            left_graphnode.offset_string, start_point)
    elif left_graphnode.offset_string:
        # Offset is relative to the current point.
        point = get_point_relative(left_graphnode.offset_string, inpoint)
    else:
        point = inpoint
    return TaskID.get(left_graphnode.name, point)
def get_prereq(self, point):
    """Return a prerequisite string and the relevant point."""
    if self.message:
        # Message trigger
        preq = self.message
        if self.cycle_point:
            point = self.cycle_point
        else:
            if self.message_offset:
                point += self.message_offset
            if self.graph_offset_string:
                point = get_point_relative(self.graph_offset_string, point)
        # Raw string: '\[' is an invalid escape in a plain string
        # literal (deprecated since Python 3.6).
        preq = re.sub(r'\[.*\]', str(point), preq)
    else:
        # Built-in trigger
        if self.cycle_point:
            point = self.cycle_point
        elif self.graph_offset_string:
            point = get_point_relative(self.graph_offset_string, point)
        preq = TaskID.get(self.task_name, point) + ' ' + self.builtin
    return preq, point
def get_prereq(self, point):
    """Return a prerequisite string and the relevant point."""
    if self.message:
        # Message trigger
        preq = self.message
        if self.cycle_point:
            point = self.cycle_point
        else:
            if self.message_offset:
                point += self.message_offset
            if self.graph_offset_string:
                point = get_point_relative(self.graph_offset_string, point)
        # Raw string fixes the invalid '\[' escape sequence
        # (deprecated since Python 3.6).
        preq = re.sub(r'\[.*\]', str(point), preq)
    else:
        # Built-in trigger
        if self.cycle_point:
            point = self.cycle_point
        elif self.graph_offset_string:
            point = get_point_relative(
                self.graph_offset_string, point)
        preq = TaskID.get(self.task_name, point) + ' ' + self.builtin
    return preq, point
def _update_gui_transpose(self, tasks_by_point_string, state_summary):
    """Logic for updating the gui in transpose mode."""
    # One row per cycle point; one dot cell per task.
    for point_string in self.point_strings:
        tasks_here = tasks_by_point_string[point_string]
        state_list = []
        for name in self.task_list:
            task_id = TaskID.get(name, point_string)
            dot_type = (
                'family' if task_id in self.fam_state_summary else 'task')
            if name in tasks_here:
                state_list.append(
                    self.dots[dot_type][state_summary[task_id]['state']])
            else:
                state_list.append(self.dots[dot_type]['empty'])
        try:
            self.led_treestore.append(
                None, row=[point_string] + state_list + [point_string])
        except ValueError:
            # A very laggy store can change the columns and raise this.
            return False
def __init__(self, tdef, point, status, hold_swap):
    """Initialise the state of one task instance."""
    self.identity = TaskID.get(tdef.name, str(point))
    self.status = status
    self.hold_swap = hold_swap
    self.is_updated = False
    self.time_updated = None

    # Cached prerequisite satisfaction; None means "recompute".
    self._is_satisfied = None
    self._suicide_is_satisfied = None

    # Prerequisites.
    self.prerequisites = []
    self.suicide_prerequisites = []
    self._add_prerequisites(point, tdef)

    # External triggers start unsatisfied.  Cycle-point-specific
    # external triggers are supported via substitution - GitHub #1893.
    self.external_triggers = {
        ext.replace('$CYLC_TASK_CYCLE_POINT', str(point)): False
        for ext in tdef.external_triggers}

    # xtriggers (represented by labels) satisfied or not
    self.xtriggers = dict.fromkeys(tdef.xtrig_labels, False)
    self.xclock = (
        (tdef.xclock_label, False) if tdef.xclock_label else None)

    # Message outputs.
    self.outputs = TaskOutputs(tdef)
    self.kill_failed = False
def update(self, tasks, tasks_rh, min_point, max_point, max_point_rh,
           paused, will_pause_at, stopping, will_stop_at, ns_defn_order,
           reloading):
    """Rebuild the task/family/global summaries from the task pool.

    The new summary dicts are built locally and then assigned to the
    instance attributes in one step at the end (atomic update, so
    other threads never see a partially built summary).
    """
    global_summary = {}
    family_summary = {}
    task_summary, task_states = self._get_tasks_info(tasks, tasks_rh)
    # NOTE(review): fam_states appears unused in this method.
    fam_states = {}
    all_states = []
    config = SuiteConfig.get_inst()
    ancestors_dict = config.get_first_parent_ancestors()
    # Compute state_counts (total, and per cycle).
    state_count_totals = {}
    state_count_cycles = {}
    for point_string, c_task_states in task_states:
        # For each cycle point, construct a family state tree
        # based on the first-parent single-inheritance tree
        c_fam_task_states = {}
        count = {}
        for key in c_task_states:
            state = c_task_states[key]
            if state is None:
                continue
            try:
                count[state] += 1
            except KeyError:
                count[state] = 1
            all_states.append(state)
            # Propagate each task state up its first-parent ancestry.
            for parent in ancestors_dict.get(key, []):
                if parent == key:
                    continue
                c_fam_task_states.setdefault(parent, set([]))
                c_fam_task_states[parent].add(state)
        state_count_cycles[point_string] = count
        for fam, child_states in c_fam_task_states.items():
            f_id = TaskID.get(fam, point_string)
            # Derive a single representative state for the family.
            state = extract_group_state(child_states)
            if state is None:
                continue
            try:
                famcfg = config.cfg['runtime'][fam]
            except KeyError:
                famcfg = {}
            description = famcfg.get('description')
            title = famcfg.get('title')
            family_summary[f_id] = {'name': fam,
                                    'description': description,
                                    'title': title,
                                    'label': point_string,
                                    'state': state}
    state_count_totals = {}
    for point_string, count in state_count_cycles.items():
        for state, state_count in count.items():
            state_count_totals.setdefault(state, 0)
            state_count_totals[state] += state_count
    all_states.sort()
    global_summary['oldest cycle point string'] = (
        self.str_or_None(min_point))
    global_summary['newest cycle point string'] = (
        self.str_or_None(max_point))
    global_summary['newest runahead cycle point string'] = (
        self.str_or_None(max_point_rh))
    if cylc.flags.utc:
        global_summary['daemon time zone info'] = TIME_ZONE_UTC_INFO
    else:
        global_summary['daemon time zone info'] = TIME_ZONE_LOCAL_INFO
    global_summary['last_updated'] = time.time()
    global_summary['run_mode'] = self.run_mode
    global_summary['states'] = all_states
    global_summary['namespace definition order'] = ns_defn_order
    global_summary['reloading'] = reloading
    global_summary['state totals'] = state_count_totals
    # Construct a suite status string for use by monitoring clients.
    global_summary['status_string'] = get_suite_status_string(
        paused, stopping, will_pause_at, will_stop_at)
    # TODO - delete this block post back-compat concerns (<= 6.9.1):
    # Report separate status string components for older clients that
    # construct their own suite status strings.
    global_summary['paused'] = paused
    global_summary['stopping'] = stopping
    global_summary['will_pause_at'] = will_pause_at
    global_summary['will_stop_at'] = will_stop_at
    self._summary_update_time = time.time()
    # Replace the originals (atomic update, for access from other threads).
    self.task_summary = task_summary
    self.global_summary = global_summary
    self.family_summary = family_summary
    self.first_update_completed = True
    self.state_count_totals = state_count_totals
    self.state_count_cycles = state_count_cycles
def __init__(
        self, tdef, start_point, status=TASK_STATUS_WAITING,
        hold_swap=None, has_spawned=False, stop_point=None,
        is_startup=False, submit_num=0):
    """Initialise a task proxy for one instance of a task definition.

    Args:
        tdef: the task definition this proxy instantiates.
        start_point: requested cycle point for the instance.
        status: initial task status.
        hold_swap: status to swap back to on release from hold.
        has_spawned: True if the successor has already been spawned.
        stop_point: optional final cycle point for this instance.
        is_startup: True when instantiated at suite start-up, in which
            case the point is adjusted up to the first on-sequence one.
        submit_num: current submit number (None is treated as 0).
    """
    self.tdef = tdef
    if submit_num is None:
        submit_num = 0
    self.submit_num = submit_num
    if is_startup:
        # adjust up to the first on-sequence cycle point
        adjusted = []
        for seq in self.tdef.sequences:
            adj = seq.get_first_point(start_point)
            if adj:
                # may be None if out of sequence bounds
                adjusted.append(adj)
        if not adjusted:
            # This task is out of sequence bounds
            raise TaskProxySequenceBoundsError(self.tdef.name)
        self.point = min(adjusted)
    else:
        self.point = start_point
    self.cleanup_cutoff = self.tdef.get_cleanup_cutoff_point(
        self.point, self.tdef.intercycle_offsets)
    self.identity = TaskID.get(self.tdef.name, self.point)
    self.has_spawned = has_spawned
    self.point_as_seconds = None
    # Manually inserted tasks may have a final cycle point set.
    self.stop_point = stop_point
    self.manual_trigger = False
    self.is_manual_submit = False
    # Summary dict published to clients/GUIs.
    self.summary = {
        'latest_message': "",
        'submitted_time': None,
        'submitted_time_string': None,
        'submit_num': self.submit_num,
        'started_time': None,
        'started_time_string': None,
        'finished_time': None,
        'finished_time_string': None,
        'name': self.tdef.name,
        'description': self.tdef.rtconfig['description'],
        'title': self.tdef.rtconfig['title'],
        'label': str(self.point),
        'logfiles': [],
        'job_hosts': {},
        'execution_time_limit': None,
    }
    self.local_job_file_path = None
    self.task_host = 'localhost'
    self.task_owner = None
    self.job_vacated = False
    self.poll_timers = {}
    self.timeout_timers = {}
    self.try_timers = {}
    self.delayed_start = None
    self.expire_time = None
    self.state = TaskState(tdef, self.point, status, hold_swap)
    if tdef.sequential:
        # Adjust clean-up cutoff.
        p_next = None
        adjusted = []
        for seq in tdef.sequences:
            nxt = seq.get_next_point(self.point)
            if nxt:
                # may be None if beyond the sequence bounds
                adjusted.append(nxt)
        if adjusted:
            p_next = min(adjusted)
            if (self.cleanup_cutoff is not None and
                    self.cleanup_cutoff < p_next):
                self.cleanup_cutoff = p_next
def test_get(self):
    """TaskID.get() should join the name and point with the separator."""
    for expected, name, point in [
            ("a.1", "a", 1),
            ("a._1", "a", "_1"),
            ("WTASK.20101010T101010", "WTASK", "20101010T101010")]:
        self.assertEqual(expected, TaskID.get(name, point))
def update(self, schd):
    """Rebuild the task/family/global summaries from the scheduler.

    New summary dicts are built locally and assigned to the instance
    attributes in one step at the end (atomic update, so other threads
    never see a partially built summary).
    """
    self.update_time = time()
    global_summary = {}
    family_summary = {}
    task_summary, task_states = self._get_tasks_info(schd)
    all_states = []
    ancestors_dict = schd.config.get_first_parent_ancestors()
    # Compute state_counts (total, and per cycle).
    state_count_totals = {}
    state_count_cycles = {}
    for point_string, c_task_states in task_states.items():
        # For each cycle point, construct a family state tree
        # based on the first-parent single-inheritance tree
        c_fam_task_states = {}
        count = {}
        for key in c_task_states:
            state = c_task_states[key]
            if state is None:
                continue
            try:
                count[state] += 1
            except KeyError:
                count[state] = 1
            all_states.append(state)
            # Propagate each task state up its first-parent ancestry.
            for parent in ancestors_dict.get(key, []):
                if parent == key:
                    continue
                c_fam_task_states.setdefault(parent, set([]))
                c_fam_task_states[parent].add(state)
        state_count_cycles[point_string] = count
        for fam, child_states in c_fam_task_states.items():
            f_id = TaskID.get(fam, point_string)
            # Derive a single representative state for the family.
            state = extract_group_state(child_states)
            if state is None:
                continue
            try:
                famcfg = schd.config.cfg['runtime'][fam]['meta']
            except KeyError:
                famcfg = {}
            description = famcfg.get('description')
            title = famcfg.get('title')
            family_summary[f_id] = {
                'name': fam,
                'description': description,
                'title': title,
                'label': point_string,
                'state': state
            }
    state_count_totals = {}
    for point_string, count in list(state_count_cycles.items()):
        for state, state_count in count.items():
            state_count_totals.setdefault(state, 0)
            state_count_totals[state] += state_count
    all_states.sort()
    for key, value in (
            ('oldest cycle point string', schd.pool.get_min_point()),
            ('newest cycle point string', schd.pool.get_max_point()),
            ('newest runahead cycle point string',
             schd.pool.get_max_point_runahead())):
        if value:
            global_summary[key] = str(value)
        else:
            global_summary[key] = None
    if get_utc_mode():
        global_summary['time zone info'] = TIME_ZONE_UTC_INFO
    else:
        global_summary['time zone info'] = TIME_ZONE_LOCAL_INFO
    global_summary['last_updated'] = self.update_time
    global_summary['run_mode'] = schd.run_mode
    global_summary['states'] = all_states
    global_summary['namespace definition order'] = (
        schd.config.ns_defn_order)
    global_summary['reloading'] = schd.pool.do_reload
    global_summary['state totals'] = state_count_totals
    # Extract suite and task URLs from config.
    global_summary['suite_urls'] = dict(
        (i, j['meta']['URL'])
        for (i, j) in schd.config.cfg['runtime'].items())
    global_summary['suite_urls']['suite'] = schd.config.cfg['meta']['URL']
    # Construct a suite status string for use by monitoring clients.
    if schd.pool.is_held:
        global_summary['status_string'] = SUITE_STATUS_HELD
    elif schd.stop_mode is not None:
        global_summary['status_string'] = SUITE_STATUS_STOPPING
    elif schd.pool.hold_point:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_HOLD % schd.pool.hold_point)
    elif schd.stop_point:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_STOP % schd.stop_point)
    elif schd.stop_clock_time is not None:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_STOP % schd.stop_clock_time_string)
    elif schd.stop_task:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_STOP % schd.stop_task)
    elif schd.final_point:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_STOP % schd.final_point)
    else:
        global_summary['status_string'] = SUITE_STATUS_RUNNING
    # Replace the originals (atomic update, for access from other threads).
    self.task_summary = task_summary
    self.global_summary = global_summary
    self.family_summary = family_summary
    self.state_count_totals = state_count_totals
    self.state_count_cycles = state_count_cycles
def on_treeview_button_pressed(self, treeview, event):
    """Pop up the right-click task menu for a cell in the led view.

    Returns True when the menu is shown (stops event propagation),
    False to let default handling continue.

    Fix: removed the unused local ``time = event.time`` (which also
    shadowed any ``time`` import) and the unused ``cellx, celly``
    unpack, matching the sibling implementation of this handler.
    """
    # DISPLAY MENU ONLY ON RIGHT CLICK ONLY
    if event.button != 3:
        return False

    # the following sets selection to the position at which the
    # right click was done (otherwise selection lags behind the
    # right click):
    x = int(event.x)
    y = int(event.y)
    pth = treeview.get_path_at_pos(x, y)
    if pth is None:
        return False
    path, col = pth[0:2]
    r_iter = treeview.get_model().get_iter(path)

    column_index = treeview.get_columns().index(col)
    if column_index == 0:
        # Clicked the label column, not a task-state cell.
        return False

    # Resolve (task name, cycle point) for the clicked cell; which is
    # which depends on the current view orientation.
    if not self.t.is_transposed:
        point_string = self.t.led_headings[column_index]
        name = treeview.get_model().get_value(r_iter, 0)
    else:
        name = self.t.led_headings[column_index]
        point_string_column = treeview.get_model().get_n_columns() - 1
        point_string = treeview.get_model().get_value(
            r_iter, point_string_column)

    task_id = TaskID.get(name, point_string)
    is_fam = (name in self.t.descendants)

    if is_fam:
        if task_id not in self.t.fam_state_summary:
            return False
        task_state = self.t.fam_state_summary[task_id]['state']
    else:
        if task_id not in self.t.state_summary:
            return False
        task_state = self.t.state_summary[task_id]['state']

    menu = self.get_right_click_menu(
        [task_id], [task_state], task_is_family=[is_fam])

    sep = gtk.SeparatorMenuItem()
    sep.show()
    menu.append(sep)

    toggle_item = gtk.CheckMenuItem('Toggle Hide Task Headings')
    toggle_item.set_active(self.t.should_hide_headings)
    menu.append(toggle_item)
    toggle_item.connect('toggled', self.toggle_headings)
    toggle_item.show()

    group_item = gtk.CheckMenuItem('Toggle Family Grouping')
    group_item.set_active(self.t.should_group_families)
    menu.append(group_item)
    group_item.connect('toggled', self.toggle_grouping)
    group_item.show()

    transpose_menu_item = gtk.CheckMenuItem('Toggle _Transpose View')
    transpose_menu_item.set_active(self.t.should_transpose_view)
    menu.append(transpose_menu_item)
    transpose_menu_item.connect('toggled', self.toggle_transpose)
    transpose_menu_item.show()

    if self.cfg.use_defn_order:
        defn_order_menu_item = gtk.CheckMenuItem(
            'Toggle _Definition Order')
        defn_order_menu_item.set_active(self.t.defn_order_on)
        menu.append(defn_order_menu_item)
        defn_order_menu_item.connect('toggled', self.toggle_defn_order)
        defn_order_menu_item.show()

    menu.popup(None, None, None, event.button, event.time)

    # TODO - popup menus are not automatically destroyed and can be
    # reused if saved; however, we need to reconstruct or at least
    # alter ours dynamically => should destroy after each use to
    # prevent a memory leak? But I'm not sure how to do this as yet.)
    return True
def on_treeview_button_pressed(self, treeview, event):
    """Pop up the right-click task menu for a cell in the led view.

    Returns True when the menu is shown (stops event propagation),
    False to let default handling continue.
    """
    # DISPLAY MENU ONLY ON RIGHT CLICK ONLY
    if event.button != 3:
        return False

    # the following sets selection to the position at which the
    # right click was done (otherwise selection lags behind the
    # right click):
    x = int(event.x)
    y = int(event.y)
    pth = treeview.get_path_at_pos(x, y)
    if pth is None:
        return False
    path, col = pth[0:2]
    r_iter = treeview.get_model().get_iter(path)

    column_index = treeview.get_columns().index(col)
    if column_index == 0:
        # Clicked the label column, not a task-state cell.
        return False

    # Resolve (task name, cycle point) for the clicked cell; which is
    # which depends on the current view orientation.
    if not self.t.is_transposed:
        point_string = self.t.led_headings[column_index]
        name = treeview.get_model().get_value(r_iter, 0)
    else:
        name = self.t.led_headings[column_index]
        # The point string is stored in the model's last column.
        point_string_column = treeview.get_model().get_n_columns() - 1
        point_string = treeview.get_model().get_value(
            r_iter, point_string_column)

    task_id = TaskID.get(name, point_string)
    is_fam = (name in self.t.descendants)

    # Bail out quietly if the summary no longer knows this task/family
    # (the view can lag behind the live state).
    if is_fam:
        if task_id not in self.t.fam_state_summary:
            return False
        task_state = self.t.fam_state_summary[task_id]['state']
    else:
        if task_id not in self.t.state_summary:
            return False
        task_state = self.t.state_summary[task_id]['state']

    menu = self.get_right_click_menu([task_id], [task_state],
                                     task_is_family=[is_fam])

    sep = gtk.SeparatorMenuItem()
    sep.show()
    menu.append(sep)

    toggle_item = gtk.CheckMenuItem('Toggle Hide Task Headings')
    toggle_item.set_active(self.t.should_hide_headings)
    menu.append(toggle_item)
    toggle_item.connect('toggled', self.toggle_headings)
    toggle_item.show()

    group_item = gtk.CheckMenuItem('Toggle Family Grouping')
    group_item.set_active(self.t.should_group_families)
    menu.append(group_item)
    group_item.connect('toggled', self.toggle_grouping)
    group_item.show()

    transpose_menu_item = gtk.CheckMenuItem('Toggle _Transpose View')
    transpose_menu_item.set_active(self.t.should_transpose_view)
    menu.append(transpose_menu_item)
    transpose_menu_item.connect('toggled', self.toggle_transpose)
    transpose_menu_item.show()

    # Definition-order toggle is only offered when the suite config
    # enables it.
    if self.cfg.use_defn_order:
        defn_order_menu_item = gtk.CheckMenuItem(
            'Toggle _Definition Order')
        defn_order_menu_item.set_active(self.t.defn_order_on)
        menu.append(defn_order_menu_item)
        defn_order_menu_item.connect('toggled', self.toggle_defn_order)
        defn_order_menu_item.show()

    menu.popup(None, None, None, event.button, event.time)

    # TODO - popup menus are not automatically destroyed and can be
    # reused if saved; however, we need to reconstruct or at least
    # alter ours dynamically => should destroy after each use to
    # prevent a memory leak? But I'm not sure how to do this as yet.)
    return True
def on_treeview_button_pressed(self, treeview, event): # DISPLAY MENU ONLY ON RIGHT CLICK ONLY if event.button != 3: return False # the following sets selection to the position at which the # right click was done (otherwise selection lags behind the # right click): x = int(event.x) y = int(event.y) time = event.time pth = treeview.get_path_at_pos(x, y) if pth is None: return False treeview.grab_focus() path, col, cellx, celly = pth treeview.set_cursor(path, col, 0) selection = treeview.get_selection() treemodel, iter = selection.get_selected() point_string = treemodel.get_value(iter, 0) name = treemodel.get_value(iter, 1) if point_string == name: # must have clicked on the top level point_string return task_id = TaskID.get(name, point_string) is_fam = (name in self.t.descendants) if is_fam: task_state = self.t.fam_state_summary[task_id]['state'] submit_num = None else: task_state = self.t.state_summary[task_id]['state'] submit_num = self.t.state_summary[task_id]['submit_num'] menu = self.get_right_click_menu(task_id, t_state=task_state, task_is_family=is_fam, submit_num=submit_num) sep = gtk.SeparatorMenuItem() sep.show() menu.append(sep) group_item = gtk.CheckMenuItem('Toggle Family Grouping') group_item.set_active(self.t.should_group_families) menu.append(group_item) group_item.connect('toggled', self.toggle_grouping) group_item.show() menu.popup(None, None, None, event.button, event.time) # TODO - popup menus are not automatically destroyed and can be # reused if saved; however, we need to reconstruct or at least # alter ours dynamically => should destroy after each use to # prevent a memory leak? But I'm not sure how to do this as yet.) return True
def update_gui(self): new_data = {} state_summary = {} state_summary.update(self.state_summary) state_summary.update(self.fam_state_summary) self.ledview_widgets() tasks_by_point_string = {} tasks_by_name = {} for id_ in state_summary: name, point_string = TaskID.split(id_) tasks_by_point_string.setdefault(point_string, []) tasks_by_point_string[point_string].append(name) tasks_by_name.setdefault(name, []) tasks_by_name[name].append(point_string) # flat (a liststore would do) names = tasks_by_name.keys() names.sort() tvcs = self.led_treeview.get_columns() if not self.is_transposed: for name in self.task_list: point_strings_for_tasks = tasks_by_name.get(name, []) if not point_strings_for_tasks: continue state_list = [] for point_string in self.point_strings: if point_string in point_strings_for_tasks: task_id = TaskID.get(name, point_string) state = state_summary[task_id]['state'] if task_id in self.fam_state_summary: dot_type = 'family' else: dot_type = 'task' state_list.append(self.dots[dot_type][state]) else: state_list.append(self.dots['task']['empty']) try: self.led_liststore.append([name] + state_list) except ValueError: # A very laggy store can change the columns and raise this. return False else: for point_string in self.point_strings: tasks_at_point_string = tasks_by_point_string[point_string] state_list = [] for name in self.task_list: task_id = TaskID.get(name, point_string) if task_id in self.fam_state_summary: dot_type = 'family' else: dot_type = 'task' if name in tasks_at_point_string: state = state_summary[task_id]['state'] state_list.append(self.dots[dot_type][state]) else: state_list.append(self.dots[dot_type]['empty']) try: self.led_liststore.append([point_string] + state_list + [point_string]) except ValueError: # A very laggy store can change the columns and raise this. return False self.led_treeview.columns_autosize() return False
def update_gui(self):
    """Rebuild and restyle the dependency graph from the latest update.

    Fetches the raw graph over the current cycle-point window from the
    suite server, filters/crops nodes, applies styling and live task
    states, and hands the result to the xdot widget.  Returns early
    when no summary data is available; returns False when the server
    call fails.
    """
    # TODO - check edges against resolved ones
    # (adding new ones, and nodes, if necessary)
    self.action_required = False
    if not self.global_summary:
        return
    self.oldest_point_string = (
        self.global_summary['oldest cycle point string'])
    self.newest_point_string = (
        self.global_summary['newest cycle point string'])
    if TASK_STATUS_RUNAHEAD not in self.updater.filter_states_excl:
        # Get a graph out to the max runahead point.
        self.newest_point_string = (
            self.global_summary[
                'newest runahead cycle point string'])
    if self.focus_start_point_string:
        # A focused sub-range of cycle points has been requested.
        oldest = self.focus_start_point_string
        newest = self.focus_stop_point_string
    else:
        oldest = self.oldest_point_string
        newest = self.newest_point_string
    # Empty group/ungroup lists are sent to the server as None (no-op).
    group_for_server = self.group
    if self.group == []:
        group_for_server = None
    ungroup_for_server = self.ungroup
    if self.ungroup == []:
        ungroup_for_server = None
    try:
        res = self.updater.client.get_info(
            'get_graph_raw', start_point_string=oldest,
            stop_point_string=newest,
            group_nodes=group_for_server,
            ungroup_nodes=ungroup_for_server,
            ungroup_recursive=self.ungroup_recursive,
            group_all=self.group_all,
            ungroup_all=self.ungroup_all
        )
    except ClientError:
        if cylc.flags.debug:
            try:
                traceback.print_exc()
            except IOError:
                # Cannot print to terminal (session may be closed).
                pass
        return False
    self.have_leaves_and_feet = True
    gr_edges, suite_polling_tasks, self.leaves, self.feet = res
    gr_edges = [tuple(edge) for edge in gr_edges]
    # Only rebuild the graph object when the edge set actually changed.
    current_id = self.get_graph_id(gr_edges)
    if current_id != self.prev_graph_id:
        self.graphw = CGraphPlain(
            self.cfg.suite, suite_polling_tasks)
        self.graphw.add_edges(
            gr_edges, ignore_suicide=self.ignore_suicide)
        nodes_to_remove = set()
        # Remove nodes representing filtered-out tasks.
        if (self.updater.filter_name_string or
                self.updater.filter_states_excl):
            for node in self.graphw.nodes():
                id_ = node.get_name()
                # Don't need to guard against special nodes here (yet).
                name, point_string = TaskID.split(id_)
                if name not in self.all_families:
                    # This node is a task, not a family.
                    if id_ in self.updater.filt_task_ids:
                        nodes_to_remove.add(node)
                    elif id_ not in self.updater.kept_task_ids:
                        # A base node - these only appear in the graph.
                        filter_string = self.updater.filter_name_string
                        if (filter_string and
                                filter_string not in name and
                                not re.search(filter_string, name)):
                            # A base node that fails the name filter.
                            nodes_to_remove.add(node)
                elif id_ in self.fam_state_summary:
                    # Remove family nodes if all members filtered out.
                    remove = True
                    for mem in self.descendants[name]:
                        mem_id = TaskID.get(mem, point_string)
                        if mem_id in self.updater.kept_task_ids:
                            remove = False
                            break
                    if remove:
                        nodes_to_remove.add(node)
                elif id_ in self.updater.full_fam_state_summary:
                    # An updater-filtered-out family.
                    nodes_to_remove.add(node)
        # Base node cropping.
        if self.crop:
            # Remove all base nodes.
            for node in (set(self.graphw.nodes()) - nodes_to_remove):
                if node.get_name() not in self.state_summary:
                    nodes_to_remove.add(node)
        else:
            # Remove cycle points containing only base nodes.
            non_base_point_strings = set()
            point_string_nodes = {}
            for node in set(self.graphw.nodes()) - nodes_to_remove:
                node_id = node.get_name()
                name, point_string = TaskID.split(node_id)
                point_string_nodes.setdefault(point_string, [])
                point_string_nodes[point_string].append(node)
                if (node_id in self.state_summary or
                        node_id in self.fam_state_summary):
                    non_base_point_strings.add(point_string)
            pure_base_point_strings = (
                set(point_string_nodes) - non_base_point_strings)
            for point_string in pure_base_point_strings:
                for node in point_string_nodes[point_string]:
                    nodes_to_remove.add(node)
        self.graphw.cylc_remove_nodes_from(list(nodes_to_remove))
        # TODO - remove base nodes only connected to other base nodes?
        # Should these even exist any more?

        # Make family nodes octagons.
        for node in self.graphw.nodes():
            node_id = node.get_name()
            try:
                name, point_string = TaskID.split(node_id)
            except ValueError:
                # Special node.
                continue
            if name in self.all_families:
                node.attr['shape'] = 'doubleoctagon'
        if self.subgraphs_on:
            self.graphw.add_cycle_point_subgraphs(gr_edges)

    # Set base node style defaults
    for node in self.graphw.nodes():
        node.attr.setdefault('style', 'filled')
        node.attr['color'] = '#888888'
        node.attr['fillcolor'] = 'white'
        node.attr['fontcolor'] = '#888888'
        if not node.attr['URL'].startswith(self.PREFIX_BASE):
            node.attr['URL'] = self.PREFIX_BASE + node.attr['URL']

    # Overlay live state styling on nodes present in the summaries.
    for id_ in self.state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            continue
        self.set_live_node_attr(node, id_)
    for id_ in self.fam_state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            # Node not in graph.
            continue
        self.set_live_node_attr(node, id_)

    self.graphw.graph_attr['rankdir'] = self.orientation

    # Optionally dump each frame to a dot file for debugging/animation.
    if self.write_dot_frames:
        arg = os.path.join(
            self.suite_share_dir, 'frame' + '-' +
            str(self.graph_frame_count) + '.dot')
        self.graphw.write(arg)
        self.graph_frame_count += 1

    self.update_xdot(no_zoom=(current_id == self.prev_graph_id))
    self.prev_graph_id = current_id
def on_treeview_button_pressed(self, treeview, event): # DISPLAY MENU ONLY ON RIGHT CLICK ONLY if event.button != 3: return False # If clicking on a task that is not selected, set the selection to be # that task. x = int(event.x) y = int(event.y) pth = treeview.get_path_at_pos(x, y) if pth is None: return False treeview.grab_focus() path, col, _, _ = pth tvte = TreeViewTaskExtractor(treeview) selected_paths = [row[0] for row in tvte.get_selected_rows()] if path not in selected_paths: treeview.set_cursor(path, col, 0) # Populate lists of task info from the selected tasks. task_ids = [] t_states = [] task_is_family = [] # List of boolean values. for task in tvte.get_selected_tasks(): # get_selected_tasks() does not return tasks if their parent node # is also returned, i.e. no duplicates. point_string, name = task if point_string == name: name = 'root' task_id = TaskID.get(name, point_string) task_ids.append(task_id) is_fam = (name in self.t.descendants) task_is_family.append(is_fam) if is_fam: if task_id not in self.t.fam_state_summary: return False t_states.append(self.t.fam_state_summary[task_id]['state']) else: if task_id not in self.t.state_summary: return False t_states.append(self.t.state_summary[task_id]['state']) menu = self.get_right_click_menu(task_ids, t_states, task_is_family=task_is_family) sep = gtk.SeparatorMenuItem() sep.show() menu.append(sep) group_item = gtk.CheckMenuItem('Toggle Family Grouping') group_item.set_active(self.t.should_group_families) menu.append(group_item) group_item.connect('toggled', self.toggle_grouping) group_item.show() menu.popup(None, None, None, event.button, event.time) # TODO - popup menus are not automatically destroyed and can be # reused if saved; however, we need to reconstruct or at least # alter ours dynamically => should destroy after each use to # prevent a memory leak? But I'm not sure how to do this as yet.) return True
def on_treeview_button_pressed(self, treeview, event): # DISPLAY MENU ONLY ON RIGHT CLICK ONLY if event.button != 3: return False # If clicking on a task that is not selected, set the selection to be # that task. x = int(event.x) y = int(event.y) pth = treeview.get_path_at_pos(x, y) if pth is None: return False treeview.grab_focus() path, col = pth[:2] tvte = TreeViewTaskExtractor(treeview) if path not in (row[0] for row in tvte.get_selected_rows()): treeview.set_cursor(path, col, 0) # Populate lists of task info from the selected tasks. task_ids = [] t_states = [] task_is_family = [] # List of boolean values. for task in tvte.get_selected_tasks(): # get_selected_tasks() does not return tasks if their parent node # is also returned, i.e. no duplicates. point_string, name = task if point_string == name: name = 'root' task_id = TaskID.get(name, point_string) task_ids.append(task_id) is_fam = (name in self.t.descendants) task_is_family.append(is_fam) if is_fam: if task_id not in self.t.fam_state_summary: return False t_states.append(self.t.fam_state_summary[task_id]['state']) else: if task_id not in self.t.state_summary: return False t_states.append(self.t.state_summary[task_id]['state']) menu = self.get_right_click_menu(task_ids, t_states, task_is_family=task_is_family) sep = gtk.SeparatorMenuItem() sep.show() menu.append(sep) group_item = gtk.CheckMenuItem('Toggle Family Grouping') group_item.set_active(self.t.should_group_families) menu.append(group_item) group_item.connect('toggled', self.toggle_grouping) group_item.show() menu.popup(None, None, None, event.button, event.time) # TODO - popup menus are not automatically destroyed and can be # reused if saved; however, we need to reconstruct or at least # alter ours dynamically => should destroy after each use to # prevent a memory leak? But I'm not sure how to do this as yet.) return True
def __init__(
        self, tdef, start_point, status=TASK_STATUS_WAITING,
        hold_swap=None, has_spawned=False, stop_point=None,
        is_startup=False, submit_num=0, is_late=False):
    """Initialize a task proxy for task definition ``tdef``.

    Args:
        tdef: the task definition this proxy instantiates.
        start_point: requested cycle point; for startup tasks it is
            adjusted up to the first on-sequence point.
        status: initial task status.
        hold_swap: status to swap to on release from held.
        has_spawned: whether the successor has already been spawned.
        stop_point: optional final cycle point (manually inserted
            tasks may have one set).
        is_startup: True for a suite-startup instantiation.
        submit_num: current submit number (None is treated as 0).
        is_late: whether the task is already late.

    Raises:
        TaskProxySequenceBoundsError: if a startup task's point is
            out of all its sequences' bounds.

    Fix: removed a redundant ``self.late_time = None`` in the startup
    branch; the attribute is unconditionally set to None further down.
    """
    self.tdef = tdef
    if submit_num is None:
        # Guard against callers explicitly passing None.
        submit_num = 0
    self.submit_num = submit_num

    if is_startup:
        # adjust up to the first on-sequence cycle point
        adjusted = []
        for seq in self.tdef.sequences:
            adj = seq.get_first_point(start_point)
            if adj:
                # may be None if out of sequence bounds
                adjusted.append(adj)
        if not adjusted:
            # This task is out of sequence bounds
            raise TaskProxySequenceBoundsError(self.tdef.name)
        self.point = min(adjusted)
    else:
        self.point = start_point
    self.cleanup_cutoff = self.tdef.get_cleanup_cutoff_point(self.point)
    self.identity = TaskID.get(self.tdef.name, self.point)

    self.has_spawned = has_spawned
    self.reload_successor = None
    self.point_as_seconds = None

    # Manually inserted tasks may have a final cycle point set.
    self.stop_point = stop_point

    self.manual_trigger = False
    self.is_manual_submit = False

    # Summary fields published to clients/GUIs.
    self.summary = {
        'latest_message': '',
        'submitted_time': None,
        'submitted_time_string': None,
        'started_time': None,
        'started_time_string': None,
        'finished_time': None,
        'finished_time_string': None,
        'logfiles': [],
        'job_hosts': {},
        'execution_time_limit': None,
        'batch_sys_name': None,
        'submit_method_id': None
    }

    self.local_job_file_path = None

    self.task_host = 'localhost'
    self.task_owner = None

    self.job_vacated = False
    self.poll_timer = None
    self.timeout = None
    self.try_timers = {}
    # Use dict here for Python 2.6 compat.
    # Should use collections.Counter in Python 2.7+
    self.non_unique_events = {}

    self.clock_trigger_time = None
    self.expire_time = None
    self.late_time = None
    self.is_late = is_late

    self.state = TaskState(tdef, self.point, status, hold_swap)

    if tdef.sequential:
        # Adjust clean-up cutoff.
        p_next = None
        adjusted = []
        for seq in tdef.sequences:
            nxt = seq.get_next_point(self.point)
            if nxt:
                # may be None if beyond the sequence bounds
                adjusted.append(nxt)
        if adjusted:
            p_next = min(adjusted)
            if (self.cleanup_cutoff is not None and
                    self.cleanup_cutoff < p_next):
                self.cleanup_cutoff = p_next
def update_gui( self ): new_data = {} state_summary = {} state_summary.update( self.state_summary ) state_summary.update( self.fam_state_summary ) self.ledview_widgets() tasks_by_point_string = {} tasks_by_name = {} for id_ in state_summary: name, point_string = TaskID.split(id_) tasks_by_point_string.setdefault( point_string, [] ) tasks_by_point_string[point_string].append(name) tasks_by_name.setdefault( name, [] ) tasks_by_name[name].append(point_string) # flat (a liststore would do) names = tasks_by_name.keys() names.sort() tvcs = self.led_treeview.get_columns() if not self.is_transposed: for name in self.task_list: point_strings_for_tasks = tasks_by_name.get(name, []) if not point_strings_for_tasks: continue state_list = [] for point_string in self.point_strings: if point_string in point_strings_for_tasks: task_id = TaskID.get(name, point_string) state = state_summary[task_id]['state'] if task_id in self.fam_state_summary: dot_type = 'family' else: dot_type = 'task' state_list.append(self.dots[dot_type][state]) else: state_list.append(self.dots['task']['empty']) try: self.led_liststore.append([name] + state_list) except ValueError: # A very laggy store can change the columns and raise this. return False else: for point_string in self.point_strings: tasks_at_point_string = tasks_by_point_string[point_string] state_list = [] for name in self.task_list: task_id = TaskID.get(name, point_string) if task_id in self.fam_state_summary: dot_type = 'family' else: dot_type = 'task' if name in tasks_at_point_string: state = state_summary[task_id]['state'] state_list.append(self.dots[dot_type][state]) else: state_list.append(self.dots[dot_type]['empty']) try: self.led_liststore.append( [point_string] + state_list + [point_string]) except ValueError: # A very laggy store can change the columns and raise this. return False self.led_treeview.columns_autosize() return False
def update_graph(self):
    """Rebuild and restyle the dependency graph from the latest update.

    Returns False on a failed server call; otherwise returns
    ``not needs_redraw`` (True when the edge set was unchanged).

    Fixes: renamed the loop variable ``id`` to ``id_`` (it shadowed the
    ``id`` builtin); replaced the bare ``except:`` on the family-node
    lookup with ``except KeyError:`` to match the task-node loop just
    above it (``get_node`` raises KeyError for a missing node) and stop
    swallowing unrelated errors such as KeyboardInterrupt.
    """
    # TODO - check edges against resolved ones
    # (adding new ones, and nodes, if necessary)
    self.action_required = False
    try:
        self.oldest_point_string = (
            self.global_summary['oldest cycle point string'])
        self.newest_point_string = (
            self.global_summary['newest cycle point string'])
        if TASK_STATUS_RUNAHEAD not in self.updater.filter_states_excl:
            # Get a graph out to the max runahead point.
            try:
                self.newest_point_string = (
                    self.global_summary[
                        'newest runahead cycle point string'])
            except KeyError:
                # back compat <= 6.2.0
                pass
    except KeyError:
        # Pre cylc-6 back compat.
        self.oldest_point_string = (
            self.global_summary['oldest cycle time'])
        self.newest_point_string = (
            self.global_summary['newest cycle time'])

    if self.focus_start_point_string:
        # A focused sub-range of cycle points has been requested.
        oldest = self.focus_start_point_string
        newest = self.focus_stop_point_string
    else:
        oldest = self.oldest_point_string
        newest = self.newest_point_string

    # Empty group/ungroup lists are sent to the server as None (no-op).
    group_for_server = self.group
    if self.group == []:
        group_for_server = None
    ungroup_for_server = self.ungroup
    if self.ungroup == []:
        ungroup_for_server = None

    try:
        res = self.updater.suite_info_client.get_info(
            'get_graph_raw', start_point_string=oldest,
            stop_point_string=newest,
            group_nodes=group_for_server,
            ungroup_nodes=ungroup_for_server,
            ungroup_recursive=self.ungroup_recursive,
            group_all=self.group_all,
            ungroup_all=self.ungroup_all)
    except Exception as exc:
        print >> sys.stderr, str(exc)
        return False

    self.have_leaves_and_feet = True
    gr_edges, suite_polling_tasks, self.leaves, self.feet = res
    gr_edges = [tuple(edge) for edge in gr_edges]

    # Only rebuild the graph object when the edge set actually changed.
    current_id = self.get_graph_id(gr_edges)
    needs_redraw = current_id != self.prev_graph_id

    if needs_redraw:
        self.graphw = CGraphPlain(self.cfg.suite, suite_polling_tasks)
        self.graphw.add_edges(
            gr_edges, ignore_suicide=self.ignore_suicide)

        nodes_to_remove = set()

        # Remove nodes representing filtered-out tasks.
        if (self.updater.filter_name_string or
                self.updater.filter_states_excl):
            for node in self.graphw.nodes():
                id_ = node.get_name()
                # Don't need to guard against special nodes here (yet).
                name, point_string = TaskID.split(id_)
                if name not in self.all_families:
                    # This node is a task, not a family.
                    if id_ in self.updater.filt_task_ids:
                        nodes_to_remove.add(node)
                    elif id_ not in self.updater.kept_task_ids:
                        # A base node - these only appear in the graph.
                        filter_string = self.updater.filter_name_string
                        if (filter_string and
                                filter_string not in name and
                                not re.search(filter_string, name)):
                            # A base node that fails the name filter.
                            nodes_to_remove.add(node)
                elif id_ in self.fam_state_summary:
                    # Remove family nodes if all members filtered out.
                    remove = True
                    for mem in self.descendants[name]:
                        mem_id = TaskID.get(mem, point_string)
                        if mem_id in self.updater.kept_task_ids:
                            remove = False
                            break
                    if remove:
                        nodes_to_remove.add(node)
                elif id_ in self.updater.full_fam_state_summary:
                    # An updater-filtered-out family.
                    nodes_to_remove.add(node)

        # Base node cropping.
        if self.crop:
            # Remove all base nodes.
            for node in (set(self.graphw.nodes()) - nodes_to_remove):
                if node.get_name() not in self.state_summary:
                    nodes_to_remove.add(node)
        else:
            # Remove cycle points containing only base nodes.
            non_base_point_strings = set()
            point_string_nodes = {}
            for node in set(self.graphw.nodes()) - nodes_to_remove:
                node_id = node.get_name()
                name, point_string = TaskID.split(node_id)
                point_string_nodes.setdefault(point_string, [])
                point_string_nodes[point_string].append(node)
                if (node_id in self.state_summary or
                        node_id in self.fam_state_summary):
                    non_base_point_strings.add(point_string)
            pure_base_point_strings = (
                set(point_string_nodes) - non_base_point_strings)
            for point_string in pure_base_point_strings:
                for node in point_string_nodes[point_string]:
                    nodes_to_remove.add(node)
        self.graphw.cylc_remove_nodes_from(list(nodes_to_remove))
        # TODO - remove base nodes only connected to other base nodes?
        # Should these even exist any more?

        # Make family nodes octagons.
        for node in self.graphw.nodes():
            node_id = node.get_name()
            try:
                name, point_string = TaskID.split(node_id)
            except ValueError:
                # Special node.
                continue
            if name in self.all_families:
                node.attr['shape'] = 'doubleoctagon'

        if self.subgraphs_on:
            self.graphw.add_cycle_point_subgraphs(gr_edges)

    # Set base node style defaults
    for node in self.graphw.nodes():
        node.attr.setdefault('style', 'filled')
        node.attr['color'] = '#888888'
        node.attr['fillcolor'] = 'white'
        node.attr['fontcolor'] = '#888888'

    # Overlay live state styling on nodes present in the summaries.
    for id_ in self.state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            continue
        self.set_live_node_attr(node, id_)
    for id_ in self.fam_state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            # Node not in graph.
            continue
        self.set_live_node_attr(node, id_)

    self.graphw.graph_attr['rankdir'] = self.orientation

    # Optionally dump each frame to a dot file for debugging/animation.
    if self.write_dot_frames:
        arg = os.path.join(
            self.suite_share_dir, 'frame' + '-' +
            str(self.graph_frame_count) + '.dot')
        self.graphw.write(arg)
        self.graph_frame_count += 1

    self.prev_graph_id = current_id
    return not needs_redraw
def update_graph(self):
    """Rebuild and restyle the dependency graph (back-compat variant).

    Handles pre cylc-6 daemons (positional RPC, old summary keys) and
    pre-5.4.2 response shapes.  Returns False on a failed server call;
    otherwise returns ``not needs_redraw``.

    Fixes: renamed the loop variable ``id`` to ``id_`` (it shadowed
    the ``id`` builtin); replaced the bare ``except:`` on the
    family-node lookup with ``except KeyError:`` to match the
    task-node loop above it and stop swallowing unrelated errors.
    """
    # TODO - check edges against resolved ones
    # (adding new ones, and nodes, if necessary)
    try:
        self.oldest_point_string = (
            self.global_summary['oldest cycle point string'])
        self.newest_point_string = (
            self.global_summary['newest cycle point string'])
        if 'runahead' not in self.updater.filter_states_excl:
            # Get a graph out to the max runahead point.
            try:
                self.newest_point_string = (
                    self.global_summary[
                        'newest runahead cycle point string'])
            except KeyError:
                # back compat <= 6.2.0
                pass
    except KeyError:
        # Pre cylc-6 back compat.
        self.oldest_point_string = (
            self.global_summary['oldest cycle time'])
        self.newest_point_string = (
            self.global_summary['newest cycle time'])

    if self.focus_start_point_string:
        # A focused sub-range of cycle points has been requested.
        oldest = self.focus_start_point_string
        newest = self.focus_stop_point_string
    else:
        oldest = self.oldest_point_string
        newest = self.newest_point_string

    try:
        res = self.updater.suite_info_client.get_info(
            'get_graph_raw', oldest, newest, self.group, self.ungroup,
            self.ungroup_recursive, self.group_all, self.ungroup_all)
    except TypeError:
        # Back compat with pre cylc-6 suite daemons.
        res = self.updater.suite_info_client.get(
            'get_graph_raw', oldest, newest, False, self.group,
            self.ungroup, self.ungroup_recursive, self.group_all,
            self.ungroup_all)
    except Exception as exc:
        # PyroError?
        print >> sys.stderr, str(exc)
        return False

    # backward compatibility for old suite daemons still running
    # NOTE(review): a response of unexpected length leaves gr_edges
    # unbound and would raise NameError below - confirm all daemon
    # versions return one of these shapes.
    self.have_leaves_and_feet = False
    if isinstance(res, list):
        # prior to suite-polling tasks in 5.4.0
        gr_edges = res
        suite_polling_tasks = []
        self.leaves = []
        self.feet = []
    else:
        if len(res) == 2:
            # prior to graph view grouping fix in 5.4.2
            gr_edges, suite_polling_tasks = res
            self.leaves = []
            self.feet = []
        elif len(res) == 4:
            # 5.4.2 and later
            self.have_leaves_and_feet = True
            gr_edges, suite_polling_tasks, self.leaves, self.feet = res

    # Only rebuild the graph object when the edge set actually changed.
    current_id = self.get_graph_id(gr_edges)
    needs_redraw = current_id != self.prev_graph_id

    if needs_redraw:
        self.graphw = graphing.CGraphPlain(
            self.cfg.suite, suite_polling_tasks)
        self.graphw.add_edges(
            gr_edges, ignore_suicide=self.ignore_suicide)

        nodes_to_remove = set()

        # Remove nodes representing filtered-out tasks.
        if (self.updater.filter_name_string or
                self.updater.filter_states_excl):
            for node in self.graphw.nodes():
                id_ = node.get_name()
                # Don't need to guard against special nodes here (yet).
                name, point_string = TaskID.split(id_)
                if name not in self.all_families:
                    # This node is a task, not a family.
                    if id_ in self.updater.filt_task_ids:
                        nodes_to_remove.add(node)
                    elif id_ not in self.updater.kept_task_ids:
                        # A base node - these only appear in the graph.
                        filter_string = self.updater.filter_name_string
                        if (filter_string and
                                filter_string not in name and
                                not re.search(filter_string, name)):
                            # A base node that fails the name filter.
                            nodes_to_remove.add(node)
                elif id_ in self.fam_state_summary:
                    # Remove family nodes if all members filtered out.
                    remove = True
                    for mem in self.descendants[name]:
                        mem_id = TaskID.get(mem, point_string)
                        if mem_id in self.updater.kept_task_ids:
                            remove = False
                            break
                    if remove:
                        nodes_to_remove.add(node)
                elif id_ in self.updater.full_fam_state_summary:
                    # An updater-filtered-out family.
                    nodes_to_remove.add(node)

        # Base node cropping.
        if self.crop:
            # Remove all base nodes.
            for node in (set(self.graphw.nodes()) - nodes_to_remove):
                if node.get_name() not in self.state_summary:
                    nodes_to_remove.add(node)
        else:
            # Remove cycle points containing only base nodes.
            non_base_point_strings = set()
            point_string_nodes = {}
            for node in set(self.graphw.nodes()) - nodes_to_remove:
                node_id = node.get_name()
                name, point_string = TaskID.split(node_id)
                point_string_nodes.setdefault(point_string, [])
                point_string_nodes[point_string].append(node)
                if (node_id in self.state_summary or
                        node_id in self.fam_state_summary):
                    non_base_point_strings.add(point_string)
            pure_base_point_strings = (
                set(point_string_nodes) - non_base_point_strings)
            for point_string in pure_base_point_strings:
                for node in point_string_nodes[point_string]:
                    nodes_to_remove.add(node)
        self.graphw.cylc_remove_nodes_from(list(nodes_to_remove))
        # TODO - remove base nodes only connected to other base nodes?
        # Should these even exist any more?

        # Make family nodes octagons.
        for node in self.graphw.nodes():
            node_id = node.get_name()
            try:
                name, point_string = TaskID.split(node_id)
            except ValueError:
                # Special node.
                continue
            if name in self.all_families:
                if name in self.triggering_families:
                    node.attr['shape'] = 'doubleoctagon'
                else:
                    node.attr['shape'] = 'tripleoctagon'

        if self.subgraphs_on:
            self.graphw.add_cycle_point_subgraphs(gr_edges)

    # Set base node style defaults
    for node in self.graphw.nodes():
        node.attr.setdefault('style', 'filled')
        node.attr['color'] = '#888888'
        node.attr['fillcolor'] = 'white'
        node.attr['fontcolor'] = '#888888'

    # Overlay live state styling on nodes present in the summaries.
    for id_ in self.state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            continue
        self.set_live_node_attr(node, id_)
    for id_ in self.fam_state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            # Node not in graph.
            continue
        self.set_live_node_attr(node, id_)

    self.graphw.graph_attr['rankdir'] = self.orientation
    self.action_required = False

    # Optionally dump each frame to a dot file for debugging/animation.
    if self.write_dot_frames:
        arg = os.path.join(
            self.suite_share_dir,
            'frame' + '-' + str(self.graph_frame_count) + '.dot')
        self.graphw.write(arg)
        self.graph_frame_count += 1

    self.prev_graph_id = current_id
    return not needs_redraw
def update(self, tasks, tasks_rh, min_point, max_point, max_point_rh,
           paused, will_pause_at, stopping, will_stop_at, ns_defn_order,
           reloading):
    """Rebuild the task/family/global state summaries.

    Args (all supplied by the scheduler):
        tasks: task proxies in the main pool.
        tasks_rh: task proxies in the runahead pool.
        min_point, max_point, max_point_rh: pool cycle point extremes.
        paused, will_pause_at, stopping, will_stop_at: suite run status.
        ns_defn_order: namespace definition order from the suite config.
        reloading: True while a suite reload is in progress.

    Builds fresh dicts and only assigns them to self at the end, so
    readers in other threads never see a half-built summary.
    """
    task_summary = {}
    global_summary = {}
    family_summary = {}
    task_states = {}

    # First pass over the main pool with fs=None (keep real states),
    # then over the runahead pool with fs='runahead' so those tasks
    # are displayed in the synthetic 'runahead' state.
    fs = None
    for tlist in [tasks, tasks_rh]:
        for task in tlist:
            ts = task.get_state_summary()
            if fs:
                ts['state'] = fs
            task_summary[task.identity] = ts
            name, point_string = TaskID.split(task.identity)
            point_string = str(point_string)
            task_states.setdefault(point_string, {})
            task_states[point_string][name] = (
                task_summary[task.identity]['state'])
        fs = 'runahead'

    fam_states = {}  # NOTE(review): never populated or read below.
    all_states = []
    for point_string, c_task_states in task_states.items():
        # For each cycle point, construct a family state tree
        # based on the first-parent single-inheritance tree
        c_fam_task_states = {}

        config = SuiteConfig.get_inst()
        for key, parent_list in (
                config.get_first_parent_ancestors().items()):
            state = c_task_states.get(key)
            if state is None:
                continue
            all_states.append(state)
            # Propagate this task's state up every first-parent ancestor.
            for parent in parent_list:
                if parent == key:
                    continue
                c_fam_task_states.setdefault(parent, [])
                c_fam_task_states[parent].append(state)

        # Derive one representative state per family from its members.
        for fam, child_states in c_fam_task_states.items():
            f_id = TaskID.get(fam, point_string)
            state = extract_group_state(child_states)
            if state is None:
                continue
            try:
                famcfg = config.cfg['runtime'][fam]
            except KeyError:
                # Family not in the runtime config (e.g. 'root' aliases).
                famcfg = {}
            description = famcfg.get('description')
            title = famcfg.get('title')
            family_summary[f_id] = {
                'name': fam,
                'description': description,
                'title': title,
                'label': point_string,
                'state': state
            }

    all_states.sort()

    # Compute state_counts (total, and per cycle).
    state_count_totals = {}
    state_count_cycles = {}
    for point_string, name_states in task_states.items():
        count = {}
        for name, state in name_states.items():
            try:
                count[state] += 1
            except KeyError:
                count[state] = 1
            try:
                state_count_totals[state] += 1
            except KeyError:
                state_count_totals[state] = 1
        state_count_cycles[point_string] = count

    global_summary['oldest cycle point string'] = (
        self.str_or_None(min_point))
    global_summary['newest cycle point string'] = (
        self.str_or_None(max_point))
    global_summary['newest runahead cycle point string'] = (
        self.str_or_None(max_point_rh))
    if cylc.flags.utc:
        global_summary['daemon time zone info'] = TIME_ZONE_UTC_INFO
    else:
        global_summary['daemon time zone info'] = TIME_ZONE_LOCAL_INFO
    global_summary['last_updated'] = time.time()
    global_summary['run_mode'] = self.run_mode
    global_summary['paused'] = paused
    global_summary['stopping'] = stopping
    global_summary['will_pause_at'] = self.str_or_None(will_pause_at)
    global_summary['will_stop_at'] = self.str_or_None(will_stop_at)
    global_summary['states'] = all_states
    global_summary['namespace definition order'] = ns_defn_order
    global_summary['reloading'] = reloading
    global_summary['state totals'] = state_count_totals

    self._summary_update_time = time.time()
    # Replace the originals (atomic update, for access from other threads).
    self.task_summary = task_summary
    self.global_summary = global_summary
    self.family_summary = family_summary
    task_states = {}  # drop the local reference (no other effect)
    self.first_update_completed = True
    self.state_count_totals = state_count_totals
    self.state_count_cycles = state_count_cycles
def test_get(self):
    """TaskID.get() joins a name and a cycle point into 'name.point'."""
    cases = [
        ("a.1", "a", 1),
        ("a._1", "a", "_1"),
        ("WTASK.20101010T101010", "WTASK", "20101010T101010"),
    ]
    for expected, name, point in cases:
        self.assertEqual(expected, TaskID.get(name, point))
def __init__(
        self, tdef, start_point, status=TASK_STATUS_WAITING,
        hold_swap=None, has_spawned=False, stop_point=None,
        is_startup=False, submit_num=0):
    """Initialise a task proxy for one cycle point instance of *tdef*.

    Args:
        tdef: the task definition this proxy instantiates.
        start_point: requested cycle point (adjusted up to the first
            on-sequence point if is_startup is True).
        status: initial task status.
        hold_swap: status to swap to on release from held.
        has_spawned: True if the next-cycle instance already exists.
        stop_point: optional final cycle point for this proxy.
        is_startup: True for suite start-up (not restart/insert).
        submit_num: current job submit number (None treated as 0).

    Raises:
        TaskProxySequenceBoundsError: if start_point is outside all of
            the task's cycling sequences at start-up.
    """
    self.tdef = tdef
    if submit_num is None:
        submit_num = 0
    self.submit_num = submit_num

    if is_startup:
        # adjust up to the first on-sequence cycle point
        adjusted = []
        for seq in self.tdef.sequences:
            adj = seq.get_first_point(start_point)
            if adj:
                # may be None if out of sequence bounds
                adjusted.append(adj)
        if not adjusted:
            # This task is out of sequence bounds
            raise TaskProxySequenceBoundsError(self.tdef.name)
        self.point = min(adjusted)
    else:
        self.point = start_point
    self.cleanup_cutoff = self.tdef.get_cleanup_cutoff_point(
        self.point, self.tdef.intercycle_offsets)
    self.identity = TaskID.get(self.tdef.name, self.point)

    self.has_spawned = has_spawned

    # Cached numeric form of self.point (computed lazily elsewhere).
    self.point_as_seconds = None

    # Manually inserted tasks may have a final cycle point set.
    self.stop_point = stop_point

    self.manual_trigger = False
    self.is_manual_submit = False

    # Summary dict exposed to clients/GUI; times filled in as the job
    # progresses.
    self.summary = {
        'latest_message': "",
        'submitted_time': None,
        'submitted_time_string': None,
        'submit_num': self.submit_num,
        'started_time': None,
        'started_time_string': None,
        'finished_time': None,
        'finished_time_string': None,
        'name': self.tdef.name,
        'description': self.tdef.rtconfig['meta']['description'],
        'title': self.tdef.rtconfig['meta']['title'],
        'label': str(self.point),
        'logfiles': [],
        'job_hosts': {},
        'execution_time_limit': None,
        'batch_sys_name': None,
        'submit_method_id': None
    }

    self.local_job_file_path = None

    self.task_host = 'localhost'
    self.task_owner = None

    self.job_vacated = False

    self.poll_timers = {}
    self.timeout_timers = {}
    self.try_timers = {}

    self.delayed_start = None
    self.expire_time = None

    self.state = TaskState(tdef, self.point, status, hold_swap)

    if tdef.sequential:
        # Adjust clean-up cutoff.
        # A sequential task cannot be cleaned up until its next-cycle
        # instance exists, so push the cutoff out to the next point.
        p_next = None
        adjusted = []
        for seq in tdef.sequences:
            nxt = seq.get_next_point(self.point)
            if nxt:
                # may be None if beyond the sequence bounds
                adjusted.append(nxt)
        if adjusted:
            p_next = min(adjusted)
            if (self.cleanup_cutoff is not None and
                    self.cleanup_cutoff < p_next):
                self.cleanup_cutoff = p_next
def update(self, tasks, tasks_rh, min_point, max_point, max_point_rh,
           paused, will_pause_at, stopping, will_stop_at, ns_defn_order,
           reloading):
    """Rebuild the task/family/global state summaries.

    tasks and tasks_rh are the main and runahead task pools; the
    remaining arguments describe pool extremes and suite run status.
    Fresh dicts are built locally and assigned to self only at the end,
    so readers in other threads never see a half-built summary.
    """
    task_summary = {}
    global_summary = {}
    family_summary = {}
    task_states = {}

    # fs is None for the main pool (keep real states), then 'runahead'
    # for the runahead pool so those tasks display as 'runahead'.
    fs = None
    for tlist in [tasks, tasks_rh]:
        for task in tlist:
            ts = task.get_state_summary()
            if fs:
                ts['state'] = fs
            task_summary[task.identity] = ts
            name, point_string = TaskID.split(task.identity)
            point_string = str(point_string)
            task_states.setdefault(point_string, {})
            task_states[point_string][name] = (
                task_summary[task.identity]['state'])
        fs = 'runahead'

    fam_states = {}  # NOTE(review): never populated or read below.
    all_states = []
    for point_string, c_task_states in task_states.items():
        # For each cycle point, construct a family state tree
        # based on the first-parent single-inheritance tree
        c_fam_task_states = {}

        config = SuiteConfig.get_inst()
        for key, parent_list in (
                config.get_first_parent_ancestors().items()):
            state = c_task_states.get(key)
            if state is None:
                continue
            all_states.append(state)
            # Propagate this task's state up every first-parent ancestor.
            for parent in parent_list:
                if parent == key:
                    continue
                c_fam_task_states.setdefault(parent, [])
                c_fam_task_states[parent].append(state)

        # Derive one representative state per family from its members.
        for fam, child_states in c_fam_task_states.items():
            f_id = TaskID.get(fam, point_string)
            state = extract_group_state(child_states)
            if state is None:
                continue
            try:
                famcfg = config.cfg['runtime'][fam]
            except KeyError:
                # Family not in the runtime config.
                famcfg = {}
            description = famcfg.get('description')
            title = famcfg.get('title')
            family_summary[f_id] = {'name': fam,
                                    'description': description,
                                    'title': title,
                                    'label': point_string,
                                    'state': state}

    all_states.sort()

    # Compute state_counts (total, and per cycle).
    state_count_totals = {}
    state_count_cycles = {}
    for point_string, name_states in task_states.items():
        count = {}
        for name, state in name_states.items():
            try:
                count[state] += 1
            except KeyError:
                count[state] = 1
            try:
                state_count_totals[state] += 1
            except KeyError:
                state_count_totals[state] = 1
        state_count_cycles[point_string] = count

    global_summary['oldest cycle point string'] = (
        self.str_or_None(min_point))
    global_summary['newest cycle point string'] = (
        self.str_or_None(max_point))
    global_summary['newest runahead cycle point string'] = (
        self.str_or_None(max_point_rh))
    if cylc.flags.utc:
        global_summary['daemon time zone info'] = TIME_ZONE_UTC_INFO
    else:
        global_summary['daemon time zone info'] = TIME_ZONE_LOCAL_INFO
    global_summary['last_updated'] = time.time()
    global_summary['run_mode'] = self.run_mode
    global_summary['paused'] = paused
    global_summary['stopping'] = stopping
    global_summary['will_pause_at'] = self.str_or_None(will_pause_at)
    global_summary['will_stop_at'] = self.str_or_None(will_stop_at)
    global_summary['states'] = all_states
    global_summary['namespace definition order'] = ns_defn_order
    global_summary['reloading'] = reloading
    global_summary['state totals'] = state_count_totals

    self._summary_update_time = time.time()
    # Replace the originals (atomic update, for access from other threads).
    self.task_summary = task_summary
    self.global_summary = global_summary
    self.family_summary = family_summary
    task_states = {}  # drop the local reference (no other effect)
    self.first_update_completed = True
    self.state_count_totals = state_count_totals
    self.state_count_cycles = state_count_cycles
def job_poll(self, st_file_path):
    """Poll status of the job specified in the "st_file_path".

    Args:
        st_file_path: path of the job status file, laid out as
            SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status.

    Return a status string that can be recognised by the suite.
    """
    # SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status
    st_file_path_strs = st_file_path.rsplit(os.sep, 6)
    # strs[4] is TASK, strs[3] is CYCLE -> "TASK.CYCLE" identity.
    task_id = TaskID.get(st_file_path_strs[4], st_file_path_strs[3])
    self.configure_suite_run_dir(st_file_path_strs[0])

    # Parse KEY=VALUE lines from the job status file.
    statuses = {}
    try:
        # Use a context manager so the file handle is always closed
        # (the original left it open - a descriptor leak on a daemon
        # that polls many jobs).
        with open(st_file_path) as handle:
            for line in handle:
                key, value = line.strip().split("=", 1)
                statuses[key] = value
    except IOError:
        # No readable status file: treat as submission failure.
        return "polled %s submission failed\n" % (task_id)

    if (statuses.get("CYLC_JOB_EXIT_TIME") and
            statuses.get("CYLC_JOB_EXIT") == "SUCCEEDED"):
        return "polled %s succeeded at %s\n" % (
            task_id, statuses["CYLC_JOB_EXIT_TIME"])

    if (statuses.get("CYLC_JOB_EXIT_TIME") and
            statuses.get("CYLC_JOB_EXIT")):
        return "polled %s failed at %s\n" % (
            task_id, statuses["CYLC_JOB_EXIT_TIME"])

    if (self.CYLC_BATCH_SYS_NAME not in statuses or
            self.CYLC_BATCH_SYS_JOB_ID not in statuses):
        # Never (fully) submitted.
        return "polled %s submission failed\n" % (task_id)

    # Ask batch system if job is still alive or not
    batch_sys = self.get_inst(statuses[self.CYLC_BATCH_SYS_NAME])
    job_id = statuses[self.CYLC_BATCH_SYS_JOB_ID]
    command = shlex.split(batch_sys.POLL_CMD_TMPL % {"job_id": job_id})
    try:
        proc = Popen(command, stdout=PIPE)
    except OSError as exc:
        # subprocess.Popen has a bad habit of not setting the filename of
        # the executable when it raises an OSError.
        if not exc.filename:
            exc.filename = command[0]
        raise
    is_in_batch_sys = (proc.wait() == 0)
    if is_in_batch_sys and hasattr(batch_sys, "filter_poll_output"):
        # Allow the batch system module to veto a zero exit status.
        is_in_batch_sys = batch_sys.filter_poll_output(
            proc.communicate()[0], job_id)

    if is_in_batch_sys and "CYLC_JOB_INIT_TIME" in statuses:
        return "polled %s started at %s\n" % (
            task_id, statuses["CYLC_JOB_INIT_TIME"])

    if is_in_batch_sys:
        return "polled %s submitted\n" % (task_id)

    if "CYLC_JOB_INIT_TIME" in statuses:
        # Started but gone from the batch system with no exit record.
        return "polled %s failed at unknown-time\n" % (task_id)

    # Submitted but disappeared
    return "polled %s submission failed\n" % (task_id)
def update(self, schd):
    """Update the stored summaries from the scheduler *schd*.

    Rebuilds task/family/global summaries and state counts, then
    assigns them to self in one go (atomic update for other threads).
    """
    self.update_time = time()
    global_summary = {}
    family_summary = {}

    task_summary, task_states = self._get_tasks_info(schd)

    all_states = []
    ancestors_dict = schd.config.get_first_parent_ancestors()

    # Compute state_counts (total, and per cycle).
    state_count_totals = {}
    state_count_cycles = {}

    for point_string, c_task_states in task_states.items():
        # For each cycle point, construct a family state tree
        # based on the first-parent single-inheritance tree
        c_fam_task_states = {}
        count = {}

        for key in c_task_states:
            state = c_task_states[key]
            if state is None:
                continue
            try:
                count[state] += 1
            except KeyError:
                count[state] = 1
            all_states.append(state)
            # Propagate this task's state up every first-parent ancestor.
            for parent in ancestors_dict.get(key, []):
                if parent == key:
                    continue
                c_fam_task_states.setdefault(parent, set([]))
                c_fam_task_states[parent].add(state)

        state_count_cycles[point_string] = count

        # Derive one representative state per family from its members.
        for fam, child_states in c_fam_task_states.items():
            f_id = TaskID.get(fam, point_string)
            state = extract_group_state(child_states)
            if state is None:
                continue
            try:
                famcfg = schd.config.cfg['runtime'][fam]['meta']
            except KeyError:
                # Family has no runtime/meta section.
                famcfg = {}
            description = famcfg.get('description')
            title = famcfg.get('title')
            family_summary[f_id] = {'name': fam,
                                    'description': description,
                                    'title': title,
                                    'label': point_string,
                                    'state': state}

    # Totals are derived from the per-cycle counts.
    state_count_totals = {}
    for point_string, count in list(state_count_cycles.items()):
        for state, state_count in count.items():
            state_count_totals.setdefault(state, 0)
            state_count_totals[state] += state_count

    all_states.sort()

    for key, value in (
            ('oldest cycle point string', schd.pool.get_min_point()),
            ('newest cycle point string', schd.pool.get_max_point()),
            ('newest runahead cycle point string',
             schd.pool.get_max_point_runahead())):
        if value:
            global_summary[key] = str(value)
        else:
            global_summary[key] = None
    if get_utc_mode():
        global_summary['time zone info'] = TIME_ZONE_UTC_INFO
    else:
        global_summary['time zone info'] = TIME_ZONE_LOCAL_INFO
    global_summary['last_updated'] = self.update_time
    global_summary['run_mode'] = schd.run_mode
    global_summary['states'] = all_states
    global_summary['namespace definition order'] = (
        schd.config.ns_defn_order)
    global_summary['reloading'] = schd.pool.do_reload
    global_summary['state totals'] = state_count_totals

    # Extract suite and task URLs from config.
    global_summary['suite_urls'] = dict(
        (i, j['meta']['URL'])
        for (i, j) in schd.config.cfg['runtime'].items())
    global_summary['suite_urls']['suite'] = schd.config.cfg['meta']['URL']

    # Construct a suite status string for use by monitoring clients.
    # Order matters: held/stopping take precedence over the
    # running-to-<point> variants.
    if schd.pool.is_held:
        global_summary['status_string'] = SUITE_STATUS_HELD
    elif schd.stop_mode is not None:
        global_summary['status_string'] = SUITE_STATUS_STOPPING
    elif schd.pool.hold_point:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_HOLD % schd.pool.hold_point)
    elif schd.stop_point:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_STOP % schd.stop_point)
    elif schd.stop_clock_time is not None:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_STOP % schd.stop_clock_time_string)
    elif schd.stop_task:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_STOP % schd.stop_task)
    elif schd.final_point:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_STOP % schd.final_point)
    else:
        global_summary['status_string'] = SUITE_STATUS_RUNNING

    # Replace the originals (atomic update, for access from other threads).
    self.task_summary = task_summary
    self.global_summary = global_summary
    self.family_summary = family_summary
    self.state_count_totals = state_count_totals
    self.state_count_cycles = state_count_cycles
def update(self, tasks, tasks_rh, min_point, max_point, max_point_rh,
           paused, will_pause_at, stopping, will_stop_at, ns_defn_order,
           reloading):
    """Rebuild the task/family/global state summaries.

    tasks and tasks_rh are the main and runahead task pools; the
    remaining arguments describe pool extremes and suite run status.
    Fresh dicts are assigned to self only at the end (atomic update,
    for access from other threads).
    """
    self.summary_update_time = time()
    global_summary = {}
    family_summary = {}

    task_summary, task_states = self._get_tasks_info(tasks, tasks_rh)

    all_states = []
    config = SuiteConfig.get_inst()
    ancestors_dict = config.get_first_parent_ancestors()

    # Compute state_counts (total, and per cycle).
    state_count_cycles = {}

    # BUG FIX: iterate .items() - iterating the dict directly yields
    # only the point-string keys, so the two-name unpacking here would
    # raise ValueError (cf. the sibling updater that uses .items()).
    for point_string, c_task_states in task_states.items():
        # For each cycle point, construct a family state tree
        # based on the first-parent single-inheritance tree
        c_fam_task_states = {}
        count = {}

        for key in c_task_states:
            state = c_task_states[key]
            if state is None:
                continue
            try:
                count[state] += 1
            except KeyError:
                count[state] = 1
            all_states.append(state)
            # Propagate this task's state up every first-parent ancestor.
            for parent in ancestors_dict.get(key, []):
                if parent == key:
                    continue
                c_fam_task_states.setdefault(parent, set([]))
                c_fam_task_states[parent].add(state)

        state_count_cycles[point_string] = count

        # Derive one representative state per family from its members.
        for fam, child_states in c_fam_task_states.items():
            f_id = TaskID.get(fam, point_string)
            state = extract_group_state(child_states)
            if state is None:
                continue
            try:
                famcfg = config.cfg['runtime'][fam]
            except KeyError:
                # Family not in the runtime config.
                famcfg = {}
            description = famcfg.get('description')
            title = famcfg.get('title')
            family_summary[f_id] = {'name': fam,
                                    'description': description,
                                    'title': title,
                                    'label': point_string,
                                    'state': state}

    # Totals are derived from the per-cycle counts.
    state_count_totals = {}
    for point_string, count in state_count_cycles.items():
        for state, state_count in count.items():
            state_count_totals.setdefault(state, 0)
            state_count_totals[state] += state_count

    all_states.sort()

    global_summary['oldest cycle point string'] = (
        self.str_or_None(min_point))
    global_summary['newest cycle point string'] = (
        self.str_or_None(max_point))
    global_summary['newest runahead cycle point string'] = (
        self.str_or_None(max_point_rh))
    if cylc.flags.utc:
        global_summary['daemon time zone info'] = TIME_ZONE_UTC_INFO
    else:
        global_summary['daemon time zone info'] = TIME_ZONE_LOCAL_INFO
    global_summary['last_updated'] = self.summary_update_time
    global_summary['run_mode'] = self.run_mode
    global_summary['states'] = all_states
    global_summary['namespace definition order'] = ns_defn_order
    global_summary['reloading'] = reloading
    global_summary['state totals'] = state_count_totals

    # Construct a suite status string for use by monitoring clients.
    if paused:
        global_summary['status_string'] = SUITE_STATUS_HELD
    elif stopping:
        global_summary['status_string'] = SUITE_STATUS_STOPPING
    elif will_pause_at:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_HOLD % will_pause_at)
    elif will_stop_at:
        global_summary['status_string'] = (
            SUITE_STATUS_RUNNING_TO_STOP % will_stop_at)
    else:
        global_summary['status_string'] = SUITE_STATUS_RUNNING

    # Replace the originals (atomic update, for access from other threads).
    self.task_summary = task_summary
    self.global_summary = global_summary
    self.family_summary = family_summary
    self.state_count_totals = state_count_totals
    self.state_count_cycles = state_count_cycles
def __init__(self, tdef, start_point, status=TASK_STATUS_WAITING,
             hold_swap=None, has_spawned=False, stop_point=None,
             is_startup=False, submit_num=0, is_late=False):
    """Initialise a task proxy for one cycle point instance of *tdef*.

    Args:
        tdef: the task definition this proxy instantiates.
        start_point: requested cycle point (adjusted up to the first
            on-sequence point if is_startup is True).
        status: initial task status.
        hold_swap: status to swap to on release from held.
        has_spawned: True if the next-cycle instance already exists.
        stop_point: optional final cycle point for this proxy.
        is_startup: True for suite start-up (not restart/insert).
        submit_num: current job submit number (None treated as 0).
        is_late: True if the task is already past its expected time.

    Raises:
        TaskProxySequenceBoundsError: if start_point is outside all of
            the task's cycling sequences at start-up.
    """
    self.tdef = tdef
    if submit_num is None:
        submit_num = 0
    self.submit_num = submit_num

    if is_startup:
        # adjust up to the first on-sequence cycle point
        adjusted = []
        for seq in self.tdef.sequences:
            adj = seq.get_first_point(start_point)
            if adj:
                # may be None if out of sequence bounds
                adjusted.append(adj)
        if not adjusted:
            # This task is out of sequence bounds
            raise TaskProxySequenceBoundsError(self.tdef.name)
        self.point = min(adjusted)
        self.late_time = None
    else:
        self.point = start_point
    self.cleanup_cutoff = self.tdef.get_cleanup_cutoff_point(self.point)
    self.identity = TaskID.get(self.tdef.name, self.point)

    self.has_spawned = has_spawned

    # Cached numeric form of self.point (computed lazily elsewhere).
    self.point_as_seconds = None

    # Manually inserted tasks may have a final cycle point set.
    self.stop_point = stop_point

    self.manual_trigger = False
    self.is_manual_submit = False

    # Summary dict exposed to clients/GUI; times filled in as the job
    # progresses.
    self.summary = {
        'latest_message': '',
        'submitted_time': None,
        'submitted_time_string': None,
        'started_time': None,
        'started_time_string': None,
        'finished_time': None,
        'finished_time_string': None,
        'logfiles': [],
        'job_hosts': {},
        'execution_time_limit': None,
        'batch_sys_name': None,
        'submit_method_id': None
    }

    self.local_job_file_path = None

    self.task_host = 'localhost'
    self.task_owner = None

    self.job_vacated = False
    self.poll_timer = None
    self.timeout = None
    self.try_timers = {}
    # Use dict here for Python 2.6 compat.
    # Should use collections.Counter in Python 2.7+
    self.non_unique_events = {}

    self.clock_trigger_time = None
    self.expire_time = None
    # NOTE(review): late_time is also set above in the is_startup
    # branch; this unconditional reset makes that earlier assignment
    # redundant - confirm intended.
    self.late_time = None
    self.is_late = is_late

    self.state = TaskState(tdef, self.point, status, hold_swap)

    if tdef.sequential:
        # Adjust clean-up cutoff.
        # A sequential task cannot be cleaned up until its next-cycle
        # instance exists, so push the cutoff out to the next point.
        p_next = None
        adjusted = []
        for seq in tdef.sequences:
            nxt = seq.get_next_point(self.point)
            if nxt:
                # may be None if beyond the sequence bounds
                adjusted.append(nxt)
        if adjusted:
            p_next = min(adjusted)
            if (self.cleanup_cutoff is not None and
                    self.cleanup_cutoff < p_next):
                self.cleanup_cutoff = p_next
def update_gui(self):
    """Refresh the graph view from the latest suite state.

    Fetches the raw graph from the suite daemon, rebuilds the graphviz
    graph only when its identity changed, applies filtering/cropping,
    then restyles nodes from the live state summaries.
    """
    # TODO - check edges against resolved ones
    # (adding new ones, and nodes, if necessary)
    self.action_required = False
    if not self.global_summary:
        # No state received yet; nothing to draw.
        return
    self.oldest_point_string = (
        self.global_summary['oldest cycle point string'])
    self.newest_point_string = (
        self.global_summary['newest cycle point string'])
    if TASK_STATUS_RUNAHEAD not in self.updater.filter_states_excl:
        # Get a graph out to the max runahead point.
        self.newest_point_string = (
            self.global_summary['newest runahead cycle point string'])

    # A focus range, if set, overrides the full suite range.
    if self.focus_start_point_string:
        oldest = self.focus_start_point_string
        newest = self.focus_stop_point_string
    else:
        oldest = self.oldest_point_string
        newest = self.newest_point_string

    # Empty group lists are sent as None to the server.
    group_for_server = self.group
    if self.group == []:
        group_for_server = None
    ungroup_for_server = self.ungroup
    if self.ungroup == []:
        ungroup_for_server = None

    try:
        res = self.updater.client.get_info(
            'get_graph_raw', start_point_string=oldest,
            stop_point_string=newest,
            group_nodes=group_for_server,
            ungroup_nodes=ungroup_for_server,
            ungroup_recursive=self.ungroup_recursive,
            group_all=self.group_all,
            ungroup_all=self.ungroup_all)
    except ClientError:
        # Daemon not contactable; leave the view unchanged.
        if cylc.flags.debug:
            try:
                traceback.print_exc()
            except IOError:
                pass  # Cannot print to terminal (session may be closed).
        return False

    self.have_leaves_and_feet = True
    gr_edges, suite_polling_tasks, self.leaves, self.feet = res
    gr_edges = [tuple(edge) for edge in gr_edges]
    fgcolor = gtk_rgb_to_hex(
        getattr(self.xdot.widget.style, 'fg', None)[gtk.STATE_NORMAL])

    # Only rebuild the graph structure if the edge set changed.
    current_id = self.get_graph_id(gr_edges)
    if current_id != self.prev_graph_id:
        self.graphw = CGraphPlain(self.cfg.suite, suite_polling_tasks)
        self.graphw.add_edges(
            gr_edges, ignore_suicide=self.ignore_suicide)

        nodes_to_remove = set()

        # Remove nodes representing filtered-out tasks.
        if (self.updater.filter_name_string or
                self.updater.filter_states_excl):
            for node in self.graphw.nodes():
                id_ = node.get_name()
                # Don't need to guard against special nodes here (yet).
                name, point_string = TaskID.split(id_)
                if name not in self.all_families:
                    # This node is a task, not a family.
                    if id_ in self.updater.filt_task_ids:
                        nodes_to_remove.add(node)
                    elif id_ not in self.updater.kept_task_ids:
                        # A base node - these only appear in the graph.
                        filter_string = self.updater.filter_name_string
                        if (filter_string and
                                filter_string not in name and
                                not re.search(filter_string, name)):
                            # A base node that fails the name filter.
                            nodes_to_remove.add(node)
                elif id_ in self.fam_state_summary:
                    # Remove family nodes if all members filtered out.
                    remove = True
                    for mem in self.descendants[name]:
                        mem_id = TaskID.get(mem, point_string)
                        if mem_id in self.updater.kept_task_ids:
                            remove = False
                            break
                    if remove:
                        nodes_to_remove.add(node)
                elif id_ in self.updater.full_fam_state_summary:
                    # An updater-filtered-out family.
                    nodes_to_remove.add(node)

        # Base node cropping.
        if self.crop:
            # Remove all base nodes.
            for node in (set(self.graphw.nodes()) - nodes_to_remove):
                if node.get_name() not in self.state_summary:
                    nodes_to_remove.add(node)
        else:
            # Remove cycle points containing only base nodes.
            non_base_point_strings = set()
            point_string_nodes = {}
            for node in set(self.graphw.nodes()) - nodes_to_remove:
                node_id = node.get_name()
                name, point_string = TaskID.split(node_id)
                point_string_nodes.setdefault(point_string, [])
                point_string_nodes[point_string].append(node)
                if (node_id in self.state_summary or
                        node_id in self.fam_state_summary):
                    non_base_point_strings.add(point_string)
            pure_base_point_strings = (
                set(point_string_nodes) - non_base_point_strings)
            for point_string in pure_base_point_strings:
                for node in point_string_nodes[point_string]:
                    nodes_to_remove.add(node)
        self.graphw.cylc_remove_nodes_from(list(nodes_to_remove))
        # TODO - remove base nodes only connected to other base nodes?
        # Should these even exist any more?

        # Make family nodes octagons.
        for node in self.graphw.nodes():
            node_id = node.get_name()
            try:
                name, point_string = TaskID.split(node_id)
            except ValueError:
                # Special node.
                continue
            if name in self.all_families:
                node.attr['shape'] = 'doubleoctagon'
            elif name.startswith('@'):
                # Xtrigger node: no shape outline.
                node.attr['shape'] = 'none'

        if self.subgraphs_on:
            self.graphw.add_cycle_point_subgraphs(gr_edges, fgcolor)

    # Set base node style defaults
    # (live nodes are recoloured below from the state summaries).
    fg_ghost = "%s%s" % (fgcolor, GHOST_TRANSP_HEX)
    for node in self.graphw.nodes():
        node.attr['style'] = 'dotted'
        node.attr['color'] = fg_ghost
        node.attr['fontcolor'] = fg_ghost
        if not node.attr['URL'].startswith(self.PREFIX_BASE):
            node.attr['URL'] = self.PREFIX_BASE + node.attr['URL']

    for id_ in self.state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            # Task not present in the drawn graph.
            continue
        self.set_live_node_attr(node, id_)

    for id_ in self.fam_state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            # Node not in graph.
            continue
        self.set_live_node_attr(node, id_)

    self.graphw.graph_attr['rankdir'] = self.orientation

    if self.write_dot_frames:
        # Dump each frame to a numbered .dot file for debugging/movies.
        arg = os.path.join(
            self.suite_share_dir, 'frame' + '-' +
            str(self.graph_frame_count) + '.dot')
        self.graphw.write(arg)
        self.graph_frame_count += 1

    # Keep zoom/pan if only node styling (not structure) changed.
    self.update_xdot(no_zoom=(current_id == self.prev_graph_id))

    self.prev_graph_id = current_id