def get_prereq(self, point):
    """Return a prerequisite string for this trigger at the given point.

    For message triggers the result is "TASK.POINT MESSAGE" with any
    "[...]" offset in the message replaced by the message point; for
    built-in triggers it is "TASK.POINT STATUS".
    """
    if self.message:
        # Message trigger
        preq = self.message
        msg_point = point
        if self.cycle_point:
            # An explicit cycle point overrides both points.
            point = self.cycle_point
            msg_point = self.cycle_point
        else:
            if self.message_offset:
                msg_point = point + self.message_offset
            if self.graph_offset_string:
                msg_point = get_point_relative(
                    self.graph_offset_string, msg_point)
                point = get_point_relative(self.graph_offset_string, point)
        # Raw string: non-raw '\[' is an invalid escape sequence
        # (DeprecationWarning in Python 3.6+, error in later versions).
        preq = "%s %s" % (
            TaskID.get(self.task_name, point),
            re.sub(r'\[.*\]', str(msg_point), preq))
    else:
        # Built-in trigger
        if self.cycle_point:
            point = self.cycle_point
        elif self.graph_offset_string:
            point = get_point_relative(self.graph_offset_string, point)
        preq = TaskID.get(self.task_name, point) + ' ' + self.builtin
    return preq
def get_prereq(self, point):
    """Return a prerequisite string for this trigger at the given point.

    For message triggers the result is "TASK.POINT MESSAGE" with any
    "[...]" offset in the message replaced by the message point; for
    built-in triggers it is "TASK.POINT STATUS".
    """
    if self.message:
        # Message trigger
        preq = self.message
        msg_point = point
        if self.cycle_point:
            # An explicit cycle point overrides both points.
            point = self.cycle_point
            msg_point = self.cycle_point
        else:
            if self.message_offset:
                msg_point = point + self.message_offset
            if self.graph_offset_string:
                msg_point = get_point_relative(
                    self.graph_offset_string, msg_point)
                point = get_point_relative(self.graph_offset_string, point)
        # Raw string: non-raw '\[' is an invalid escape sequence
        # (DeprecationWarning in Python 3.6+, error in later versions).
        preq = "%s %s" % (
            TaskID.get(self.task_name, point),
            re.sub(r'\[.*\]', str(msg_point), preq))
    else:
        # Built-in trigger
        if self.cycle_point:
            point = self.cycle_point
        elif self.graph_offset_string:
            point = get_point_relative(
                self.graph_offset_string, point)
        preq = TaskID.get(self.task_name, point) + ' ' + self.builtin
    return preq
def _add_prerequisites(self, point, tdef): """Add task prerequisites.""" # Triggers for sequence_i only used if my cycle point is a # valid member of sequence_i's sequence of cycle points. self._is_satisfied = None self._suicide_is_satisfied = None identity = TaskID.get(tdef.name, str(point)) for sequence, dependencies in tdef.dependencies.items(): if not sequence.is_valid(point): continue for dependency in dependencies: cpre = dependency.get_prerequisite(point, tdef) if dependency.suicide: self.suicide_prerequisites.append(cpre) else: self.prerequisites.append(cpre) if tdef.sequential: # Add a previous-instance succeeded prerequisite. p_prev = None adjusted = [] for seq in tdef.sequences: prv = seq.get_nearest_prev_point(point) if prv: # None if out of sequence bounds. adjusted.append(prv) if adjusted: p_prev = max(adjusted) cpre = Prerequisite(point, tdef.start_point) prereq = "%s %s" % (TaskID.get(tdef.name, p_prev), TASK_STATUS_SUCCEEDED) cpre.add(prereq, p_prev < tdef.start_point) cpre.set_condition(tdef.name) self.prerequisites.append(cpre)
def test_is_valid_id(self):
    """TaskID.is_valid_id accepts NAME.POINT forms, rejects bare names."""
    accepted = ("a.1", "_.098098439535$#%#@!#~")
    rejected = (
        "abc", "123", "____", "_", "a_b", "a_1", "1_b", "ABC", "a.A A")
    for candidate in accepted:
        self.assertTrue(TaskID.is_valid_id(candidate))
    for candidate in rejected:
        self.assertFalse(TaskID.is_valid_id(candidate))
def test_is_valid_name(self):
    """TaskID.is_valid_name accepts word-character names only."""
    accepted = ("abc", "123", "____", "_", "a_b", "a_1", "1_b", "ABC")
    rejected = ("a.1", None, "%abc", "", " ")
    for candidate in accepted:
        self.assertTrue(TaskID.is_valid_name(candidate))
    for candidate in rejected:
        self.assertFalse(TaskID.is_valid_name(candidate))
def test_is_valid_id(self):
    """TaskID.is_valid_id accepts NAME.POINT forms, rejects bare names."""
    good = ["a.1", "_.098098439535$#%#@!#~"]
    bad = ["abc", "123", "____", "_", "a_b", "a_1", "1_b", "ABC", "a.A A"]
    for task_id in good:
        self.assertTrue(TaskID.is_valid_id(task_id))
    for task_id in bad:
        self.assertFalse(TaskID.is_valid_id(task_id))
def update(self):
    """Refresh local state copies from the updater; return True if changed.

    Returns False (no redraw needed) when disconnected or when nothing
    has changed since the last update.
    """
    if not self.updater.connected:
        if not self.cleared:
            # Clear the view once on disconnect, on the GTK main loop.
            gobject.idle_add(self.clear_list)
            self.cleared = True
        return False
    self.cleared = False
    if not self.action_required and (
            self.last_update_time is not None and
            self.last_update_time >= self.updater.last_update_time):
        # Nothing new since our last refresh.
        return False
    self.last_update_time = self.updater.last_update_time
    # Suspend updater writes while we deep-copy its summaries.
    self.updater.set_update(False)
    self.state_summary = deepcopy(self.updater.state_summary)
    self.fam_state_summary = deepcopy(self.updater.fam_state_summary)
    self.ancestors_pruned = deepcopy(self.updater.ancestors_pruned)
    self.descendants = deepcopy(self.updater.descendants)
    self.updater.set_update(True)
    # Collect the distinct cycle point strings present in the summary.
    self.point_strings = []
    for id_ in self.state_summary:
        name, point_string = TaskID.split(id_)
        if point_string not in self.point_strings:
            self.point_strings.append(point_string)
    try:
        # Integer cycle points sort numerically.
        self.point_strings.sort(key=int)
    except (TypeError, ValueError):
        # iso cycle points
        self.point_strings.sort()
    if not self.should_group_families:
        # Display the full task list.
        self.task_list = deepcopy(self.updater.task_list)
    else:
        # Replace tasks with their top level family name.
        self.task_list = []
        for task_id in self.state_summary:
            name, point_string = TaskID.split(task_id)
            # Family name below root, or task name.
            item = self.ancestors_pruned[name][-2]
            if item not in self.task_list:
                self.task_list.append(item)
    if (self.cfg.use_defn_order and self.updater.ns_defn_order and
            self.defn_order_on):
        # Order tasks as defined in the suite definition.
        self.task_list = [
            i for i in self.updater.ns_defn_order if i in self.task_list]
    else:
        self.task_list.sort()
    return True
def test_is_valid_id_2(self):
    """TaskID.is_valid_id_2 accepts NAME.POINT and NAME/POINT forms."""
    # TBD: a.A A is invalid for valid_id, but valid for valid_id_2?
    # TBD: a/a.a is OK?
    accepted = [
        "a.1", "_.098098439535$#%#@!#~", "a/1", "_/098098439535$#%#@!#~",
        "a.A A", "a/a.a"]
    rejected = ["abc", "123", "____", "_", "a_b", "a_1", "1_b", "ABC"]
    for candidate in accepted:
        self.assertTrue(TaskID.is_valid_id_2(candidate))
    for candidate in rejected:
        self.assertFalse(TaskID.is_valid_id_2(candidate))
def update(self):
    """Refresh local state copies from the updater; return True if changed.

    Returns False (no redraw needed) when disconnected or when nothing
    has changed since the last update.
    """
    if not self.updater.connected:
        if not self.cleared:
            # Clear the view once on disconnect, on the GTK main loop.
            gobject.idle_add(self.clear_list)
            self.cleared = True
        return False
    self.cleared = False
    if not self.action_required and (
            self.last_update_time is not None and
            self.last_update_time >= self.updater.last_update_time):
        # Nothing new since our last refresh.
        return False
    self.last_update_time = self.updater.last_update_time
    # Suspend updater writes while we deep-copy its summaries.
    self.updater.set_update(False)
    self.state_summary = deepcopy(self.updater.state_summary)
    self.fam_state_summary = deepcopy(self.updater.fam_state_summary)
    self.ancestors_pruned = deepcopy(self.updater.ancestors_pruned)
    self.descendants = deepcopy(self.updater.descendants)
    self.updater.set_update(True)
    # Collect the distinct cycle point strings present in the summary.
    self.point_strings = []
    for id_ in self.state_summary:
        name, point_string = TaskID.split(id_)
        if point_string not in self.point_strings:
            self.point_strings.append(point_string)
    try:
        # Integer cycle points sort numerically.
        self.point_strings.sort(key=int)
    except (TypeError, ValueError):
        # iso cycle points
        self.point_strings.sort()
    if not self.should_group_families:
        # Display the full task list.
        self.task_list = deepcopy(self.updater.task_list)
    else:
        # Replace tasks with their top level family name.
        self.task_list = []
        for task_id in self.state_summary:
            name, point_string = TaskID.split(task_id)
            # Family name below root, or task name.
            item = self.ancestors_pruned[name][-2]
            if item not in self.task_list:
                self.task_list.append(item)
    if (self.cfg.use_defn_order and self.updater.ns_defn_order and
            self.defn_order_on):
        # Order tasks as defined in the suite definition.
        self.task_list = [
            i for i in self.updater.ns_defn_order if i in self.task_list]
    else:
        self.task_list.sort()
    return True
def filter_families(self, families):
    """Remove family summaries if no members are present."""
    kept = {}
    for fam_id, summary in families.items():
        name, point_string = TaskID.split(fam_id)
        # Keep the family only if at least one member task is present.
        member_ids = (
            TaskID.get(member, point_string)
            for member in self.descendants[name])
        if any(mem_id in self.state_summary for mem_id in member_ids):
            kept[fam_id] = summary
    return kept
def test_is_valid_id_2(self):
    """TaskID.is_valid_id_2 accepts NAME.POINT and NAME/POINT forms."""
    # TBD: a.A A is invalid for valid_id, but valid for valid_id_2?
    # TBD: a/a.a is OK?
    good = [
        "a.1", "_.098098439535$#%#@!#~", "a/1", "_/098098439535$#%#@!#~",
        "a.A A", "a/a.a"]
    bad = ["abc", "123", "____", "_", "a_b", "a_1", "1_b", "ABC"]
    for task_id in good:
        self.assertTrue(TaskID.is_valid_id_2(task_id))
    for task_id in bad:
        self.assertFalse(TaskID.is_valid_id_2(task_id))
def get_id_summary(id_, task_state_summary, fam_state_summary, id_family_map):
    """Return some state information about a task or family id.

    Builds a tooltip-style multi-line string: the id itself, any
    title/description metadata, then a breadth-limited tree of member
    states (condensed to per-state counts when longer than 10 lines).
    """
    prefix_text = ""
    meta_text = ""
    sub_text = ""
    sub_states = {}
    # Stack of (id, depth) pairs for an iterative walk down the family tree.
    stack = [(id_, 0)]
    done_ids = []
    for summary in [task_state_summary, fam_state_summary]:
        if id_ in summary:
            title = summary[id_].get('title')
            if title:
                meta_text += "\n" + title.strip()
            description = summary[id_].get('description')
            if description:
                meta_text += "\n" + description.strip()
    while stack:
        this_id, depth = stack.pop(0)
        if this_id in done_ids:
            # family dive down will give duplicates
            continue
        done_ids.append(this_id)
        prefix = "\n" + " " * 4 * depth + this_id
        if this_id in task_state_summary:
            submit_num = task_state_summary[this_id].get('submit_num')
            if submit_num:
                prefix += "(%02d)" % submit_num
            state = task_state_summary[this_id]['state']
            sub_text += prefix + " " + state
            # Tally per-state counts for the condensed form below.
            sub_states.setdefault(state, 0)
            sub_states[state] += 1
        elif this_id in fam_state_summary:
            name, point_string = TaskID.split(this_id)
            sub_text += prefix + " " + fam_state_summary[this_id]['state']
            # Push children (depth + 1) so families expand in place.
            for child in reversed(sorted(id_family_map[name])):
                child_id = TaskID.get(child, point_string)
                stack.insert(0, (child_id, depth + 1))
        if not prefix_text:
            # First line processed becomes the heading.
            prefix_text = sub_text.strip()
            sub_text = ""
    if len(sub_text.splitlines()) > 10:
        # Too many members to list: show per-state counts instead.
        # NOTE: Python 2 only - dict.items() returns a list and
        # list.sort accepts a cmp function.
        state_items = sub_states.items()
        state_items.sort()
        state_items.sort(lambda x, y: cmp(y[1], x[1]))
        sub_text = ""
        for state, number in state_items:
            sub_text += "\n    {0} tasks {1}".format(number, state)
    if sub_text and meta_text:
        sub_text = "\n" + sub_text
    text = prefix_text + meta_text + sub_text
    if not text:
        return id_
    return text
def filter_families(self, families):
    """Remove family summaries if no members are present."""
    # TODO - IS THERE ANY NEED TO DO THIS?
    kept = {}
    for fam_id, summary in families.items():
        name, point_string = TaskID.split(fam_id)
        # Keep the family only if at least one member task is present.
        if any(TaskID.get(member, point_string) in self.state_summary
               for member in self.descendants[name]):
            kept[fam_id] = summary
    return kept
def on_query_tooltip(self, widget, x, y, kbd_ctx, tooltip):
    """Handle a tooltip creation request.

    Returns True to show the tooltip, False to suppress it. The first
    hover over a new cell only records the id (GTK re-queries).
    """
    tip_context = self.led_treeview.get_tooltip_context(x, y, kbd_ctx)
    if tip_context is None:
        self._prev_tooltip_task_id = None
        return False
    x, y = self.led_treeview.convert_widget_to_bin_window_coords(x, y)
    path, column = self.led_treeview.get_path_at_pos(x, y)[0:2]
    col_index = self.led_treeview.get_columns().index(column)
    if not self.is_transposed:
        # Rows are task names; columns are cycle points.
        iter_ = self.led_treeview.get_model().get_iter(path)
        name = self.led_treeview.get_model().get_value(iter_, 0)
        try:
            point_string = self.led_headings[col_index]
        except IndexError:
            # This can occur for a tooltip while switching from transposed.
            return False
        if col_index == 0:
            # Name column: id is just the task/family name.
            task_id = name
        else:
            task_id = TaskID.get(name, point_string)
    else:
        # Transposed: rows are cycle points; columns are task names.
        try:
            point_string = self.point_strings[path[0]]
        except IndexError:
            return False
        if col_index == 0:
            # Point column: id is just the cycle point.
            task_id = point_string
        else:
            try:
                name = self.led_headings[col_index]
            except IndexError:
                return False
            task_id = TaskID.get(name, point_string)
    if task_id != self._prev_tooltip_task_id:
        # Crossed a cell boundary: reset and wait for the next query.
        self._prev_tooltip_task_id = task_id
        tooltip.set_text(None)
        return False
    if col_index == 0:
        tooltip.set_text(task_id)
        return True
    text = get_id_summary(
        task_id, self.state_summary, self.fam_state_summary,
        self.descendants)
    if text == task_id:
        # No extra information beyond the id itself.
        return False
    tooltip.set_text(text)
    return True
def __init__(self, tdef, point, status, hold_swap):
    """Initialise task state from the task definition at a cycle point."""
    self.identity = TaskID.get(tdef.name, str(point))
    self.status = status
    self.hold_swap = hold_swap
    self.time_updated = None
    # Cached prerequisite-satisfaction results (None = not yet computed).
    self._is_satisfied = None
    self._suicide_is_satisfied = None
    # Prerequisites.
    self.prerequisites = []
    self.suicide_prerequisites = []
    self._add_prerequisites(point, tdef)
    # External Triggers.
    self.external_triggers = {}
    for ext in tdef.external_triggers:
        # Allow cycle-point-specific external triggers - GitHub #1893.
        if '$CYLC_TASK_CYCLE_POINT' in ext:
            ext = ext.replace('$CYLC_TASK_CYCLE_POINT', str(point))
        # set unsatisfied
        self.external_triggers[ext] = False
    # xtriggers (represented by labels) satisfied or not
    self.xtriggers = {}
    for label in tdef.xtrig_labels:
        self.xtriggers[label] = False
    # Single optional clock xtrigger: (label, satisfied-flag) or None.
    if tdef.xclock_label:
        self.xclock = (tdef.xclock_label, False)
    else:
        self.xclock = None
    # Message outputs.
    self.outputs = TaskOutputs(tdef)
    self.kill_failed = False
def _write_epilogue(cls, handle, job_conf):
    """Write epilogue.

    Appends the job script's final shell section to ``handle``, then the
    end-of-file marker line.
    """
    if job_conf["use manual completion"]:
        # Detaching tasks report their own completion.
        handle.write(
            r"""

# (detaching task: cannot safely remove the WORK DIRECTORY here)
echo 'JOB SCRIPT EXITING: THIS TASK HANDLES ITS OWN COMPLETION MESSAGING'
trap '' EXIT
"""
        )
    else:
        handle.write(
            r"""

# EMPTY WORK DIRECTORY REMOVE:
cd
rmdir $CYLC_TASK_WORK_DIR 2>/dev/null || true

# SEND TASK SUCCEEDED MESSAGE:
wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
cylc task message '%(message)s'

echo 'JOB SCRIPT EXITING (TASK SUCCEEDED)'
trap '' EXIT
""" % {"message": TaskMessage.SUCCEEDED}
        )
    task_name, point_string = TaskID.split(job_conf["task id"])
    # NOTE(review): bare expression - result discarded; presumably only
    # forces a KeyError if the key is missing - confirm intent.
    job_conf["absolute submit number"]
    handle.write("%s%s" % (BATCH_SYS_MANAGER.LINE_PREFIX_EOF,
                           os.path.dirname(job_conf["common job log path"])))
def match_ext_trigger(self, itask):
    """Match external triggers for a waiting task proxy.

    Returns True if any trigger became satisfied, False if none did,
    None (early return) if there is nothing to match.
    """
    if not self.ext_triggers or not itask.state.external_triggers:
        return
    has_changed = False
    for trig, satisfied in itask.state.external_triggers.items():
        if satisfied:
            continue
        # Iterate over a copy: entries may be deleted during the loop.
        for qmsg, qid in self.ext_triggers.copy():
            if trig == qmsg:
                # Matched.
                point_string = TaskID.split(itask.identity)[1]
                # Set trigger satisfied.
                itask.state.external_triggers[trig] = True
                # Broadcast the event ID to the cycle point.
                if qid is not None:
                    self.put_broadcast(
                        [point_string], ['root'],
                        [{'environment': {'CYLC_EXT_TRIGGER_ID': qid}}],
                    )
                # Decrement the trigger's use count; drop when exhausted.
                self.ext_triggers[(qmsg, qid)] -= 1
                if not self.ext_triggers[(qmsg, qid)]:
                    del self.ext_triggers[(qmsg, qid)]
                has_changed = True
                break
    return has_changed
def __init__(self, tdef, point, status, hold_swap):
    """Initialise task state from the task definition at a cycle point."""
    self.identity = TaskID.get(tdef.name, str(point))
    self.status = status
    self.hold_swap = hold_swap
    self.time_updated = None
    # Cached prerequisite-satisfaction results (None = not yet computed).
    self._is_satisfied = None
    self._suicide_is_satisfied = None
    # Prerequisites.
    self.prerequisites = []
    self.suicide_prerequisites = []
    self._add_prerequisites(point, tdef)
    # External Triggers.
    self.external_triggers = {}
    for ext in tdef.external_triggers:
        # Allow cycle-point-specific external triggers - GitHub #1893.
        if '$CYLC_TASK_CYCLE_POINT' in ext:
            ext = ext.replace('$CYLC_TASK_CYCLE_POINT', str(point))
        # set unsatisfied
        self.external_triggers[ext] = False
    # Message outputs.
    self.outputs = TaskOutputs(tdef, point)
    # Standard outputs.
    self.outputs.add(TASK_OUTPUT_SUBMITTED)
    self.outputs.add(TASK_OUTPUT_STARTED)
    self.outputs.add(TASK_OUTPUT_SUCCEEDED)
    self.kill_failed = False
def get_stop_state_summary(suite, owner=None, hostname=None, lines=None):
    """Load the contents of the last state file into summary maps.

    Returns (global_summary, task_summary, family_summary) dicts, or
    None if the state dump is too short to parse. Handles several
    historical state-dump formats (pre-5.4.11, pre-cylc-6, pre-#787).
    """
    global_summary = {}
    task_summary = {}
    family_summary = {}
    if not lines:
        state_file_text = get_stop_state(suite, owner, hostname)
        if state_file_text is None:
            return global_summary, task_summary, family_summary
        lines = state_file_text.splitlines()
    if len(lines) == 0 or len(lines) < 3:
        return None
    # Iterate over a copy so removal during iteration is safe.
    for line in list(lines):
        if line.startswith('Remote command'):
            lines.remove(line)
    line0 = lines.pop(0)
    if line0.startswith('suite time') or \
            line0.startswith('simulation time'):
        # backward compatibility with pre-5.4.11 state dumps
        global_summary["last_updated"] = time.time()
    else:
        # (line0 is run mode)
        line1 = lines.pop(0)
        while not line1.startswith("time :"):
            line1 = lines.pop(0)
        try:
            # e.g. "time : <human time> (<unix time>)"
            time_string = line1.rstrip().split(' : ')[1]
            unix_time_string = time_string.rsplit('(', 1)[1].rstrip(")")
            global_summary["last_updated"] = int(unix_time_string)
        except (TypeError, ValueError, IndexError):
            # back compat pre cylc-6
            global_summary["last_updated"] = time.time()
    # Skip initial and final cycle points.
    lines[0:2] = []
    global_summary["status_string"] = SUITE_STATUS_STOPPED
    while lines:
        line = lines.pop(0)
        if line.startswith("class") or line.startswith("Begin task"):
            continue
        try:
            (task_id, info) = line.split(' : ')
            name, point_string = TaskID.split(task_id)
        except ValueError:
            # Not a "TASK_ID : info" line.
            continue
        except Exception as e:
            sys.stderr.write(str(e) + "\n")
            continue
        task_summary.setdefault(task_id, {"name": name,
                                          "point": point_string,
                                          "label": point_string})
        # reconstruct state from a dumped state string
        items = dict([p.split("=") for p in info.split(', ')])
        state = items.get("status")
        if state == 'submitting':
            # backward compatibility for state dumps generated prior to #787
            state = TASK_STATUS_READY
        task_summary[task_id].update({"state": state})
        task_summary[task_id].update({"spawned": items.get("spawned")})
    global_summary["run_mode"] = "dead"
    return global_summary, task_summary, family_summary
def get_graph(self, group_nodes=None, ungroup_nodes=None,
              ungroup_recursive=False, ungroup_all=False, group_all=False):
    """Regenerate, style and display the suite dependency graph."""
    if not self.suiterc:
        return
    family_nodes = self.suiterc.get_first_parent_descendants()
    # Note this is used by "cylc graph" but not gcylc.
    # self.start_ and self.stop_point_string come from CLI.
    graph = CGraph.get_graph(
        self.suiterc,
        group_nodes=group_nodes,
        ungroup_nodes=ungroup_nodes,
        ungroup_recursive=ungroup_recursive,
        group_all=group_all,
        ungroup_all=ungroup_all,
        ignore_suicide=self.ignore_suicide,
        subgraphs_on=self.subgraphs_on)
    graph.graph_attr['rankdir'] = self.orientation
    # Style nodes.
    cache = {}  # For caching is_on_sequence() calls.
    for node in graph.iternodes():
        name, point = TaskID.split(node.get_name())
        if name in family_nodes:
            # Style family nodes.
            node.attr['shape'] = 'doubleoctagon'
            # Detecting ghost families would involve analysing triggers
            # in the suite's graphing.
        elif self.is_ghost_task(name, point, cache=cache):
            # Style ghost nodes.
            style_ghost_node(node)
    self.graph = graph
    self.filter_graph()
    self.set_dotcode(graph.string())
def __init__(self, name, rtcfg, run_mode, start_point):
    """Initialise a task definition.

    Raises TaskDefError if ``name`` is not a valid task name.
    """
    if not TaskID.is_valid_name(name):
        raise TaskDefError("Illegal task name: %s" % name)
    self.run_mode = run_mode
    self.rtconfig = rtcfg
    self.start_point = start_point
    self.sequences = []
    self.implicit_sequences = []  # Implicit sequences are deprecated.
    self.used_in_offset_trigger = False
    # some defaults
    self.max_future_prereq_offset = None
    self.intercycle_offsets = []
    self.sequential = False
    self.is_coldstart = False
    self.suite_polling_cfg = {}
    self.clocktrigger_offset = None
    self.namespace_hierarchy = []
    # triggers[0,6] = [ A, B:1, C(T-6), ... ]
    self.triggers = {}
    # cond[6,18] = [ '(A & B)|C', 'C | D | E', ... ]
    self.cond_triggers = {}
    # list of explicit internal outputs; change to dict if need to vary per
    # cycle.
    self.outputs = []
    self.name = name
    # Run-time statistics for this task.
    self.elapsed_times = []
    self.mean_total_elapsed_time = None
def __init__(self, name, rtcfg, run_mode, start_point, spawn_ahead):
    """Initialise a task definition.

    Raises TaskDefError if ``name`` is not a valid task name.
    """
    if not TaskID.is_valid_name(name):
        raise TaskDefError("Illegal task name: %s" % name)
    self.run_mode = run_mode
    self.rtconfig = rtcfg
    self.start_point = start_point
    self.spawn_ahead = spawn_ahead
    self.sequences = []
    self.used_in_offset_trigger = False
    # some defaults
    self.max_future_prereq_offset = None
    self.intercycle_offsets = set([])
    self.sequential = False
    self.suite_polling_cfg = {}
    self.clocktrigger_offset = None
    self.expiration_offset = None
    self.namespace_hierarchy = []
    self.dependencies = {}
    self.outputs = []
    self.param_var = {}
    self.external_triggers = []
    self.xtrig_labels = set()
    self.xclock_label = None
    # Note a task can only have one clock xtrigger - if it depends on
    # several we just keep the label of the one with the largest offset
    # (this is determined and set during suite config parsing, to avoid
    # storing the offset here in the taskdef).
    self.name = name
    # Bounded history of elapsed run times for this task.
    self.elapsed_times = deque(maxlen=self.MAX_LEN_ELAPSED_TIMES)
def __init__(self, name, rtcfg, run_mode, start_point):
    """Initialise a task definition.

    Raises TaskDefError if ``name`` is not a valid task name.
    """
    if not TaskID.is_valid_name(name):
        raise TaskDefError("Illegal task name: %s" % name)
    self.run_mode = run_mode
    self.rtconfig = rtcfg
    self.start_point = start_point
    self.sequences = []
    self.implicit_sequences = []  # Implicit sequences are deprecated.
    self.used_in_offset_trigger = False
    # some defaults
    self.max_future_prereq_offset = None
    self.intercycle_offsets = []
    self.sequential = False
    self.is_coldstart = False
    self.suite_polling_cfg = {}
    self.clocktrigger_offset = None
    self.expiration_offset = None
    self.namespace_hierarchy = []
    self.triggers = {}
    self.outputs = []
    self.external_triggers = []
    self.name = name
    # Run-time statistics for this task.
    self.elapsed_times = []
    self.mean_total_elapsed_time = None
def get(self, task_id=None):
    """Retrieve all broadcast variables that target a given task ID."""
    check_access_priv(self, 'full-read')
    self.report('broadcast_get')
    if task_id == "None":
        task_id = None
    if not task_id:
        # all broadcast settings requested
        return self.settings
    try:
        name, point_string = TaskID.split(task_id)
    except ValueError:
        raise Exception("Can't split task_id %s" % task_id)
    result = {}
    # The order is:
    # all:root -> all:FAM -> ... -> all:task
    # -> tag:root -> tag:FAM -> ... -> tag:task
    for cycle in self.ALL_CYCLE_POINTS_STRS + [point_string]:
        cycle_settings = self.settings.get(cycle)
        if not cycle_settings:
            continue
        for namespace in reversed(self.linearized_ancestors[name]):
            if namespace in cycle_settings:
                self._addict(result, cycle_settings[namespace])
    return result
def update_gui(self):
    """Rebuild the LED view from the current task/family state summaries.

    Returns False so it can be scheduled as a one-shot GTK idle callback.
    """
    self.action_required = False
    state_summary = {}
    state_summary.update(self.state_summary)
    state_summary.update(self.fam_state_summary)
    self.ledview_widgets()
    # Index the summary ids both by cycle point and by task name.
    tasks_by_point_string = {}
    tasks_by_name = {}
    for id_ in state_summary:
        name, point_string = TaskID.split(id_)
        tasks_by_point_string.setdefault(point_string, [])
        tasks_by_point_string[point_string].append(name)
        tasks_by_name.setdefault(name, [])
        tasks_by_name[name].append(point_string)
    # (Dead code removed: a sorted copy of tasks_by_name.keys() was
    # previously built here but never used.)
    if not self.is_transposed:
        self._update_gui_regular(tasks_by_name, state_summary)
    else:
        self._update_gui_transpose(tasks_by_point_string, state_summary)
    self.led_treeview.columns_autosize()
    return False
def __init__(self, name, rtcfg, run_mode, start_point, spawn_ahead):
    """Initialise a task definition.

    Raises TaskDefError if ``name`` is not a valid task name.
    """
    if not TaskID.is_valid_name(name):
        raise TaskDefError("Illegal task name: %s" % name)
    self.run_mode = run_mode
    self.rtconfig = rtcfg
    self.start_point = start_point
    self.spawn_ahead = spawn_ahead
    self.sequences = []
    self.used_in_offset_trigger = False
    # some defaults
    self.max_future_prereq_offset = None
    self.intercycle_offsets = set([])
    self.sequential = False
    self.suite_polling_cfg = {}
    self.clocktrigger_offset = None
    self.expiration_offset = None
    self.namespace_hierarchy = []
    self.dependencies = {}
    self.outputs = []
    self.param_var = {}
    self.external_triggers = []
    self.name = name
    # Bounded history of elapsed run times for this task.
    self.elapsed_times = deque(maxlen=self.MAX_LEN_ELAPSED_TIMES)
def match_ext_trigger(self, itask):
    """Match external triggers for a waiting task proxy.

    Returns True if any trigger became satisfied, False if none did,
    None (early return) if there is nothing to match.
    """
    if not self.ext_triggers or not itask.state.external_triggers:
        return
    has_changed = False
    for trig, satisfied in itask.state.external_triggers.items():
        if satisfied:
            continue
        # Iterate over a copy: entries may be deleted during the loop.
        for qmsg, qid in self.ext_triggers.copy():
            if trig == qmsg:
                # Matched.
                point_string = TaskID.split(itask.identity)[1]
                # Set trigger satisfied.
                itask.state.external_triggers[trig] = True
                # Broadcast the event ID to the cycle point.
                if qid is not None:
                    self.put_broadcast(
                        [point_string], ['root'],
                        [{
                            'environment': {
                                'CYLC_EXT_TRIGGER_ID': qid
                            }
                        }],
                    )
                # Decrement the trigger's use count; drop when exhausted.
                self.ext_triggers[(qmsg, qid)] -= 1
                if not self.ext_triggers[(qmsg, qid)]:
                    del self.ext_triggers[(qmsg, qid)]
                has_changed = True
                break
    return has_changed
def _write_epilogue(cls, handle, job_conf):
    """Write epilogue.

    Appends the job script's final shell section to ``handle``, then the
    end-of-file marker line.
    """
    if job_conf['use manual completion']:
        # Detaching tasks report their own completion.
        handle.write(r"""

# (detaching task: cannot safely remove the WORK DIRECTORY here)
echo 'JOB SCRIPT EXITING: THIS TASK HANDLES ITS OWN COMPLETION MESSAGING'
trap '' EXIT
""")
    else:
        handle.write(r"""

# EMPTY WORK DIRECTORY REMOVE:
cd
rmdir $CYLC_TASK_WORK_DIR 2>/dev/null || true

# SEND TASK SUCCEEDED MESSAGE:
wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
cylc task message '%(message)s'

echo 'JOB SCRIPT EXITING (TASK SUCCEEDED)'
trap '' EXIT
""" % {"message": TaskMessage.SUCCEEDED})
    task_name, point_string = TaskID.split(job_conf['task id'])
    # NOTE(review): bare expression - result discarded; presumably only
    # forces a KeyError if the key is missing - confirm intent.
    job_conf['absolute submit number']
    handle.write("%s%s\n" % (BATCH_SYS_MANAGER.LINE_PREFIX_EOF,
                             os.path.dirname(job_conf['common job log path'])))
def get_graph(self, group_nodes=None, ungroup_nodes=None,
              ungroup_recursive=False, ungroup_all=False, group_all=False):
    """Regenerate, style and display the suite dependency graph.

    ``group_nodes``/``ungroup_nodes`` default to None (treated as empty
    lists) instead of mutable ``[]`` defaults, which are shared between
    calls.
    """
    if group_nodes is None:
        group_nodes = []
    if ungroup_nodes is None:
        ungroup_nodes = []
    if not self.suiterc:
        return
    family_nodes = self.suiterc.get_first_parent_descendants().keys()
    # (Removed: unused local binding of self.suiterc.suite_polling_tasks.)
    # Note this is used by "cylc graph" but not gcylc.
    # self.start_ and self.stop_point_string come from CLI.
    graph = self.suiterc.get_graph(
        group_nodes=group_nodes, ungroup_nodes=ungroup_nodes,
        ungroup_recursive=ungroup_recursive, group_all=group_all,
        ungroup_all=ungroup_all, ignore_suicide=self.ignore_suicide,
        subgraphs_on=self.subgraphs_on)
    graph.graph_attr['rankdir'] = self.orientation
    for node in graph.nodes():
        name, point_string = TaskID.split(node.get_name())
        if name in family_nodes:
            # Families are drawn with a distinctive shape.
            node.attr['shape'] = 'doubleoctagon'
    self.graph = graph
    self.filter_graph()
    self.set_dotcode(graph.string())
def on_query_tooltip(self, widget, x, y, kbd_ctx, tooltip):
    """Handle a tooltip creation request.

    Returns True to show the tooltip, False to suppress it. The first
    hover over a new row only records the id (GTK re-queries).
    """
    tip_context = self.ttreeview.get_tooltip_context(x, y, kbd_ctx)
    if tip_context is None:
        self._prev_tooltip_task_id = None
        return False
    x, y = self.ttreeview.convert_widget_to_bin_window_coords(x, y)
    path = self.ttreeview.get_path_at_pos(x, y)[0]
    if not path:
        return False
    model = self.ttreeview.get_model()
    point_string = model.get_value(model.get_iter(path), 0)
    name = model.get_value(model.get_iter(path), 1)
    if point_string == name:
        # We are hovering over a cycle point row.
        task_id = point_string
    else:
        # We are hovering over a task or family row.
        task_id = TaskID.get(name, point_string)
    if task_id != self._prev_tooltip_task_id:
        # Clear tooltip when crossing row boundaries.
        self._prev_tooltip_task_id = task_id
        tooltip.set_text(None)
        return False
    text = get_id_summary(
        task_id, self.state_summary, self.fam_state_summary,
        self.descendants)
    if text == task_id:
        # No extra information beyond the id itself.
        return False
    tooltip.set_text(text)
    return True
def __init__(self, name, rtcfg, run_mode, start_point, spawn_ahead):
    """Initialise a task definition.

    Raises TaskDefError if ``name`` is not a valid task name.
    """
    if not TaskID.is_valid_name(name):
        raise TaskDefError("Illegal task name: %s" % name)
    self.run_mode = run_mode
    self.rtconfig = rtcfg
    self.start_point = start_point
    self.spawn_ahead = spawn_ahead
    self.sequences = []
    self.implicit_sequences = []  # Implicit sequences are deprecated.
    self.used_in_offset_trigger = False
    # some defaults
    self.max_future_prereq_offset = None
    self.intercycle_offsets = set([])
    self.sequential = False
    self.suite_polling_cfg = {}
    self.clocktrigger_offset = None
    self.expiration_offset = None
    self.namespace_hierarchy = []
    self.dependencies = {}
    self.outputs = []
    self.param_var = {}
    self.external_triggers = []
    self.name = name
    # Bounded history of elapsed run times for this task.
    self.elapsed_times = deque(maxlen=self.MAX_LEN_ELAPSED_TIMES)
def __init__(self, tdef, point, status, hold_swap):
    """Initialise task state from the task definition at a cycle point."""
    self.identity = TaskID.get(tdef.name, str(point))
    self.status = status
    self.hold_swap = hold_swap
    self.time_updated = None
    # Cached prerequisite-satisfaction results (None = not yet computed).
    self._is_satisfied = None
    self._suicide_is_satisfied = None
    # Prerequisites.
    self.prerequisites = []
    self.suicide_prerequisites = []
    self._add_prerequisites(point, tdef)
    # External Triggers.
    self.external_triggers = {}
    for ext in tdef.external_triggers:
        # Allow cycle-point-specific external triggers - GitHub #1893.
        if '$CYLC_TASK_CYCLE_POINT' in ext:
            ext = ext.replace('$CYLC_TASK_CYCLE_POINT', str(point))
        # set unsatisfied
        self.external_triggers[ext] = False
    # Message outputs.
    self.outputs = TaskOutputs(tdef)
    # Standard outputs.
    self.outputs.add(TASK_OUTPUT_SUBMITTED)
    self.outputs.add(TASK_OUTPUT_STARTED)
    self.outputs.add(TASK_OUTPUT_SUCCEEDED)
    self.kill_failed = False
    self.confirming_with_poll = False
def update_gui(self):
    """Rebuild the LED view from the current task/family state summaries.

    Returns False so it can be scheduled as a one-shot GTK idle callback.
    """
    self.action_required = False
    state_summary = {}
    state_summary.update(self.state_summary)
    state_summary.update(self.fam_state_summary)
    self.ledview_widgets()
    # Index the summary ids both by cycle point and by task name.
    tasks_by_point_string = {}
    tasks_by_name = {}
    for id_ in state_summary:
        name, point_string = TaskID.split(id_)
        tasks_by_point_string.setdefault(point_string, [])
        tasks_by_point_string[point_string].append(name)
        tasks_by_name.setdefault(name, [])
        tasks_by_name[name].append(point_string)
    # (Dead code removed: a sorted copy of tasks_by_name.keys() was
    # previously built here but never used.)
    if not self.is_transposed:
        self._update_gui_regular(tasks_by_name, state_summary)
    else:
        self._update_gui_transpose(tasks_by_point_string, state_summary)
    self.led_treeview.columns_autosize()
    if self.is_transposed == self.should_transpose_view:
        # Only select rows if we have not changed view mode.
        self._set_selected_rows()
    return False
def _add_prerequisites(self, point, identity, tdef): """Add task prerequisites.""" # self.triggers[sequence] = [triggers for sequence] # Triggers for sequence_i only used if my cycle point is a # valid member of sequence_i's sequence of cycle points. self._is_satisfied = None self._suicide_is_satisfied = None for sequence, exps in tdef.triggers.items(): for ctrig, exp in exps: key = ctrig.keys()[0] if not sequence.is_valid(point): # This trigger is not valid for current cycle (see NOTE # just above) continue cpre = Prerequisite(identity, point, tdef.start_point) for label in ctrig: trig = ctrig[label] if trig.graph_offset_string is not None: prereq_offset_point = get_point_relative( trig.graph_offset_string, point) if prereq_offset_point > point: prereq_offset = prereq_offset_point - point if (tdef.max_future_prereq_offset is None or (prereq_offset > tdef.max_future_prereq_offset)): tdef.max_future_prereq_offset = ( prereq_offset) cpre.add(trig.get_prereq(point), label, ((prereq_offset_point < tdef.start_point) & (point >= tdef.start_point))) else: cpre.add(trig.get_prereq(point), label) cpre.set_condition(exp) if ctrig[key].suicide: self.suicide_prerequisites.append(cpre) else: self.prerequisites.append(cpre) if tdef.sequential: # Add a previous-instance succeeded prerequisite. p_prev = None adjusted = [] for seq in tdef.sequences: prv = seq.get_nearest_prev_point(point) if prv: # None if out of sequence bounds. adjusted.append(prv) if adjusted: p_prev = max(adjusted) cpre = Prerequisite(identity, point, tdef.start_point) prereq = "%s %s" % (TaskID.get(tdef.name, p_prev), TASK_STATUS_SUCCEEDED) label = tdef.name cpre.add(prereq, label, p_prev < tdef.start_point) cpre.set_condition(label) self.prerequisites.append(cpre)
def _update_gui_regular(self, tasks_by_name, state_summary):
    """Logic for updating the gui in regular mode.

    One row per task/family, one LED column per cycle point.
    """
    children = []
    to_unfold = []
    parent_iter = None
    for name in self.task_list:
        point_strings_for_tasks = tasks_by_name.get(name, [])
        if not point_strings_for_tasks:
            continue
        # Build one LED cell per cycle point for this row.
        state_list = []
        for point_string in self.point_strings:
            if point_string in point_strings_for_tasks:
                task_id = TaskID.get(name, point_string)
                state = state_summary[task_id]['state']
                if task_id in self.fam_state_summary:
                    dot_type = 'family'
                else:
                    dot_type = 'task'
                state_list.append(self.dots[dot_type][state])
            else:
                # No instance at this point: blank LED.
                state_list.append(self.dots['task']['empty'])
        try:
            if name in self.family_tree:
                # Task is a family.
                self.led_treestore.append(
                    None, row=[name] + state_list)
                children = self.family_tree[name]
                # Get iter for this family's entry.
                iter_ = self.led_treestore.get_iter_first()
                temp = self.led_treestore.get_value(
                    iter_, 0)
                while temp != name:
                    iter_ = self.led_treestore.iter_next(iter_)
                    temp = self.led_treestore.get_value(iter_, 0)
                parent_iter = iter_
                # Unfold if family was folded before update
                if name in self.expanded_rows:
                    to_unfold.append(
                        self.led_treestore.get_path(iter_))
            elif name in children:
                # Task belongs to a family.
                self.led_treestore.append(
                    parent_iter, row=[name] + state_list)
            else:
                # Task does not belong to a family.
                self.led_treestore.append(
                    None, row=[name] + state_list)
        except ValueError:
            # A very laggy store can change the columns and raise this.
            return False
    # Unfold any rows that were unfolded before the update.
    for path in to_unfold:
        self.led_treeview.expand_row(path, True)
def _add_prerequisites(self, point, identity, tdef): """Add task prerequisites.""" # self.triggers[sequence] = [triggers for sequence] # Triggers for sequence_i only used if my cycle point is a # valid member of sequence_i's sequence of cycle points. self._recalc_satisfied = True for sequence, exps in tdef.triggers.items(): for ctrig, exp in exps: key = ctrig.keys()[0] if not sequence.is_valid(point): # This trigger is not valid for current cycle (see NOTE # just above) continue cpre = Prerequisite(identity, point, tdef.start_point) for label in ctrig: trig = ctrig[label] if trig.graph_offset_string is not None: prereq_offset_point = get_point_relative( trig.graph_offset_string, point) if prereq_offset_point > point: prereq_offset = prereq_offset_point - point if (tdef.max_future_prereq_offset is None or (prereq_offset > tdef.max_future_prereq_offset)): tdef.max_future_prereq_offset = ( prereq_offset) cpre.add(trig.get_prereq(point), label, ((prereq_offset_point < tdef.start_point) & (point >= tdef.start_point))) else: cpre.add(trig.get_prereq(point), label) cpre.set_condition(exp) if ctrig[key].suicide: self.suicide_prerequisites.append(cpre) else: self.prerequisites.append(cpre) if tdef.sequential: # Add a previous-instance succeeded prerequisite. p_prev = None adjusted = [] for seq in tdef.sequences: prv = seq.get_nearest_prev_point(point) if prv: # None if out of sequence bounds. adjusted.append(prv) if adjusted: p_prev = max(adjusted) cpre = Prerequisite(identity, point, tdef.start_point) prereq = "%s %s" % (TaskID.get(tdef.name, p_prev), TASK_STATUS_SUCCEEDED) label = tdef.name cpre.add(prereq, label, p_prev < tdef.start_point) cpre.set_condition(label) self.prerequisites.append(cpre)
def get_right(self, inpoint, start_point):
    """Return the task ID for the right side of this dependency pair.

    Args:
        inpoint: the cycle point to attach to the right-side task name.
        start_point: unused here (kept for interface symmetry with
            get_left).

    Returns None if there is no right side.
    """
    inpoint_string = str(inpoint)
    if self.right is None:
        return None
    # Strip off special outputs (e.g. "foo:fail" -> "foo").
    # NOTE: the stripped form is cached back onto self.right in place.
    # Raw string avoids the invalid '\w' escape in a plain literal.
    self.right = re.sub(r':\w+', '', self.right)
    return TaskID.get(self.right, inpoint_string)
def job_poll(self, st_file_path):
    """Poll status of the job specified in the "st_file_path".

    Return a status string that can be recognised by the suite.
    """
    # SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status
    st_file_path_strs = st_file_path.rsplit(os.sep, 6)
    task_id = TaskID.get(st_file_path_strs[4], st_file_path_strs[3])
    self.configure_suite_run_dir(st_file_path_strs[0])

    # Parse KEY=VALUE lines from the job status file.
    statuses = {}
    try:
        # Context manager ensures the handle is closed even if a
        # line fails to split (the original leaked it).
        with open(st_file_path) as handle:
            for line in handle:
                key, value = line.strip().split("=", 1)
                statuses[key] = value
    except IOError:
        return "polled %s submission failed\n" % (task_id)

    # Exited: succeeded or failed, with a known exit time.
    if (statuses.get("CYLC_JOB_EXIT_TIME") and
            statuses.get("CYLC_JOB_EXIT") == "SUCCEEDED"):
        return "polled %s succeeded at %s\n" % (
            task_id, statuses["CYLC_JOB_EXIT_TIME"])
    if (statuses.get("CYLC_JOB_EXIT_TIME") and
            statuses.get("CYLC_JOB_EXIT")):
        return "polled %s failed at %s\n" % (
            task_id, statuses["CYLC_JOB_EXIT_TIME"])
    # No batch system info recorded: submission never completed.
    if (self.CYLC_BATCH_SYS_NAME not in statuses or
            self.CYLC_BATCH_SYS_JOB_ID not in statuses):
        return "polled %s submission failed\n" % (task_id)

    # Ask batch system if job is still alive or not
    batch_sys = self.get_inst(statuses[self.CYLC_BATCH_SYS_NAME])
    job_id = statuses[self.CYLC_BATCH_SYS_JOB_ID]
    proc = Popen(
        shlex.split(batch_sys.POLL_CMD_TMPL % {"job_id": job_id}),
        stdout=PIPE)
    is_in_batch_sys = (proc.wait() == 0)
    if is_in_batch_sys and hasattr(batch_sys, "filter_poll_output"):
        # Some batch systems need their poll output post-filtered to
        # decide whether the job is really still present.
        is_in_batch_sys = batch_sys.filter_poll_output(
            proc.communicate()[0], job_id)

    if is_in_batch_sys and "CYLC_JOB_INIT_TIME" in statuses:
        return "polled %s started at %s\n" % (
            task_id, statuses["CYLC_JOB_INIT_TIME"])
    if is_in_batch_sys:
        return "polled %s submitted\n" % (task_id)
    if "CYLC_JOB_INIT_TIME" in statuses:
        return "polled %s failed at unknown-time\n" % (task_id)
    # Submitted but disappeared
    return "polled %s submission failed\n" % (task_id)
def job_poll(self, st_file_path):
    """Poll status of the job specified in the "st_file_path".

    Return a status string that can be recognised by the suite.
    """
    # SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status
    st_file_path_strs = st_file_path.rsplit(os.sep, 6)
    task_id = TaskID.get(st_file_path_strs[4], st_file_path_strs[3])
    self.configure_suite_run_dir(st_file_path_strs[0])

    # Parse KEY=VALUE lines from the job status file.
    statuses = {}
    try:
        # Context manager ensures the handle is closed even if a
        # line fails to split (the original leaked it).
        with open(st_file_path) as handle:
            for line in handle:
                key, value = line.strip().split("=", 1)
                statuses[key] = value
    except IOError:
        return "polled %s submission failed\n" % (task_id)

    # Exited: succeeded or failed, with a known exit time.
    if (statuses.get(TaskMessage.CYLC_JOB_EXIT_TIME) and
            statuses.get(TaskMessage.CYLC_JOB_EXIT) == "SUCCEEDED"):
        return "polled %s succeeded at %s\n" % (
            task_id, statuses[TaskMessage.CYLC_JOB_EXIT_TIME])
    if (statuses.get(TaskMessage.CYLC_JOB_EXIT_TIME) and
            statuses.get(TaskMessage.CYLC_JOB_EXIT)):
        return "polled %s failed at %s\n" % (
            task_id, statuses[TaskMessage.CYLC_JOB_EXIT_TIME])
    # No batch system info recorded: submission never completed.
    if (self.CYLC_BATCH_SYS_NAME not in statuses or
            self.CYLC_BATCH_SYS_JOB_ID not in statuses):
        return "polled %s submission failed\n" % (task_id)

    # Ask batch system if job is still alive or not
    batch_sys = self.get_inst(statuses[self.CYLC_BATCH_SYS_NAME])
    job_id = statuses[self.CYLC_BATCH_SYS_JOB_ID]
    command = shlex.split(batch_sys.POLL_CMD_TMPL % {"job_id": job_id})
    try:
        proc = Popen(command, stdout=PIPE)
    except OSError as exc:
        # subprocess.Popen has a bad habit of not setting the filename
        # of the executable when it raises an OSError, so fill it in
        # before re-raising to give a useful traceback.
        if not exc.filename:
            exc.filename = command[0]
        raise
    is_in_batch_sys = proc.wait() == 0
    if is_in_batch_sys and hasattr(batch_sys, "filter_poll_output"):
        # Some batch systems need their poll output post-filtered to
        # decide whether the job is really still present.
        is_in_batch_sys = batch_sys.filter_poll_output(
            proc.communicate()[0], job_id)

    if is_in_batch_sys and TaskMessage.CYLC_JOB_INIT_TIME in statuses:
        return "polled %s started at %s\n" % (
            task_id, statuses[TaskMessage.CYLC_JOB_INIT_TIME])
    if is_in_batch_sys:
        return "polled %s submitted\n" % (task_id)
    if TaskMessage.CYLC_JOB_INIT_TIME in statuses:
        return "polled %s failed at unknown-time\n" % (task_id)
    # Submitted but disappeared
    return "polled %s submission failed\n" % (task_id)
def get_stop_state_summary(lines):
    """Parse state dump content into summary maps.

    Args:
        lines: list of state-dump lines; consumed destructively.

    Returns (global_summary, task_summary) dicts, or None if the dump
    is too short to contain the expected header lines.
    """
    global_summary = {}
    task_summary = {}

    # A parsable dump needs at least the header lines.
    # (The original also tested len(lines) == 0, which is redundant.)
    if len(lines) < 3:
        return None
    # Drop interleaved remote-command output (iterate a copy so the
    # removal is safe).
    for line in list(lines):
        if line.startswith('Remote command'):
            lines.remove(line)

    line0 = lines.pop(0)
    if line0.startswith('suite time') or \
            line0.startswith('simulation time'):
        # backward compatibility with pre-5.4.11 state dumps
        global_summary["last_updated"] = time.time()
    else:
        # (line0 is run mode)
        line1 = lines.pop(0)
        while not line1.startswith("time :"):
            line1 = lines.pop(0)
        try:
            # e.g. "time : 2015-01-01T00:00:00 (1420070400)"
            time_string = line1.rstrip().split(' : ')[1]
            unix_time_string = time_string.rsplit('(', 1)[1].rstrip(")")
            global_summary["last_updated"] = int(unix_time_string)
        except (TypeError, ValueError, IndexError):
            # back compat pre cylc-6
            global_summary["last_updated"] = time.time()

    # Skip initial and final cycle points.
    lines[0:2] = []
    global_summary["status_string"] = SUITE_STATUS_STOPPED
    while lines:
        line = lines.pop(0)
        if line.startswith("class") or line.startswith("Begin task"):
            continue
        try:
            (task_id, info) = line.split(' : ')
            name, point_string = TaskID.split(task_id)
        except ValueError:
            # Not a task line; skip it.
            continue
        task_summary.setdefault(task_id, {
            "name": name, "point": point_string,
            "label": point_string})
        # reconstruct state from a dumped state string
        items = dict([p.split("=") for p in info.split(', ')])
        state = items.get("status")
        if state == 'submitting':
            # backward compatibility for state dumps generated prior
            # to #787
            state = TASK_STATUS_READY
        # Single update instead of two consecutive ones.
        task_summary[task_id].update(
            {"state": state, "spawned": items.get("spawned")})
    global_summary["run_mode"] = "dead"
    return global_summary, task_summary
def node_attr_by_taskname(self, node_string):
    """Return the attribute list configured for this node's task name.

    Unknown task names yield an empty list, as do special
    "__remove_" placeholder nodes.
    """
    try:
        name = TaskID.split(node_string)[0]
    except ValueError:
        # Not a normal task id - special node?
        if node_string.startswith("__remove_"):
            return []
        raise
    return self.task_attr.get(name, [])
def node_attr_by_taskname(self, node_string):
    """Return the attribute list configured for this node's task name.

    Unknown task names yield an empty list, as do special
    "__remove_" placeholder nodes.
    """
    try:
        # Unpack so a malformed id raises ValueError here.
        name, point_string = TaskID.split(node_string)
    except ValueError:
        # Not a normal task id - special node?
        if node_string.startswith("__remove_"):
            return []
        raise
    return self.task_attr.get(name, [])
def on_treeview_button_pressed( self, treeview, event ):
    """Pop up the right-click task menu for the clicked tree row.

    Returns True when the menu was shown (stops further gtk handling),
    False/None otherwise.
    """
    # DISPLAY MENU ONLY ON RIGHT CLICK ONLY
    if event.button != 3:
        return False
    # the following sets selection to the position at which the
    # right click was done (otherwise selection lags behind the
    # right click):
    x = int( event.x )
    y = int( event.y )
    # NOTE(review): 'time' shadows the time module inside this handler.
    time = event.time
    pth = treeview.get_path_at_pos(x,y)
    if pth is None:
        # Click fell outside any row.
        return False
    treeview.grab_focus()
    path, col, cellx, celly = pth
    treeview.set_cursor( path, col, 0 )
    selection = treeview.get_selection()
    # NOTE(review): 'iter' shadows the builtin here.
    treemodel, iter = selection.get_selected()
    # Column 0 is the cycle point, column 1 the task name.
    point_string = treemodel.get_value( iter, 0 )
    name = treemodel.get_value( iter, 1 )
    if point_string == name:
        # must have clicked on the top level point_string
        return
    task_id = TaskID.get(name, point_string)
    is_fam = (name in self.t.descendants)
    menu = self.get_right_click_menu( task_id, task_is_family=is_fam )
    sep = gtk.SeparatorMenuItem()
    sep.show()
    menu.append( sep )
    group_item = gtk.CheckMenuItem( 'Toggle Family Grouping' )
    group_item.set_active( self.t.should_group_families )
    menu.append( group_item )
    group_item.connect( 'toggled', self.toggle_grouping )
    group_item.show()
    menu.popup( None, None, None, event.button, event.time )
    # TODO - popup menus are not automatically destroyed and can be
    # reused if saved; however, we need to reconstruct or at least
    # alter ours dynamically => should destroy after each use to
    # prevent a memory leak? But I'm not sure how to do this as yet.)
    return True
def _get_tasks_info(schd):
    """Retrieve task summary info and states.

    Args:
        schd: the scheduler; its pool is read for normal and
            runahead tasks.

    Returns (task_summary, task_states) where task_summary maps task
    id -> summary dict and task_states maps point string -> {name:
    state}.
    """
    task_summary = {}
    task_states = {}

    def _record(task, state_override=None):
        # Record one task's summary, optionally forcing its state
        # (used to mark runahead tasks).
        ts = task.get_state_summary()
        if state_override is not None:
            ts['state'] = state_override
        task_summary[task.identity] = ts
        name, point_string = TaskID.split(task.identity)
        task_states.setdefault(point_string, {})
        task_states[point_string][name] = ts['state']

    # The two pools differ only in the forced state, so share the
    # recording logic instead of duplicating the loop body.
    for task in schd.pool.get_tasks():
        _record(task)
    for task in schd.pool.get_rh_tasks():
        _record(task, TASK_STATUS_RUNAHEAD)
    return task_summary, task_states
def get_graph(self, group_nodes=None, ungroup_nodes=None,
              ungroup_recursive=False, ungroup_all=False,
              group_all=False):
    """Generate and display the dependency graph for the suite.

    Builds a CGraph with the current family grouping options, styles
    family/trigger/off-sequence nodes, then filters the graph and
    pushes its dot code to the viewer.

    Args:
        group_nodes, ungroup_nodes: families to (un)group.
        ungroup_recursive, ungroup_all, group_all: grouping modes
            passed through to CGraph.get_graph.
    """
    if not self.suiterc:
        # No parsed suite definition yet - nothing to draw.
        return
    family_nodes = self.suiterc.get_first_parent_descendants()
    # Note this is used by "cylc graph" but not gcylc.
    # self.start_ and self.stop_point_string come from CLI.
    # Use the widget's own colours so the graph matches the theme.
    bg_color = gtk_rgb_to_hex(
        getattr(self.style, 'bg', None)[gtk.STATE_NORMAL])
    fg_color = gtk_rgb_to_hex(
        getattr(self.style, 'fg', None)[gtk.STATE_NORMAL])
    graph = CGraph.get_graph(
        self.suiterc,
        group_nodes=group_nodes,
        ungroup_nodes=ungroup_nodes,
        ungroup_recursive=ungroup_recursive,
        group_all=group_all, ungroup_all=ungroup_all,
        ignore_suicide=self.ignore_suicide,
        subgraphs_on=self.subgraphs_on,
        bgcolor=bg_color, fgcolor=fg_color)
    graph.graph_attr['rankdir'] = self.orientation
    # Style nodes.
    cache = {}  # For caching is_on_sequence() calls.
    # Ghost (off-sequence) nodes get a semi-transparent foreground.
    fg_ghost = "%s%s" % (fg_color, GHOST_TRANSP_HEX)
    for node in graph.iternodes():
        name, point = TaskID.split(node.get_name())
        if name.startswith('@'):
            # Style action trigger nodes.
            node.attr['shape'] = 'none'
        elif name in family_nodes:
            # Style family nodes.
            node.attr['shape'] = 'doubleoctagon'
            # Detecting ghost families would involve analysing triggers
            # in the suite's graphing.
        elif self.is_off_sequence(name, point, cache=cache):
            node.attr['style'] = 'dotted'
            node.attr['color'] = fg_ghost
            node.attr['fontcolor'] = fg_ghost
    self.graph = graph
    self.filter_graph()
    self.set_dotcode(graph.string())
def get_left(self, inpoint, start_point, base_interval):
    """Return the task ID for the left side of this dependency pair.

    Args:
        inpoint: the cycle point of the right-side task.
        start_point: the suite start point, used when the left node's
            offset is relative to the initial cycle time.
        base_interval: passed through to graphnode parsing.
    """
    # Strip off special outputs (e.g. "foo:fail" -> "foo").
    # Raw string avoids invalid '\w' escape in a plain literal.
    left = re.sub(r':[\w-]+', '', self.left)
    left_graphnode = graphnode(left, base_interval=base_interval)
    if left_graphnode.offset_is_from_ict:
        # Offset is relative to the initial cycle time.
        point = get_point_relative(left_graphnode.offset_string,
                                   start_point)
    elif left_graphnode.offset_string:
        # Offset is relative to the right-side task's point.
        point = get_point_relative(left_graphnode.offset_string,
                                   inpoint)
    else:
        point = inpoint
    name = left_graphnode.name
    return TaskID.get(name, point)
def style_node(self, node_string):
    """Set label and URL attributes on the graph node for this id.

    Special "__remove_" nodes are drawn dashed with a scissors glyph.
    """
    node = self.get_node(node_string)
    try:
        name, point_string = TaskID.split(node_string)
    except ValueError:
        # Special node?
        if not node_string.startswith("__remove_"):
            raise
        node.attr['style'] = 'dashed'
        node.attr['label'] = u'\u2702'
        return
    # Label lines: name, optional polled-suite info, cycle point.
    label_parts = [name]
    if name in self.suite_polling_tasks:
        label_parts.append(self.suite_polling_tasks[name][3])
    label_parts.append(point_string)
    node.attr['label'] = "\\n".join(label_parts)
    node.attr['URL'] = node_string
def retrieve(self, itask): """Match external triggers for a waiting task proxy.""" # Note this has to allow multiple same-message triggers to be queued # and only used one at a time. if self.queue.empty(): return if len(itask.state.external_triggers) == 0: return bcast = BroadcastServer.get_inst() queued = [] while True: try: queued.append(self.queue.get_nowait()) except Empty: break used = [] for trig, satisfied in itask.state.external_triggers.items(): if satisfied: continue for qmsg, qid in queued: if trig == qmsg: # Matched. name, point_string = TaskID.split(itask.identity) # Set trigger satisfied. itask.state.external_triggers[trig] = True cylc.flags.pflag = True # Broadcast the event ID to the cycle point. if qid is not None: bcast.put( [point_string], ["root"], [{ 'environment': { 'CYLC_EXT_TRIGGER_ID': qid } }], not_from_client=True ) used.append((qmsg, qid)) break for q in queued: if q not in used: self.queue.put(q)