def update(self):
    """Refresh cached suite state from the updater.

    Returns True if new state was copied (caller should redraw), False if
    the view was cleared, is up to date, or no update is needed.
    """
    if not self.updater.connected:
        # Lost connection: clear the list once, on the GUI thread.
        if not self.cleared:
            gobject.idle_add(self.clear_list)
            self.cleared = True
        return False
    self.cleared = False
    # Skip if nothing is pending and our copy is at least as new.
    if not self.action_required and (
            self.last_update_time is not None and
            self.last_update_time >= self.updater.last_update_time):
        return False
    self.last_update_time = self.updater.last_update_time
    # Suspend updater writes while deep-copying its state (order matters).
    self.updater.set_update(False)
    self.state_summary = deepcopy(self.updater.state_summary)
    self.fam_state_summary = deepcopy(self.updater.fam_state_summary)
    self.ancestors_pruned = deepcopy(self.updater.ancestors_pruned)
    self.descendants = deepcopy(self.updater.descendants)
    self.updater.set_update(True)
    # Collect the distinct cycle points present in the state summary.
    self.point_strings = []
    for id_ in self.state_summary:
        name, point_string = TaskID.split(id_)
        if point_string not in self.point_strings:
            self.point_strings.append(point_string)
    try:
        # Integer cycle points sort numerically.
        self.point_strings.sort(key=int)
    except (TypeError, ValueError):
        # ISO cycle points: lexicographic sort is chronological.
        self.point_strings.sort()
    if not self.should_group_families:
        # Display the full task list.
        self.task_list = deepcopy(self.updater.task_list)
    else:
        # Replace tasks with their top level family name.
        self.task_list = []
        for task_id in self.state_summary:
            name, point_string = TaskID.split(task_id)
            # Family name below root, or task name.
            item = self.ancestors_pruned[name][-2]
            if item not in self.task_list:
                self.task_list.append(item)
    if (self.cfg.use_defn_order and self.updater.ns_defn_order and
            self.defn_order_on):
        # Order tasks as defined in the suite definition.
        self.task_list = [
            i for i in self.updater.ns_defn_order if i in self.task_list]
    else:
        self.task_list.sort()
    return True
def update(self):
    """Refresh cached suite state from the updater.

    Returns True if new state was copied (caller should redraw), False if
    the view was cleared, is up to date, or no update is needed.
    """
    if not self.updater.connected:
        # Lost connection: clear the list once, on the GUI thread.
        if not self.cleared:
            gobject.idle_add(self.clear_list)
            self.cleared = True
        return False
    self.cleared = False
    # Skip if nothing is pending and our copy is at least as new.
    if not self.action_required and (
            self.last_update_time is not None and
            self.last_update_time >= self.updater.last_update_time):
        return False
    self.last_update_time = self.updater.last_update_time
    # Suspend updater writes while deep-copying its state (order matters).
    self.updater.set_update(False)
    self.state_summary = deepcopy(self.updater.state_summary)
    self.fam_state_summary = deepcopy(self.updater.fam_state_summary)
    self.ancestors_pruned = deepcopy(self.updater.ancestors_pruned)
    self.descendants = deepcopy(self.updater.descendants)
    self.updater.set_update(True)
    # Collect the distinct cycle points present in the state summary.
    self.point_strings = []
    for id_ in self.state_summary:
        name, point_string = TaskID.split(id_)
        if point_string not in self.point_strings:
            self.point_strings.append(point_string)
    try:
        # Integer cycle points sort numerically.
        self.point_strings.sort(key=int)
    except (TypeError, ValueError):
        # ISO cycle points: lexicographic sort is chronological.
        self.point_strings.sort()
    if not self.should_group_families:
        # Display the full task list.
        self.task_list = deepcopy(self.updater.task_list)
    else:
        # Replace tasks with their top level family name.
        self.task_list = []
        for task_id in self.state_summary:
            name, point_string = TaskID.split(task_id)
            # Family name below root, or task name.
            item = self.ancestors_pruned[name][-2]
            if item not in self.task_list:
                self.task_list.append(item)
    if (self.cfg.use_defn_order and self.updater.ns_defn_order and
            self.defn_order_on):
        # Order tasks as defined in the suite definition.
        self.task_list = [
            i for i in self.updater.ns_defn_order if i in self.task_list]
    else:
        self.task_list.sort()
    return True
def get(self, task_id=None):
    """Retrieve all broadcast variables that target a given task ID."""
    check_access_priv(self, 'full-read')
    self.report('broadcast_get')
    # Clients may send the literal string "None".
    if task_id == "None":
        task_id = None
    if not task_id:
        # No target: return every broadcast setting.
        return self.settings
    try:
        name, point_string = TaskID.split(task_id)
    except ValueError:
        raise Exception("Can't split task_id %s" % task_id)
    result = {}
    # Apply settings in precedence order, least to most specific:
    # all:root -> all:FAM -> ... -> all:task
    # -> tag:root -> tag:FAM -> ... -> tag:task
    for cycle in self.ALL_CYCLE_POINTS_STRS + [point_string]:
        if cycle not in self.settings:
            continue
        cycle_settings = self.settings[cycle]
        for namespace in reversed(self.linearized_ancestors[name]):
            if namespace in cycle_settings:
                self._addict(result, cycle_settings[namespace])
    return result
def get_stop_state_summary(suite, owner=None, hostname=None, lines=None):
    """Load the contents of the last state file into summary maps.

    If "lines" is not given, the state file text is fetched via
    get_stop_state(). Returns (global_summary, task_summary,
    family_summary) dicts, or None if the dump is too short to parse.
    """
    global_summary = {}
    task_summary = {}
    family_summary = {}
    if not lines:
        state_file_text = get_stop_state(suite, owner, hostname)
        if state_file_text is None:
            # No state file available: return empty summaries.
            return global_summary, task_summary, family_summary
        lines = state_file_text.splitlines()
    if len(lines) == 0 or len(lines) < 3:
        return None
    # Strip any "Remote command ..." noise lines before parsing.
    for line in list(lines):
        if line.startswith('Remote command'):
            lines.remove(line)
    line0 = lines.pop(0)
    if line0.startswith('suite time') or \
            line0.startswith('simulation time'):
        # backward compatibility with pre-5.4.11 state dumps
        global_summary["last_updated"] = time.time()
    else:
        # (line0 is run mode)
        # Scan forward to the "time : ..." line.
        line1 = lines.pop(0)
        while not line1.startswith("time :"):
            line1 = lines.pop(0)
        try:
            # Format: "time : <human time> (<unix time>)".
            time_string = line1.rstrip().split(' : ')[1]
            unix_time_string = time_string.rsplit('(', 1)[1].rstrip(")")
            global_summary["last_updated"] = int(unix_time_string)
        except (TypeError, ValueError, IndexError):
            # back compat pre cylc-6
            global_summary["last_updated"] = time.time()
    # Skip initial and final cycle points.
    lines[0:2] = []
    global_summary["status_string"] = SUITE_STATUS_STOPPED
    while lines:
        line = lines.pop(0)
        if line.startswith("class") or line.startswith("Begin task"):
            # Section headers - skip.
            continue
        try:
            (task_id, info) = line.split(' : ')
            name, point_string = TaskID.split(task_id)
        except ValueError:
            # Not a "task_id : info" line - skip.
            continue
        except Exception as e:
            # Unexpected parse failure: report and carry on.
            sys.stderr.write(str(e) + "\n")
            continue
        task_summary.setdefault(task_id, {"name": name, "point": point_string,
                                          "label": point_string})
        # Reconstruct state from a dumped state string of "k=v, k=v" pairs.
        items = dict([p.split("=") for p in info.split(', ')])
        state = items.get("status")
        if state == 'submitting':
            # backward compatibility for state dumps generated prior to #787
            state = TASK_STATUS_READY
        task_summary[task_id].update({"state": state})
        task_summary[task_id].update({"spawned": items.get("spawned")})
    global_summary["run_mode"] = "dead"
    return global_summary, task_summary, family_summary
def update_gui(self):
    """Rebuild the LED view widgets from the current state summaries.

    Combines task and family summaries, indexes them by cycle point and
    by task name, then delegates to the regular or transposed renderer.
    Always returns False (gobject idle-callback convention: run once).
    """
    self.action_required = False
    state_summary = {}
    state_summary.update(self.state_summary)
    state_summary.update(self.fam_state_summary)
    self.ledview_widgets()
    tasks_by_point_string = {}
    tasks_by_name = {}
    for id_ in state_summary:
        name, point_string = TaskID.split(id_)
        tasks_by_point_string.setdefault(point_string, [])
        tasks_by_point_string[point_string].append(name)
        tasks_by_name.setdefault(name, [])
        tasks_by_name[name].append(point_string)
    # (A sorted key list was formerly computed here but never used; removed.)
    if not self.is_transposed:
        self._update_gui_regular(tasks_by_name, state_summary)
    else:
        self._update_gui_transpose(tasks_by_point_string, state_summary)
    self.led_treeview.columns_autosize()
    if self.is_transposed == self.should_transpose_view:
        # Only select rows if we have not changed view mode.
        self._set_selected_rows()
    return False
def get_graph(self, group_nodes=None, ungroup_nodes=None,
              ungroup_recursive=False, ungroup_all=False, group_all=False):
    """Build, style, filter and display the suite dependency graph."""
    if not self.suiterc:
        return
    family_nodes = self.suiterc.get_first_parent_descendants()
    # Note this is used by "cylc graph" but not gcylc.
    # self.start_ and self.stop_point_string come from CLI.
    graph = CGraph.get_graph(
        self.suiterc,
        group_nodes=group_nodes, ungroup_nodes=ungroup_nodes,
        ungroup_recursive=ungroup_recursive,
        group_all=group_all, ungroup_all=ungroup_all,
        ignore_suicide=self.ignore_suicide,
        subgraphs_on=self.subgraphs_on)
    graph.graph_attr['rankdir'] = self.orientation
    # Style each node; cache is_on_sequence() results across nodes.
    seq_cache = {}
    for gnode in graph.iternodes():
        task_name, cycle_point = TaskID.split(gnode.get_name())
        if task_name in family_nodes:
            # Family nodes get a distinctive double-octagon shape.
            # Detecting ghost families would involve analysing triggers
            # in the suite's graphing.
            gnode.attr['shape'] = 'doubleoctagon'
        elif self.is_ghost_task(task_name, cycle_point, cache=seq_cache):
            # Ghost nodes get the dimmed ghost style.
            style_ghost_node(gnode)
    self.graph = graph
    self.filter_graph()
    self.set_dotcode(graph.string())
def update_gui(self):
    """Rebuild the LED view widgets from the current state summaries.

    Combines task and family summaries, indexes them by cycle point and
    by task name, then delegates to the regular or transposed renderer.
    Always returns False (gobject idle-callback convention: run once).
    """
    self.action_required = False
    state_summary = {}
    state_summary.update(self.state_summary)
    state_summary.update(self.fam_state_summary)
    self.ledview_widgets()
    tasks_by_point_string = {}
    tasks_by_name = {}
    for id_ in state_summary:
        name, point_string = TaskID.split(id_)
        tasks_by_point_string.setdefault(point_string, [])
        tasks_by_point_string[point_string].append(name)
        tasks_by_name.setdefault(name, [])
        tasks_by_name[name].append(point_string)
    # (A sorted key list was formerly computed here but never used; removed.)
    if not self.is_transposed:
        self._update_gui_regular(tasks_by_name, state_summary)
    else:
        self._update_gui_transpose(tasks_by_point_string, state_summary)
    self.led_treeview.columns_autosize()
    return False
def get_graph(self, group_nodes=None, ungroup_nodes=None,
              ungroup_recursive=False, ungroup_all=False, group_all=False):
    """Build, style, filter and display the suite dependency graph.

    group_nodes/ungroup_nodes default to empty lists (the previous
    mutable-default-argument form is normalised here to avoid the shared
    default-list pitfall; callers see identical behaviour).
    """
    if group_nodes is None:
        group_nodes = []
    if ungroup_nodes is None:
        ungroup_nodes = []
    if not self.suiterc:
        return
    family_nodes = self.suiterc.get_first_parent_descendants().keys()
    # (suite_polling_tasks was fetched here but never used; removed.)
    # Note this is used by "cylc graph" but not gcylc.
    # self.start_ and self.stop_point_string come from CLI.
    graph = self.suiterc.get_graph(
        group_nodes=group_nodes, ungroup_nodes=ungroup_nodes,
        ungroup_recursive=ungroup_recursive,
        group_all=group_all, ungroup_all=ungroup_all,
        ignore_suicide=self.ignore_suicide,
        subgraphs_on=self.subgraphs_on)
    graph.graph_attr['rankdir'] = self.orientation
    for node in graph.nodes():
        name, point_string = TaskID.split(node.get_name())
        if name in family_nodes:
            # Family nodes get a distinctive double-octagon shape.
            node.attr['shape'] = 'doubleoctagon'
    self.graph = graph
    self.filter_graph()
    self.set_dotcode(graph.string())
def match_ext_trigger(self, itask):
    """Match external triggers for a waiting task proxy.

    For each unsatisfied external trigger of "itask", look for a matching
    queued (message, id) pair; on match, mark the trigger satisfied,
    broadcast the event ID to the task's cycle point, and decrement the
    queued trigger's count (deleting it at zero). Returns True if any
    trigger was satisfied, None if there was nothing to do.
    """
    if not self.ext_triggers or not itask.state.external_triggers:
        return
    has_changed = False
    for trig, satisfied in itask.state.external_triggers.items():
        if satisfied:
            continue
        # Iterate a copy: the dict may be mutated (del) inside the loop.
        for qmsg, qid in self.ext_triggers.copy():
            if trig == qmsg:
                # Matched.
                point_string = TaskID.split(itask.identity)[1]
                # Set trigger satisfied.
                itask.state.external_triggers[trig] = True
                # Broadcast the event ID to the cycle point.
                if qid is not None:
                    self.put_broadcast(
                        [point_string], ['root'],
                        [{'environment': {'CYLC_EXT_TRIGGER_ID': qid}}],
                    )
                # Consume one occurrence of this queued trigger.
                self.ext_triggers[(qmsg, qid)] -= 1
                if not self.ext_triggers[(qmsg, qid)]:
                    del self.ext_triggers[(qmsg, qid)]
                has_changed = True
                break
    return has_changed
def _write_epilogue(cls, handle, job_conf):
    """Write the job script epilogue to "handle".

    Detaching ("use manual completion") tasks handle their own success
    messaging; otherwise the epilogue removes an empty work directory and
    sends the task-succeeded message. Ends with the batch system EOF
    marker line.
    """
    if job_conf['use manual completion']:
        handle.write(r"""

# (detaching task: cannot safely remove the WORK DIRECTORY here)
echo 'JOB SCRIPT EXITING: THIS TASK HANDLES ITS OWN COMPLETION MESSAGING'
trap '' EXIT
""")
    else:
        handle.write(r"""

# EMPTY WORK DIRECTORY REMOVE:
cd
rmdir $CYLC_TASK_WORK_DIR 2>/dev/null || true

# SEND TASK SUCCEEDED MESSAGE:
wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
cylc task message '%(message)s'

echo 'JOB SCRIPT EXITING (TASK SUCCEEDED)'
trap '' EXIT
""" % {"message": TaskMessage.SUCCEEDED})
    # Removed two dead statements: an unused TaskID.split() unpack and a
    # bare "job_conf['absolute submit number']" expression with no effect.
    handle.write("%s%s\n" % (BATCH_SYS_MANAGER.LINE_PREFIX_EOF,
                             os.path.dirname(
                                 job_conf['common job log path'])))
def match_ext_trigger(self, itask):
    """Match external triggers for a waiting task proxy.

    For each unsatisfied external trigger of "itask", look for a matching
    queued (message, id) pair; on match, mark the trigger satisfied,
    broadcast the event ID to the task's cycle point, and decrement the
    queued trigger's count (deleting it at zero). Returns True if any
    trigger was satisfied, None if there was nothing to do.
    """
    if not self.ext_triggers or not itask.state.external_triggers:
        return
    has_changed = False
    for trig, satisfied in itask.state.external_triggers.items():
        if satisfied:
            continue
        # Iterate a copy: the dict may be mutated (del) inside the loop.
        for qmsg, qid in self.ext_triggers.copy():
            if trig == qmsg:
                # Matched.
                point_string = TaskID.split(itask.identity)[1]
                # Set trigger satisfied.
                itask.state.external_triggers[trig] = True
                # Broadcast the event ID to the cycle point.
                if qid is not None:
                    self.put_broadcast(
                        [point_string], ['root'],
                        [{
                            'environment': {
                                'CYLC_EXT_TRIGGER_ID': qid
                            }
                        }],
                    )
                # Consume one occurrence of this queued trigger.
                self.ext_triggers[(qmsg, qid)] -= 1
                if not self.ext_triggers[(qmsg, qid)]:
                    del self.ext_triggers[(qmsg, qid)]
                has_changed = True
                break
    return has_changed
def _write_epilogue(cls, handle, job_conf):
    """Write the job script epilogue to "handle".

    Detaching ("use manual completion") tasks handle their own success
    messaging; otherwise the epilogue removes an empty work directory and
    sends the task-succeeded message. Ends with the batch system EOF
    marker line.
    """
    if job_conf["use manual completion"]:
        handle.write(
            r"""

# (detaching task: cannot safely remove the WORK DIRECTORY here)
echo 'JOB SCRIPT EXITING: THIS TASK HANDLES ITS OWN COMPLETION MESSAGING'
trap '' EXIT
"""
        )
    else:
        handle.write(
            r"""

# EMPTY WORK DIRECTORY REMOVE:
cd
rmdir $CYLC_TASK_WORK_DIR 2>/dev/null || true

# SEND TASK SUCCEEDED MESSAGE:
wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
cylc task message '%(message)s'

echo 'JOB SCRIPT EXITING (TASK SUCCEEDED)'
trap '' EXIT
""" % {"message": TaskMessage.SUCCEEDED}
        )
    # Removed two dead statements: an unused TaskID.split() unpack and a
    # bare job_conf["absolute submit number"] expression with no effect.
    handle.write("%s%s" % (BATCH_SYS_MANAGER.LINE_PREFIX_EOF,
                           os.path.dirname(
                               job_conf["common job log path"])))
def filter_families(self, families):
    """Remove family summaries if no members are present."""
    kept = {}
    for fam_id, summary in families.items():
        fam_name, point_string = TaskID.split(fam_id)
        # Keep the family only if at least one member task is present.
        has_member = any(
            TaskID.get(member, point_string) in self.state_summary
            for member in self.descendants[fam_name])
        if has_member:
            kept[fam_id] = summary
    return kept
def get_stop_state_summary(lines):
    """Parse state dump content into summary maps.

    "lines" is the state dump split into lines (consumed destructively).
    Returns (global_summary, task_summary) dicts, or None if the dump is
    too short to parse.
    """
    global_summary = {}
    task_summary = {}
    if len(lines) == 0 or len(lines) < 3:
        return None
    # Strip any "Remote command ..." noise lines before parsing.
    for line in list(lines):
        if line.startswith('Remote command'):
            lines.remove(line)
    line0 = lines.pop(0)
    if line0.startswith('suite time') or \
            line0.startswith('simulation time'):
        # backward compatibility with pre-5.4.11 state dumps
        global_summary["last_updated"] = time.time()
    else:
        # (line0 is run mode)
        # Scan forward to the "time : ..." line.
        line1 = lines.pop(0)
        while not line1.startswith("time :"):
            line1 = lines.pop(0)
        try:
            # Format: "time : <human time> (<unix time>)".
            time_string = line1.rstrip().split(' : ')[1]
            unix_time_string = time_string.rsplit('(', 1)[1].rstrip(")")
            global_summary["last_updated"] = int(unix_time_string)
        except (TypeError, ValueError, IndexError):
            # back compat pre cylc-6
            global_summary["last_updated"] = time.time()
    # Skip initial and final cycle points.
    lines[0:2] = []
    global_summary["status_string"] = SUITE_STATUS_STOPPED
    while lines:
        line = lines.pop(0)
        if line.startswith("class") or line.startswith("Begin task"):
            # Section headers - skip.
            continue
        try:
            (task_id, info) = line.split(' : ')
            name, point_string = TaskID.split(task_id)
        except ValueError:
            # Not a "task_id : info" line - skip.
            continue
        task_summary.setdefault(task_id, {
            "name": name,
            "point": point_string,
            "label": point_string
        })
        # Reconstruct state from a dumped state string of "k=v, k=v" pairs.
        items = dict([p.split("=") for p in info.split(', ')])
        state = items.get("status")
        if state == 'submitting':
            # backward compatibility for state dumps generated prior to #787
            state = TASK_STATUS_READY
        task_summary[task_id].update({"state": state})
        task_summary[task_id].update({"spawned": items.get("spawned")})
    global_summary["run_mode"] = "dead"
    return global_summary, task_summary
def node_attr_by_taskname(self, node_string):
    """Return the configured dot attributes for this node's task name."""
    try:
        name = TaskID.split(node_string)[0]
    except ValueError:
        # Special node?
        if node_string.startswith("__remove_"):
            # Removal-marker nodes carry no task styling.
            return []
        raise
    if name not in self.task_attr:
        return []
    return self.task_attr[name]
def node_attr_by_taskname(self, node_string):
    """Return the configured dot attributes for this node's task name.

    Only the task name is needed, so index the split result rather than
    unpacking an unused cycle point (consistent with the sibling
    implementation of this method elsewhere in the codebase).
    """
    try:
        name = TaskID.split(node_string)[0]
    except ValueError:
        # Special node?
        if node_string.startswith("__remove_"):
            return []
        raise
    if name in self.task_attr:
        return self.task_attr[name]
    else:
        return []
def get_id_summary(id_, task_state_summary, fam_state_summary, id_family_map):
    """Return some state information about a task or family id.

    Builds a text block: the id itself, any title/description metadata,
    then an indented tree of member states (family members expanded
    depth-first via id_family_map). Long member lists (>10 lines) are
    collapsed into per-state counts. Returns the id unchanged if no
    information was found.
    """
    prefix_text = ""
    meta_text = ""
    sub_text = ""
    sub_states = {}
    # Work stack of (id, indent depth) pairs, seeded with the target id.
    stack = [(id_, 0)]
    done_ids = []
    for summary in [task_state_summary, fam_state_summary]:
        if id_ in summary:
            title = summary[id_].get('title')
            if title:
                meta_text += "\n" + title.strip()
            description = summary[id_].get('description')
            if description:
                meta_text += "\n" + description.strip()
    while stack:
        this_id, depth = stack.pop(0)
        if this_id in done_ids:
            # family dive down will give duplicates
            continue
        done_ids.append(this_id)
        prefix = "\n" + " " * 4 * depth + this_id
        if this_id in task_state_summary:
            submit_num = task_state_summary[this_id].get('submit_num')
            if submit_num:
                prefix += "(%02d)" % submit_num
            state = task_state_summary[this_id]['state']
            sub_text += prefix + " " + state
            sub_states.setdefault(state, 0)
            sub_states[state] += 1
        elif this_id in fam_state_summary:
            name, point_string = TaskID.split(this_id)
            sub_text += prefix + " " + fam_state_summary[this_id]['state']
            # Push members (reverse-sorted so they pop in sorted order).
            for child in reversed(sorted(id_family_map[name])):
                child_id = TaskID.get(child, point_string)
                stack.insert(0, (child_id, depth + 1))
        if not prefix_text:
            # First line becomes the header; restart the member listing.
            prefix_text = sub_text.strip()
            sub_text = ""
    if len(sub_text.splitlines()) > 10:
        # Too many members to list: show per-state counts instead,
        # most-populous state first (Python 2 cmp-style sort).
        state_items = sub_states.items()
        state_items.sort()
        state_items.sort(lambda x, y: cmp(y[1], x[1]))
        sub_text = ""
        for state, number in state_items:
            sub_text += "\n {0} tasks {1}".format(number, state)
    if sub_text and meta_text:
        sub_text = "\n" + sub_text
    text = prefix_text + meta_text + sub_text
    if not text:
        return id_
    return text
def _get_tasks_info(schd):
    """Retrieve task summary info and states.

    Returns (task_summary, task_states): summaries keyed by task
    identity, and states keyed by cycle point then task name. Runahead
    pool tasks are included with their state overridden to
    TASK_STATUS_RUNAHEAD.
    """
    task_summary = {}
    task_states = {}

    def _add_task(task, state_override=None):
        # Record one task's summary under both indexing schemes.
        ts = task.get_state_summary()
        if state_override is not None:
            ts['state'] = state_override
        task_summary[task.identity] = ts
        name, point_string = TaskID.split(task.identity)
        task_states.setdefault(point_string, {})
        task_states[point_string][name] = ts['state']

    for task in schd.pool.get_tasks():
        _add_task(task)
    for task in schd.pool.get_rh_tasks():
        _add_task(task, TASK_STATUS_RUNAHEAD)
    return task_summary, task_states
def get_graph(self, group_nodes=None, ungroup_nodes=None,
              ungroup_recursive=False, ungroup_all=False, group_all=False):
    """Build, style, filter and display the suite dependency graph.

    Colors are taken from the current GTK theme so the graph matches the
    surrounding UI. No-op if no suite definition is loaded.
    """
    if not self.suiterc:
        return
    family_nodes = self.suiterc.get_first_parent_descendants()
    # Note this is used by "cylc graph" but not gcylc.
    # self.start_ and self.stop_point_string come from CLI.
    bg_color = gtk_rgb_to_hex(
        getattr(self.style, 'bg', None)[gtk.STATE_NORMAL])
    fg_color = gtk_rgb_to_hex(
        getattr(self.style, 'fg', None)[gtk.STATE_NORMAL])
    graph = CGraph.get_graph(
        self.suiterc,
        group_nodes=group_nodes,
        ungroup_nodes=ungroup_nodes,
        ungroup_recursive=ungroup_recursive,
        group_all=group_all,
        ungroup_all=ungroup_all,
        ignore_suicide=self.ignore_suicide,
        subgraphs_on=self.subgraphs_on,
        bgcolor=bg_color,
        fgcolor=fg_color)
    graph.graph_attr['rankdir'] = self.orientation
    # Style nodes.
    cache = {}  # For caching is_on_sequence() calls.
    # Off-sequence ("ghost") nodes are drawn semi-transparent.
    fg_ghost = "%s%s" % (fg_color, GHOST_TRANSP_HEX)
    for node in graph.iternodes():
        name, point = TaskID.split(node.get_name())
        if name.startswith('@'):
            # Style action trigger nodes.
            node.attr['shape'] = 'none'
        elif name in family_nodes:
            # Style family nodes.
            node.attr['shape'] = 'doubleoctagon'
            # Detecting ghost families would involve analysing triggers
            # in the suite's graphing.
        elif self.is_off_sequence(name, point, cache=cache):
            node.attr['style'] = 'dotted'
            node.attr['color'] = fg_ghost
            node.attr['fontcolor'] = fg_ghost
    self.graph = graph
    self.filter_graph()
    self.set_dotcode(graph.string())
def filter_families(self, families):
    """Remove family summaries if no members are present."""
    # TODO - IS THERE ANY NEED TO DO THIS?
    fam_states = {}
    for fam_id, summary in families.items():
        fam_name, point_string = TaskID.split(fam_id)
        # Keep the family as soon as one member task is found.
        for member in self.descendants[fam_name]:
            if TaskID.get(member, point_string) in self.state_summary:
                fam_states[fam_id] = summary
                break
    return fam_states
def style_node(self, node_string, autoURL, base=False):
    """Set a node's label (name, polling target, cycle point) and URL."""
    node = self.get_node(node_string)
    name, point_string = TaskID.split(node_string)
    label_parts = [name]
    if name in self.suite_polling_tasks:
        # Show the polled suite's task under the name.
        label_parts.append(self.suite_polling_tasks[name][3])
    label_parts.append(point_string)
    node.attr['label'] = "\\n".join(label_parts)
    if autoURL:
        if base:
            # TODO - This is only called from cylc_add_edge in this
            # base class ... should it also be called from add_node?
            node.attr['URL'] = 'base:' + node_string
        else:
            node.attr['URL'] = node_string
def add_cycle_point_subgraphs(self, edges):
    """Draw nodes within cycle point groups (subgraphs)."""
    # Bucket node ids by their cycle point.
    ids_by_point = {}
    for edge_entry in edges:
        for node_id in edge_entry[:2]:
            if node_id is None:
                continue
            point = TaskID.split(node_id)[1]
            ids_by_point.setdefault(point, []).append(node_id)
    # One dashed cluster per cycle point.
    for point, node_ids in ids_by_point.items():
        self.add_subgraph(
            nbunch=node_ids, name="cluster_" + point, label=point,
            fontsize=28, rank="max", style="dashed")
def style_node(self, node_string):
    """Set a node's label (name, polling target, cycle point) and URL."""
    node = self.get_node(node_string)
    try:
        name, point_string = TaskID.split(node_string)
    except ValueError:
        # Special node?
        if not node_string.startswith("__remove_"):
            raise
        # Removal-marker node: dashed outline with a scissors glyph.
        node.attr['style'] = 'dashed'
        node.attr['label'] = u'\u2702'
        return
    label_parts = [name]
    if name in self.suite_polling_tasks:
        # Show the polled suite's task under the name.
        label_parts.append(self.suite_polling_tasks[name][3])
    label_parts.append(point_string)
    node.attr['label'] = "\\n".join(label_parts)
    node.attr['URL'] = node_string
def retrieve(self, itask): """Match external triggers for a waiting task proxy.""" # Note this has to allow multiple same-message triggers to be queued # and only used one at a time. if self.queue.empty(): return if len(itask.state.external_triggers) == 0: return bcast = BroadcastServer.get_inst() queued = [] while True: try: queued.append(self.queue.get_nowait()) except Empty: break used = [] for trig, satisfied in itask.state.external_triggers.items(): if satisfied: continue for qmsg, qid in queued: if trig == qmsg: # Matched. name, point_string = TaskID.split(itask.identity) # Set trigger satisfied. itask.state.external_triggers[trig] = True cylc.flags.pflag = True # Broadcast the event ID to the cycle point. if qid is not None: bcast.put( [point_string], ["root"], [{ 'environment': { 'CYLC_EXT_TRIGGER_ID': qid } }], not_from_client=True ) used.append((qmsg, qid)) break for q in queued: if q not in used: self.queue.put(q)
def get(self, task_id=None):
    """Retrieve all broadcast variables that target a given task ID."""
    if not task_id:
        # No target: return every broadcast setting.
        return self.settings
    name, point_string = TaskID.split(task_id)
    result = {}
    # Apply settings in precedence order, least to most specific:
    # all:root -> all:FAM -> ... -> all:task
    # -> tag:root -> tag:FAM -> ... -> tag:task
    for cycle in self.ALL_CYCLE_POINTS_STRS + [point_string]:
        if cycle not in self.settings:
            continue
        cycle_settings = self.settings[cycle]
        for namespace in reversed(self.linearized_ancestors[name]):
            if namespace in cycle_settings:
                self._addict(result, cycle_settings[namespace])
    return result
def __init__(self, task_id, filenames, cmd_tmpls, init_active_index):
    """Set up a multi-file log viewer for one task.

    Maps each log file path to a short display name of the form
    "submit_num/basename" when the path follows the standard job log
    layout for this task; otherwise the full path is used.

    Fixes a latent bug: previously, when the layout check failed without
    raising, "name" kept its value from the previous iteration (or was
    unbound on the first), mislabelling the entry.
    """
    self.filenames = OrderedDict()
    name_str, point_str = TaskID.split(task_id)
    for filename in filenames:
        # Default to the raw path; replaced by a short label on match.
        name = filename
        try:
            f_point_str, f_name_str, f_submit_num_str, f_base_name = (
                filename.rsplit(os.sep, 4)[1:])
            if (f_point_str == point_str and f_name_str == name_str and
                    int(f_submit_num_str) and f_base_name):
                name = f_submit_num_str + os.sep + f_base_name
                if ":" in filename:
                    # Prefix with the host/scheme part of "host:path".
                    name += " (%s)" % (filename.split(":", 1)[0])
        except ValueError:
            # Path does not match the job log layout.
            name = filename
        self.filenames[name] = filename
    self.init_active_index = init_active_index
    self.cmd_tmpls = cmd_tmpls
    logviewer.__init__(self, task_id, None,
                       filenames[self.init_active_index])
def add_cycle_point_subgraphs(self, edges):
    """Draw nodes within cycle point groups (subgraphs)."""
    # Bucket node ids by their cycle point.
    ids_by_point = {}
    for edge_entry in edges:
        for node_id in edge_entry[:2]:
            if node_id is None:
                continue
            try:
                point = TaskID.split(node_id)[1]
            except IndexError:
                # Probably a special node - ignore it.
                continue
            ids_by_point.setdefault(point, []).append(node_id)
    # One dashed cluster per cycle point.
    for point, node_ids in ids_by_point.items():
        self.add_subgraph(
            nbunch=node_ids, name="cluster_" + point, label=point,
            fontsize=28, rank="max", style="dashed")
def __init__(self, task_id, filenames, cmd_tmpls, init_active_index):
    """Set up a multi-file log viewer for one task.

    Maps each log file path to a short display name of the form
    "submit_num/basename" when the path follows the standard job log
    layout for this task; otherwise the full path is used.

    Fixes a latent bug: previously, when the layout check failed without
    raising, "name" kept its value from the previous iteration (or was
    unbound on the first), mislabelling the entry.
    """
    self.filenames = OrderedDict()
    name_str, point_str = TaskID.split(task_id)
    for filename in filenames:
        # Default to the raw path; replaced by a short label on match.
        name = filename
        try:
            f_point_str, f_name_str, f_submit_num_str, f_base_name = (
                filename.rsplit(os.sep, 4)[1:])
            if (f_point_str == point_str and f_name_str == name_str and
                    int(f_submit_num_str) and f_base_name):
                name = f_submit_num_str + os.sep + f_base_name
                if ":" in filename:
                    # Prefix with the host/scheme part of "host:path".
                    name += " (%s)" % (filename.split(":", 1)[0])
        except ValueError:
            # Path does not match the job log layout.
            name = filename
        self.filenames[name] = filename
    self.init_active_index = init_active_index
    self.cmd_tmpls = cmd_tmpls
    logviewer.__init__(
        self, task_id, None, filenames[self.init_active_index])
def get_stop_state_summary(lines):
    """Parse state dump content into summary maps.

    "lines" is the state dump split into lines (consumed destructively).
    Returns (global_summary, task_summary), or None if the dump is too
    short to parse. NOTE(review): unlike sibling variants, this one has
    no try/except around the "time :" parsing - a malformed time line
    would raise; confirm whether that is intended.
    """
    global_summary = {}
    task_summary = {}
    if len(lines) == 0 or len(lines) < 3:
        return None
    # Strip any "Remote command ..." noise lines before parsing.
    for line in list(lines):
        if line.startswith('Remote command'):
            lines.remove(line)
    # Scan forward to the "time : <human time> (<unix time>)" line.
    line1 = lines.pop(0)
    while not line1.startswith("time :"):
        line1 = lines.pop(0)
    time_string = line1.rstrip().split(' : ')[1]
    unix_time_string = time_string.rsplit('(', 1)[1].rstrip(")")
    global_summary["last_updated"] = int(unix_time_string)
    # Skip initial and final cycle points.
    lines[0:2] = []
    global_summary["status_string"] = SUITE_STATUS_STOPPED
    while lines:
        line = lines.pop(0)
        if line.startswith("class") or line.startswith("Begin task"):
            # Section headers - skip.
            continue
        try:
            (task_id, info) = line.split(' : ')
            name, point_string = TaskID.split(task_id)
        except ValueError:
            # Not a "task_id : info" line - skip.
            continue
        task_summary.setdefault(task_id, {"name": name,
                                          "point": point_string,
                                          "label": point_string})
        # Reconstruct state from a dumped state string of "k=v, k=v" pairs.
        items = dict(p.split("=") for p in info.split(', '))
        state = items.get("status")
        if state == 'submitting':
            # backward compatibility for state dumps generated prior to #787
            state = TASK_STATUS_READY
        task_summary[task_id].update({"state": state})
        task_summary[task_id].update({"spawned": items.get("spawned")})
    global_summary["run_mode"] = "dead"
    return global_summary, task_summary
def _write_epilogue(cls, handle, job_conf):
    """Write the job script epilogue to "handle".

    The epilogue removes an empty work directory, sends the
    task-succeeded message, then ends with the batch system EOF marker
    line.
    """
    handle.write(r"""

# EMPTY WORK DIRECTORY REMOVE:
cd
rmdir $CYLC_TASK_WORK_DIR 2>/dev/null || true

# SEND TASK SUCCEEDED MESSAGE:
wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
cylc task message '%(message)s'

echo 'JOB SCRIPT EXITING (TASK SUCCEEDED)'
trap '' EXIT
""" % {"message": TASK_OUTPUT_SUCCEEDED})
    # Removed two dead statements: an unused TaskID.split() unpack and a
    # bare "job_conf['absolute submit number']" expression with no effect.
    handle.write("%s%s\n" % (
        BATCH_SYS_MANAGER.LINE_PREFIX_EOF,
        os.path.dirname(job_conf['common job log path'])))
def style_node(self, node_string, autoURL, base=False):
    """Set a node's label (name, polling target, cycle point) and URL."""
    node = self.get_node(node_string)
    try:
        name, point_string = TaskID.split(node_string)
    except ValueError:
        # Special node?
        if not node_string.startswith("__remove_"):
            raise
        # Removal-marker node: dashed outline with a scissors glyph.
        node.attr['style'] = 'dashed'
        node.attr['label'] = u'\u2702'
        return
    label_parts = [name]
    if name in self.suite_polling_tasks:
        # Show the polled suite's task under the name.
        label_parts.append(self.suite_polling_tasks[name][3])
    label_parts.append(point_string)
    node.attr['label'] = "\\n".join(label_parts)
    if autoURL:
        if base:
            # TODO - This is only called from cylc_add_edge in this
            # base class ... should it also be called from add_node?
            node.attr['URL'] = 'base:' + node_string
        else:
            node.attr['URL'] = node_string
def test_split(self):
    """TaskID.split yields [name, point] for valid "name.point" ids."""
    cases = [
        ("a.1", ["a", '1']),
        ("a._1", ["a", '_1']),
        ("WTAS.20101010T101010", ["WTAS", '20101010T101010']),
    ]
    for task_id, expected in cases:
        self.assertEqual(expected, TaskID.split(task_id))
def update_graph(self):
    """Rebuild the dependency graph view from the latest suite state.

    Fetches the raw graph for the current cycle point window, removes
    filtered-out and base-only nodes, styles families and live tasks,
    and redraws only if the edge set changed. Returns False on fetch
    failure, otherwise "not needs_redraw".
    """
    # TODO - check edges against resolved ones
    # (adding new ones, and nodes, if necessary)
    self.action_required = False
    # Determine the cycle point window to graph.
    try:
        self.oldest_point_string = (
            self.global_summary['oldest cycle point string'])
        self.newest_point_string = (
            self.global_summary['newest cycle point string'])
        if TASK_STATUS_RUNAHEAD not in self.updater.filter_states_excl:
            # Get a graph out to the max runahead point.
            try:
                self.newest_point_string = (
                    self.
                    global_summary['newest runahead cycle point string'])
            except KeyError:
                # back compat <= 6.2.0
                pass
    except KeyError:
        # Pre cylc-6 back compat.
        self.oldest_point_string = (
            self.global_summary['oldest cycle time'])
        self.newest_point_string = (
            self.global_summary['newest cycle time'])
    if self.focus_start_point_string:
        # A focus window overrides the full range.
        oldest = self.focus_start_point_string
        newest = self.focus_stop_point_string
    else:
        oldest = self.oldest_point_string
        newest = self.newest_point_string
    # Empty group/ungroup lists are sent as None.
    group_for_server = self.group
    if self.group == []:
        group_for_server = None
    ungroup_for_server = self.ungroup
    if self.ungroup == []:
        ungroup_for_server = None
    try:
        res = self.updater.suite_info_client.get_info(
            'get_graph_raw', start_point_string=oldest,
            stop_point_string=newest,
            group_nodes=group_for_server,
            ungroup_nodes=ungroup_for_server,
            ungroup_recursive=self.ungroup_recursive,
            group_all=self.group_all, ungroup_all=self.ungroup_all)
    except Exception as exc:
        # Comms failure: report and bail out without redrawing.
        print >> sys.stderr, str(exc)
        return False
    self.have_leaves_and_feet = True
    gr_edges, suite_polling_tasks, self.leaves, self.feet = res
    gr_edges = [tuple(edge) for edge in gr_edges]
    # Only redraw if the edge set has changed since last time.
    current_id = self.get_graph_id(gr_edges)
    needs_redraw = current_id != self.prev_graph_id
    if needs_redraw:
        self.graphw = CGraphPlain(self.cfg.suite, suite_polling_tasks)
        self.graphw.add_edges(gr_edges, ignore_suicide=self.ignore_suicide)
        nodes_to_remove = set()
        # Remove nodes representing filtered-out tasks.
        if (self.updater.filter_name_string or
                self.updater.filter_states_excl):
            for node in self.graphw.nodes():
                id = node.get_name()
                # Don't need to guard against special nodes here (yet).
                name, point_string = TaskID.split(id)
                if name not in self.all_families:
                    # This node is a task, not a family.
                    if id in self.updater.filt_task_ids:
                        nodes_to_remove.add(node)
                    elif id not in self.updater.kept_task_ids:
                        # A base node - these only appear in the graph.
                        filter_string = self.updater.filter_name_string
                        if (filter_string and
                                filter_string not in name and
                                not re.search(filter_string, name)):
                            # A base node that fails the name filter.
                            nodes_to_remove.add(node)
                elif id in self.fam_state_summary:
                    # Remove family nodes if all members filtered out.
                    remove = True
                    for mem in self.descendants[name]:
                        mem_id = TaskID.get(mem, point_string)
                        if mem_id in self.updater.kept_task_ids:
                            remove = False
                            break
                    if remove:
                        nodes_to_remove.add(node)
                elif id in self.updater.full_fam_state_summary:
                    # An updater-filtered-out family.
                    nodes_to_remove.add(node)
        # Base node cropping.
        if self.crop:
            # Remove all base nodes.
            for node in (set(self.graphw.nodes()) - nodes_to_remove):
                if node.get_name() not in self.state_summary:
                    nodes_to_remove.add(node)
        else:
            # Remove cycle points containing only base nodes.
            non_base_point_strings = set()
            point_string_nodes = {}
            for node in set(self.graphw.nodes()) - nodes_to_remove:
                node_id = node.get_name()
                name, point_string = TaskID.split(node_id)
                point_string_nodes.setdefault(point_string, [])
                point_string_nodes[point_string].append(node)
                if (node_id in self.state_summary or
                        node_id in self.fam_state_summary):
                    non_base_point_strings.add(point_string)
            pure_base_point_strings = (
                set(point_string_nodes) - non_base_point_strings)
            for point_string in pure_base_point_strings:
                for node in point_string_nodes[point_string]:
                    nodes_to_remove.add(node)
        self.graphw.cylc_remove_nodes_from(list(nodes_to_remove))
        # TODO - remove base nodes only connected to other base nodes?
        # Should these even exist any more?
        # Make family nodes octagons.
        for node in self.graphw.nodes():
            node_id = node.get_name()
            try:
                name, point_string = TaskID.split(node_id)
            except ValueError:
                # Special node.
                continue
            if name in self.all_families:
                node.attr['shape'] = 'doubleoctagon'
        if self.subgraphs_on:
            self.graphw.add_cycle_point_subgraphs(gr_edges)
        # Set base node style defaults
        for node in self.graphw.nodes():
            node.attr.setdefault('style', 'filled')
            node.attr['color'] = '#888888'
            node.attr['fillcolor'] = 'white'
            node.attr['fontcolor'] = '#888888'
        # Overlay live task/family state styling where nodes exist.
        for id in self.state_summary:
            try:
                node = self.graphw.get_node(id)
            except KeyError:
                continue
            self.set_live_node_attr(node, id)
        for id in self.fam_state_summary:
            try:
                node = self.graphw.get_node(id)
            # NOTE(review): bare except - should probably be KeyError like
            # the task loop above; confirm before narrowing.
            except:
                continue
            self.set_live_node_attr(node, id)
        self.graphw.graph_attr['rankdir'] = self.orientation
        if self.write_dot_frames:
            # Dump each frame to a numbered .dot file for debugging.
            arg = os.path.join(
                self.suite_share_dir,
                'frame' + '-' + str(self.graph_frame_count) + '.dot')
            self.graphw.write(arg)
            self.graph_frame_count += 1
    self.prev_graph_id = current_id
    return not needs_redraw
def test_split(self): self.assertEqual(["a", '1'], TaskID.split("a.1")) self.assertEqual(["a", '_1'], TaskID.split("a._1")) self.assertEqual( ["WTAS", '20101010T101010'], TaskID.split("WTAS.20101010T101010"))
def update_graph(self): # TODO - check edges against resolved ones # (adding new ones, and nodes, if necessary) try: self.oldest_point_string = ( self.global_summary['oldest cycle point string']) self.newest_point_string = ( self.global_summary['newest cycle point string']) if 'runahead' not in self.updater.filter_states_excl: # Get a graph out to the max runahead point. try: self.newest_point_string = ( self.global_summary[ 'newest runahead cycle point string']) except KeyError: # back compat <= 6.2.0 pass except KeyError: # Pre cylc-6 back compat. self.oldest_point_string = ( self.global_summary['oldest cycle time']) self.newest_point_string = ( self.global_summary['newest cycle time']) if self.focus_start_point_string: oldest = self.focus_start_point_string newest = self.focus_stop_point_string else: oldest = self.oldest_point_string newest = self.newest_point_string try: res = self.updater.suite_info_client.get_info( 'get_graph_raw', oldest, newest, self.group, self.ungroup, self.ungroup_recursive, self.group_all, self.ungroup_all) except TypeError: # Back compat with pre cylc-6 suite daemons. res = self.updater.suite_info_client.get( 'get_graph_raw', oldest, newest, False, self.group, self.ungroup, self.ungroup_recursive, self.group_all, self.ungroup_all) except Exception as exc: # PyroError? 
print >> sys.stderr, str(exc) return False # backward compatibility for old suite daemons still running self.have_leaves_and_feet = False if isinstance(res, list): # prior to suite-polling tasks in 5.4.0 gr_edges = res suite_polling_tasks = [] self.leaves = [] self.feet = [] else: if len(res) == 2: # prior to graph view grouping fix in 5.4.2 gr_edges, suite_polling_tasks = res self.leaves = [] self.feet = [] elif len(res) == 4: # 5.4.2 and later self.have_leaves_and_feet = True gr_edges, suite_polling_tasks, self.leaves, self.feet = res current_id = self.get_graph_id(gr_edges) needs_redraw = current_id != self.prev_graph_id if needs_redraw: self.graphw = graphing.CGraphPlain( self.cfg.suite, suite_polling_tasks) self.graphw.add_edges( gr_edges, ignore_suicide=self.ignore_suicide) nodes_to_remove = set() # Remove nodes representing filtered-out tasks. if (self.updater.filter_name_string or self.updater.filter_states_excl): for node in self.graphw.nodes(): id = node.get_name() # Don't need to guard against special nodes here (yet). name, point_string = TaskID.split(id) if name not in self.all_families: # This node is a task, not a family. if id in self.updater.filt_task_ids: nodes_to_remove.add(node) elif id not in self.updater.kept_task_ids: # A base node - these only appear in the graph. filter_string = self.updater.filter_name_string if (filter_string and filter_string not in name and not re.search(filter_string, name)): # A base node that fails the name filter. nodes_to_remove.add(node) elif id in self.fam_state_summary: # Remove family nodes if all members filtered out. remove = True for mem in self.descendants[name]: mem_id = TaskID.get(mem, point_string) if mem_id in self.updater.kept_task_ids: remove = False break if remove: nodes_to_remove.add(node) elif id in self.updater.full_fam_state_summary: # An updater-filtered-out family. nodes_to_remove.add(node) # Base node cropping. if self.crop: # Remove all base nodes. 
for node in (set(self.graphw.nodes()) - nodes_to_remove): if node.get_name() not in self.state_summary: nodes_to_remove.add(node) else: # Remove cycle points containing only base nodes. non_base_point_strings = set() point_string_nodes = {} for node in set(self.graphw.nodes()) - nodes_to_remove: node_id = node.get_name() name, point_string = TaskID.split(node_id) point_string_nodes.setdefault(point_string, []) point_string_nodes[point_string].append(node) if (node_id in self.state_summary or node_id in self.fam_state_summary): non_base_point_strings.add(point_string) pure_base_point_strings = ( set(point_string_nodes) - non_base_point_strings) for point_string in pure_base_point_strings: for node in point_string_nodes[point_string]: nodes_to_remove.add(node) self.graphw.cylc_remove_nodes_from(list(nodes_to_remove)) # TODO - remove base nodes only connected to other base nodes? # Should these even exist any more? # Make family nodes octagons. for node in self.graphw.nodes(): node_id = node.get_name() try: name, point_string = TaskID.split(node_id) except ValueError: # Special node. 
continue if name in self.all_families: if name in self.triggering_families: node.attr['shape'] = 'doubleoctagon' else: node.attr['shape'] = 'tripleoctagon' if self.subgraphs_on: self.graphw.add_cycle_point_subgraphs(gr_edges) # Set base node style defaults for node in self.graphw.nodes(): node.attr.setdefault('style', 'filled') node.attr['color'] = '#888888' node.attr['fillcolor'] = 'white' node.attr['fontcolor'] = '#888888' for id in self.state_summary: try: node = self.graphw.get_node(id) except KeyError: continue self.set_live_node_attr(node, id) for id in self.fam_state_summary: try: node = self.graphw.get_node(id) except: continue self.set_live_node_attr(node, id) self.graphw.graph_attr['rankdir'] = self.orientation self.action_required = False if self.write_dot_frames: arg = os.path.join( self.suite_share_dir, 'frame' + '-' + str(self.graph_frame_count) + '.dot') self.graphw.write(arg) self.graph_frame_count += 1 self.prev_graph_id = current_id return not needs_redraw
def update(self, tasks, tasks_rh, min_point, max_point, max_point_rh, paused, will_pause_at, stopping, will_stop_at, ns_defn_order, reloading): task_summary = {} global_summary = {} family_summary = {} task_states = {} fs = None for tlist in [tasks, tasks_rh]: for task in tlist: ts = task.get_state_summary() if fs: ts['state'] = fs task_summary[task.identity] = ts name, point_string = TaskID.split(task.identity) point_string = str(point_string) task_states.setdefault(point_string, {}) task_states[point_string][name] = ( task_summary[task.identity]['state']) fs = 'runahead' fam_states = {} all_states = [] for point_string, c_task_states in task_states.items(): # For each cycle point, construct a family state tree # based on the first-parent single-inheritance tree c_fam_task_states = {} config = SuiteConfig.get_inst() for key, parent_list in ( config.get_first_parent_ancestors().items()): state = c_task_states.get(key) if state is None: continue all_states.append(state) for parent in parent_list: if parent == key: continue c_fam_task_states.setdefault(parent, []) c_fam_task_states[parent].append(state) for fam, child_states in c_fam_task_states.items(): f_id = TaskID.get(fam, point_string) state = extract_group_state(child_states) if state is None: continue try: famcfg = config.cfg['runtime'][fam] except KeyError: famcfg = {} description = famcfg.get('description') title = famcfg.get('title') family_summary[f_id] = { 'name': fam, 'description': description, 'title': title, 'label': point_string, 'state': state } all_states.sort() # Compute state_counts (total, and per cycle). 
state_count_totals = {} state_count_cycles = {} for point_string, name_states in task_states.items(): count = {} for name, state in name_states.items(): try: count[state] += 1 except KeyError: count[state] = 1 try: state_count_totals[state] += 1 except KeyError: state_count_totals[state] = 1 state_count_cycles[point_string] = count global_summary['oldest cycle point string'] = ( self.str_or_None(min_point)) global_summary['newest cycle point string'] = ( self.str_or_None(max_point)) global_summary['newest runahead cycle point string'] = ( self.str_or_None(max_point_rh)) if cylc.flags.utc: global_summary['daemon time zone info'] = TIME_ZONE_UTC_INFO else: global_summary['daemon time zone info'] = TIME_ZONE_LOCAL_INFO global_summary['last_updated'] = time.time() global_summary['run_mode'] = self.run_mode global_summary['paused'] = paused global_summary['stopping'] = stopping global_summary['will_pause_at'] = self.str_or_None(will_pause_at) global_summary['will_stop_at'] = self.str_or_None(will_stop_at) global_summary['states'] = all_states global_summary['namespace definition order'] = ns_defn_order global_summary['reloading'] = reloading global_summary['state totals'] = state_count_totals self._summary_update_time = time.time() # Replace the originals (atomic update, for access from other threads). self.task_summary = task_summary self.global_summary = global_summary self.family_summary = family_summary task_states = {} self.first_update_completed = True self.state_count_totals = state_count_totals self.state_count_cycles = state_count_cycles
    def update_gui(self):
        """Refresh the graph view from the latest suite state.

        Fetches raw graph edges from the suite daemon, rebuilds the
        graphviz graph only when the edge set (graph id) has changed,
        applies filtering/cropping/family shapes, then restyles live
        nodes and re-renders via update_xdot() on every call.
        """
        # TODO - check edges against resolved ones
        # (adding new ones, and nodes, if necessary)
        self.action_required = False
        if not self.global_summary:
            # No summary data yet (e.g. not connected).
            return
        self.oldest_point_string = (
            self.global_summary['oldest cycle point string'])
        self.newest_point_string = (
            self.global_summary['newest cycle point string'])
        if TASK_STATUS_RUNAHEAD not in self.updater.filter_states_excl:
            # Get a graph out to the max runahead point.
            self.newest_point_string = (
                self.global_summary[
                    'newest runahead cycle point string'])
        if self.focus_start_point_string:
            # User has focused on a specific cycle point range.
            oldest = self.focus_start_point_string
            newest = self.focus_stop_point_string
        else:
            oldest = self.oldest_point_string
            newest = self.newest_point_string
        # Empty group/ungroup lists are sent as None to the server.
        group_for_server = self.group
        if self.group == []:
            group_for_server = None
        ungroup_for_server = self.ungroup
        if self.ungroup == []:
            ungroup_for_server = None
        try:
            res = self.updater.client.get_info(
                'get_graph_raw', start_point_string=oldest,
                stop_point_string=newest,
                group_nodes=group_for_server,
                ungroup_nodes=ungroup_for_server,
                ungroup_recursive=self.ungroup_recursive,
                group_all=self.group_all,
                ungroup_all=self.ungroup_all
            )
        except ClientError:
            if cylc.flags.debug:
                try:
                    traceback.print_exc()
                except IOError:
                    pass  # Cannot print to terminal (session may be closed).
            return False
        self.have_leaves_and_feet = True
        gr_edges, suite_polling_tasks, self.leaves, self.feet = res
        # Edges arrive as lists (e.g. from JSON); get_graph_id needs
        # hashable tuples.
        gr_edges = [tuple(edge) for edge in gr_edges]
        current_id = self.get_graph_id(gr_edges)
        if current_id != self.prev_graph_id:
            # Graph structure changed - rebuild from scratch.
            self.graphw = CGraphPlain(
                self.cfg.suite, suite_polling_tasks)
            self.graphw.add_edges(
                gr_edges, ignore_suicide=self.ignore_suicide)
            nodes_to_remove = set()
            # Remove nodes representing filtered-out tasks.
            if (self.updater.filter_name_string or
                    self.updater.filter_states_excl):
                for node in self.graphw.nodes():
                    id_ = node.get_name()
                    # Don't need to guard against special nodes here (yet).
                    name, point_string = TaskID.split(id_)
                    if name not in self.all_families:
                        # This node is a task, not a family.
                        if id_ in self.updater.filt_task_ids:
                            nodes_to_remove.add(node)
                        elif id_ not in self.updater.kept_task_ids:
                            # A base node - these only appear in the graph.
                            filter_string = self.updater.filter_name_string
                            if (filter_string and
                                    filter_string not in name and
                                    not re.search(filter_string, name)):
                                # A base node that fails the name filter.
                                nodes_to_remove.add(node)
                    elif id_ in self.fam_state_summary:
                        # Remove family nodes if all members filtered out.
                        remove = True
                        for mem in self.descendants[name]:
                            mem_id = TaskID.get(mem, point_string)
                            if mem_id in self.updater.kept_task_ids:
                                remove = False
                                break
                        if remove:
                            nodes_to_remove.add(node)
                    elif id_ in self.updater.full_fam_state_summary:
                        # An updater-filtered-out family.
                        nodes_to_remove.add(node)
            # Base node cropping.
            if self.crop:
                # Remove all base nodes.
                for node in (set(self.graphw.nodes()) - nodes_to_remove):
                    if node.get_name() not in self.state_summary:
                        nodes_to_remove.add(node)
            else:
                # Remove cycle points containing only base nodes.
                non_base_point_strings = set()
                point_string_nodes = {}
                for node in set(self.graphw.nodes()) - nodes_to_remove:
                    node_id = node.get_name()
                    name, point_string = TaskID.split(node_id)
                    point_string_nodes.setdefault(point_string, [])
                    point_string_nodes[point_string].append(node)
                    if (node_id in self.state_summary or
                            node_id in self.fam_state_summary):
                        non_base_point_strings.add(point_string)
                pure_base_point_strings = (
                    set(point_string_nodes) - non_base_point_strings)
                for point_string in pure_base_point_strings:
                    for node in point_string_nodes[point_string]:
                        nodes_to_remove.add(node)
            self.graphw.cylc_remove_nodes_from(list(nodes_to_remove))
            # TODO - remove base nodes only connected to other base nodes?
            # Should these even exist any more?
            # Make family nodes octagons.
            for node in self.graphw.nodes():
                node_id = node.get_name()
                try:
                    name, point_string = TaskID.split(node_id)
                except ValueError:
                    # Special node.
                    continue
                if name in self.all_families:
                    node.attr['shape'] = 'doubleoctagon'
            if self.subgraphs_on:
                self.graphw.add_cycle_point_subgraphs(gr_edges)
        # Set base node style defaults (every update - task states can
        # change without the graph id changing).
        for node in self.graphw.nodes():
            node.attr.setdefault('style', 'filled')
            node.attr['color'] = '#888888'
            node.attr['fillcolor'] = 'white'
            node.attr['fontcolor'] = '#888888'
            if not node.attr['URL'].startswith(self.PREFIX_BASE):
                node.attr['URL'] = self.PREFIX_BASE + node.attr['URL']
        for id_ in self.state_summary:
            try:
                node = self.graphw.get_node(id_)
            except KeyError:
                continue
            self.set_live_node_attr(node, id_)
        for id_ in self.fam_state_summary:
            try:
                node = self.graphw.get_node(id_)
            except KeyError:
                # Node not in graph.
                continue
            self.set_live_node_attr(node, id_)
        self.graphw.graph_attr['rankdir'] = self.orientation
        if self.write_dot_frames:
            # Dump a numbered .dot frame per update (for animation).
            arg = os.path.join(
                self.suite_share_dir, 'frame' + '-' +
                str(self.graph_frame_count) + '.dot')
            self.graphw.write(arg)
            self.graph_frame_count += 1
        # no_zoom when the structure is unchanged, to keep the user's view.
        self.update_xdot(no_zoom=(current_id == self.prev_graph_id))
        self.prev_graph_id = current_id
    def update_gui(self):
        """Update the treeview with new task and family information.

        This redraws the treeview, but keeps a memory of user-expanded
        rows in 'expand_me' so that the tree is still expanded in the
        right places.

        If auto-expand is on, calculate which rows need auto-expansion and
        expand those as well.
        """
        self.action_required = False

        # We've a view -> sort model -> filter model -> base model hierarchy.
        model = self.ttreeview.get_model()

        # Retrieve any user-expanded rows so that we can expand them later.
        # This is only really necessary for edge cases in tree reconstruction.
        expand_me = self._get_user_expanded_row_ids()

        try:
            time_zone_info = self.updater.global_summary.get("time zone info")
        except KeyError:
            # Back compat <= 7.5.0
            # NOTE(review): dict.get() never raises KeyError, so this
            # fallback looks unreachable - confirm intended.
            time_zone_info = self.updater.global_summary.get(
                "daemon time zone info")

        # Store the state, times, messages, etc for tasks and families.
        new_data = {}
        new_fam_data = {}
        self.ttree_paths.clear()

        if "T" in self.updater.update_time_str:
            # Strip today's date prefix from task messages below.
            last_update_date = self.updater.update_time_str.split("T")[0]
        else:
            last_update_date = None

        tetc_cached_ids_left = set(self._id_tetc_cache)

        # Start figuring out if we can get away with not rebuilding the tree.
        id_named_paths = {}
        should_rebuild_tree = False
        update_row_ids = []
        task_row_ids_left = set()
        # NOTE(review): nothing in this method removes entries from
        # task_row_ids_left, so any previously drawn task appears to force
        # a full rebuild below - confirm against the full file.
        for point_string, name_paths in self._prev_id_named_paths.items():
            for name in name_paths:
                task_row_ids_left.add((point_string, name))

        for summary, dest, prev, is_fam in [
                (self.updater.state_summary, new_data,
                 self._prev_data, False),
                (self.updater.fam_state_summary, new_fam_data,
                 self._prev_fam_data, True)]:
            # Populate new_data and new_fam_data.
            for id_ in summary:
                name, point_string = TaskID.split(id_)
                if point_string not in dest:
                    dest[point_string] = {}
                state = summary[id_].get('state')

                # Populate task timing slots.
                t_info = {}
                tkeys = ['submitted_time_string', 'started_time_string',
                         'finished_time_string']

                if is_fam:
                    # Family timing currently left empty.
                    for dt in tkeys:
                        t_info[dt] = ""
                    t_info['mean_elapsed_time_string'] = ""
                    t_info['progress'] = 0
                else:
                    meant = summary[id_].get('mean_elapsed_time')
                    tstart = summary[id_].get('started_time')
                    tetc_string = None

                    for dt in tkeys:
                        t_info[dt] = summary[id_][dt]

                    # Compute percent progress.
                    t_info['progress'] = 0
                    if (isinstance(tstart, float) and (
                            isinstance(meant, float) or
                            isinstance(meant, int))):
                        tetc_unix = tstart + meant
                        tnow = time()
                        if tstart > tnow:
                            # Reportably possible via interraction with
                            # cylc reset.
                            t_info['progress'] = 0
                        elif tnow > tetc_unix:
                            t_info['progress'] = 100
                        elif meant != 0:
                            t_info['progress'] = int(
                                100 * (tnow - tstart) / (meant))

                    if (t_info['finished_time_string'] is None and
                            isinstance(tstart, float) and
                            (isinstance(meant, float) or
                             isinstance(meant, int))):
                        # Task not finished, but has started and has a meant;
                        # so we can compute an expected time of completion.
                        tetc_string = (
                            self._id_tetc_cache.get(id_, {}).get(tetc_unix))
                        if tetc_string is None:
                            # We have to calculate it.
                            tetc_string = get_time_string_from_unix_time(
                                tetc_unix,
                                custom_time_zone_info=time_zone_info)
                            self._id_tetc_cache[id_] = {
                                tetc_unix: tetc_string}
                        t_info['finished_time_string'] = tetc_string
                        estimated_t_finish = True
                    else:
                        estimated_t_finish = False

                    if isinstance(meant, float) or isinstance(meant, int):
                        if meant == 0:
                            # This is a very fast (sub cylc-resolution) task.
                            meant = 1
                        meant = int(meant)
                        # Render mean elapsed time as an ISO 8601 duration.
                        meant_minutes, meant_seconds = divmod(meant, 60)
                        if meant_minutes != 0:
                            meant_string = "PT%dM%dS" % (
                                meant_minutes, meant_seconds)
                        else:
                            meant_string = "PT%dS" % meant_seconds
                    elif isinstance(meant, str):
                        meant_string = meant
                    else:
                        meant_string = "*"
                    t_info['mean_elapsed_time_string'] = meant_string

                    for dt in tkeys:
                        if t_info[dt] is None:
                            # Or (no time info yet) use an asterix.
                            t_info[dt] = "*"

                    if estimated_t_finish:
                        # Mark estimated (not actual) finish times with "?".
                        t_info['finished_time_string'] = "%s?" % (
                            t_info['finished_time_string'])

                # Use "*" (or "" for family rows) until slot is populated.
                job_id = summary[id_].get('submit_method_id')
                batch_sys_name = summary[id_].get('batch_sys_name')
                host = summary[id_].get('host')
                message = summary[id_].get('latest_message')
                if message is not None:
                    if last_update_date is not None:
                        # Drop today's date prefix to shorten the message.
                        message = message.replace(
                            last_update_date + "T", "", 1)
                    submit_num = summary[id_].get('submit_num')
                    if submit_num:
                        message = "job(%02d) " % submit_num + message
                if is_fam:
                    dot_type = 'family'
                    job_id = job_id or ""
                    batch_sys_name = batch_sys_name or ""
                    host = host or ""
                    message = message or ""
                else:
                    dot_type = 'task'
                    job_id = job_id or "*"
                    batch_sys_name = batch_sys_name or "*"
                    host = host or "*"
                    message = message or "*"

                icon = self.dots[dot_type][state]

                # Column order must match the treestore model columns.
                new_info = [
                    state, host, batch_sys_name, job_id,
                    t_info['submitted_time_string'],
                    t_info['started_time_string'],
                    t_info['finished_time_string'],
                    t_info['mean_elapsed_time_string'],
                    message, icon, t_info['progress']
                ]
                dest[point_string][name] = new_info

                # Did we already have this information?
                prev_info = prev.get(point_string, {}).get(name)
                if prev_info is None:
                    # No entry for this task or family before, rebuild tree.
                    should_rebuild_tree = True

                if prev_info != new_info:
                    # Different info: add it to the list of
                    # to-be-updated-ids.
                    if is_fam and name == "root":
                        # Root family rows are keyed by cycle point.
                        name = point_string
                    update_row_ids.append((point_string, name, is_fam))

                if not is_fam and name in self.ancestors:
                    # Calculate the family nesting for tasks.
                    families = list(self.ancestors[name])
                    # py2 cmp-sort: order ancestors outermost-first.
                    families.sort(lambda x, y: (y in self.ancestors[x]) -
                                  (x in self.ancestors[y]))
                    if "root" in families:
                        families.remove("root")
                    if name in families:
                        families.remove(name)
                    if not self.should_group_families:
                        families = []
                    named_path = families + [name]
                    id_named_paths.setdefault(point_string, {})
                    id_named_paths[point_string][name] = named_path
                    prev_named_path = self._prev_id_named_paths.get(
                        point_string, {}).get(name)
                    if prev_named_path != named_path:
                        # New task or location for the task, rebuild tree.
                        should_rebuild_tree = True

        if task_row_ids_left:
            # Some previous task ids need deleting, so rebuild the tree.
            should_rebuild_tree = True

        for id_ in tetc_cached_ids_left:
            # These ids were not present in the summary - so clear them.
            # NOTE(review): nothing visible here discards ids from
            # tetc_cached_ids_left on a cache hit, so this appears to clear
            # the whole cache each update - confirm against the full file.
            self._id_tetc_cache.pop(id_)

        # Cache the current row point-string and names.
        row_id_iters_left = {}
        self.ttreestore.foreach(self._cache_row_id_iters, row_id_iters_left)

        point_strings = new_data.keys()
        point_strings.sort()  # This basic sort is not always desirable.

        # Store a column index list for use with the 'TreeModel.set' method.
        columns = range(self.ttreestore.get_n_columns())

        if should_rebuild_tree:
            # Carefully synchronise the tree with new information.
            # For each id, calculate the new path and add or replace that
            # path in the self.ttreestore.
            for i, point_string in enumerate(point_strings):
                try:
                    p_data = new_fam_data[point_string]["root"]
                except KeyError:
                    p_data = [None] * 11
                p_path = (i,)
                p_row_id = (point_string, point_string)
                p_data = list(p_row_id) + p_data
                p_iter = self._update_model(
                    self.ttreestore, columns, p_path, p_row_id, p_data,
                    row_id_iters_left)

                task_named_paths = id_named_paths.get(
                    point_string, {}).values()

                # Sorting here every time the treeview is updated makes
                # definition sort order the default "unsorted" order
                # (any column-click sorting is done on top of this).
                if self.cfg.use_defn_order and self.updater.ns_defn_order:
                    task_named_paths.sort(
                        key=lambda x: map(
                            self.updater.dict_ns_defn_order.get, x))
                else:
                    task_named_paths.sort()

                family_num_children = {}  # Store how many sub-paths here.
                family_paths = {point_string: p_path}
                family_iters = {}

                for named_path in task_named_paths:
                    # The families within a cycle point leading to a task.
                    # For a task foo_bar in family FOOBAR in family FOO, it
                    # would read ["FOO", "FOOBAR", "foo_bar"] in grouped
                    # mode and simply ["foo_bar"] in non-grouped mode.
                    name = named_path[-1]
                    state = new_data[point_string][name][0]
                    self._update_path_info(p_iter, state, name)

                    f_iter = p_iter
                    f_path = p_path
                    fam = point_string
                    for i, fam in enumerate(named_path[:-1]):
                        # Construct family nesting for this task.
                        if fam in family_iters:
                            # Family already in tree
                            f_iter = family_iters[fam]
                            f_path = family_paths[fam]
                        else:
                            # Add family to tree
                            try:
                                f_data = new_fam_data[point_string][fam]
                            except KeyError:
                                # NOTE(review): root rows above fall back to
                                # [None] * 11 but families fall back to
                                # [None] * 7 here, although new_info rows
                                # have 11 fields - looks like a stale column
                                # count; confirm against the model columns.
                                f_data = [None] * 7
                            if i > 0:
                                parent_fam = named_path[i - 1]
                            else:
                                # point_string is the implicit parent here.
                                parent_fam = point_string
                            family_num_children.setdefault(parent_fam, 0)
                            family_num_children[parent_fam] += 1
                            f_row_id = (point_string, fam)
                            f_data = list(f_row_id) + f_data
                            # New path is parent_path + (siblings + 1).
                            f_path = tuple(
                                list(family_paths[parent_fam]) +
                                [family_num_children[parent_fam] - 1])
                            f_iter = self._update_model(
                                self.ttreestore, columns, f_path,
                                f_row_id, f_data, row_id_iters_left)
                            family_iters[fam] = f_iter
                            family_paths[fam] = f_path
                        self._update_path_info(f_iter, state, name)

                    # Add task to tree using the family path we just found.
                    parent_fam = fam
                    family_num_children.setdefault(parent_fam, 0)
                    family_num_children[parent_fam] += 1
                    t_path = tuple(
                        list(f_path) + [family_num_children[parent_fam] - 1])
                    t_row_id = (point_string, name)
                    t_data = list(t_row_id) + new_data[point_string][name]
                    self._update_model(
                        self.ttreestore, columns, t_path, t_row_id, t_data,
                        row_id_iters_left)

            # Adding and updating finished - now we need to delete left
            # overs.
            delete_items = row_id_iters_left.items()
            # Sort reversed by path, to get children before parents.
            delete_items.sort(key=lambda x: x[1][1], reverse=True)
            if delete_items:
                # Although we've cached the iters in row_id_iters_left,
                # they can't be relied upon to give sensible addresses.
                # We have to re-cache the iters for each task and family.
                row_id_iters = {}
                self.ttreestore.foreach(
                    self._cache_row_id_iters, row_id_iters)
                for delete_row_id, _ in delete_items:
                    real_location = row_id_iters.get(delete_row_id)
                    if real_location is None:
                        continue
                    delete_iter = real_location[0]
                    if self.ttreestore.iter_is_valid(delete_iter):
                        self.ttreestore.remove(delete_iter)
        else:  # not should_rebuild_tree
            # Update the tree in place - no row has been added or deleted.
            # Our row_id_iters_left cache is still valid.
            for point_string, name, is_fam in sorted(update_row_ids):
                try:
                    if is_fam and name == point_string:
                        data = new_fam_data[point_string]["root"]
                    elif is_fam:
                        data = new_fam_data[point_string][name]
                    else:
                        data = new_data[point_string][name]
                    iter_ = row_id_iters_left[(point_string, name)][0]
                except KeyError:
                    if not is_fam:
                        raise
                    # Families are not always shown, so this is OK.
                    continue
                set_data = [point_string, name] + data
                # Interleave (column_index, value) pairs for TreeModel.set.
                set_args = itertools.chain(*zip(columns, set_data))
                self.ttreestore.set(iter_, *set_args)

        if self.autoexpand:
            autoexpand_me = self._get_autoexpand_rows()
            for row_id in list(autoexpand_me):
                if row_id in expand_me:
                    # User expanded row also meets auto-expand criteria.
                    autoexpand_me.remove(row_id)
            expand_me += autoexpand_me
            self._last_autoexpand_me = autoexpand_me

        if model is None:
            return
        # This re-evaluates the filtering for every row in the model.
        # This could be more targeted.
        model.get_model().refilter()

        model.sort_column_changed()

        # Expand all the rows that were user-expanded or need
        # auto-expansion.
        model.foreach(self._expand_row, expand_me)

        self._prev_id_named_paths = id_named_paths
        self._prev_data = new_data
        self._prev_fam_data = new_fam_data
        return False
def update_gui(self): new_data = {} state_summary = {} state_summary.update(self.state_summary) state_summary.update(self.fam_state_summary) self.ledview_widgets() tasks_by_point_string = {} tasks_by_name = {} for id_ in state_summary: name, point_string = TaskID.split(id_) tasks_by_point_string.setdefault(point_string, []) tasks_by_point_string[point_string].append(name) tasks_by_name.setdefault(name, []) tasks_by_name[name].append(point_string) # flat (a liststore would do) names = tasks_by_name.keys() names.sort() tvcs = self.led_treeview.get_columns() if not self.is_transposed: for name in self.task_list: point_strings_for_tasks = tasks_by_name.get(name, []) if not point_strings_for_tasks: continue state_list = [] for point_string in self.point_strings: if point_string in point_strings_for_tasks: task_id = TaskID.get(name, point_string) state = state_summary[task_id]['state'] if task_id in self.fam_state_summary: dot_type = 'family' else: dot_type = 'task' state_list.append(self.dots[dot_type][state]) else: state_list.append(self.dots['task']['empty']) try: self.led_liststore.append([name] + state_list) except ValueError: # A very laggy store can change the columns and raise this. return False else: for point_string in self.point_strings: tasks_at_point_string = tasks_by_point_string[point_string] state_list = [] for name in self.task_list: task_id = TaskID.get(name, point_string) if task_id in self.fam_state_summary: dot_type = 'family' else: dot_type = 'task' if name in tasks_at_point_string: state = state_summary[task_id]['state'] state_list.append(self.dots[dot_type][state]) else: state_list.append(self.dots[dot_type]['empty']) try: self.led_liststore.append([point_string] + state_list + [point_string]) except ValueError: # A very laggy store can change the columns and raise this. return False self.led_treeview.columns_autosize() return False
def update_gui( self ): new_data = {} state_summary = {} state_summary.update( self.state_summary ) state_summary.update( self.fam_state_summary ) self.ledview_widgets() tasks_by_point_string = {} tasks_by_name = {} for id_ in state_summary: name, point_string = TaskID.split(id_) tasks_by_point_string.setdefault( point_string, [] ) tasks_by_point_string[point_string].append(name) tasks_by_name.setdefault( name, [] ) tasks_by_name[name].append(point_string) # flat (a liststore would do) names = tasks_by_name.keys() names.sort() tvcs = self.led_treeview.get_columns() if not self.is_transposed: for name in self.task_list: point_strings_for_tasks = tasks_by_name.get(name, []) if not point_strings_for_tasks: continue state_list = [] for point_string in self.point_strings: if point_string in point_strings_for_tasks: task_id = TaskID.get(name, point_string) state = state_summary[task_id]['state'] if task_id in self.fam_state_summary: dot_type = 'family' else: dot_type = 'task' state_list.append(self.dots[dot_type][state]) else: state_list.append(self.dots['task']['empty']) try: self.led_liststore.append([name] + state_list) except ValueError: # A very laggy store can change the columns and raise this. return False else: for point_string in self.point_strings: tasks_at_point_string = tasks_by_point_string[point_string] state_list = [] for name in self.task_list: task_id = TaskID.get(name, point_string) if task_id in self.fam_state_summary: dot_type = 'family' else: dot_type = 'task' if name in tasks_at_point_string: state = state_summary[task_id]['state'] state_list.append(self.dots[dot_type][state]) else: state_list.append(self.dots[dot_type]['empty']) try: self.led_liststore.append( [point_string] + state_list + [point_string]) except ValueError: # A very laggy store can change the columns and raise this. return False self.led_treeview.columns_autosize() return False
def right_click_menu(self, event, task_id, type_='live task'): name, point_string = TaskID.split(task_id) menu = gtk.Menu() menu_root = gtk.MenuItem(task_id) menu_root.set_submenu(menu) timezoom_item_direct = gtk.MenuItem('Focus on ' + point_string) timezoom_item_direct.connect('activate', self.focused_timezoom_direct, point_string) # TODO - pre cylc-6 could focus on a range of points (was hours-based). # timezoom_item = gtk.MenuItem('Focus on Range') # timezoom_item.connect( # 'activate', self.focused_timezoom_popup, task_id) timezoom_reset_item = gtk.MenuItem('Focus Reset') timezoom_reset_item.connect('activate', self.focused_timezoom_direct, None) group_item = gtk.ImageMenuItem('Group') img = gtk.image_new_from_stock('group', gtk.ICON_SIZE_MENU) group_item.set_image(img) group_item.set_sensitive(not self.t.have_leaves_and_feet or name not in self.t.feet) group_item.connect('activate', self.grouping, name, True) ungroup_item = gtk.ImageMenuItem('UnGroup') img = gtk.image_new_from_stock('ungroup', gtk.ICON_SIZE_MENU) ungroup_item.set_image(img) ungroup_item.set_sensitive(not self.t.have_leaves_and_feet or name not in self.t.leaves) ungroup_item.connect('activate', self.grouping, name, False) ungroup_rec_item = gtk.ImageMenuItem('Recursive UnGroup') img = gtk.image_new_from_stock('ungroup', gtk.ICON_SIZE_MENU) ungroup_rec_item.set_image(img) ungroup_rec_item.set_sensitive(not self.t.have_leaves_and_feet or name not in self.t.leaves) ungroup_rec_item.connect('activate', self.grouping, name, False, True) menu.append(gtk.SeparatorMenuItem()) if type is not 'live task': insert_item = gtk.ImageMenuItem('Insert ...') img = gtk.image_new_from_stock(gtk.STOCK_DIALOG_INFO, gtk.ICON_SIZE_MENU) insert_item.set_image(img) menu.append(insert_item) insert_item.connect( 'button-press-event', lambda *a: self.insert_task_popup( is_fam=(name in self.t.descendants), name=name, point_string=point_string)) menu.append(gtk.SeparatorMenuItem()) menu.append(timezoom_item_direct) 
menu.append(timezoom_reset_item) menu.append(gtk.SeparatorMenuItem()) menu.append(group_item) menu.append(ungroup_item) menu.append(ungroup_rec_item) if type_ == 'live task': is_fam = (name in self.t.descendants) if is_fam: if task_id not in self.t.fam_state_summary: return False t_state = self.t.fam_state_summary[task_id]['state'] else: if task_id not in self.t.state_summary: return False t_state = self.t.state_summary[task_id]['state'] default_menu = self.get_right_click_menu([task_id], [t_state], task_is_family=[is_fam], is_graph_view=True) dm_kids = default_menu.get_children() for item in reversed(dm_kids[:2]): # Put task name and URL at the top. default_menu.remove(item) menu.prepend(item) for item in dm_kids[2:]: # And the rest of the default menu at the bottom. default_menu.remove(item) menu.append(item) menu.show_all() menu.popup(None, None, None, event.button, event.time) # TODO - popup menus are not automatically destroyed and can be # reused if saved; however, we need to reconstruct or at least # alter ours dynamically => should destroy after each use to # prevent a memory leak? But I'm not sure how to do this as yet.) return True
def update_gui(self):
    """Redraw the graph view from the latest suite state.

    Fetches the raw suite graph for the current focus window from the
    suite daemon (via ``get_graph_raw``), rebuilds the graphviz graph
    only when its topology has changed, applies name/state filtering and
    base-node cropping, restyles nodes for live task/family states, and
    hands the result to the xdot widget.  Returns False on connection
    failure, otherwise None.
    """
    # TODO - check edges against resolved ones
    # (adding new ones, and nodes, if necessary)
    self.action_required = False
    if not self.global_summary:
        # No data received from the suite daemon yet.
        return
    self.oldest_point_string = (
        self.global_summary['oldest cycle point string'])
    self.newest_point_string = (
        self.global_summary['newest cycle point string'])
    if TASK_STATUS_RUNAHEAD not in self.updater.filter_states_excl:
        # Get a graph out to the max runahead point.
        self.newest_point_string = (
            self.global_summary['newest runahead cycle point string'])
    if self.focus_start_point_string:
        # User has focused on a specific cycle point range.
        oldest = self.focus_start_point_string
        newest = self.focus_stop_point_string
    else:
        oldest = self.oldest_point_string
        newest = self.newest_point_string
    # Empty group/ungroup lists are sent to the server as None.
    group_for_server = self.group
    if self.group == []:
        group_for_server = None
    ungroup_for_server = self.ungroup
    if self.ungroup == []:
        ungroup_for_server = None
    try:
        res = self.updater.client.get_info(
            'get_graph_raw', start_point_string=oldest,
            stop_point_string=newest,
            group_nodes=group_for_server,
            ungroup_nodes=ungroup_for_server,
            ungroup_recursive=self.ungroup_recursive,
            group_all=self.group_all, ungroup_all=self.ungroup_all)
    except ClientError:
        if cylc.flags.debug:
            try:
                traceback.print_exc()
            except IOError:
                # Cannot print to terminal (session may be closed).
                pass
        return False
    self.have_leaves_and_feet = True
    gr_edges, suite_polling_tasks, self.leaves, self.feet = res
    # Edges may arrive as lists (serialized); downstream code wants tuples.
    gr_edges = [tuple(edge) for edge in gr_edges]
    fgcolor = gtk_rgb_to_hex(
        getattr(self.xdot.widget.style, 'fg', None)[gtk.STATE_NORMAL])
    current_id = self.get_graph_id(gr_edges)
    if current_id != self.prev_graph_id:
        # Graph topology changed - rebuild the graph from scratch.
        self.graphw = CGraphPlain(self.cfg.suite, suite_polling_tasks)
        self.graphw.add_edges(
            gr_edges, ignore_suicide=self.ignore_suicide)
        nodes_to_remove = set()
        # Remove nodes representing filtered-out tasks.
        if (self.updater.filter_name_string or
                self.updater.filter_states_excl):
            for node in self.graphw.nodes():
                id_ = node.get_name()
                # Don't need to guard against special nodes here (yet).
                name, point_string = TaskID.split(id_)
                if name not in self.all_families:
                    # This node is a task, not a family.
                    if id_ in self.updater.filt_task_ids:
                        nodes_to_remove.add(node)
                    elif id_ not in self.updater.kept_task_ids:
                        # A base node - these only appear in the graph.
                        filter_string = self.updater.filter_name_string
                        if (filter_string and
                                filter_string not in name and
                                not re.search(filter_string, name)):
                            # A base node that fails the name filter.
                            nodes_to_remove.add(node)
                elif id_ in self.fam_state_summary:
                    # Remove family nodes if all members filtered out.
                    remove = True
                    for mem in self.descendants[name]:
                        mem_id = TaskID.get(mem, point_string)
                        if mem_id in self.updater.kept_task_ids:
                            remove = False
                            break
                    if remove:
                        nodes_to_remove.add(node)
                elif id_ in self.updater.full_fam_state_summary:
                    # An updater-filtered-out family.
                    nodes_to_remove.add(node)
        # Base node cropping.
        if self.crop:
            # Remove all base nodes.
            for node in (set(self.graphw.nodes()) - nodes_to_remove):
                if node.get_name() not in self.state_summary:
                    nodes_to_remove.add(node)
        else:
            # Remove cycle points containing only base nodes.
            non_base_point_strings = set()
            point_string_nodes = {}
            for node in set(self.graphw.nodes()) - nodes_to_remove:
                node_id = node.get_name()
                name, point_string = TaskID.split(node_id)
                point_string_nodes.setdefault(point_string, [])
                point_string_nodes[point_string].append(node)
                if (node_id in self.state_summary or
                        node_id in self.fam_state_summary):
                    non_base_point_strings.add(point_string)
            pure_base_point_strings = (
                set(point_string_nodes) - non_base_point_strings)
            for point_string in pure_base_point_strings:
                for node in point_string_nodes[point_string]:
                    nodes_to_remove.add(node)
        self.graphw.cylc_remove_nodes_from(list(nodes_to_remove))
        # TODO - remove base nodes only connected to other base nodes?
        # Should these even exist any more?

        # Make family nodes octagons.
        for node in self.graphw.nodes():
            node_id = node.get_name()
            try:
                name, point_string = TaskID.split(node_id)
            except ValueError:
                # Special node.
                continue
            if name in self.all_families:
                node.attr['shape'] = 'doubleoctagon'
            elif name.startswith('@'):
                node.attr['shape'] = 'none'
        if self.subgraphs_on:
            self.graphw.add_cycle_point_subgraphs(gr_edges, fgcolor)

    # NOTE(review): the restyling below is taken to run on EVERY update
    # (not only on topology change) so live state changes show without a
    # redraw - original indentation was lost; confirm against history.
    # Set base node style defaults
    fg_ghost = "%s%s" % (fgcolor, GHOST_TRANSP_HEX)
    for node in self.graphw.nodes():
        node.attr['style'] = 'dotted'
        node.attr['color'] = fg_ghost
        node.attr['fontcolor'] = fg_ghost
        if not node.attr['URL'].startswith(self.PREFIX_BASE):
            node.attr['URL'] = self.PREFIX_BASE + node.attr['URL']
    # Overwrite ghost styling with live state colours where known.
    for id_ in self.state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            continue
        self.set_live_node_attr(node, id_)
    for id_ in self.fam_state_summary:
        try:
            node = self.graphw.get_node(id_)
        except KeyError:
            # Node not in graph.
            continue
        self.set_live_node_attr(node, id_)
    self.graphw.graph_attr['rankdir'] = self.orientation
    if self.write_dot_frames:
        # Dump each frame for offline animation/debugging.
        arg = os.path.join(
            self.suite_share_dir,
            'frame' + '-' + str(self.graph_frame_count) + '.dot')
        self.graphw.write(arg)
        self.graph_frame_count += 1
    # Keep zoom if only node states (not topology) changed.
    self.update_xdot(no_zoom=(current_id == self.prev_graph_id))
    self.prev_graph_id = current_id
def _write_environment_1(self, handle, job_conf):
    """Suite and task environment.

    Write "export VAR=value" lines for the static suite environment and
    the per-task CYLC_TASK_* variables to the open job script *handle*.
    job_conf is a dict of job settings (keys used here: "suite name",
    "host", "owner", "task id", "remote suite path", etc.).
    """
    handle.write("\n\n# CYLC SUITE ENVIRONMENT:")
    # write the static suite variables
    for var, val in sorted(self.suite_env.items()):
        handle.write("\nexport " + var + "=" + str(val))

    # Force UTC on the task host when the suite runs in UTC mode.
    if str(self.suite_env.get("CYLC_UTC")) == "True":
        handle.write("\nexport TZ=UTC")
    handle.write("\n")

    # override and write task-host-specific suite variables
    suite_work_dir = GLOBAL_CFG.get_derived_host_item(
        job_conf["suite name"], "suite work directory",
        job_conf["host"], job_conf["owner"])
    st_env = {}
    st_env["CYLC_SUITE_RUN_DIR"] = GLOBAL_CFG.get_derived_host_item(
        job_conf["suite name"], "suite run directory",
        job_conf["host"], job_conf["owner"])
    st_env["CYLC_SUITE_WORK_DIR"] = suite_work_dir
    st_env["CYLC_SUITE_SHARE_DIR"] = GLOBAL_CFG.get_derived_host_item(
        job_conf["suite name"], "suite share directory",
        job_conf["host"], job_conf["owner"])
    # DEPRECATED
    st_env["CYLC_SUITE_SHARE_PATH"] = "$CYLC_SUITE_SHARE_DIR"
    rsp = job_conf["remote suite path"]
    if rsp:
        st_env["CYLC_SUITE_DEF_PATH"] = rsp
    else:
        # replace home dir with '$HOME' for evaluation on the task host
        st_env["CYLC_SUITE_DEF_PATH"] = re.sub(
            os.environ["HOME"], "$HOME",
            self.suite_env["CYLC_SUITE_DEF_PATH_ON_SUITE_HOST"])
    for var, val in sorted(st_env.items()):
        handle.write("\nexport " + var + "=" + str(val))

    task_work_dir = os.path.join(
        suite_work_dir, job_conf["work sub-directory"])
    use_login_shell = GLOBAL_CFG.get_host_item(
        "use login shell", job_conf["host"], job_conf["owner"])
    comms = GLOBAL_CFG.get_host_item(
        "task communication method", job_conf["host"], job_conf["owner"])
    task_name, point_string = TaskID.split(job_conf["task id"])

    handle.write("\n\n# CYLC TASK ENVIRONMENT:")
    handle.write("\nexport CYLC_TASK_COMMS_METHOD=" + comms)
    handle.write("\nexport CYLC_TASK_CYCLE_POINT=" + point_string)
    # CYLC_TASK_CYCLE_TIME duplicates CYLC_TASK_CYCLE_POINT (legacy name).
    handle.write("\nexport CYLC_TASK_CYCLE_TIME=" + point_string)
    handle.write("\nexport CYLC_TASK_ID=" + job_conf["task id"])
    handle.write(
        "\nexport CYLC_TASK_IS_COLDSTART=" +
        str(job_conf["is cold-start"]))
    handle.write(
        "\nexport CYLC_TASK_LOG_ROOT=" + job_conf["job file path"])
    handle.write(
        "\nexport CYLC_TASK_MSG_MAX_TRIES=" +
        str(GLOBAL_CFG.get(["task messaging", "maximum number of tries"])))
    handle.write(
        "\nexport CYLC_TASK_MSG_RETRY_INTVL=" +
        str(GLOBAL_CFG.get(["task messaging", "retry interval"])))
    handle.write(
        "\nexport CYLC_TASK_MSG_TIMEOUT=" +
        str(GLOBAL_CFG.get(["task messaging", "connection timeout"])))
    handle.write("\nexport CYLC_TASK_NAME=" + task_name)
    handle.write(
        '\nexport CYLC_TASK_NAMESPACE_HIERARCHY="' +
        " ".join(job_conf["namespace hierarchy"]) + '"')
    handle.write(
        "\nexport CYLC_TASK_SSH_LOGIN_SHELL=" + str(use_login_shell))
    handle.write(
        "\nexport CYLC_TASK_SUBMIT_NUMBER=" +
        str(job_conf["absolute submit number"]))
    handle.write(
        "\nexport CYLC_TASK_TRY_NUMBER=" + str(job_conf["try number"]))
    handle.write("\nexport CYLC_TASK_WORK_DIR=" + task_work_dir)
    # DEPRECATED
    handle.write("\nexport CYLC_TASK_WORK_PATH=$CYLC_TASK_WORK_DIR")
    # $$ expands to the job's own PID at execution time.
    handle.write("\nexport CYLC_JOB_PID=$$")
def get_stop_state_summary(suite, owner=None, hostname=None, lines=None):
    """Load the contents of the last 'state' file into summary maps.

    Args:
        suite: suite name, used to locate the state file when *lines* is
            not supplied.
        owner, hostname: passed through to get_stop_state().
        lines: optional pre-split state-file lines (consumed/mutated).

    Returns:
        (global_summary, task_summary, family_summary) dicts, or None if
        the dump is too short to parse (kept for existing callers that
        test for None).
    """
    global_summary = {}
    task_summary = {}
    family_summary = {}
    if not lines:
        state_file_text = get_stop_state(suite, owner, hostname)
        if state_file_text is None:
            return global_summary, task_summary, family_summary
        lines = state_file_text.splitlines()
    # (was "len(lines) == 0 or len(lines) < 3": first test was redundant)
    if len(lines) < 3:
        return None
    # Strip interactive-shell noise from remote retrieval of the file.
    for line in list(lines):
        if line.startswith('Remote command'):
            lines.remove(line)
    line0 = lines.pop(0)
    if line0.startswith('suite time') or \
            line0.startswith('simulation time'):
        # backward compatibility with pre-5.4.11 state dumps
        global_summary["last_updated"] = time.time()
    else:
        # (line0 is run mode)
        line1 = lines.pop(0)
        while not line1.startswith("time :"):
            line1 = lines.pop(0)
        try:
            # e.g. "time : 2015-08-05T01:23:45Z (1438737825)"
            time_string = line1.rstrip().split(' : ')[1]
            unix_time_string = time_string.rsplit('(', 1)[1].rstrip(")")
            global_summary["last_updated"] = int(unix_time_string)
        except (TypeError, ValueError, IndexError):
            # back compat pre cylc-6
            global_summary["last_updated"] = time.time()
    # Discard the start (initial) cycle point line; it is not summarised.
    lines.pop(0)
    stop = lines.pop(0).rstrip().rsplit(None, 1)[-1]
    if stop != "(none)":
        global_summary["will_stop_at"] = stop
    # Remaining lines are dumped task states: "<task.point> : k=v, k=v".
    while lines:
        line = lines.pop(0)
        if line.startswith("class") or line.startswith("Begin task"):
            continue
        try:
            (task_id, info) = line.split(' : ')
            name, point_string = TaskID.split(task_id)
        except ValueError:
            # Not a task-state line.
            continue
        except Exception as e:
            sys.stderr.write(str(e) + "\n")
            continue
        task_summary.setdefault(task_id, {
            "name": name, "point": point_string, "label": point_string})
        # reconstruct state from a dumped state string
        items = dict([p.split("=") for p in info.split(', ')])
        state = items.get("status")
        if state == 'submitting':
            # backward compatibility for state dumps generated prior to
            # #787
            state = 'ready'
        task_summary[task_id].update({"state": state})
        task_summary[task_id].update({"spawned": items.get("spawned")})
    # The suite that wrote this state file is no longer running.
    global_summary["run_mode"] = "dead"
    for key in ["paused", "stopping", "will_pause_at", "will_stop_at"]:
        global_summary.setdefault(key, "")
    return global_summary, task_summary, family_summary
def update(self, tasks, tasks_rh, min_point, max_point, max_point_rh,
           paused, will_pause_at, stopping, will_stop_at, ns_defn_order,
           reloading):
    """Rebuild the task, family and global summary maps.

    Args:
        tasks: live task proxies (each provides get_state_summary()).
        tasks_rh: runahead-pool task proxies; their state is overridden
            to 'runahead' in the summary.
        min_point, max_point, max_point_rh: pool cycle point extremes.
        paused, will_pause_at, stopping, will_stop_at, ns_defn_order,
        reloading: suite-level flags copied into the global summary.

    The new dicts are built locally and assigned to self in one step at
    the end (atomic update, for access from other threads).
    """
    task_summary = {}
    global_summary = {}
    family_summary = {}
    task_states = {}

    # First pass (fs=None) takes states as-is; second pass forces the
    # runahead-pool tasks to the 'runahead' state.
    fs = None
    for tlist in [tasks, tasks_rh]:
        for task in tlist:
            ts = task.get_state_summary()
            if fs:
                ts['state'] = fs
            task_summary[task.identity] = ts
            name, point_string = TaskID.split(task.identity)
            point_string = str(point_string)
            task_states.setdefault(point_string, {})
            task_states[point_string][name] = (
                task_summary[task.identity]['state'])
        fs = 'runahead'

    all_states = []
    # Hoisted out of the loop: the config (and the first-parent ancestor
    # tree) is invariant across cycle points.
    config = SuiteConfig.get_inst()
    ancestors = config.get_first_parent_ancestors()
    for point_string, c_task_states in task_states.items():
        # For each cycle point, construct a family state tree
        # based on the first-parent single-inheritance tree
        c_fam_task_states = {}
        for key, parent_list in ancestors.items():
            state = c_task_states.get(key)
            if state is None:
                continue
            all_states.append(state)
            for parent in parent_list:
                if parent == key:
                    continue
                c_fam_task_states.setdefault(parent, [])
                c_fam_task_states[parent].append(state)

        for fam, child_states in c_fam_task_states.items():
            f_id = TaskID.get(fam, point_string)
            # Family state is derived from its member task states.
            state = extract_group_state(child_states)
            if state is None:
                continue
            try:
                famcfg = config.cfg['runtime'][fam]
            except KeyError:
                famcfg = {}
            description = famcfg.get('description')
            title = famcfg.get('title')
            family_summary[f_id] = {'name': fam,
                                    'description': description,
                                    'title': title,
                                    'label': point_string,
                                    'state': state}

    all_states.sort()

    # Compute state_counts (total, and per cycle).
    state_count_totals = {}
    state_count_cycles = {}
    for point_string, name_states in task_states.items():
        count = {}
        for name, state in name_states.items():
            try:
                count[state] += 1
            except KeyError:
                count[state] = 1
            try:
                state_count_totals[state] += 1
            except KeyError:
                state_count_totals[state] = 1
        state_count_cycles[point_string] = count

    global_summary['oldest cycle point string'] = (
        self.str_or_None(min_point))
    global_summary['newest cycle point string'] = (
        self.str_or_None(max_point))
    global_summary['newest runahead cycle point string'] = (
        self.str_or_None(max_point_rh))
    if cylc.flags.utc:
        global_summary['daemon time zone info'] = TIME_ZONE_UTC_INFO
    else:
        global_summary['daemon time zone info'] = TIME_ZONE_LOCAL_INFO
    global_summary['last_updated'] = time.time()
    global_summary['run_mode'] = self.run_mode
    global_summary['paused'] = paused
    global_summary['stopping'] = stopping
    global_summary['will_pause_at'] = self.str_or_None(will_pause_at)
    global_summary['will_stop_at'] = self.str_or_None(will_stop_at)
    global_summary['states'] = all_states
    global_summary['namespace definition order'] = ns_defn_order
    global_summary['reloading'] = reloading
    global_summary['state totals'] = state_count_totals

    self._summary_update_time = time.time()
    # Replace the originals (atomic update, for access from other threads).
    self.task_summary = task_summary
    self.global_summary = global_summary
    self.family_summary = family_summary
    self.first_update_completed = True
    self.state_count_totals = state_count_totals
    self.state_count_cycles = state_count_cycles
    # (Dead locals removed: unused "fam_states = {}" and the trailing
    # "task_states = {}" rebind, which had no effect.)
def right_click_menu(self, event, task_id, type='live task'):
    """Pop up a right-click menu for a dot/tree-view task.

    Args:
        event: the gtk button-press event (supplies button and time).
        task_id: "name.point" ID of the clicked task.
        type: node kind; the task-state submenu is only added for
            'live task', and the "Insert ..." item only for other kinds.
            (Parameter name shadows the builtin but is kept for
            backward compatibility with keyword callers.)

    Returns True on success, False if the task has no state entry.

    FIX: string comparison was ``type is not 'live task'`` (identity,
    which only works by CPython string-interning accident); now ``!=``.
    """
    name, point_string = TaskID.split(task_id)
    menu = gtk.Menu()
    menu_root = gtk.MenuItem(task_id)
    menu_root.set_submenu(menu)

    timezoom_item_direct = gtk.MenuItem('Focus on ' + point_string)
    timezoom_item_direct.connect(
        'activate', self.focused_timezoom_direct, point_string)
    # TODO - pre cylc-6 could focus on a range of points (was hours-based).
    # timezoom_item = gtk.MenuItem('Focus on Range')
    # timezoom_item.connect(
    #     'activate', self.focused_timezoom_popup, task_id)
    timezoom_reset_item = gtk.MenuItem('Focus Reset')
    timezoom_reset_item.connect(
        'activate', self.focused_timezoom_direct, None)

    group_item = gtk.ImageMenuItem('Group')
    img = gtk.image_new_from_stock('group', gtk.ICON_SIZE_MENU)
    group_item.set_image(img)
    group_item.set_sensitive(
        not self.t.have_leaves_and_feet or name not in self.t.feet)
    group_item.connect('activate', self.grouping, name, True)

    ungroup_item = gtk.ImageMenuItem('UnGroup')
    img = gtk.image_new_from_stock('ungroup', gtk.ICON_SIZE_MENU)
    ungroup_item.set_image(img)
    ungroup_item.set_sensitive(
        not self.t.have_leaves_and_feet or name not in self.t.leaves)
    ungroup_item.connect('activate', self.grouping, name, False)

    ungroup_rec_item = gtk.ImageMenuItem('Recursive UnGroup')
    img = gtk.image_new_from_stock('ungroup', gtk.ICON_SIZE_MENU)
    ungroup_rec_item.set_image(img)
    ungroup_rec_item.set_sensitive(
        not self.t.have_leaves_and_feet or name not in self.t.leaves)
    ungroup_rec_item.connect('activate', self.grouping, name, False, True)

    menu.append(gtk.SeparatorMenuItem())
    if type != 'live task':
        # Non-live (e.g. base) tasks can be inserted into the suite.
        insert_item = gtk.ImageMenuItem('Insert ...')
        img = gtk.image_new_from_stock(
            gtk.STOCK_DIALOG_INFO, gtk.ICON_SIZE_MENU)
        insert_item.set_image(img)
        menu.append(insert_item)
        insert_item.connect(
            'button-press-event',
            lambda *a: self.insert_task_popup(
                is_fam=(name in self.t.descendants),
                name=name, point_string=point_string
            )
        )
        menu.append(gtk.SeparatorMenuItem())

    menu.append(timezoom_item_direct)
    menu.append(timezoom_reset_item)
    menu.append(gtk.SeparatorMenuItem())
    menu.append(group_item)
    menu.append(ungroup_item)
    menu.append(ungroup_rec_item)

    if type == 'live task':
        is_fam = (name in self.t.descendants)
        if is_fam:
            if task_id not in self.t.fam_state_summary:
                return False
            t_state = self.t.fam_state_summary[task_id]['state']
            submit_num = None
        else:
            if task_id not in self.t.state_summary:
                return False
            t_state = self.t.state_summary[task_id]['state']
            submit_num = self.t.state_summary[task_id]['submit_num']
        default_menu = self.get_right_click_menu(
            task_id, t_state, task_is_family=is_fam,
            submit_num=submit_num)
        dm_kids = default_menu.get_children()
        for item in reversed(dm_kids[:2]):
            # Put task name and URL at the top.
            default_menu.remove(item)
            menu.prepend(item)
        for item in dm_kids[2:]:
            # And the rest of the default menu at the bottom.
            default_menu.remove(item)
            menu.append(item)

    menu.show_all()
    menu.popup(None, None, None, event.button, event.time)
    # TODO - popup menus are not automatically destroyed and can be
    # reused if saved; however, we need to reconstruct or at least
    # alter ours dynamically => should destroy after each use to
    # prevent a memory leak? But I'm not sure how to do this as yet.)
    return True
def _write_environment_1(self, handle, job_conf):
    """Suite and task environment.

    Write "export VAR=value" lines for the static suite environment and
    the per-task CYLC_TASK_* variables to the open job script *handle*.
    job_conf is a dict of job settings (keys used here: 'suite name',
    'host', 'owner', 'task id', 'remote suite path', etc.).
    """
    handle.write("\n\n# CYLC SUITE ENVIRONMENT:")
    # write the static suite variables
    for var, val in sorted(self.suite_env.items()):
        handle.write("\nexport " + var + "=" + str(val))

    # Force UTC on the task host when the suite runs in UTC mode.
    if str(self.suite_env.get('CYLC_UTC')) == 'True':
        handle.write("\nexport TZ=UTC")
    handle.write("\n")

    # override and write task-host-specific suite variables
    suite_work_dir = GLOBAL_CFG.get_derived_host_item(
        job_conf['suite name'], 'suite work directory',
        job_conf['host'], job_conf['owner'])
    st_env = {}
    st_env['CYLC_SUITE_RUN_DIR'] = GLOBAL_CFG.get_derived_host_item(
        job_conf['suite name'], 'suite run directory',
        job_conf['host'], job_conf['owner'])
    st_env['CYLC_SUITE_WORK_DIR'] = suite_work_dir
    st_env['CYLC_SUITE_SHARE_DIR'] = GLOBAL_CFG.get_derived_host_item(
        job_conf['suite name'], 'suite share directory',
        job_conf['host'], job_conf['owner'])
    # DEPRECATED
    st_env['CYLC_SUITE_SHARE_PATH'] = '$CYLC_SUITE_SHARE_DIR'
    rsp = job_conf['remote suite path']
    if rsp:
        st_env['CYLC_SUITE_DEF_PATH'] = rsp
    else:
        # replace home dir with '$HOME' for evaluation on the task host
        st_env['CYLC_SUITE_DEF_PATH'] = re.sub(
            os.environ['HOME'], '$HOME',
            self.suite_env['CYLC_SUITE_DEF_PATH_ON_SUITE_HOST'])
    for var, val in sorted(st_env.items()):
        handle.write("\nexport " + var + "=" + str(val))

    task_work_dir = os.path.join(
        suite_work_dir, job_conf['work sub-directory'])
    use_login_shell = GLOBAL_CFG.get_host_item(
        'use login shell', job_conf['host'], job_conf['owner'])
    comms = GLOBAL_CFG.get_host_item(
        'task communication method', job_conf['host'], job_conf['owner'])
    task_name, point_string = TaskID.split(job_conf['task id'])

    handle.write("\n\n# CYLC TASK ENVIRONMENT:")
    handle.write("\nexport CYLC_TASK_COMMS_METHOD=" + comms)
    handle.write("\nexport CYLC_TASK_CYCLE_POINT=" + point_string)
    # CYLC_TASK_CYCLE_TIME duplicates CYLC_TASK_CYCLE_POINT (legacy name).
    handle.write("\nexport CYLC_TASK_CYCLE_TIME=" + point_string)
    handle.write("\nexport CYLC_TASK_ID=" + job_conf['task id'])
    handle.write(
        "\nexport CYLC_TASK_IS_COLDSTART=" +
        str(job_conf['is cold-start']))
    handle.write(
        "\nexport CYLC_TASK_LOG_ROOT=" + job_conf['job file path'])
    handle.write(
        "\nexport CYLC_TASK_MSG_MAX_TRIES=" +
        str(GLOBAL_CFG.get(['task messaging', 'maximum number of tries'])))
    handle.write(
        "\nexport CYLC_TASK_MSG_RETRY_INTVL=%f" %
        GLOBAL_CFG.get(['task messaging', 'retry interval']))
    handle.write(
        "\nexport CYLC_TASK_MSG_TIMEOUT=%f" %
        GLOBAL_CFG.get(['task messaging', 'connection timeout']))
    handle.write("\nexport CYLC_TASK_NAME=" + task_name)
    handle.write(
        '\nexport CYLC_TASK_NAMESPACE_HIERARCHY="' +
        ' '.join(job_conf['namespace hierarchy']) + '"')
    handle.write(
        "\nexport CYLC_TASK_SSH_LOGIN_SHELL=" + str(use_login_shell))
    handle.write(
        "\nexport CYLC_TASK_SUBMIT_NUMBER=" +
        str(job_conf['submit num']))
    handle.write(
        "\nexport CYLC_TASK_TRY_NUMBER=" + str(job_conf['try number']))
    handle.write("\nexport CYLC_TASK_WORK_DIR=" + task_work_dir)
    # DEPRECATED
    handle.write("\nexport CYLC_TASK_WORK_PATH=$CYLC_TASK_WORK_DIR")
    # $$ expands to the job's own PID at execution time.
    handle.write("\nexport %s=$$" % (TaskMessage.CYLC_JOB_PID))
def update(self):
    """Update data using data from self.updater.

    Returns False when disconnected or when no new data is available;
    otherwise snapshots the updater's state (with its no_update_event
    held set during the copy), rebuilds the cycle point and task lists,
    and returns True.

    REFACTOR: the task-list construction (deepcopy + definition-order
    filter or sort) was duplicated verbatim in both the grouped and
    ungrouped branches; it is now done once before the grouping step.
    Behavior is unchanged.
    """
    if not self.updater.connected:
        if not self.cleared:
            gobject.idle_add(self.clear_gui)
            self.cleared = True
        return False
    self.cleared = False

    # Skip if nothing new since the last update we consumed.
    if not self.action_required and (
            self.last_update_time is not None and
            self.last_update_time >= self.updater.last_update_time):
        return False
    self.last_update_time = self.updater.last_update_time

    # Block the updater while we take a consistent snapshot.
    self.updater.no_update_event.set()
    self.state_summary = deepcopy(self.updater.state_summary)
    self.fam_state_summary = deepcopy(self.updater.fam_state_summary)
    self.ancestors_pruned = deepcopy(self.updater.ancestors_pruned)
    self.descendants = deepcopy(self.updater.descendants)
    self.updater.no_update_event.clear()

    # Collect the distinct cycle points present in the state summary.
    self.point_strings = []
    for id_ in self.state_summary:
        point_string = TaskID.split(id_)[1]
        if point_string not in self.point_strings:
            self.point_strings.append(point_string)
    try:
        self.point_strings.sort(key=int)
    except (TypeError, ValueError):
        # iso cycle points
        self.point_strings.sort()

    use_def_order = (self.cfg.use_defn_order and
                     self.updater.ns_defn_order and self.defn_order_on)

    # Full task list, in definition order or sorted (common to both the
    # grouped and ungrouped displays).
    self.task_list = deepcopy(self.updater.task_list)
    if use_def_order:
        self.task_list = [
            task for task in self.updater.ns_defn_order
            if task in self.task_list]
    else:
        self.task_list.sort()

    if self.should_group_families:
        # Generate dict of families and their associated tasks.
        self.family_tree = {}
        for task in self.task_list:
            # Family name below root, keyed on member task name.
            item = self.ancestors_pruned[task][-2]
            if item not in self.task_list:
                if item not in self.family_tree:
                    self.family_tree[item] = []
                self.family_tree[item].append(task)
        for heading, tasks in self.family_tree.iteritems():
            # Place associated tasks after headers.
            ind = min(self.task_list.index(task) for task in tasks)
            for task in tasks:
                if task in self.task_list:
                    self.task_list.remove(task)
            self.task_list = (self.task_list[0:ind] + [heading] + tasks +
                              self.task_list[ind:])
    return True