def retrieve_state_summaries(self):
    """Fetch the latest suite state summary and hierarchy info.

    Refreshes the cached summaries, namespace definition order, update
    time string, suite status string and reload flag.
    """
    global_data, task_states, family_states = (
        self.state_summary_client.get_suite_state_summary())
    # A single multi-query call retrieves all four hierarchy structures.
    (self.ancestors, self.ancestors_pruned, self.descendants,
     self.all_families) = self.suite_info_client.get_info(
        {'function': 'get_first_parent_ancestors'},
        {'function': 'get_first_parent_ancestors', 'pruned': True},
        {'function': 'get_first_parent_descendants'},
        {'function': 'get_all_families'})

    self.mode = global_data['run_mode']

    if self.cfg.use_defn_order:
        defn_order = global_data['namespace definition order']
        if self.ns_defn_order != defn_order:
            # Rebuild the name -> rank map only when the order changes.
            self.ns_defn_order = defn_order
            self.dict_ns_defn_order = dict(
                (namespace, rank)
                for rank, namespace in enumerate(defn_order))

    self.update_time_str = get_time_string_from_unix_time(
        global_data['last_updated'])
    self.global_summary = global_data

    if self.restricted_display:
        task_states = self.filter_for_restricted_display(task_states)
    self.full_state_summary = task_states
    self.full_fam_state_summary = family_states
    self.refilter()

    self.status = global_data['status_string']
    self.is_reloading = global_data['reloading']
def formatTime(self, record, datefmt=None):
    """Render the record's creation time as an ISO date-time string.

    Zone-aware conversion is delegated to
    ``get_time_string_from_unix_time``.  The ``datefmt`` argument is
    accepted only for ``logging.Formatter`` interface compatibility.

    Note: This should become redundant in Python 3, because
    "time.strftime" will handle time zone from "localtime" properly.
    """
    created_unix_time = record.created
    return get_time_string_from_unix_time(created_unix_time)
def retrieve_state_summaries(self):
    """Retrieve suite summary.

    Fetches the suite state summary and the family hierarchy info from
    the suite daemon, then updates the summary-derived attributes
    (mode, namespace definition order, update time, status, etc).
    """
    # BUG FIX: the summary was previously fetched twice - the first
    # result was bound to an unused 'ret' variable and discarded,
    # wasting a round-trip to the suite daemon.  Fetch it once.
    glbl, states, fam_states = (
        self.state_summary_client.get_suite_state_summary())
    self.ancestors = self.suite_info_client.get_info(
        'get_first_parent_ancestors')
    self.ancestors_pruned = self.suite_info_client.get_info(
        'get_first_parent_ancestors', pruned=True)
    self.descendants = self.suite_info_client.get_info(
        'get_first_parent_descendants')
    self.all_families = self.suite_info_client.get_info('get_all_families')
    self.mode = glbl['run_mode']
    if self.cfg.use_defn_order:
        nsdo = glbl['namespace definition order']
        if self.ns_defn_order != nsdo:
            # Map namespace name -> definition-order index.
            self.ns_defn_order = nsdo
            self.dict_ns_defn_order = dict(zip(nsdo, range(0, len(nsdo))))
    self.update_time_str = get_time_string_from_unix_time(
        glbl['last_updated'])
    self.global_summary = glbl
    if self.restricted_display:
        states = self.filter_for_restricted_display(states)
    self.full_state_summary = states
    self.full_fam_state_summary = fam_states
    self.refilter()
    self.status = glbl['status_string']
    self.is_reloading = glbl['reloading']
def formatTime(self, record, datefmt=None):
    """Return the record creation time as a zone-correct ISO date-time.

    ``datefmt`` is part of the ``logging.Formatter`` interface and is
    deliberately ignored here.

    Note: This should become redundant in Python 3, because
    "time.strftime" will handle time zone from "localtime" properly.
    """
    # Delegate the unix-time -> string conversion to the shared helper.
    return get_time_string_from_unix_time(record.created)
def retrieve_state_summaries(self):
    """Retrieve the suite state summary and hierarchy info from the daemon.

    Updates the cached summaries, namespace definition order, last-update
    time string and the human-readable suite status string.
    """
    glbl, states, fam_states = (
        self.state_summary_client.get_suite_state_summary())
    self.ancestors = self.suite_info_client.get_info(
        'get_first_parent_ancestors')
    self.ancestors_pruned = self.suite_info_client.get_info(
        'get_first_parent_ancestors', True)
    self.descendants = self.suite_info_client.get_info(
        'get_first_parent_descendants')
    self.all_families = self.suite_info_client.get_info('get_all_families')
    self.triggering_families = self.suite_info_client.get_info(
        'get_triggering_families')
    self.mode = glbl['run_mode']

    if self.cfg.use_defn_order and 'namespace definition order' in glbl:
        # (protect for compat with old suite daemons)
        nsdo = glbl['namespace definition order']
        if self.ns_defn_order != nsdo:
            # Map namespace name -> definition-order index.
            self.ns_defn_order = nsdo
            self.dict_ns_defn_order = dict(zip(nsdo, range(0, len(nsdo))))
    try:
        self.dt = get_time_string_from_unix_time(glbl['last_updated'])
    except (TypeError, ValueError):
        # Older suite... (sent an object with .isoformat, not a unix time).
        self.dt = glbl['last_updated'].isoformat()
    self.global_summary = glbl

    if self.restricted_display:
        states = self.filter_for_restricted_display(states)
    self.full_state_summary = states
    self.full_fam_state_summary = fam_states
    self.refilter()

    # Prioritise which suite state string to display.
    # 1. Are we stopping, or some variant of 'running'?
    if glbl['stopping']:
        self.status = 'stopping'
    elif glbl['will_pause_at']:
        self.status = 'running to hold at ' + glbl['will_pause_at']
    elif glbl['will_stop_at']:
        self.status = 'running to ' + glbl['will_stop_at']
    else:
        self.status = 'running'

    # 2. Override with temporary held status.
    if glbl['paused']:
        self.status = 'held'

    # 3. Override running or held with reloading.
    if not self.status == 'stopping':
        try:
            if glbl['reloading']:
                self.status = 'reloading'
        except KeyError:
            # Back compat.
            pass
def retrieve_state_summaries(self):
    """Retrieve the suite state summary and hierarchy info from the daemon.

    Updates the cached summaries, namespace definition order, last-update
    time string and the human-readable suite status string.
    """
    glbl, states, fam_states = (
        self.state_summary_client.get_suite_state_summary())
    self.ancestors = self.suite_info_client.get_info(
        'get_first_parent_ancestors')
    self.ancestors_pruned = self.suite_info_client.get_info(
        'get_first_parent_ancestors', True)
    self.descendants = self.suite_info_client.get_info(
        'get_first_parent_descendants')
    self.all_families = self.suite_info_client.get_info('get_all_families')
    self.triggering_families = self.suite_info_client.get_info(
        'get_triggering_families')
    self.mode = glbl['run_mode']

    if self.cfg.use_defn_order and 'namespace definition order' in glbl:
        # (protect for compat with old suite daemons)
        nsdo = glbl['namespace definition order']
        if self.ns_defn_order != nsdo:
            # Map namespace name -> definition-order index.
            self.ns_defn_order = nsdo
            self.dict_ns_defn_order = dict(zip(nsdo, range(0, len(nsdo))))
    try:
        self.dt = get_time_string_from_unix_time(glbl['last_updated'])
    except (TypeError, ValueError):
        # Older suite... (sent an object with .isoformat, not a unix time).
        self.dt = glbl['last_updated'].isoformat()
    self.global_summary = glbl

    if self.restricted_display:
        states = self.filter_for_restricted_display(states)
    self.full_state_summary = states
    self.full_fam_state_summary = fam_states
    self.refilter()

    # Prioritise which suite state string to display.
    # 1. Are we stopping, or some variant of 'running'?
    if glbl['stopping']:
        self.status = 'stopping'
    elif glbl['will_pause_at']:
        self.status = 'running to hold at ' + glbl['will_pause_at']
    elif glbl['will_stop_at']:
        self.status = 'running to ' + glbl['will_stop_at']
    else:
        self.status = 'running'

    # 2. Override with temporary held status.
    if glbl['paused']:
        self.status = 'held'

    # 3. Override running or held with reloading.
    if not self.status == 'stopping':
        try:
            if glbl['reloading']:
                self.status = 'reloading'
        except KeyError:
            # Back compat.
            pass
def retrieve_state_summaries(self):
    """Retrieve suite summary."""
    global_data, task_states, family_states = (
        self.state_summary_client.get_suite_state_summary())

    # Fetch each hierarchy structure from the suite info interface.
    info_queries = [
        ('ancestors', ('get_first_parent_ancestors',)),
        ('ancestors_pruned', ('get_first_parent_ancestors', True)),
        ('descendants', ('get_first_parent_descendants',)),
        ('all_families', ('get_all_families',)),
        ('triggering_families', ('get_triggering_families',)),
    ]
    for attr_name, query_args in info_queries:
        setattr(self, attr_name,
                self.suite_info_client.get_info(*query_args))

    self.mode = global_data['run_mode']

    # (protect for compat with old suite daemons)
    if (self.cfg.use_defn_order and
            'namespace definition order' in global_data):
        defn_order = global_data['namespace definition order']
        if self.ns_defn_order != defn_order:
            # Map namespace name -> definition-order index.
            self.ns_defn_order = defn_order
            self.dict_ns_defn_order = dict(
                (namespace, index)
                for index, namespace in enumerate(defn_order))

    try:
        self.update_time_str = get_time_string_from_unix_time(
            global_data['last_updated'])
    except (TypeError, ValueError):
        # Older suite...
        self.update_time_str = global_data['last_updated'].isoformat()

    self.global_summary = global_data

    if self.restricted_display:
        task_states = self.filter_for_restricted_display(task_states)
    self.full_state_summary = task_states
    self.full_fam_state_summary = family_states
    self.refilter()

    if 'status_string' in global_data:
        self.status = global_data['status_string']
    else:
        # Back compat for suite daemons <= 6.9.1.
        self.status = get_suite_status_string(global_data['paused'],
                                              global_data['stopping'],
                                              global_data['will_pause_at'],
                                              global_data['will_stop_at'])
    if 'reloading' in global_data:
        self.is_reloading = global_data['reloading']
def retrieve_state_summaries(self):
    """Retrieve suite summary.

    Fetches task/family state summaries and hierarchy info, then
    updates the mode, definition order, update time and status
    attributes, with fallbacks for older suite daemons.
    """
    glbl, states, fam_states = (
        self.state_summary_client.get_suite_state_summary())
    self.ancestors = self.suite_info_client.get_info(
        'get_first_parent_ancestors')
    self.ancestors_pruned = self.suite_info_client.get_info(
        'get_first_parent_ancestors', True)
    self.descendants = self.suite_info_client.get_info(
        'get_first_parent_descendants')
    self.all_families = self.suite_info_client.get_info('get_all_families')
    self.triggering_families = self.suite_info_client.get_info(
        'get_triggering_families')
    self.mode = glbl['run_mode']
    if self.cfg.use_defn_order and 'namespace definition order' in glbl:
        # (protect for compat with old suite daemons)
        nsdo = glbl['namespace definition order']
        if self.ns_defn_order != nsdo:
            # Map namespace name -> definition-order index.
            self.ns_defn_order = nsdo
            self.dict_ns_defn_order = dict(zip(nsdo, range(0, len(nsdo))))
    try:
        self.update_time_str = get_time_string_from_unix_time(
            glbl['last_updated'])
    except (TypeError, ValueError):
        # Older suite... (sent an object with .isoformat, not a unix time).
        self.update_time_str = glbl['last_updated'].isoformat()
    self.global_summary = glbl
    if self.restricted_display:
        states = self.filter_for_restricted_display(states)
    self.full_state_summary = states
    self.full_fam_state_summary = fam_states
    self.refilter()
    try:
        self.status = glbl['status_string']
    except KeyError:
        # Back compat for suite daemons <= 6.9.1.
        self.status = get_suite_status_string(
            glbl['paused'], glbl['stopping'],
            glbl['will_pause_at'], glbl['will_stop_at'])
    try:
        self.is_reloading = glbl['reloading']
    except KeyError:
        # Back compat.
        pass
def update_gui( self ):
    """Update the treeview with new task and family information.

    This redraws the treeview, but keeps a memory of user-expanded
    rows in 'expand_me' so that the tree is still expanded in the
    right places.

    If auto-expand is on, calculate which rows need auto-expansion
    and expand those as well.

    """
    model = self.ttreeview.get_model()

    # Retrieve any user-expanded rows so that we can expand them later.
    expand_me = self._get_user_expanded_row_ids()
    daemon_time_zone_info = self.updater.global_summary.get(
        "daemon time zone info")
    new_data = {}
    new_fam_data = {}
    self.ttree_paths.clear()
    # Date part of the last update, used to abbreviate time strings.
    if "T" in self.updater.dt:
        last_update_date = self.updater.dt.split("T")[0]
    else:
        last_update_date = None

    # Estimated-completion-time cache entries not seen this update get
    # cleared at the end.
    tetc_cached_ids_left = set(self._id_tetc_cache)

    for summary, dest in [(self.updater.state_summary, new_data),
                          (self.updater.fam_state_summary, new_fam_data)]:
        # Populate new_data and new_fam_data.
        for id in summary:
            name, point_string = cylc.TaskID.split( id )
            if point_string not in dest:
                dest[ point_string ] = {}
            state = summary[ id ].get('state')

            # Populate task timing slots.
            t_info = {}
            tkeys = ['submitted_time_string', 'started_time_string',
                     'finished_time_string']

            if id in self.fam_state_summary:
                # Family timing currently left empty.
                for dt in tkeys:
                    t_info[dt] = ""
                t_info['mean_total_elapsed_time_string'] = ""
            else:
                meant = summary[id].get('mean total elapsed time')
                tstart = summary[id].get('started_time')
                tetc_string = None

                for dt in tkeys:
                    try:
                        t_info[dt] = summary[id][dt]
                    except KeyError:
                        # Pre cylc-6 back compat: no special "_string" items,
                        # and the data was in string form already.
                        odt = dt.replace("_string", "")
                        try:
                            t_info[dt] = summary[id][odt]
                        except KeyError:
                            if dt == 'finished_time_string':
                                # Was succeeded_time.
                                t_info[dt] = summary[id].get(
                                    'succeeded_time')
                            else:
                                t_info[dt] = None
                        if isinstance(t_info[dt], str):
                            # Remove decimal fraction seconds.
                            t_info[dt] = t_info[dt].split('.')[0]

                if (t_info['finished_time_string'] is None and
                        isinstance(tstart, float) and
                        (isinstance(meant, float) or
                         isinstance(meant, int))):
                    # Task not finished, but has started and has a meant;
                    # so we can compute an expected time of completion.
                    tetc_unix = tstart + meant
                    tetc_string = (
                        self._id_tetc_cache.get(id, {}).get(tetc_unix))
                    if tetc_string is None:
                        # We have to calculate it.
                        tetc_string = get_time_string_from_unix_time(
                            tetc_unix,
                            custom_time_zone_info=daemon_time_zone_info
                        )
                        self._id_tetc_cache[id] = {tetc_unix: tetc_string}
                    t_info['finished_time_string'] = tetc_string
                    estimated_t_finish = True
                else:
                    estimated_t_finish = False

                # Format the mean elapsed time as an ISO 8601 duration.
                if isinstance(meant, float) or isinstance(meant, int):
                    if meant == 0:
                        # This is a very fast (sub cylc-resolution) task.
                        meant = 1
                    meant = int(meant)
                    meant_minutes, meant_seconds = divmod(meant, 60)
                    if meant_minutes != 0:
                        meant_string = "PT%dM%dS" % (
                            meant_minutes, meant_seconds)
                    else:
                        meant_string = "PT%dS" % meant_seconds
                elif isinstance(meant,str):
                    meant_string = meant
                else:
                    meant_string = "*"
                t_info['mean_total_elapsed_time_string'] = meant_string

                for dt in tkeys:
                    if t_info[dt] is not None:
                        # Abbreviate time strings in context.
                        t_info[dt] = (
                            self._alter_date_time_string_for_context(
                                t_info[dt], last_update_date)
                        )
                    else:
                        # Or (no time info yet) use an asterix.
                        t_info[dt] = "*"

                if estimated_t_finish:
                    # TODO - this markup probably affects sort order?
                    t_info['finished_time_string'] = "<i>%s?</i>" % (
                        t_info['finished_time_string'])

            # Use "*" (or "" for family rows) until slot is populated
            # and for pre cylc-6 back compat for host and job ID cols.
            job_id = summary[id].get('submit_method_id')
            host = summary[id].get('host')
            message = summary[ id ].get('latest_message')
            if message is not None and last_update_date is not None:
                # Drop today's date prefix from the message timestamp.
                message = message.replace(last_update_date + "T", "", 1)
            if id in self.fam_state_summary:
                dot_type = 'family'
                job_id = job_id or ""
                host = host or ""
                message = message or ""
            else:
                dot_type = 'task'
                job_id = job_id or "*"
                host = host or "*"
                message = message or "*"

            try:
                icon = self.dots[dot_type][state]
            except KeyError:
                # Fall back to the generic icon for unrecognised states.
                icon = self.dots[dot_type]['unknown']

            # Row data order must match the treeview column layout.
            dest[point_string][name] = [
                state, host, job_id,
                t_info['submitted_time_string'],
                t_info['started_time_string'],
                t_info['finished_time_string'],
                t_info['mean_total_elapsed_time_string'],
                message, icon
            ]

    for id in tetc_cached_ids_left:
        # These ids were not present in the summary - so clear them.
        self._id_tetc_cache.pop(id)

    # NOTE(review): 'tree_data' and 'name_iters' below appear unused.
    tree_data = {}
    self.ttreestore.clear()
    point_strings = new_data.keys()
    point_strings.sort()
    for point_string in point_strings:
        # Add the cycle point row, with root family data if present.
        f_data = [ None ] * 7
        if "root" in new_fam_data[point_string]:
            f_data = new_fam_data[point_string]["root"]
        piter = self.ttreestore.append(
            None, [ point_string, point_string ] + f_data )
        family_iters = {}
        name_iters = {}
        task_named_paths = []
        for name in new_data[ point_string ].keys():
            # The following line should filter by allowed families.
            families = list(self.ancestors[name])
            # cmp-style sort: order families from outermost to innermost.
            families.sort(lambda x, y: (y in self.ancestors[x]) -
                                       (x in self.ancestors[y]))
            if "root" in families:
                families.remove("root")
            if name in families:
                families.remove(name)
            if not self.should_group_families:
                families = []
            task_path = families + [name]
            task_named_paths.append(task_path)

        # Sorting here every time the treeview is updated makes
        # definition sort order the default "unsorted" order
        # (any column-click sorting is done on top of this).
        if self.cfg.use_defn_order and self.updater.ns_defn_order:
            task_named_paths.sort(
                key=lambda x: map(
                    self.updater.dict_ns_defn_order.get, x ) )
        else:
            task_named_paths.sort()

        for named_path in task_named_paths:
            name = named_path[-1]
            state = new_data[point_string][name][0]
            self._update_path_info( piter, state, name )
            f_iter = piter
            for i, fam in enumerate(named_path[:-1]):
                # Construct family tree for this task.
                if fam in family_iters:
                    # Family already in tree
                    f_iter = family_iters[fam]
                else:
                    # Add family to tree
                    f_data = [ None ] * 7
                    if fam in new_fam_data[point_string]:
                        f_data = new_fam_data[point_string][fam]
                    f_iter = self.ttreestore.append(
                        f_iter, [ point_string, fam ] + f_data )
                    family_iters[fam] = f_iter
                self._update_path_info( f_iter, state, name )
            # Add task to tree
            self.ttreestore.append(
                f_iter, [ point_string, name ] +
                new_data[point_string][name])
    if self.autoexpand:
        autoexpand_me = self._get_autoexpand_rows()
        for row_id in list(autoexpand_me):
            if row_id in expand_me:
                # User expanded row also meets auto-expand criteria.
                autoexpand_me.remove(row_id)
        expand_me += autoexpand_me
        self._last_autoexpand_me = autoexpand_me
    if model is None:
        return
    model.get_model().refilter()
    model.sort_column_changed()
    # Expand all the rows that were user-expanded or need auto-expansion.
    model.foreach( self._expand_row, expand_me )
    return False
def update_gui(self):
    """Update the treeview with new task and family information.

    This redraws the treeview, but keeps a memory of user-expanded
    rows in 'expand_me' so that the tree is still expanded in the
    right places.

    If auto-expand is on, calculate which rows need auto-expansion
    and expand those as well.

    """
    self.action_required = False

    # We've a view -> sort model -> filter model -> base model hierarchy.
    model = self.ttreeview.get_model()

    # Retrieve any user-expanded rows so that we can expand them later.
    # This is only really necessary for edge cases in tree reconstruction.
    expand_me = self._get_user_expanded_row_ids()
    try:
        time_zone_info = self.updater.global_summary.get("time zone info")
    except KeyError:
        # Back compat <= 7.5.0
        time_zone_info = self.updater.global_summary.get(
            "daemon time zone info")

    # Store the state, times, messages, etc for tasks and families.
    new_data = {}
    new_fam_data = {}

    self.ttree_paths.clear()

    # Date part of the last update, used to abbreviate message times.
    if "T" in self.updater.update_time_str:
        last_update_date = self.updater.update_time_str.split("T")[0]
    else:
        last_update_date = None

    tetc_cached_ids_left = set(self._id_tetc_cache)

    # Start figuring out if we can get away with not rebuilding the tree.
    id_named_paths = {}
    should_rebuild_tree = False
    update_row_ids = []
    task_row_ids_left = set()
    for point_string, name_paths in self._prev_id_named_paths.items():
        for name in name_paths:
            task_row_ids_left.add((point_string, name))

    for summary, dest, prev, is_fam in [
            (self.updater.state_summary, new_data, self._prev_data, False),
            (self.updater.fam_state_summary, new_fam_data,
             self._prev_fam_data, True)]:
        # Populate new_data and new_fam_data.
        for id_ in summary:
            name, point_string = TaskID.split(id_)
            if point_string not in dest:
                dest[point_string] = {}
            state = summary[id_].get('state')

            # Populate task timing slots.
            t_info = {}
            tkeys = ['submitted_time_string', 'started_time_string',
                     'finished_time_string']

            if is_fam:
                # Family timing currently left empty.
                for dt in tkeys:
                    t_info[dt] = ""
                t_info['mean_elapsed_time_string'] = ""
                t_info['progress'] = 0
            else:
                meant = summary[id_].get('mean_elapsed_time')
                tstart = summary[id_].get('started_time')
                tetc_string = None

                for dt in tkeys:
                    t_info[dt] = summary[id_][dt]

                # Compute percent progress.
                t_info['progress'] = 0
                if (isinstance(tstart, float) and (
                        isinstance(meant, float) or
                        isinstance(meant, int))):
                    tetc_unix = tstart + meant
                    tnow = time()
                    if tstart > tnow:
                        # Reportably possible via interraction with
                        # cylc reset.
                        t_info['progress'] = 0
                    elif tnow > tetc_unix:
                        t_info['progress'] = 100
                    elif meant != 0:
                        t_info['progress'] = int(
                            100 * (tnow - tstart) / (meant))

                if (t_info['finished_time_string'] is None and
                        isinstance(tstart, float) and
                        (isinstance(meant, float) or
                         isinstance(meant, int))):
                    # Task not finished, but has started and has a meant;
                    # so we can compute an expected time of completion.
                    tetc_string = (
                        self._id_tetc_cache.get(id_, {}).get(tetc_unix))
                    if tetc_string is None:
                        # We have to calculate it.
                        tetc_string = get_time_string_from_unix_time(
                            tetc_unix,
                            custom_time_zone_info=time_zone_info)
                        self._id_tetc_cache[id_] = {tetc_unix: tetc_string}
                    t_info['finished_time_string'] = tetc_string
                    estimated_t_finish = True
                else:
                    estimated_t_finish = False

                # Format the mean elapsed time as an ISO 8601 duration.
                if isinstance(meant, float) or isinstance(meant, int):
                    if meant == 0:
                        # This is a very fast (sub cylc-resolution) task.
                        meant = 1
                    meant = int(meant)
                    meant_minutes, meant_seconds = divmod(meant, 60)
                    if meant_minutes != 0:
                        meant_string = "PT%dM%dS" % (
                            meant_minutes, meant_seconds)
                    else:
                        meant_string = "PT%dS" % meant_seconds
                elif isinstance(meant, str):
                    meant_string = meant
                else:
                    meant_string = "*"
                t_info['mean_elapsed_time_string'] = meant_string

                for dt in tkeys:
                    if t_info[dt] is None:
                        # Or (no time info yet) use an asterix.
                        t_info[dt] = "*"

                if estimated_t_finish:
                    # Mark the finish time as an estimate.
                    t_info['finished_time_string'] = "%s?" % (
                        t_info['finished_time_string'])

            # Use "*" (or "" for family rows) until slot is populated.
            job_id = summary[id_].get('submit_method_id')
            batch_sys_name = summary[id_].get('batch_sys_name')
            host = summary[id_].get('host')
            message = summary[id_].get('latest_message')
            if message is not None:
                if last_update_date is not None:
                    # Drop today's date prefix from the message timestamp.
                    message = message.replace(
                        last_update_date + "T", "", 1)
                submit_num = summary[id_].get('submit_num')
                if submit_num:
                    message = "job(%02d) " % submit_num + message
            if is_fam:
                dot_type = 'family'
                job_id = job_id or ""
                batch_sys_name = batch_sys_name or ""
                host = host or ""
                message = message or ""
            else:
                dot_type = 'task'
                job_id = job_id or "*"
                batch_sys_name = batch_sys_name or "*"
                host = host or "*"
                message = message or "*"

            icon = self.dots[dot_type][state]

            # Row data order must match the treeview column layout.
            new_info = [
                state, host, batch_sys_name, job_id,
                t_info['submitted_time_string'],
                t_info['started_time_string'],
                t_info['finished_time_string'],
                t_info['mean_elapsed_time_string'],
                message, icon, t_info['progress']
            ]
            dest[point_string][name] = new_info

            # Did we already have this information?
            prev_info = prev.get(point_string, {}).get(name)
            if prev_info is None:
                # No entry for this task or family before, rebuild tree.
                should_rebuild_tree = True

            if prev_info != new_info:
                # Different info: add it to the list of to-be-updated-ids.
                if is_fam and name == "root":
                    name = point_string
                update_row_ids.append((point_string, name, is_fam))

            if not is_fam and name in self.ancestors:
                # Calculate the family nesting for tasks.
                families = list(self.ancestors[name])
                # cmp-style sort: outermost to innermost family.
                families.sort(lambda x, y: (y in self.ancestors[x]) -
                                           (x in self.ancestors[y]))
                if "root" in families:
                    families.remove("root")
                if name in families:
                    families.remove(name)
                if not self.should_group_families:
                    families = []
                named_path = families + [name]
                id_named_paths.setdefault(point_string, {})
                id_named_paths[point_string][name] = named_path
                prev_named_path = self._prev_id_named_paths.get(
                    point_string, {}).get(name)
                if prev_named_path != named_path:
                    # New task or location for the task, rebuild tree.
                    should_rebuild_tree = True

    if task_row_ids_left:
        # Some previous task ids need deleting, so rebuild the tree.
        should_rebuild_tree = True

    for id_ in tetc_cached_ids_left:
        # These ids were not present in the summary - so clear them.
        self._id_tetc_cache.pop(id_)

    # Cache the current row point-string and names.
    row_id_iters_left = {}
    self.ttreestore.foreach(self._cache_row_id_iters, row_id_iters_left)

    point_strings = new_data.keys()
    point_strings.sort()  # This basic sort is not always desirable.

    # Store a column index list for use with the 'TreeModel.set' method.
    columns = range(self.ttreestore.get_n_columns())

    if should_rebuild_tree:
        # Carefully synchronise the tree with new information.
        # For each id, calculate the new path and add or replace that path
        # in the self.ttreestore.
        for i, point_string in enumerate(point_strings):
            try:
                p_data = new_fam_data[point_string]["root"]
            except KeyError:
                p_data = [None] * 11
            p_path = (i,)
            p_row_id = (point_string, point_string)
            p_data = list(p_row_id) + p_data
            p_iter = self._update_model(
                self.ttreestore, columns, p_path, p_row_id, p_data,
                row_id_iters_left)

            task_named_paths = id_named_paths.get(
                point_string, {}).values()

            # Sorting here every time the treeview is updated makes
            # definition sort order the default "unsorted" order
            # (any column-click sorting is done on top of this).
            if self.cfg.use_defn_order and self.updater.ns_defn_order:
                task_named_paths.sort(
                    key=lambda x: map(
                        self.updater.dict_ns_defn_order.get, x))
            else:
                task_named_paths.sort()

            family_num_children = {}  # Store how many sub-paths are here.
            family_paths = {point_string: p_path}
            family_iters = {}

            for named_path in task_named_paths:
                # The families within a cycle point leading to a task.
                # For a task foo_bar in family FOOBAR in family FOO, it
                # would read ["FOO", "FOOBAR", "foo_bar"] in grouped mode
                # and simply ["foo_bar"] in non-grouped mode.
                name = named_path[-1]
                state = new_data[point_string][name][0]
                self._update_path_info(p_iter, state, name)

                f_iter = p_iter
                f_path = p_path
                fam = point_string
                for i, fam in enumerate(named_path[:-1]):
                    # Construct family nesting for this task.
                    if fam in family_iters:
                        # Family already in tree
                        f_iter = family_iters[fam]
                        f_path = family_paths[fam]
                    else:
                        # Add family to tree
                        try:
                            f_data = new_fam_data[point_string][fam]
                        except KeyError:
                            f_data = [None] * 7
                        if i > 0:
                            parent_fam = named_path[i - 1]
                        else:
                            # point_string is the implicit parent here.
                            parent_fam = point_string
                        family_num_children.setdefault(parent_fam, 0)
                        family_num_children[parent_fam] += 1
                        f_row_id = (point_string, fam)
                        f_data = list(f_row_id) + f_data
                        # New path is parent_path + (siblings + 1).
                        f_path = tuple(
                            list(family_paths[parent_fam]) +
                            [family_num_children[parent_fam] - 1])
                        f_iter = self._update_model(
                            self.ttreestore, columns, f_path,
                            f_row_id, f_data, row_id_iters_left)
                        family_iters[fam] = f_iter
                        family_paths[fam] = f_path
                    self._update_path_info(f_iter, state, name)

                # Add task to tree using the family path we just found.
                parent_fam = fam
                family_num_children.setdefault(parent_fam, 0)
                family_num_children[parent_fam] += 1
                t_path = tuple(
                    list(f_path) + [family_num_children[parent_fam] - 1])
                t_row_id = (point_string, name)
                t_data = list(t_row_id) + new_data[point_string][name]
                self._update_model(self.ttreestore, columns, t_path,
                                   t_row_id, t_data, row_id_iters_left)

        # Adding and updating finished - now we need to delete left overs.
        delete_items = row_id_iters_left.items()
        # Sort reversed by path, to get children before parents.
        delete_items.sort(key=lambda x: x[1][1], reverse=True)
        if delete_items:
            # Although we've cached the iters in row_id_iters_left,
            # they can't be relied upon to give sensible addresses.
            # We have to re-cache the iters for each task and family.
            row_id_iters = {}
            self.ttreestore.foreach(
                self._cache_row_id_iters, row_id_iters)
        for delete_row_id, _ in delete_items:
            real_location = row_id_iters.get(delete_row_id)
            if real_location is None:
                continue
            delete_iter = real_location[0]
            if self.ttreestore.iter_is_valid(delete_iter):
                self.ttreestore.remove(delete_iter)
    else:  # not should_rebuild_tree
        # Update the tree in place - no row has been added or deleted.
        # Our row_id_iters_left cache is still valid.
        for point_string, name, is_fam in sorted(update_row_ids):
            try:
                if is_fam and name == point_string:
                    data = new_fam_data[point_string]["root"]
                elif is_fam:
                    data = new_fam_data[point_string][name]
                else:
                    data = new_data[point_string][name]
                iter_ = row_id_iters_left[(point_string, name)][0]
            except KeyError:
                if not is_fam:
                    raise
                # Families are not always shown, so this is OK.
                continue
            set_data = [point_string, name] + data
            set_args = itertools.chain(*zip(columns, set_data))
            self.ttreestore.set(iter_, *set_args)

    if self.autoexpand:
        autoexpand_me = self._get_autoexpand_rows()
        for row_id in list(autoexpand_me):
            if row_id in expand_me:
                # User expanded row also meets auto-expand criteria.
                autoexpand_me.remove(row_id)
        expand_me += autoexpand_me
        self._last_autoexpand_me = autoexpand_me

    if model is None:
        return
    # This re-evaluates the filtering for every row in the model.
    # This could be more targeted.
    model.get_model().refilter()
    model.sort_column_changed()

    # Expand all the rows that were user-expanded or need auto-expansion.
    model.foreach(self._expand_row, expand_me)

    # Remember this update so the next one can diff against it.
    self._prev_id_named_paths = id_named_paths
    self._prev_data = new_data
    self._prev_fam_data = new_fam_data
    return False
def update(self):
    """Poll the suite daemon and refresh cached summaries if they changed.

    Returns True if anything (state summaries or the error log)
    changed, False otherwise; schedules self.connection_lost on comms
    failure.
    """
    if self.god is None:
        # No remote proxy - treat as a lost connection.
        gobject.idle_add( self.connection_lost )
        return False
    try:
        new_err_content, new_err_size = self.log.get_err_content(
            prev_size=self.err_log_size,
            max_lines=self._err_num_log_lines)
    except (AttributeError, Pyro.errors.NamingError):
        # TODO: post-backwards compatibility concerns, remove this handling.
        new_err_content = ""
        new_err_size = self.err_log_size
    except Pyro.errors.ProtocolError:
        gobject.idle_add( self.connection_lost )
        return False

    err_log_changed = (new_err_size != self.err_log_size)
    if err_log_changed:
        # Append new lines, keeping only the most recent ones.
        self.err_log_lines += new_err_content.splitlines()
        self.err_log_lines = self.err_log_lines[-self._err_num_log_lines:]
        self.err_log_size = new_err_size

    update_summaries = False
    try:
        summary_update_time = self.god.get_summary_update_time()
        if (summary_update_time is None or
                self._summary_update_time is None or
                summary_update_time != self._summary_update_time):
            self._summary_update_time = summary_update_time
            update_summaries = True
    except AttributeError as e:
        # TODO: post-backwards compatibility concerns, remove this handling.
        # Force an update for daemons using the old API.
        update_summaries = True
    except (Pyro.errors.ProtocolError, Pyro.errors.NamingError):
        gobject.idle_add( self.connection_lost )
        return False

    if update_summaries:
        try:
            [glbl, states, fam_states] = self.god.get_state_summary()
            self._retrieve_hierarchy_info()  # may change on reload
        except (Pyro.errors.ProtocolError, Pyro.errors.NamingError):
            gobject.idle_add( self.connection_lost )
            return False

        if not glbl:
            # Empty summary - nothing to show yet.
            self.task_list = []
            return False

        # Derive the human-readable suite status string.
        if glbl['stopping']:
            self.status = 'stopping'
        elif glbl['paused']:
            self.status = 'held'
        elif glbl['will_pause_at']:
            self.status = 'hold at ' + glbl[ 'will_pause_at' ]
        elif glbl['will_stop_at']:
            self.status = 'running to ' + glbl[ 'will_stop_at' ]
        else:
            self.status = 'running'

        self.mode = glbl['run_mode']

        if self.cfg.use_defn_order and 'namespace definition order' in glbl:
            # (protect for compat with old suite daemons)
            nsdo = glbl['namespace definition order']
            if self.ns_defn_order != nsdo:
                # Map namespace name -> definition-order index.
                self.ns_defn_order = nsdo
                self.dict_ns_defn_order = dict(zip(nsdo, range(0,len(nsdo))))
        try:
            self.dt = get_time_string_from_unix_time(glbl['last_updated'])
        except (TypeError, ValueError):
            # Older suite... (sent an object with .isoformat, not unix time).
            self.dt = glbl['last_updated'].isoformat()
        self.global_summary = glbl

        if self.restricted_display:
            states = self.filter_for_restricted_display(states)

        self.full_state_summary = states
        self.full_fam_state_summary = fam_states
        self.refilter()

    if update_summaries or err_log_changed:
        return True
    return False
def delay_timeout_as_str(self):
    """Return a string in the form "delay (after timeout)"."""
    delay_str = get_seconds_as_interval_string(self.delay)
    timeout_str = get_time_string_from_unix_time(self.timeout)
    return r"%s (after %s)" % (delay_str, timeout_str)
def formatTime(self, record, datefmt=None):
    """Return the record creation time as an ISO 8601 date-time string.

    ``datefmt`` is accepted for ``logging.Formatter`` interface
    compatibility but is not used by this implementation.
    """
    unix_time = record.created
    return get_time_string_from_unix_time(unix_time)
def timeout_as_str(self):
    """Return this object's timeout formatted as an ISO8601 date-time."""
    timeout_unix_time = self.timeout
    return get_time_string_from_unix_time(timeout_unix_time)
def formatTime(self, record, datefmt=None):
    """Format the log record's creation time via the project time helper.

    ``datefmt`` is accepted for ``logging.Formatter`` interface
    compatibility but is not used here.
    """
    return get_time_string_from_unix_time(record.created)
def formatTime(self, record, datefmt=None):
    """Format the record creation time as an iso8601 datetime string.

    The ``datefmt`` parameter exists only to match the
    ``logging.Formatter`` signature; it is ignored.
    """
    # The project helper handles the unix-time conversion.
    return get_time_string_from_unix_time(record.created)
def reconnect(self):
    """Try to reconnect to the suite daemon.

    On success: record the daemon version, update the window title,
    mark the session connected and warn (once) on a cylc version
    mismatch.  On failure: show the stop summary if one is available,
    or an appropriate warning, then return so the reconnection
    schedule can retry later.
    """
    if cylc.flags.debug:
        print >> sys.stderr, " reconnection...",
    # Reset comms clients.
    self.suite_log_client.reset()
    self.state_summary_client.reset()
    self.suite_info_client.reset()
    self.suite_command_client.reset()
    try:
        # A successful info call doubles as the connection probe.
        self.daemon_version = self.suite_info_client.get_info(
            'get_cylc_version')
    except ConnectionDeniedError as exc:
        # BUG FIX: this handler must precede the ConnectionError one
        # below - ConnectionDeniedError is the more specific exception
        # (the other copy of this method orders them this way round),
        # so the broader handler would otherwise shadow it and the
        # "incorrect passphrase" warning would never be shown.
        if cylc.flags.debug:
            traceback.print_exc()
        if not self.connect_fail_warned:
            self.connect_fail_warned = True
            gobject.idle_add(
                self.warn,
                "ERROR: %s\n\nIncorrect suite passphrase?" % exc)
        return
    except ConnectionError:
        # Failed to (re)connect.
        # Suite not running, starting up or just stopped.
        if cylc.flags.debug:
            traceback.print_exc()
        # Use info bar to display stop summary if available.
        # Otherwise, just display the reconnect count down.
        if self.cfg.suite and self.stop_summary is None:
            stop_summary = get_stop_state_summary(
                cat_state(self.cfg.suite, self.cfg.host, self.cfg.owner))
            self.last_update_time = time()
            if stop_summary != self.stop_summary:
                self.stop_summary = stop_summary
                self.status = SUITE_STATUS_STOPPED
                gobject.idle_add(
                    self.info_bar.set_stop_summary, stop_summary)
        try:
            update_time_str = get_time_string_from_unix_time(
                self.stop_summary[0]["last_updated"])
        except (AttributeError, IndexError, KeyError, TypeError):
            # No stop summary (or an old-format one) - no update time.
            update_time_str = None
        gobject.idle_add(
            self.info_bar.set_update_time,
            update_time_str, self.info_bar.DISCONNECTED_TEXT)
        return
    except Exception as exc:
        if cylc.flags.debug:
            traceback.print_exc()
        if not self.connect_fail_warned:
            self.connect_fail_warned = True
            gobject.idle_add(self.warn, str(exc))
        return
    gobject.idle_add(
        self.app_window.set_title, "%s - %s:%s" % (
            self.cfg.suite, self.suite_info_client.host,
            self.suite_info_client.port))
    if cylc.flags.debug:
        print >> sys.stderr, "succeeded"
    # Connected.
    self.connected = True
    # This status will be very transient:
    self.set_status(SUITE_STATUS_CONNECTED)
    self.connect_fail_warned = False
    self.connect_schd.stop()
    if cylc.flags.debug:
        print >> sys.stderr, (
            "succeeded: daemon v %s" % self.daemon_version)
    if (self.daemon_version != CYLC_VERSION and
            not self.version_mismatch_warned):
        # (warn only once - reconnect() will be called multiple times
        # during initialisation of daemons at <= 6.4.0 (for which the state
        # summary object is not connected until all tasks are loaded).
        gobject.idle_add(
            self.warn,
            "Warning: cylc version mismatch!\n\n" +
            "Suite running with %r.\n" % self.daemon_version +
            "gcylc at %r.\n" % CYLC_VERSION)
        self.version_mismatch_warned = True
    self.stop_summary = None
    self.err_log_lines = []
    self.err_log_size = 0
    self.last_update_time = time()
def reconnect(self):
    """Try to reconnect to the suite daemon."""
    # NOTE(review): Python 2 code ("print >> stream" syntax).
    if cylc.flags.debug:
        print >> sys.stderr, " reconnection...",
    # Reset comms clients.
    self.suite_log_client.reset()
    self.state_summary_client.reset()
    self.suite_info_client.reset()
    self.suite_command_client.reset()
    try:
        # A successful info call doubles as the connection probe.
        self.daemon_version = self.suite_info_client.get_info(
            'get_cylc_version')
    except ConnectionDeniedError as exc:
        # Handled before ConnectionError: presumably the more specific
        # subclass, so it must come first or it would be shadowed.
        if cylc.flags.debug:
            traceback.print_exc()
        if not self.connect_fail_warned:
            # Warn only once per failure streak.
            self.connect_fail_warned = True
            gobject.idle_add(
                self.warn,
                "ERROR: %s\n\nIncorrect suite passphrase?" % exc)
        return
    except ConnectionError as exc:
        # Failed to (re)connect
        # Suite not running, starting up or just stopped.
        if cylc.flags.debug:
            traceback.print_exc()
        # Use info bar to display stop summary if available.
        # Otherwise, just display the reconnect count down.
        if self.cfg.suite and self.stop_summary is None:
            stop_summary = get_stop_state_summary(
                cat_state(self.cfg.suite, self.cfg.host, self.cfg.owner))
            self.last_update_time = time()
            if stop_summary != self.stop_summary:
                self.stop_summary = stop_summary
                self.status = SUITE_STATUS_STOPPED
                gobject.idle_add(
                    self.info_bar.set_stop_summary, stop_summary)
        try:
            update_time_str = get_time_string_from_unix_time(
                self.stop_summary[0]["last_updated"])
        except (AttributeError, IndexError, KeyError, TypeError):
            # No usable stop summary - leave the update time blank.
            update_time_str = None
        gobject.idle_add(
            self.info_bar.set_update_time,
            update_time_str, self.info_bar.DISCONNECTED_TEXT)
        return
    except Exception as exc:
        # Any other failure: warn once and wait for the next retry.
        if cylc.flags.debug:
            traceback.print_exc()
        if not self.connect_fail_warned:
            self.connect_fail_warned = True
            gobject.idle_add(self.warn, str(exc))
        return
    # Connection succeeded: show host/port in the window title.
    gobject.idle_add(
        self.app_window.set_title, "%s - %s:%s" % (
            self.cfg.suite, self.suite_info_client.host,
            self.suite_info_client.port))
    if cylc.flags.debug:
        print >> sys.stderr, "succeeded"
    # Connected.
    self.connected = True
    # This status will be very transient:
    self.set_status(SUITE_STATUS_CONNECTED)
    self.connect_fail_warned = False
    self.connect_schd.stop()
    if cylc.flags.debug:
        print >> sys.stderr, (
            "succeeded: daemon v %s" % self.daemon_version)
    if (self.daemon_version != CYLC_VERSION and
            not self.version_mismatch_warned):
        # (warn only once - reconnect() will be called multiple times
        # during initialisation of daemons at <= 6.4.0 (for which the state
        # summary object is not connected until all tasks are loaded).
        gobject.idle_add(
            self.warn,
            "Warning: cylc version mismatch!\n\n" +
            "Suite running with %r.\n" % self.daemon_version +
            "gcylc at %r.\n" % CYLC_VERSION)
        self.version_mismatch_warned = True
    # Clear state left over from the previous (dis)connection.
    self.stop_summary = None
    self.err_log_lines = []
    self.err_log_size = 0
    self.last_update_time = time()
def timeout_as_str(self):
    """Return the timeout as an ISO8601 date-time string."""
    # Convert the stored unix-time timeout to the display form.
    unix_time = self.timeout
    return get_time_string_from_unix_time(unix_time)
def delay_timeout_as_str(self):
    """Return a string in the form "delay (after timeout)"."""
    parts = (
        get_seconds_as_interval_string(self.delay),
        get_time_string_from_unix_time(self.timeout),
    )
    return r"%s (after %s)" % parts