def shorten_tabs(self): 
    """Try to shorten tab labels by filtering out common substrings.

    Two passes are made over the collected tab titles: the longest common
    substring is replaced with an ellipsis, and the longest common *prefix*
    is stripped as well. Tab titles start with the test name, and the same
    test is commonly loaded several times, so this double search also cuts
    off the (shared) test name when the longest substring happens to sit in
    the middle of the tab name."""
    widgets = [self.viewArea.widget(idx) for idx in range(self.viewArea.count())]
    titles = [w.title for w in widgets]
    long_titles = [w.long_title for w in widgets]

    common = util.long_substr(titles)
    common_prefix = util.long_substr(titles, prefix_only=True)

    for idx, title in enumerate(titles):
        if common:
            label = title.replace(common, "...")
            if common_prefix and common_prefix != common:
                label = label.replace(common_prefix, "...").replace("......", "...")
        # Fall back to the full title when nothing was shortened, or when
        # shortening collapsed the whole label into just an ellipsis.
        if not common or label == "...":
            label = title
        self.viewArea.setTabText(idx, label)
        # The tooltip always carries the full, unshortened title.
        self.viewArea.setTabToolTip(idx, long_titles[idx])
def shorten_tabs(self): 
    """Try to shorten tab labels by filtering out common substrings.

    Tabs still showing the default title are skipped entirely. For the
    rest, the longest common substring is replaced with an ellipsis, and
    the longest common *prefix* is stripped as well. Tab titles start with
    the test name, and the same test is commonly loaded several times, so
    this double search also cuts off the (shared) test name when the
    longest substring happens to sit in the middle of the tab name."""
    indexes, titles, long_titles = [], [], []
    for idx in range(self.viewArea.count()):
        widget = self.viewArea.widget(idx)
        # Untitled (default) tabs would skew the common-substring search.
        if widget.title == ResultWidget.default_title:
            continue
        indexes.append(idx)
        titles.append(widget.title)
        long_titles.append(widget.long_title)

    common = util.long_substr(titles)
    common_prefix = util.long_substr(titles, prefix_only=True)

    for idx, title, tooltip in zip(indexes, titles, long_titles):
        label = title
        if common:
            label = title.replace(common, "...")
            if common_prefix and common_prefix != common:
                label = label.replace(common_prefix, "...").replace("......", "...")
            # If shortening collapsed the whole label into just an
            # ellipsis, keep the full title instead.
            if label == "...":
                label = title
        self.viewArea.setTabText(idx, label)
        # The tooltip always carries the full, unshortened title.
        self.viewArea.setTabToolTip(idx, tooltip)
def _filter_labels(self, labels): for s,d in self.replace_legend.items(): labels = [l.replace(s,d) for l in labels] for r in self.filter_regexp: labels = [re.sub(r, "", l) for l in labels] if self.filter_legend and labels: substr = long_substr(labels) if len(substr) > 3 and substr != " - ": labels = [l.replace(substr, '') for l in labels] prefix = long_substr(labels, prefix_only=True) if prefix and len(prefix) < len(labels[0]): labels = [l.replace(prefix, '') for l in labels] return labels
def combine(self, results, config): 
    """Combine several result sets into one box plot by grouping them on
    unique data file name parts and then collapsing each group into a
    single data set."""
    self.config = config

    # Group the result sets into the groups that will appear as new data
    # sets. Grouping is done on the file name: first the file extension
    # and the longest common prefix are removed from all the loaded file
    # names, then the first word boundary-delimited sequence of digits is
    # stripped.
    #
    # The idea is that the data files are named by a common prefix, with
    # the distinguishing attribute (for instance the configured qdisc) at
    # the end, followed by a number signifying test iteration. So for
    # instance given the filenames:
    #
    # rrul-fq_codel-01.flent.gz
    # rrul-fq_codel-02.flent.gz
    # rrul-fq_codel-03.flent.gz
    # rrul-pfifo_fast-01.flent.gz
    # rrul-pfifo_fast-02.flent.gz
    # rrul-pfifo_fast-03.flent.gz
    #
    # two new data sets will be created ('fq_codel' and 'pfifo_fast'),
    # each with three data points created from each of the data files. The
    # function used to map the data points of each result set into a
    # single data point is specified in the test config, and can be one
    # of:
    #
    # mean, median, min, max : resp value computed from all valid data points
    # span: max()-min() from all data points
    # mean_span: mean of all data points' difference from the min value
    # mean_zero: mean value with missing data points interpreted as 0 rather
    #            than being filtered out
    groups = OrderedDict()
    filenames = [res.meta('DATA_FILENAME').replace(res.SUFFIX, '')
                 for res in results]

    regexps = [re.compile(pattern) for pattern in self.filter_regexps]
    if self.filter_serial:
        regexps.append(self.serial_regex)

    if self.filter_prefix:
        common_prefix = long_substr(filenames, prefix_only=True)
        names = [fn.replace(common_prefix, "", 1) for fn in filenames]
    else:
        names = filenames

    for idx, name in enumerate(names):
        for rx in regexps:
            name = rx.sub("", name, count=1)
        groups.setdefault(name, []).append(results[idx])

    # Keep the original series (minus any explicitly filtered ones) and
    # name around for the subclasses' group() implementations.
    self.orig_series = [s for s in config['series']
                        if s['data'] not in self.filter_series]
    self.orig_name = results[0].meta('NAME')

    # Do the main combine - group() is defined by subclasses.
    new_results = self.group(groups, config)

    # We've already been applying the cutoff value on combine, make sure
    # the plotting functions don't do that also.
    config['cutoff'] = None

    return new_results
def combine(self, results, config): 
    """Combine several result sets into one box plot by grouping them on
    unique data file name parts and then collapsing each group into a
    single data set."""
    self.config = config

    # Group the result sets into the groups that will appear as new data
    # sets. Grouping is done on the file name: first the file extension
    # and the longest common prefix are removed from all the loaded file
    # names, then the first word boundary-delimited sequence of digits is
    # stripped.
    #
    # The idea is that the data files are named by a common prefix, with
    # the distinguishing attribute (for instance the configured qdisc) at
    # the end, followed by a number signifying test iteration. So for
    # instance given the filenames:
    #
    # rrul-fq_codel-01.flent.gz
    # rrul-fq_codel-02.flent.gz
    # rrul-fq_codel-03.flent.gz
    # rrul-pfifo_fast-01.flent.gz
    # rrul-pfifo_fast-02.flent.gz
    # rrul-pfifo_fast-03.flent.gz
    #
    # two new data sets will be created ('fq_codel' and 'pfifo_fast'),
    # each with three data points created from each of the data files. The
    # function used to map the data points of each result set into a
    # single data point is specified in the test config, and can be one
    # of:
    #
    # mean, median, min, max : resp value computed from all valid data points
    # span: max()-min() from all data points
    # mean_span: mean of all data points' difference from the min value
    # mean_zero: mean value with missing data points interpreted as 0 rather
    #            than being filtered out
    groups = OrderedDict()
    filenames = [res.meta('DATA_FILENAME').replace(res.SUFFIX, '')
                 for res in results]

    regexps = [re.compile(pattern) for pattern in self.filter_regexps]
    if self.filter_serial:
        regexps.append(self.serial_regex)

    if self.filter_prefix:
        common_prefix = long_substr(filenames, prefix_only=True)
        names = [fn.replace(common_prefix, "", 1) for fn in filenames]
    else:
        names = filenames

    for idx, name in enumerate(names):
        for rx in regexps:
            name = rx.sub("", name, count=1)
        groups.setdefault(name, []).append(results[idx])

    # Keep the original series and name around for the subclasses' group()
    # implementations.
    self.orig_series = config['series']
    self.orig_name = results[0].meta('NAME')

    # Do the main combine - group() is defined by subclasses.
    new_results = self.group(groups, config)

    # We've already been applying the cutoff value on combine, make sure
    # the plotting functions don't do that also.
    config['cutoff'] = None

    return new_results