Example #1
    def analyze_1_test(self, test, antagonists):
        # analyze results of one benchmark, dispatching on the CGI args:
        # two-way antagonist comparison on one platform, all metrics on
        # one platform, or one metric across all platforms
        self.passthru.append('test=%s' % test)
        metric = self.cgiform.getvalue('metric', '')
        if metric:
            self.passthru.append('metric=%s' % metric)
        else:
            # default to the benchmark's main metric
            metric = perf.benchmark_main_metric(test)
            assert metric, "no default metric for test %s" % test
        self.kernels, self.test_tag = self.jobs_selector(
            test, self.job_table, self.kernel_dates)
        self.collect_all_testruns(self.kernels, test + self.test_tag)
        if not self.platforms_filter and (metric == '*'
                                          or antagonists is not None):
            # choose default platform
            self.platforms_filter = all_tested_platforms(self.test_runs)[0:1]
            self.passthru.append('platforms=%s' %
                                 ','.join(self.platforms_filter))
        if antagonists is not None:
            antagonists = antagonists.split(',')
            if len(antagonists) == 1 and antagonists != ['*']:
                # a single named antagonist is compared in absolute terms
                self.relative = False
            self.analyze_twoway_antagonists_1_test_1_platform(
                test, metric, self.platforms_filter[0], antagonists)
        elif metric == '*':
            platform = self.platforms_filter[0]
            self.analyze_all_metrics_1_platform(test, platform)
        else:
            self.analyze_1_metric_all_platforms(test, metric)
Example #2
    def analyze_variants_all_tests_1_platform(self, platform, vary):
        # generate one graph image for results of all benchmarks
        # on one platform and one kernel, comparing effects of
        # two or more combos of kernel options (test run attributes)
        #   (numa_fake,stale_page,kswapd_merge,sched_idle, etc)
        kernel = self.cgiform.getvalue('kernel', 'some_kernel')
        self.passthru.append('kernel=%s' % kernel)

        # two or more vary_groups, one for each plotted line;
        # each group begins with vary= and ends at the next '&',
        # and holds a comma-separated list of test attribute key=val pairs,
        #    e.g.  vary=keyval1,keyval2&vary=keyval3,keyval4
        vary_groups = [
            dict(pair.split('=', 1) for pair in vary_group.split(','))
            for vary_group in vary
        ]
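        # e.g. vary == ['numa_fake=1,kswapd_merge=0', 'numa_fake=0'] would
        # parse to [{'numa_fake': '1', 'kswapd_merge': '0'}, {'numa_fake': '0'}]
        # (hypothetical values, shown only to illustrate the parsed shape)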

        test = self.benchmarks[0]  # pick any test in all jobs
        kernels, test_tag = self.jobs_selector(test, self.job_table,
                                               self.kernel_dates)

        linekeys = {}
        plot_data = {}
        baselines = {}
        for i, vary_group in enumerate(vary_groups):
            group_attributes = self.test_attributes.copy()
            group_attributes.update(vary_group)
            linekey = ','.join('%s=%s' % (attr, vary_group[attr])
                               for attr in vary_group)
            linekeys[i] = linekey
            data = {}
            for benchmark in self.benchmarks:
                metric = perf.benchmark_main_metric(benchmark)
                runs = collect_testruns(self.job_table[kernel],
                                        benchmark + test_tag, group_attributes,
                                        self.platforms_filter, 'by_hosts'
                                        in self.toggles, self.no_antag)
                vals = []
                for testrunx in runs[platform]:
                    vals += perf.get_metric_at_point([testrunx], metric)
                if vals:
                    if benchmark not in baselines:
                        # the first vary group yielding data sets the baseline
                        baselines[benchmark], stddev = plotgraph.avg_dev(vals)
                    # scale to the baseline so each line plots relative change
                    vals = [val / baselines[benchmark] for val in vals]
                    data[benchmark] = vals
            plot_data[i] = data

        title = "%s on %s" % (kernel, platform)
        for attr in self.test_attributes:
            title += ', %s=%s' % (attr, self.test_attributes[attr])
        if 'table' in self.cgiform:
            self.table_for_variants_all_tests(title,
                                              plot_data,
                                              linekeys,
                                              range(len(linekeys)),
                                              filtered_passthru=self.passthru,
                                              test_tag=test_tag)
        else:
            graph_variants_all_tests(title, plot_data, linekeys, self.size,
                                     'dark' in self.toggles)
Example #3
    def analyze_variants_all_tests_1_platform(self, platform, vary):
        # generate one graph image for results of all benchmarks
        # on one platform and one kernel, comparing effects of
        # two or more combos of kernel options (test run attributes)
        #   (numa_fake,stale_page,kswapd_merge,sched_idle, etc)
        kernel = self.cgiform.getvalue("kernel", "some_kernel")
        self.passthru.append("kernel=%s" % kernel)

        # two or more vary_groups, one for each plotted line;
        # each group begins with vary= and ends at the next '&',
        # and holds a comma-separated list of test attribute key=val pairs,
        #    e.g.  vary=keyval1,keyval2&vary=keyval3,keyval4
        vary_groups = [dict(pair.split("=", 1) for pair in vary_group.split(",")) for vary_group in vary]
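        # e.g. vary == ["numa_fake=1,kswapd_merge=0", "numa_fake=0"] would
        # parse to [{"numa_fake": "1", "kswapd_merge": "0"}, {"numa_fake": "0"}]
        # (hypothetical values, shown only to illustrate the parsed shape)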

        test = self.benchmarks[0]  # pick any test in all jobs
        kernels, test_tag = self.jobs_selector(test, self.job_table, self.kernel_dates)

        linekeys = {}
        plot_data = {}
        baselines = {}
        for i, vary_group in enumerate(vary_groups):
            group_attributes = self.test_attributes.copy()
            group_attributes.update(vary_group)
            linekey = ",".join("%s=%s" % (attr, vary_group[attr]) for attr in vary_group)
            linekeys[i] = linekey
            data = {}
            for benchmark in self.benchmarks:
                metric = perf.benchmark_main_metric(benchmark)
                runs = collect_testruns(
                    self.job_table[kernel],
                    benchmark + test_tag,
                    group_attributes,
                    self.platforms_filter,
                    "by_hosts" in self.toggles,
                    self.no_antag,
                )
                vals = []
                for testrunx in runs[platform]:
                    vals += perf.get_metric_at_point([testrunx], metric)
                if vals:
                    if benchmark not in baselines:
                        # the first vary group yielding data sets the baseline
                        baselines[benchmark], stddev = plotgraph.avg_dev(vals)
                    # scale to the baseline so each line plots relative change
                    vals = [val / baselines[benchmark] for val in vals]
                    data[benchmark] = vals
            plot_data[i] = data

        title = "%s on %s" % (kernel, platform)
        for attr in self.test_attributes:
            title += ", %s=%s" % (attr, self.test_attributes[attr])
        if "table" in self.cgiform:
            self.table_for_variants_all_tests(
                title, plot_data, linekeys, range(len(linekeys)), filtered_passthru=self.passthru, test_tag=test_tag
            )
        else:
            graph_variants_all_tests(title, plot_data, linekeys, self.size, "dark" in self.toggles)
Example #4
    def analyze_1_test(self, test, antagonists):
        # analyze results of one benchmark, dispatching on the CGI args:
        # two-way antagonist comparison on one platform, all metrics on
        # one platform, or one metric across all platforms
        self.passthru.append("test=%s" % test)
        metric = self.cgiform.getvalue("metric", "")
        if metric:
            self.passthru.append("metric=%s" % metric)
        else:
            # default to the benchmark's main metric
            metric = perf.benchmark_main_metric(test)
            assert metric, "no default metric for test %s" % test
        self.kernels, self.test_tag = self.jobs_selector(test, self.job_table, self.kernel_dates)
        self.collect_all_testruns(self.kernels, test + self.test_tag)
        if not self.platforms_filter and (metric == "*" or antagonists is not None):
            # choose default platform
            self.platforms_filter = all_tested_platforms(self.test_runs)[0:1]
            self.passthru.append("platforms=%s" % ",".join(self.platforms_filter))
        if antagonists is not None:
            antagonists = antagonists.split(",")
            if len(antagonists) == 1 and antagonists != ["*"]:
                # a single named antagonist is compared in absolute terms
                self.relative = False
            self.analyze_twoway_antagonists_1_test_1_platform(test, metric, self.platforms_filter[0], antagonists)
        elif metric == "*":
            platform = self.platforms_filter[0]
            self.analyze_all_metrics_1_platform(test, platform)
        else:
            self.analyze_1_metric_all_platforms(test, metric)