Example #1
 def construct_results(self, vals, testname):
     if 'responsiveness' in testname:
         return filter.responsiveness_Metric([val for (val, page) in vals])
     elif testname.startswith('v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('tcanvasmark'):
         return self.CanvasMark_Metric(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
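
The method receives vals as a list of (value, page) pairs and dispatches on the test name: the known benchmarks (v8_7, kraken, tcanvasmark) get their dedicated metrics, responsiveness gets its own filter, and everything else falls back to a geometric mean across pages, or a plain mean when only one page is present. A minimal sketch of that fallback path, with made-up data and test name (filter is the talos filter module):

    from talos import filter

    # Hypothetical replicates for two pages of an ordinary page-load test.
    vals = [(120.0, 'page_a.html'), (95.0, 'page_b.html')]
    testname = 'tp5n'  # not one of the special-cased benchmarks

    values = [val for val, page in vals]
    if len(values) > 1:
        summary = filter.geometric_mean(values)  # multi-page summary
    else:
        summary = filter.mean(values)            # single-page summary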
Example #2
 def construct_results(self, vals, testname):
     if 'responsiveness' in testname:
         return filter.responsiveness_Metric([val for (val, page) in vals])
     elif testname.startswith('v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('speedometer'):
         return self.speedometer_score(vals)
     elif testname.startswith('stylebench'):
         return self.stylebench_score(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
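
This revision extends the same dispatch with speedometer and stylebench scoring. As the elif chain grows, the prefix lookup could be made table-driven; a hypothetical refactor (PREFIX_METRICS is invented here, the metric methods are the ones used above):

    # Hypothetical table-driven dispatch; PREFIX_METRICS is not part of talos.
    PREFIX_METRICS = {
        'v8_7': 'v8_Metric',
        'kraken': 'JS_Metric',
        'speedometer': 'speedometer_score',
        'stylebench': 'stylebench_score',
    }

    def construct_results(self, vals, testname):
        if 'responsiveness' in testname:
            return filter.responsiveness_Metric([val for val, page in vals])
        for prefix, method in PREFIX_METRICS.items():
            if testname.startswith(prefix):
                return getattr(self, method)(vals)
        values = [val for val, page in vals]
        if len(values) > 1:
            return filter.geometric_mean(values)
        return filter.mean(values)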
Example #3
    def __call__(self):
        suites = []
        test_results = {
            'framework': {
                'name': self.results.results[0].framework,
            },
            'suites': suites,
        }

        for test in self.results.results:
            # serialize test results
            tsresult = None
            if not test.using_xperf:
                subtests = []
                suite = {
                    'name': test.name(),
                    'subtests': subtests,
                }

                if self.results.extra_options:
                    suite['extraOptions'] = self.results.extra_options

                suites.append(suite)
                vals = []
                replicates = {}

                # TODO: counters!!!! we don't have any, but they suffer the
                # same
                for result in test.results:
                    # XXX this will not work for manifests which list
                    # the same page name twice. It also ignores cycles
                    for page, val in result.raw_values():
                        if page == 'NULL':
                            page = test.name()
                            if tsresult is None:
                                tsresult = r = TalosResults.Results()
                                r.results = [{
                                    'index': 0,
                                    'page': test.name(),
                                    'runs': val
                                }]
                            else:
                                r = tsresult.results[0]
                                if r['page'] == test.name():
                                    r['runs'].extend(val)
                        replicates.setdefault(page, []).extend(val)

                tresults = [tsresult] if tsresult else test.results

                for result in tresults:
                    filtered_results = \
                        result.values(suite['name'],
                                      test.test_config['filters'])
                    vals.extend([[i['value'], j] for i, j in filtered_results])
                    for val, page in filtered_results:
                        if page == 'NULL':
                            # no real subtests
                            page = test.name()
                        subtest = {
                            'name': page,
                            'value': val['filtered'],
                            'replicates': replicates[page],
                        }
                        subtests.append(subtest)
                        if test.test_config.get('lower_is_better') is not None:
                            subtest['lowerIsBetter'] = \
                                test.test_config['lower_is_better']
                        if test.test_config.get('alert_threshold') is not None:
                            subtest['alertThreshold'] = \
                                test.test_config['alert_threshold']
                        if test.test_config.get('unit'):
                            subtest['unit'] = test.test_config['unit']

                # if there is more than one subtest, calculate a summary result
                if len(subtests) > 1:
                    suite['value'] = self.construct_results(
                        vals, testname=test.name())
                if test.test_config.get('lower_is_better') is not None:
                    suite['lowerIsBetter'] = \
                        test.test_config['lower_is_better']
                if test.test_config.get('alert_threshold') is not None:
                    suite['alertThreshold'] = \
                        test.test_config['alert_threshold']

            # counters results_aux data
            counter_subtests = []
            for cd in test.all_counter_results:
                for name, vals in cd.items():
                    # We want to add the xperf data as talos_counters
                    # exclude counters whose values are tuples (bad for
                    # graphserver)
                    if len(vals) > 0 and isinstance(vals[0], list):
                        continue

                    # mainthread IO is a list of filenames and accesses, we do
                    # not report this as a counter
                    if 'mainthreadio' in name:
                        continue

                    # responsiveness has its own metric, not the mean
                    # TODO: consider doing this for all counters
                    if 'responsiveness' in name:
                        subtest = {
                            'name': name,
                            'value': filter.responsiveness_Metric(vals)
                        }
                        counter_subtests.append(subtest)
                        continue

                    subtest = {
                        'name': name,
                        'value': 0.0,
                    }
                    counter_subtests.append(subtest)

                    if test.using_xperf:
                        if len(vals) > 0:
                            subtest['value'] = vals[0]
                    else:
                        # calculate mean value
                        if len(vals) > 0:
                            varray = [float(v) for v in vals]
                            subtest['value'] = filter.mean(varray)
            if counter_subtests:
                suites.append({
                    'name': test.name(),
                    'subtests': counter_subtests
                })
        return test_results
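
__call__ assembles a Perfherder-style payload: one suite per test with its subtests, an optional summary value computed by construct_results, and optional lowerIsBetter, alertThreshold, and unit annotations; counter results are appended as a second suite under the same test name. A hypothetical payload for a two-page test, with all values invented for illustration:

    test_results = {
        'framework': {'name': 'talos'},
        'suites': [
            {
                'name': 'tp5n',
                'value': 106.8,  # summary from construct_results
                'lowerIsBetter': True,
                'subtests': [
                    {'name': 'page_a.html', 'value': 120.0,
                     'replicates': [118.0, 120.0, 122.0]},
                    {'name': 'page_b.html', 'value': 95.0,
                     'replicates': [94.0, 95.0, 96.0]},
                ],
            },
        ],
    }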