Example #1
    def parseSunspiderOutput(self, test):
        _subtests = {}
        data = test.measurements['sunspider']
        for page_cycle in data:
            for sub, replicates in page_cycle[0].items():
                # for each page cycle, build a list of subtests and append all related replicates
                if sub not in _subtests:
                    # subtest not added yet, first pagecycle, so add new one
                    _subtests[sub] = {
                        'unit': test.subtest_unit,
                        'alertThreshold': float(test.alert_threshold),
                        'lowerIsBetter': test.subtest_lower_is_better,
                        'name': sub,
                        'replicates': []
                    }
                _subtests[sub]['replicates'].extend(
                    [round(x, 3) for x in replicates])

        subtests = []
        vals = []

        names = sorted(_subtests, reverse=True)
        for name in names:
            _subtests[name]['value'] = filter.mean(
                _subtests[name]['replicates'])
            subtests.append(_subtests[name])

            vals.append([_subtests[name]['value'], name])

        return subtests, vals
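A minimal, self-contained sketch of the aggregation above, assuming the method is reachable as a plain function (None stands in for self) and substituting a tiny stand-in for the Talos filter module; the test object's shape is inferred from the attribute accesses in the code, and all numbers are invented:

from types import SimpleNamespace


class filter:  # stand-in for the Talos filter module (assumption)
    @staticmethod
    def mean(values):
        return sum(values) / len(values)


test = SimpleNamespace(
    subtest_unit='ms',
    alert_threshold='2.0',
    subtest_lower_is_better=True,
    measurements={'sunspider': [
        [{'3d-cube': [41.234, 40.912]}],  # page cycle 1
        [{'3d-cube': [40.701, 41.055]}],  # page cycle 2
    ]},
)

subtests, vals = parseSunspiderOutput(None, test)
print(subtests[0]['replicates'])  # [41.234, 40.912, 40.701, 41.055]
print(vals)                       # [[40.9755, '3d-cube']]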
Example #2
def parseGraphResultsByDate(data, start, end):
    low = sys.maxsize
    high = 0
    count = 0
    runs = data['test_runs']
    vals = []
    dataid = 4  # 3 for average, 4 for geomean
    for run in runs:
        if start <= run[2] <= end:
            vals.append(run[dataid])
            if run[dataid] < low:
                low = run[dataid]
            if run[dataid] > high:
                high = run[dataid]
            count += 1

    average = 0
    geomean = 0
    if count > 0:
        average = filter.mean(vals)
        geomean = filter.geometric_mean(vals)
    return {
        'low': low,
        'high': high,
        'avg': average,
        'geomean': geomean,
        'count': count,
        'data': vals
    }
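A hedged usage sketch: data mimics the graphserver payload the function expects, where each run carries a timestamp at index 2 and the geometric mean at index 4; the run contents and the filter stand-in are assumptions for illustration:

import math


class filter:  # stand-in for the Talos filter module (assumption)
    @staticmethod
    def mean(values):
        return sum(values) / len(values)

    @staticmethod
    def geometric_mean(values):
        return math.exp(sum(math.log(v) for v in values) / len(values))


data = {'test_runs': [
    [101, None, 1000, 5.0, 4.8],  # inside the [500, 2500] window
    [102, None, 2000, 6.0, 5.9],  # inside
    [103, None, 9000, 7.0, 6.7],  # outside the window, ignored
]}
stats = parseGraphResultsByDate(data, start=500, end=2500)
print(stats['count'], stats['low'], stats['high'])  # 2 4.8 5.9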
Example #3
def parseGraphResultsByChangeset(data, changeset):
    low = sys.maxsize
    high = 0
    count = 0
    runs = data['test_runs']
    vals = []
    dataid = 7  # 3 for average, 7 for geomean
    for run in runs:
        push = run[1]
        cset = push[2]
        if cset == changeset:
            vals.append(run[dataid])
            if run[dataid] < low:
                low = run[dataid]
            if run[dataid] > high:
                high = run[dataid]
            count += 1

    average = 0
    geomean = 0
    if count > 0:
        average = filter.mean(vals)
        geomean = filter.geometric_mean(vals)
    return {
        'low': low,
        'high': high,
        'avg': average,
        'geomean': geomean,
        'count': count,
        'data': vals
    }
Example #4
 def construct_summary(self, vals, testname):
     if testname.startswith('raptor-v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('raptor-kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('raptor-jetstream'):
         return self.benchmark_score(vals)
     elif testname.startswith('raptor-speedometer'):
         return self.speedometer_score(vals)
     elif testname.startswith('raptor-stylebench'):
         return self.stylebench_score(vals)
     elif testname.startswith('raptor-sunspider'):
         return self.sunspider_score(vals)
     elif testname.startswith('raptor-unity-webgl'):
         return self.unity_webgl_score(vals)
     elif testname.startswith('raptor-webaudio'):
         return self.webaudio_score(vals)
     elif testname.startswith('raptor-assorted-dom'):
         return self.assorted_dom_score(vals)
     elif testname.startswith('raptor-wasm-misc'):
         return self.wasm_misc_score(vals)
     elif len(vals) > 1:
         return round(filter.geometric_mean([i for i, j in vals]), 2)
     else:
         return round(filter.mean([i for i, j in vals]), 2)
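The last two branches are the generic fallback: with more than one (value, page) pair the suite score is the geometric mean of the values, otherwise the plain mean. A worked check of that branch, with invented numbers:

import math

vals = [(250.0, 'page1'), (310.0, 'page2'), (275.0, 'page3')]
values = [v for v, _ in vals]
geomean = math.exp(sum(math.log(v) for v in values) / len(values))
print(round(geomean, 2))  # ~277.26, what the len(vals) > 1 branch would return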
Example #5
def meanFilter():
    global photoPath
    global meanKernelSize
    Mean = myfilter.mean(photoPath, meanKernelSize)
    if Mean is None:
        return
    # display the result
    plt.plot([])
    plt.imshow(cv2.cvtColor(Mean, cv2.COLOR_BGR2RGB))
    plt.show()
    return
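A plausible sketch of the myfilter.mean helper the function relies on: read the image and apply an OpenCV box (mean) blur. The signature and the None-on-failure contract are assumptions inferred from the caller above:

import cv2


def mean(photo_path, kernel_size):
    # Hypothetical myfilter.mean: box-filter the image at photo_path.
    img = cv2.imread(photo_path)
    if img is None:  # unreadable path; the caller checks for None and bails
        return None
    return cv2.blur(img, (kernel_size, kernel_size))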
Example #6
 def construct_results(self, vals, testname):
     if 'responsiveness' in testname:
         return self.responsiveness_Metric([val for (val, page) in vals])
     elif testname.startswith('v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('tcanvasmark'):
         return self.CanvasMark_Metric(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
Example #7
 def construct_results(self, vals, testname):
     if 'responsiveness' in testname:
         return filter.responsiveness_Metric([val for (val, page) in vals])
     elif testname.startswith('v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('tcanvasmark'):
         return self.CanvasMark_Metric(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
Example #8
 def construct_results(self, vals, testname):
     if 'responsiveness' in testname:
         return filter.responsiveness_Metric([val for (val, page) in vals])
     elif testname.startswith('v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('speedometer'):
         return self.speedometer_score(vals)
     elif testname.startswith('stylebench'):
         return self.stylebench_score(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
Example #9
 def construct_results(self, vals, testname):
     if testname.startswith('raptor-v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('raptor-kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('raptor-jetstream'):
         return self.benchmark_score(vals)
     elif testname.startswith('raptor-speedometer'):
         return self.speedometer_score(vals)
     elif testname.startswith('raptor-stylebench'):
         return self.stylebench_score(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
Example #10
def parseGraphResultsByDate(data, start, end):
    low = sys.maxsize
    high = 0
    count = 0
    runs = data["test_runs"]
    vals = []
    dataid = 4  # 3 for average, 4 for geomean
    for run in runs:
        if start <= run[2] <= end:
            vals.append(run[dataid])
            if run[dataid] < low:
                low = run[dataid]
            if run[dataid] > high:
                high = run[dataid]
            count += 1

    average = 0
    geomean = 0
    if count > 0:
        average = filter.mean(vals)
        geomean = filter.geometric_mean(vals)
    return {"low": low, "high": high, "avg": average, "geomean": geomean, "count": count, "data": vals}
Example #11
def parseGraphResultsByChangeset(data, changeset):
    low = sys.maxsize
    high = 0
    count = 0
    runs = data["test_runs"]
    vals = []
    dataid = 7  # 3 for average, 7 for geomean
    for run in runs:
        push = run[1]
        cset = push[2]
        if cset == changeset:
            vals.append(run[dataid])
            if run[dataid] < low:
                low = run[dataid]
            if run[dataid] > high:
                high = run[dataid]
            count += 1

    average = 0
    geomean = 0
    if count > 0:
        average = filter.mean(vals)
        geomean = filter.geometric_mean(vals)
    return {"low": low, "high": high, "avg": average, "geomean": geomean, "count": count, "data": vals}
Example #12
 def ares6_score(cls, val_list):
     """
     ares6_score: reported as 'geomean'
     """
     results = [i for i, j in val_list if j == 'geomean']
     return filter.mean(results)
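Illustrative input, assuming val_list holds (value, label) pairs from the benchmark parser; only the entries labelled 'geomean' contribute to the score:

val_list = [(101.3, 'geomean'), (98.7, 'geomean'), (250.0, 'firstIteration')]
# ares6_score(val_list) -> mean of [101.3, 98.7] == 100.0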
Example #13
    def __call__(self):
        # platform
        machine = self.test_machine()

        # build information
        browser_config = self.results.browser_config

        test_results = []

        for test in self.results.results:
            test_result = {
                'test_machine': {},
                'testrun': {},
                'results': {},
                'talos_counters': {},
                'test_build': {}
            }

            test_result['testrun']['suite'] = test.name()
            test_result['testrun']['options'] = self.run_options(test)
            test_result['testrun']['date'] = self.results.date

            # serialize test results
            results = {}
            tsresult = None
            summary = {"suite": 0, "subtests": {}}
            if not test.using_xperf:
                vals = []

                # TODO: counters!!!! we don't have any, but they suffer the same
                for result in test.results:
                    # XXX this will not work for manifests which list
                    # the same page name twice. It also ignores cycles
                    for page, val in result.raw_values():
                        if page == 'NULL':
                            results.setdefault(test.name(), []).extend(val)
                            if tsresult is None:
                                tsresult = r = TalosResults.Results()
                                r.results = [{'index': 0, 'page': test.name(),
                                              'runs': val}]
                            else:
                                r = tsresult.results[0]
                                if r['page'] == test.name():
                                    r['runs'].extend(val)
                        else:
                            results.setdefault(page, []).extend(val)

                tresults = [tsresult] if tsresult else test.results

                for result in tresults:
                    filtered_results = \
                        result.values(test_result['testrun']['suite'],
                                      test.test_config['filters'])
                    vals.extend([[i['filtered'], j] for i, j in filtered_results])
                    for val, page in filtered_results:
                        if page == 'NULL':
                            summary['subtests'][test.name()] = val
                        else:
                            summary['subtests'][page] = val

                suite_summary = self.construct_results(vals,
                                                       testname=test.name())
                summary['suite'] = suite_summary
                test_result['summary'] = summary

                for result, values in results.items():
                    test_result['results'][result] = values

            # counters results_aux data
            for cd in test.all_counter_results:
                for name, vals in cd.items():
                    # We want to add the xperf data as talos_counters
                    # exclude counters whose values are tuples (bad for
                    # graphserver)
                    if len(vals) > 0 and isinstance(vals[0], list):
                        continue

                    # mainthread IO is a list of filenames and accesses, we do
                    # not report this as a counter
                    if 'mainthreadio' in name:
                        continue

                    if test.using_xperf:
                        test_result['talos_counters'][name] = {"mean": vals[0]}
                    else:
                        # calculate mean and max value
                        varray = []
                        counter_mean = 0
                        counter_max = 0
                        if len(vals) > 0:
                            for v in vals:
                                varray.append(float(v))
                            counter_mean = "%.2f" % filter.mean(varray)
                            counter_max = "%.2f" % max(varray)
                        test_result['talos_counters'][name] = {
                            "mean": counter_mean,
                            "max": counter_max
                        }

            if browser_config['develop'] and not browser_config['sourcestamp']:
                browser_config['sourcestamp'] = ''

            test_result['test_build'] = {
                'version': browser_config['browser_version'],
                'revision': browser_config['sourcestamp'],
                'id': browser_config['buildid'],
                'branch': browser_config['branch_name'],
                'name': browser_config['browser_name']
            }

            test_result['test_machine'] = {
                'platform': machine['platform'],
                'osversion': machine['osversion'],
                'os': machine['os'],
                'name': machine['name']
            }

            test_results.append(test_result)
        return test_results
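The counter branch near the end reduces each counter's samples to a formatted mean and max; the same arithmetic in isolation, with made-up samples:

vals = [3.0, 5.0, 4.0]
varray = [float(v) for v in vals]
print("%.2f" % (sum(varray) / len(varray)))  # mean -> 4.00
print("%.2f" % max(varray))                  # max  -> 5.00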
Example #14
    def __call__(self):
        suites = []
        test_results = {"framework": {"name": "talos"}, "suites": suites}

        for test in self.results.results:
            # serialize test results
            tsresult = None
            if not test.using_xperf:
                subtests = []
                suite = {"name": test.name(), "subtests": subtests}
                suites.append(suite)
                vals = []
                replicates = {}

                # TODO: counters!!!! we don't have any, but they suffer the
                # same
                for result in test.results:
                    # XXX this will not work for manifests which list
                    # the same page name twice. It also ignores cycles
                    for page, val in result.raw_values():
                        if page == "NULL":
                            page = test.name()
                            if tsresult is None:
                                tsresult = r = TalosResults.Results()
                                r.results = [{"index": 0, "page": test.name(), "runs": val}]
                            else:
                                r = tsresult.results[0]
                                if r["page"] == test.name():
                                    r["runs"].extend(val)
                        replicates.setdefault(page, []).extend(val)

                tresults = [tsresult] if tsresult else test.results

                for result in tresults:
                    filtered_results = result.values(suite["name"], test.test_config["filters"])
                    vals.extend([[i["value"], j] for i, j in filtered_results])
                    for val, page in filtered_results:
                        if page == "NULL":
                            # no real subtests
                            page = test.name()
                        subtest = {"name": page, "value": val["filtered"], "replicates": replicates[page]}
                        subtests.append(subtest)
                        if test.test_config.get("lower_is_better") is not None:
                            subtest["lowerIsBetter"] = test.test_config["lower_is_better"]
                        if test.test_config.get("unit"):
                            subtest["unit"] = test.test_config["unit"]

                # if there is more than one subtest, calculate a summary result
                if len(subtests) > 1:
                    suite["value"] = self.construct_results(vals, testname=test.name())
                if test.test_config.get("lower_is_better") is not None:
                    suite["lowerIsBetter"] = test.test_config["lower_is_better"]

            # counters results_aux data
            counter_subtests = []
            for cd in test.all_counter_results:
                for name, vals in cd.items():
                    # We want to add the xperf data as talos_counters
                    # exclude counters whose values are tuples (bad for
                    # graphserver)
                    if len(vals) > 0 and isinstance(vals[0], list):
                        continue

                    # mainthread IO is a list of filenames and accesses, we do
                    # not report this as a counter
                    if "mainthreadio" in name:
                        continue

                    subtest = {"name": name, "value": 0.0}
                    counter_subtests.append(subtest)

                    if test.using_xperf:
                        subtest["value"] = vals[0]
                    else:
                        # calculate mean value
                        if len(vals) > 0:
                            varray = [float(v) for v in vals]
                            subtest["value"] = filter.mean(varray)
            if counter_subtests:
                suites.append({"name": test.name(), "subtests": counter_subtests})
        return test_results
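For orientation, the Perfherder-style payload built above has roughly this shape (suite, page names, and numbers are invented):

example_payload = {
    "framework": {"name": "talos"},
    "suites": [{
        "name": "tsvgx",                 # hypothetical suite
        "value": 230.4,                  # summary from construct_results
        "lowerIsBetter": True,
        "subtests": [{
            "name": "gearflowers.svg",   # hypothetical page
            "value": 120.5,
            "replicates": [119.9, 121.1],
            "unit": "ms",
        }],
    }],
}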
Example #15
 def webaudio_score(cls, val_list):
     """
     webaudio_score: self-reported as 'Geometric Mean'
     """
     results = [i for i, j in val_list if j == 'Geometric Mean']
     return filter.mean(results)
Example #16
 def benchmark_score(cls, val_list):
     """
     benchmark_score: ares6/jetstream self-reported as 'geomean'
     """
     results = [i for i, j in val_list if j == 'geomean']
     return filter.mean(results)
Example #17
    def __call__(self):
        suites = []
        test_results = {
            'framework': {
                'name': self.results.results[0].framework,
            },
            'suites': suites,
        }

        for test in self.results.results:
            # serialize test results
            tsresult = None
            if not test.using_xperf:
                subtests = []
                suite = {
                    'name': test.name(),
                    'subtests': subtests,
                }

                if self.results.extra_options:
                    suite['extraOptions'] = self.results.extra_options

                suites.append(suite)
                vals = []
                replicates = {}

                # TODO: counters!!!! we don't have any, but they suffer the
                # same
                for result in test.results:
                    # XXX this will not work for manifests which list
                    # the same page name twice. It also ignores cycles
                    for page, val in result.raw_values():
                        if page == 'NULL':
                            page = test.name()
                            if tsresult is None:
                                tsresult = r = TalosResults.Results()
                                r.results = [{
                                    'index': 0,
                                    'page': test.name(),
                                    'runs': val
                                }]
                            else:
                                r = tsresult.results[0]
                                if r['page'] == test.name():
                                    r['runs'].extend(val)
                        replicates.setdefault(page, []).extend(val)

                tresults = [tsresult] if tsresult else test.results

                for result in tresults:
                    filtered_results = \
                        result.values(suite['name'],
                                      test.test_config['filters'])
                    vals.extend([[i['value'], j] for i, j in filtered_results])
                    for val, page in filtered_results:
                        if page == 'NULL':
                            # no real subtests
                            page = test.name()
                        subtest = {
                            'name': page,
                            'value': val['filtered'],
                            'replicates': replicates[page],
                        }
                        subtests.append(subtest)
                        if test.test_config.get('lower_is_better') is not None:
                            subtest['lowerIsBetter'] = \
                                test.test_config['lower_is_better']
                        if test.test_config.get('alert_threshold') is not None:
                            subtest['alertThreshold'] = \
                                test.test_config['alert_threshold']
                        if test.test_config.get('unit'):
                            subtest['unit'] = test.test_config['unit']

                # if there is more than one subtest, calculate a summary result
                if len(subtests) > 1:
                    suite['value'] = self.construct_results(
                        vals, testname=test.name())
                if test.test_config.get('lower_is_better') is not None:
                    suite['lowerIsBetter'] = \
                        test.test_config['lower_is_better']
                if test.test_config.get('alert_threshold') is not None:
                    suite['alertThreshold'] = \
                        test.test_config['alert_threshold']

            # counters results_aux data
            counter_subtests = []
            for cd in test.all_counter_results:
                for name, vals in cd.items():
                    # We want to add the xperf data as talos_counters
                    # exclude counters whose values are tuples (bad for
                    # graphserver)
                    if len(vals) > 0 and isinstance(vals[0], list):
                        continue

                    # mainthread IO is a list of filenames and accesses, we do
                    # not report this as a counter
                    if 'mainthreadio' in name:
                        continue

                    # responsiveness has its own metric, not the mean
                    # TODO: consider doing this for all counters
                    if name == 'responsiveness':
                        subtest = {
                            'name': name,
                            'value': filter.responsiveness_Metric(vals)
                        }
                        counter_subtests.append(subtest)
                        continue

                    subtest = {
                        'name': name,
                        'value': 0.0,
                    }
                    counter_subtests.append(subtest)

                    if test.using_xperf:
                        if len(vals) > 0:
                            subtest['value'] = vals[0]
                    else:
                        # calculate mean value
                        if len(vals) > 0:
                            varray = [float(v) for v in vals]
                            subtest['value'] = filter.mean(varray)
            if counter_subtests:
                suites.append({
                    'name': test.name(),
                    'subtests': counter_subtests
                })
        return test_results
Example #18
    def __call__(self):
        # platform
        machine = self.test_machine()

        # build information
        browser_config = self.results.browser_config

        test_results = []

        for test in self.results.results:
            test_result = {
                'test_machine': {},
                'testrun': {},
                'results': {},
                'talos_counters': {},
                'test_build': {}
            }

            test_result['testrun']['suite'] = test.name()
            test_result['testrun']['options'] = self.run_options(test)
            test_result['testrun']['date'] = self.results.date

            # serialize test results
            results = {}
            tsresult = None
            summary = {"suite": 0, "subtests": {}}
            if not test.using_xperf:
                vals = []

                # TODO: counters!!!! we don't have any, but they suffer the same
                for result in test.results:
                    # XXX this will not work for manifests which list
                    # the same page name twice. It also ignores cycles
                    for page, val in result.raw_values():
                        if page == 'NULL':
                            results.setdefault(test.name(), []).extend(val)
                            if tsresult is None:
                                tsresult = r = TalosResults.Results()
                                r.results = [{
                                    'index': 0,
                                    'page': test.name(),
                                    'runs': val
                                }]
                            else:
                                r = tsresult.results[0]
                                if r['page'] == test.name():
                                    r['runs'].extend(val)
                        else:
                            results.setdefault(page, []).extend(val)

                tresults = [tsresult] if tsresult else test.results

                for result in tresults:
                    filtered_results = \
                        result.values(test_result['testrun']['suite'],
                                      test.test_config['filters'])
                    vals.extend([[i['filtered'], j]
                                 for i, j in filtered_results])
                    for val, page in filtered_results:
                        if page == 'NULL':
                            summary['subtests'][test.name()] = val
                        else:
                            summary['subtests'][page] = val

                suite_summary = self.construct_results(vals,
                                                       testname=test.name())
                summary['suite'] = suite_summary
                test_result['summary'] = summary

                for result, values in results.items():
                    test_result['results'][result] = values

            # counters results_aux data
            for cd in test.all_counter_results:
                for name, vals in cd.items():
                    # We want to add the xperf data as talos_counters
                    # exclude counters whose values are tuples (bad for
                    # graphserver)
                    if len(vals) > 0 and isinstance(vals[0], list):
                        continue

                    # mainthread IO is a list of filenames and accesses, we do
                    # not report this as a counter
                    if 'mainthreadio' in name:
                        continue

                    if test.using_xperf:
                        test_result['talos_counters'][name] = {"mean": vals[0]}
                    else:
                        # calculate mean and max value
                        varray = []
                        counter_mean = 0
                        counter_max = 0
                        if len(vals) > 0:
                            for v in vals:
                                varray.append(float(v))
                            counter_mean = "%.2f" % filter.mean(varray)
                            counter_max = "%.2f" % max(varray)
                        test_result['talos_counters'][name] = {
                            "mean": counter_mean,
                            "max": counter_max
                        }

            if browser_config['develop'] and not browser_config['sourcestamp']:
                browser_config['sourcestamp'] = ''

            test_result['test_build'] = {
                'version': browser_config['browser_version'],
                'revision': browser_config['sourcestamp'],
                'id': browser_config['buildid'],
                'branch': browser_config['branch_name'],
                'name': browser_config['browser_name']
            }

            test_result['test_machine'] = {
                'platform': machine['platform'],
                'osversion': machine['osversion'],
                'os': machine['os'],
                'name': machine['name']
            }

            test_results.append(test_result)
        return test_results
Example #19
 def wasm_godot_score(cls, val_list):
     """
     wasm_godot_score: first-interactive mean
     """
     results = [i for i, j in val_list if j == 'first-interactive']
     return filter.mean(results)
Example #20
    def __call__(self):
        suites = []
        test_results = {
            'framework': {
                'name': self.results.results[0].framework,
            },
            'suites': suites,
        }

        for test in self.results.results:
            # serialize test results
            tsresult = None
            if not test.using_xperf:
                subtests = []
                suite = {
                    'name': test.name(),
                    'subtests': subtests,
                }

                if self.results.extra_options:
                    suite['extraOptions'] = self.results.extra_options

                suites.append(suite)
                vals = []
                replicates = {}

                # TODO: counters!!!! we don't have any, but they suffer the
                # same
                for result in test.results:
                    # XXX this will not work for manifests which list
                    # the same page name twice. It also ignores cycles
                    for page, val in result.raw_values():
                        if page == 'NULL':
                            page = test.name()
                            if tsresult is None:
                                tsresult = r = TalosResults.Results()
                                r.results = [{'index': 0, 'page': test.name(),
                                              'runs': val}]
                            else:
                                r = tsresult.results[0]
                                if r['page'] == test.name():
                                    r['runs'].extend(val)
                        replicates.setdefault(page, []).extend(val)

                tresults = [tsresult] if tsresult else test.results

                for result in tresults:
                    filtered_results = \
                        result.values(suite['name'],
                                      test.test_config['filters'])
                    vals.extend([[i['value'], j] for i, j in filtered_results])
                    for val, page in filtered_results:
                        if page == 'NULL':
                            # no real subtests
                            page = test.name()
                        subtest = {
                            'name': page,
                            'value': val['filtered'],
                            'replicates': replicates[page],
                        }
                        subtests.append(subtest)
                        if test.test_config.get('lower_is_better') is not None:
                            subtest['lowerIsBetter'] = \
                                test.test_config['lower_is_better']
                        if test.test_config.get('alert_threshold') is not None:
                            subtest['alertThreshold'] = \
                                test.test_config['alert_threshold']
                        if test.test_config.get('unit'):
                            subtest['unit'] = test.test_config['unit']

                # if there is more than one subtest, calculate a summary result
                if len(subtests) > 1:
                    suite['value'] = self.construct_results(
                        vals, testname=test.name())
                if test.test_config.get('lower_is_better') is not None:
                    suite['lowerIsBetter'] = \
                        test.test_config['lower_is_better']
                if test.test_config.get('alert_threshold') is not None:
                    suite['alertThreshold'] = \
                        test.test_config['alert_threshold']

            # counters results_aux data
            counter_subtests = []
            for cd in test.all_counter_results:
                for name, vals in cd.items():
                    # We want to add the xperf data as talos_counters
                    # exclude counters whose values are tuples (bad for
                    # graphserver)
                    if len(vals) > 0 and isinstance(vals[0], list):
                        continue

                    # mainthread IO is a list of filenames and accesses, we do
                    # not report this as a counter
                    if 'mainthreadio' in name:
                        continue

                    # responsiveness has its own metric, not the mean
                    # TODO: consider doing this for all counters
                    if name == 'responsiveness':
                        subtest = {
                            'name': name,
                            'value': self.responsiveness_Metric(vals)
                        }
                        counter_subtests.append(subtest)
                        continue

                    subtest = {
                        'name': name,
                        'value': 0.0,
                    }
                    counter_subtests.append(subtest)

                    if test.using_xperf:
                        if len(vals) > 0:
                            subtest['value'] = vals[0]
                    else:
                        # calculate mean value
                        if len(vals) > 0:
                            varray = [float(v) for v in vals]
                            subtest['value'] = filter.mean(varray)
            if counter_subtests:
                suites.append({'name': test.name(),
                               'subtests': counter_subtests})
        return test_results
Example #21
 def wasm_misc_score(cls, val_list):
     """
     wasm_misc_score: self-reported as '__total__'
     """
     results = [i for i, j in val_list if j == '__total__']
     return filter.mean(results)
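Every example above funnels into a filter module's mean and geometric_mean; a minimal sketch of those two helpers under the conventional definitions (the real Talos implementations may differ in edge-case handling):

import math


def mean(values):
    # arithmetic mean, as filter.mean is used above
    return sum(values) / len(values)


def geometric_mean(values):
    # geometric mean computed via logs, as filter.geometric_mean is used above
    return math.exp(sum(math.log(v) for v in values) / len(values))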