Example #1
def construct_summary(self, vals, testname):
    # Dispatch to the benchmark-specific scoring helper based on the
    # test name prefix; `vals` is a list of (value, subtest_name) pairs.
    if testname.startswith('raptor-v8_7'):
        return self.v8_Metric(vals)
    elif testname.startswith('raptor-kraken'):
        return self.JS_Metric(vals)
    elif testname.startswith('raptor-jetstream'):
        return self.benchmark_score(vals)
    elif testname.startswith('raptor-speedometer'):
        return self.speedometer_score(vals)
    elif testname.startswith('raptor-stylebench'):
        return self.stylebench_score(vals)
    elif testname.startswith('raptor-sunspider'):
        return self.sunspider_score(vals)
    elif testname.startswith('raptor-unity-webgl'):
        return self.unity_webgl_score(vals)
    elif testname.startswith('raptor-webaudio'):
        return self.webaudio_score(vals)
    elif testname.startswith('raptor-assorted-dom'):
        return self.assorted_dom_score(vals)
    elif testname.startswith('raptor-wasm-misc'):
        return self.wasm_misc_score(vals)
    elif testname.startswith('raptor-wasm-godot'):
        return self.wasm_godot_score(vals)
    elif testname.startswith('raptor-youtube-playback'):
        return self.youtube_playback_performance_score(vals)
    elif testname.startswith('supporting_data'):
        return self.supporting_data_total(vals)
    elif len(vals) > 1:
        # No dedicated scorer: fall back to the geometric mean of all values.
        return round(filters.geometric_mean([i for i, j in vals]), 2)
    else:
        # Single value: plain arithmetic mean.
        return round(filters.mean([i for i, j in vals]), 2)
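Throughout these examples, `vals`/`val_list` is a list of `(value, subtest_name)` pairs and `filters` is Raptor's statistics helper module. Below is a minimal, self-contained sketch of the two fallback branches at the end of construct_summary, with mean and geometric_mean written out as assumed stand-ins for the real `filters` helpers:

import math

def mean(values):
    return sum(values) / len(values)

def geometric_mean(values):
    # Assumed stand-in for filters.geometric_mean: exp of the mean log.
    return math.exp(sum(math.log(v) for v in values) / len(values))

vals = [(250.0, 'subtest-a'), (40.0, 'subtest-b')]
print(round(geometric_mean([i for i, j in vals]), 2))  # 100.0
print(round(mean([i for i, j in vals]), 2))            # 145.0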
Example #2
def parseSunspiderOutput(self, test):
    _subtests = {}
    data = test.measurements['sunspider']
    for page_cycle in data:
        # For each page cycle, build up the set of subtests and append
        # all related replicates.
        for sub, replicates in page_cycle[0].items():
            if sub not in _subtests:
                # Subtest not seen yet (first page cycle), so add it.
                _subtests[sub] = {
                    'unit': test.subtest_unit,
                    'alertThreshold': float(test.alert_threshold),
                    'lowerIsBetter': test.subtest_lower_is_better,
                    'name': sub,
                    'replicates': []
                }
            _subtests[sub]['replicates'].extend(
                [round(x, 3) for x in replicates])

    subtests = []
    vals = []

    # Python 3 dict views can't be sorted in place, so use sorted().
    for name in sorted(_subtests, reverse=True):
        _subtests[name]['value'] = filters.mean(
            _subtests[name]['replicates'])
        subtests.append(_subtests[name])

        vals.append([_subtests[name]['value'], name])

    return subtests, vals
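For reference, `test.measurements['sunspider']` is a list of page cycles, and each cycle's first element maps subtest names to replicate timings. A hypothetical input and the `vals` it would produce (all names and numbers below are invented):

data = [
    [{'3d-cube': [6.1, 6.3], 'bitops': [1.2, 1.1]}],
    [{'3d-cube': [6.2, 6.0], 'bitops': [1.3, 1.2]}],
]
# After parsing, 'bitops' holds replicates [1.2, 1.1, 1.3, 1.2] and
# '3d-cube' holds [6.1, 6.3, 6.2, 6.0]; with names sorted in reverse,
# vals == [[1.2, 'bitops'], [6.15, '3d-cube']]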
Example #3
@classmethod
def youtube_playback_performance_score(cls, val_list):
    """Return the mean of the reported playback values, rounded to 2 places."""
    results = [i for i, j in val_list]
    return round(filters.mean(results), 2)
Example #4
@classmethod
def wasm_godot_score(cls, val_list):
    """
    wasm_godot_score: mean of the self-reported 'first-interactive' values
    """
    results = [i for i, j in val_list if j == 'first-interactive']
    return filters.mean(results)
Example #5
@classmethod
def wasm_misc_score(cls, val_list):
    """
    wasm_misc_score: total self-reported as '__total__'
    """
    results = [i for i, j in val_list if j == '__total__']
    return filters.mean(results)
Example #6
@classmethod
def unity_webgl_score(cls, val_list):
    """
    unity_webgl_score: score self-reported as 'Geometric Mean'
    """
    results = [i for i, j in val_list if j == 'Geometric Mean']
    return filters.mean(results)
Example #7
@classmethod
def benchmark_score(cls, val_list):
    """
    benchmark_score: ares6/jetstream score self-reported as 'geomean'
    """
    results = [i for i, j in val_list if j == 'geomean']
    return filters.mean(results)
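Examples #4 through #7 all follow one pattern: keep only the values labeled with the benchmark's self-reported aggregate, then average them. A generic sketch of that pattern (the helper name `score_by_label` is invented for illustration):

def score_by_label(val_list, label):
    # Keep the values whose label matches the benchmark's self-reported
    # aggregate, then take their arithmetic mean.
    results = [i for i, j in val_list if j == label]
    return sum(results) / len(results)

val_list = [(120.0, 'geomean'), (80.0, 'subtest-x')]
print(score_by_label(val_list, 'geomean'))  # 120.0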