def construct_summary(self, vals, testname):
    """Summarize *vals* for *testname* using the matching benchmark scorer.

    Each known test-name prefix maps to a dedicated scoring method; any
    other test falls back to a geometric mean of the measured values
    (rounded to 2 decimals) when there is more than one value, or a
    plain mean otherwise.  *vals* is a sequence of (value, name) pairs.
    """
    # Ordered prefix -> scorer table; checked in declaration order.
    scorers = (
        ('raptor-v8_7', self.v8_Metric),
        ('raptor-kraken', self.JS_Metric),
        ('raptor-jetstream', self.benchmark_score),
        ('raptor-speedometer', self.speedometer_score),
        ('raptor-stylebench', self.stylebench_score),
        ('raptor-sunspider', self.sunspider_score),
        ('raptor-unity-webgl', self.unity_webgl_score),
        ('raptor-webaudio', self.webaudio_score),
        ('raptor-assorted-dom', self.assorted_dom_score),
        ('raptor-wasm-misc', self.wasm_misc_score),
        ('raptor-wasm-godot', self.wasm_godot_score),
        ('raptor-youtube-playback', self.youtube_playback_performance_score),
        ('supporting_data', self.supporting_data_total),
    )
    for prefix, scorer in scorers:
        if testname.startswith(prefix):
            return scorer(vals)
    measurements = [value for value, _ in vals]
    if len(vals) > 1:
        return round(filters.geometric_mean(measurements), 2)
    return round(filters.mean(measurements), 2)
def speedometer_score(cls, val_list):
    """Compute the Speedometer benchmark score from raw subtest values.

    Scoring formula taken from:
    https://bug-172968-attachments.webkit.org/attachment.cgi?id=319888

    *val_list* is a sequence of (value, name) pairs.  Speedometer runs
    16 tests of 9 subtests each and also reports the per-test sum, so
    we expect 160 values in total; only every 10th entry (the per-test
    sums) feeds the score.  Raises Exception on any other length.
    """
    correction_factor = 3
    measurements = [value for value, _ in val_list]
    if len(measurements) != 160:
        raise Exception("Speedometer has 160 subtests, found: %s instead"
                        % len(measurements))
    # Keep only the 16 per-test sums, dropping the 9 subtest values
    # that precede each one.
    per_test_sums = measurements[9::10]
    return 60 * 1000 / filters.geometric_mean(per_test_sums) / correction_factor
def stylebench_score(cls, val_list):
    """Compute the StyleBench benchmark score from raw subtest values.

    Scoring formula taken from:
    https://bug-172968-attachments.webkit.org/attachment.cgi?id=319888

    StyleBench runs 5 tests (Adding classes, Removing classes, Mutating
    attributes, Adding leaf elements, Removing leaf elements).  Each
    test is repeated 5 times and every repetition emits three entries
    (sync, async, and their sum), i.e. 5 * 5 * 3 = 75 entries, followed
    by one per-test total -- 76 entries per test, 380 overall.  Only
    the 5 per-test totals feed the score.  Raises Exception on any
    other length.
    """
    correction_factor = 3
    measurements = [value for value, _ in val_list]
    if len(measurements) != 380:
        raise Exception("StyleBench has 380 entries, found: %s instead"
                        % len(measurements))
    # Every 76th entry (index 75, 151, ...) is a per-test total.
    per_test_totals = measurements[75::76]
    return 60 * 1000 / filters.geometric_mean(per_test_totals) / correction_factor
def v8_Metric(cls, val_list):
    """Summarize V8 results: 100 times the geometric mean of all values.

    *val_list* is a sequence of (value, name) pairs.
    """
    measurements = [value for value, _ in val_list]
    return 100 * filters.geometric_mean(measurements)
def assorted_dom_score(cls, val_list):
    """Summarize assorted-dom results as a geometric mean, 2 decimals.

    *val_list* is a sequence of (value, name) pairs.
    """
    measurements = [value for value, _ in val_list]
    return round(filters.geometric_mean(measurements), 2)