def stylebench_score(cls, val_list):
    """Compute the overall StyleBench score from raw per-entry results.

    See https://bug-172968-attachments.webkit.org/attachment.cgi?id=319888

    StyleBench runs 5 tests; each test has 5 subtests (adding classes,
    removing classes, mutating attributes, adding leaf elements, removing
    leaf elements), each subtest is run sync + async and repeated 5 times,
    and every repetition reports sync, async, and their sum.  Each test
    then reports one grand total.  That is:

        5 subtests * 5 repetitions * 3 entries = 75 entries per test,
        plus 1 per-test total  -> 76 entries per test -> 380 overall.

    Only the 5 per-test totals (every 76th entry) feed the score.

    :param val_list: sequence of ``(value, page)`` pairs, 380 entries.
    :returns: the StyleBench score.
    :raises Exception: if the input does not contain exactly 380 entries.
    """
    CORRECTION_FACTOR = 3
    values = [value for value, _page in val_list]
    if len(values) != 380:
        raise Exception(
            "StyleBench requires 380 entries, found: %s instead" % len(values)
        )
    # Every 76th entry (index 75, 151, ...) is a per-test grand total.
    per_test_totals = values[75::76]
    # pylint --py3k W1619
    return 60 * 1000 / filter.geometric_mean(per_test_totals) / CORRECTION_FACTOR
def speedometer_score(cls, val_list):
    """Compute the overall Speedometer score from raw per-entry results.

    See https://bug-172968-attachments.webkit.org/attachment.cgi?id=319888

    Speedometer runs 16 tests of 9 subtests each; every test also reports
    the sum of its 9 subtest values, giving 10 entries per test and 160
    entries overall.  Only the 16 per-test sums (every 10th entry) feed
    the score.

    :param val_list: sequence of ``(value, page)`` pairs, 160 entries.
    :returns: the Speedometer score.
    :raises Exception: if the input does not contain exactly 160 entries.
    """
    CORRECTION_FACTOR = 3
    values = [value for value, _page in val_list]
    if len(values) != 160:
        raise Exception(
            "Speedometer has 160 subtests, found: %s instead" % len(values)
        )
    # Every 10th entry (index 9, 19, ...) is a per-test sum.
    per_test_sums = values[9::10]
    return 60 * 1000 / filter.geometric_mean(per_test_sums) / CORRECTION_FACTOR
def construct_results(self, vals, testname):
    """Select and apply the scoring metric appropriate for *testname*.

    Responsiveness tests get the responsiveness metric; known benchmark
    suites are dispatched by test-name prefix; anything else falls back
    to the geometric mean (multiple values) or plain mean (single value).

    :param vals: sequence of ``(value, page)`` pairs.
    :param testname: name of the test being summarized.
    :returns: the computed metric value.
    """
    if 'responsiveness' in testname:
        return filter.responsiveness_Metric([val for (val, page) in vals])
    # Prefix-ordered dispatch table for the benchmark suites.
    prefix_handlers = (
        ('v8_7', self.v8_Metric),
        ('kraken', self.JS_Metric),
        ('ares6', self.benchmark_score),
        ('jetstream', self.benchmark_score),
        ('speedometer', self.speedometer_score),
        ('stylebench', self.stylebench_score),
    )
    for prefix, handler in prefix_handlers:
        if testname.startswith(prefix):
            return handler(vals)
    bare_values = [value for value, _page in vals]
    if len(bare_values) > 1:
        return filter.geometric_mean(bare_values)
    return filter.mean(bare_values)
def construct_results(self, vals, testname):
    """Route *vals* to the metric function that matches *testname*.

    Responsiveness tests are handled first; the benchmark suites are
    matched by name prefix; otherwise the geometric mean is used when
    there is more than one value, the arithmetic mean when there is one.

    :param vals: sequence of ``(value, page)`` pairs.
    :param testname: name of the test being summarized.
    :returns: the computed metric value.
    """
    if "responsiveness" in testname:
        return filter.responsiveness_Metric([v for (v, _page) in vals])
    # Insertion order matters: prefixes are checked in this order.
    prefix_to_metric = {
        "v8_7": self.v8_Metric,
        "kraken": self.JS_Metric,
        "ares6": self.benchmark_score,
        "jetstream": self.benchmark_score,
        "speedometer": self.speedometer_score,
        "stylebench": self.stylebench_score,
    }
    for prefix, metric in prefix_to_metric.items():
        if testname.startswith(prefix):
            return metric(vals)
    plain = [v for v, _page in vals]
    return filter.geometric_mean(plain) if len(plain) > 1 else filter.mean(plain)
def v8_Metric(cls, val_list):
    """Return the v8 score: 100 times the geometric mean of the values.

    :param val_list: sequence of ``(value, page)`` pairs.
    :returns: the v8 benchmark score.
    """
    measurements = [measurement for measurement, _page in val_list]
    return 100 * filter.geometric_mean(measurements)