Example #1
 def v8_Metric(cls, val_list):
     """v8 benchmark score"""
     reference = {'Crypto': 266181.,
                  'DeltaBlue': 66118.,
                  'EarlyBoyer': 666463.,
                  'NavierStokes': 1484000.,
                  'RayTrace': 739989.,
                  'RegExp': 910985.,
                  'Richards': 35302.,
                  'Splay': 81491.
                  }
     tests = [('Crypto', ['Encrypt', 'Decrypt']),
              ('DeltaBlue', ['DeltaBlue']),
              ('EarlyBoyer', ['Earley', 'Boyer']),
              ('NavierStokes', ['NavierStokes']),
              ('RayTrace', ['RayTrace']),
              ('RegExp', ['RegExp']),
              ('Richards', ['Richards']),
              ('Splay', ['Splay'])]
     results = dict([(j, i) for i, j in val_list])
     scores = []
     utils.info("v8 benchmark")
     for test, benchmarks in tests:
         vals = [results[benchmark] for benchmark in benchmarks]
         mean = filter.geometric_mean(vals)
         score = reference[test] / mean
         scores.append(score)
         utils.info(" %s: %s", test, score * 100)
     score = 100 * filter.geometric_mean(scores)
     utils.info("Score: %s", score)
     return score
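All of the snippets on this page call into a project-local filter module that is not reproduced here. A minimal sketch of the two helpers they rely on, with names and semantics inferred from usage (an assumption, not the project's actual source), could look like:

import math

def geometric_mean(values):
    # nth root of the product, computed as a log-space average to avoid
    # overflow; assumes a non-empty sequence of strictly positive numbers
    return math.exp(sum(math.log(v) for v in values) / len(values))

def mean(values):
    # plain arithmetic mean
    return sum(values) / float(len(values))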
Example #3
def test_gauss(filename, result_dir):
    """2.3.3 Guassian noise and denoising"""
    im = Image.open(filename)

    def savewith(result, name):
        result_path = os.path.join(result_dir, name)
        result.save(result_path)
        print '[Saved] ' + result_path

    # generate gaussian noise
    mean, var = 0, 40
    noisy = add_noise(im, 'gauss', mean=mean, var=var)
    savewith(noisy, 'gauss-%d-%d.png' % (mean, var))

    # arithmetic mean filtering
    result = arithmetic_mean(noisy, (3, 3))
    savewith(result, 'gauss-arithmetic.png')

    # geometric mean filtering
    result = geometric_mean(noisy, (3, 3))
    savewith(result, 'gauss-geometric.png')

    # harmonic mean filtering
    result = harmonic_mean(noisy, (3, 3))
    savewith(result, 'gauss-harmonic.png')

    # contraharmonic mean filtering
    result = contraharmonic_mean(noisy, (3, 3), -1.5)
    savewith(result, 'gauss-contraharmonic.png')

    # median filtering
    result = median_filter(noisy, (3, 3))
    savewith(result, 'gauss-median.png')
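The mean-filter helpers called above (arithmetic_mean, geometric_mean, harmonic_mean, contraharmonic_mean) are project code and not shown. As one illustration, here is a single-window sketch of the standard contraharmonic mean formula, sum(g**(Q+1)) / sum(g**Q); the function name and the windowing logic around it are assumptions:

def contraharmonic_window(pixels, Q):
    # contraharmonic mean of one window: Q < 0 attenuates salt noise,
    # Q > 0 attenuates pepper noise; Q = 0 gives the arithmetic mean and
    # Q = -1 the harmonic mean (assumes strictly positive pixel values)
    num = sum(float(g) ** (Q + 1) for g in pixels)
    den = sum(float(g) ** Q for g in pixels)
    return num / den

The Q = -1.5 passed above therefore biases the filter toward removing salt (bright) noise.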
Example #4
def parseGraphResultsByChangeset(data, changeset):
    low = sys.maxint
    high = 0
    count = 0
    runs = data['test_runs']
    vals = []
    dataid = 7  # 3 for average, 7 for geomean
    for run in runs:
        push = run[1]
        cset = push[2]
        if cset == changeset:
            vals.append(run[dataid])
            if run[dataid] < low:
                low = run[dataid]
            if run[dataid] > high:
                high = run[dataid]
            count += 1

    average = 0
    geomean = 0
    if count > 0:
        average = filter.mean(vals)
        geomean = filter.geometric_mean(vals)
    return {
        'low': low,
        'high': high,
        'avg': average,
        'geomean': geomean,
        'count': count,
        'data': vals
    }
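A usage sketch, with the run layout inferred purely from the indexing above (run[1] is the push record, push[2] the changeset id, run[7] the geomean value) and the mock values chosen arbitrarily:

data = {'test_runs': [
    [101, [0, 0, 'abc123'], 0, 9.5, 0, 0, 0, 10.0],
    [102, [0, 0, 'abc123'], 0, 9.9, 0, 0, 0, 12.0],
]}
summary = parseGraphResultsByChangeset(data, 'abc123')
print summary['count'], summary['geomean']  # 2, then the geomean of [10.0, 12.0]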
Example #6
 def construct_summary(self, vals, testname):
     if testname.startswith('raptor-v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('raptor-kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('raptor-jetstream'):
         return self.benchmark_score(vals)
     elif testname.startswith('raptor-speedometer'):
         return self.speedometer_score(vals)
     elif testname.startswith('raptor-stylebench'):
         return self.stylebench_score(vals)
     elif testname.startswith('raptor-sunspider'):
         return self.sunspider_score(vals)
     elif testname.startswith('raptor-unity-webgl'):
         return self.unity_webgl_score(vals)
     elif testname.startswith('raptor-webaudio'):
         return self.webaudio_score(vals)
     elif testname.startswith('raptor-assorted-dom'):
         return self.assorted_dom_score(vals)
     elif testname.startswith('raptor-wasm-misc'):
         return self.wasm_misc_score(vals)
     elif len(vals) > 1:
         return round(filter.geometric_mean([i for i, j in vals]), 2)
     else:
         return round(filter.mean([i for i, j in vals]), 2)
Example #7
def parseGraphResultsByDate(data, start, end):
    low = sys.maxint
    high = 0
    count = 0
    runs = data['test_runs']
    vals = []
    dataid = 4  # 3 for average, 4 for geomean
    for run in runs:
        if run[2] >= start and run[2] <= end:
            vals.append(run[dataid])
            if run[dataid] < low:
                low = run[dataid]
            if run[dataid] > high:
                high = run[dataid]
            count += 1

    average = 0
    geomean = 0
    if count > 0:
        average = filter.mean(vals)
        geomean = filter.geometric_mean(vals)
    return {
        'low': low,
        'high': high,
        'avg': average,
        'geomean': geomean,
        'count': count,
        'data': vals
    }
Example #8
 def construct_results(self, vals, testname):
     if 'responsiveness' in testname:
         return self.responsiveness_Metric([val for (val, page) in vals])
     elif testname.startswith('v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('tcanvasmark'):
         return self.CanvasMark_Metric(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
Example #9
 def construct_results(self, vals, testname):
     if 'responsiveness' in testname:
         return filter.responsiveness_Metric([val for (val, page) in vals])
     elif testname.startswith('v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('tcanvasmark'):
         return self.CanvasMark_Metric(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
Example #10
 def construct_results(self, vals, testname):
     if 'responsiveness' in testname:
         return filter.responsiveness_Metric([val for (val, page) in vals])
     elif testname.startswith('v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('speedometer'):
         return self.speedometer_score(vals)
     elif testname.startswith('stylebench'):
         return self.stylebench_score(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
Example #11
    def speedometer_score(cls, val_list):
        """
        speedometer_score: https://bug-172968-attachments.webkit.org/attachment.cgi?id=319888
        """
        correctionFactor = 3
        results = [i for i, j in val_list]
        # speedometer has 16 tests, each of which is made up of 9 subtests
        # plus a sum of the 9 values.  We receive 160 values and want to use
        # the 16 per-test sums, not the subtest values.
        if len(results) != 160:
            raise Exception("Speedometer has 160 subtests, found: %s instead" % len(results))

        results = results[9::10]
        score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
        return score
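To see what the results[9::10] slice selects: with 10 entries per test (9 subtests plus their sum), it keeps only each test's sum entry.

entries = range(160)   # stand-in for the 160 received values
sums = entries[9::10]  # indices 9, 19, 29, ..., 159
print len(sums)        # -> 16, one sum entry per test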
Example #12
 def construct_results(self, vals, testname):
     if testname.startswith('raptor-v8_7'):
         return self.v8_Metric(vals)
     elif testname.startswith('raptor-kraken'):
         return self.JS_Metric(vals)
     elif testname.startswith('raptor-jetstream'):
         return self.benchmark_score(vals)
     elif testname.startswith('raptor-speedometer'):
         return self.speedometer_score(vals)
     elif testname.startswith('raptor-stylebench'):
         return self.stylebench_score(vals)
     elif len(vals) > 1:
         return filter.geometric_mean([i for i, j in vals])
     else:
         return filter.mean([i for i, j in vals])
Example #13
    def stylebench_score(cls, val_list):
        """
        stylebench_score: https://bug-172968-attachments.webkit.org/attachment.cgi?id=319888
        """
        correctionFactor = 3
        results = [i for i, j in val_list]
        # stylebench has 4 tests, each of which is made up of 12 subtests
        # plus a sum of the 12 values.  We receive 52 values and want to use
        # the 4 per-test sums, not the subtest values.
        if len(results) != 52:
            raise Exception("StyleBench has 52 subtests, found: %s instead" % len(results))

        results = results[12::13]
        score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
        return score
Example #14
    def stylebench_score(cls, val_list):
        """
        stylebench_score: https://bug-172968-attachments.webkit.org/attachment.cgi?id=319888
        """
        correctionFactor = 3
        results = [i for i, j in val_list]

        # stylebench has 5 tests, each of which is made up of 5 subtests
        #
        #   * Adding classes.
        #   * Removing classes.
        #   * Mutating attributes.
        #   * Adding leaf elements.
        #   * Removing leaf elements.
        #
        # which are made of two subtests each (sync/async) and repeated 5 times
        # each, thus, the list here looks like:
        #
        #   [Test name/Adding classes - 0/ Sync; <x>]
        #   [Test name/Adding classes - 0/ Async; <y>]
        #   [Test name/Adding classes - 0; <x> + <y>]
        #   [Test name/Removing classes - 0/ Sync; <x>]
        #   [Test name/Removing classes - 0/ Async; <y>]
        #   [Test name/Removing classes - 0; <x> + <y>]
        #   ...
        #   [Test name/Adding classes - 1 / Sync; <x>]
        #   [Test name/Adding classes - 1 / Async; <y>]
        #   [Test name/Adding classes - 1 ; <x> + <y>]
        #   ...
        #   [Test name/Removing leaf elements - 4; <x> + <y>]
        #   [Test name; <sum>] <- This is what we want.
        #
        # So, 5 (subtests) *
        #     5 (repetitions) *
        #     3 (entries per repetition (sync/async/sum)) =
        #     75 entries per test before the sum.
        #
        # We receive 76 entries per test, which adds up to 380. We want to use
        # the 5 per-test sum entries, not the rest.
        if len(results) != 380:
            raise Exception("StyleBench has 380 entries, found: %s instead" %
                            len(results))

        results = results[75::76]
        score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
        return score
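A quick check of the layout arithmetic described in the comment: 5 tests * 76 entries = 380, and the [75::76] slice lands exactly on each test's final (sum) entry.

print range(380)[75::76]  # -> [75, 151, 227, 303, 379]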
Example #15
def parseGraphResultsByDate(data, start, end):
    low = sys.maxint
    high = 0
    count = 0
    runs = data["test_runs"]
    vals = []
    dataid = 4  # 3 for average, 4 for geomean
    for run in runs:
        if run[2] >= start and run[2] <= end:
            vals.append(run[dataid])
            if run[dataid] < low:
                low = run[dataid]
            if run[dataid] > high:
                high = run[dataid]
            count += 1

    average = 0
    geomean = 0
    if count > 0:
        average = filter.mean(vals)
        geomean = filter.geometric_mean(vals)
    return {"low": low, "high": high, "avg": average, "geomean": geomean, "count": count, "data": vals}
Example #16
def parseGraphResultsByChangeset(data, changeset):
    low = sys.maxint
    high = 0
    count = 0
    runs = data["test_runs"]
    vals = []
    dataid = 7  # 3 for average, 7 for geomean
    for run in runs:
        push = run[1]
        cset = push[2]
        if cset == changeset:
            vals.append(run[dataid])
            if run[dataid] < low:
                low = run[dataid]
            if run[dataid] > high:
                high = run[dataid]
            count += 1

    average = 0
    geomean = 0
    if count > 0:
        average = filter.mean(vals)
        geomean = filter.geometric_mean(vals)
    return {"low": low, "high": high, "avg": average, "geomean": geomean, "count": count, "data": vals}
Example #17
 def v8_Metric(cls, val_list):
     results = [i for i, j in val_list]
     score = 100 * filter.geometric_mean(results)
     return score
Example #19
 def assorted_dom_score(cls, val_list):
     results = [i for i, j in val_list]
     return round(filter.geometric_mean(results), 2)