Example 1
def rules(self, out, benchmarks, bmSuiteArgs):
     result_pattern = r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$" # pylint: disable=line-too-long
     return [
       mx_benchmark.StdOutRule(
         result_pattern,
         {
           "benchmark": "default",
           "vm": "jvmci",
           "config.name": "default",
           "metric.name": "max",
           "metric.value": ("<max>", float),
           "metric.unit": "jops",
           "metric.type": "numeric",
           "metric.score-function": "id",
           "metric.better": "higher",
           "metric.iteration": 0
         }
       ),
       mx_benchmark.StdOutRule(
         result_pattern,
         {
           "benchmark": "default",
           "vm": "jvmci",
           "config.name": "default",
           "metric.name": "critical",
           "metric.value": ("<critical>", float),
           "metric.unit": "jops",
           "metric.type": "numeric",
           "metric.score-function": "id",
           "metric.better": "higher",
           "metric.iteration": 0
         }
       )
     ]
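To see what the first rule extracts, the pattern can be run directly with Python's re module; the RUN RESULT line below is a hypothetical example shaped like the output the regex expects.

import re

result_pattern = (r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, "
                  r"hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), "
                  r"critical-jOPS = (?P<critical>[0-9]+)$")
# Hypothetical summary line in the expected format.
line = ("RUN RESULT: hbIR (max attempted) = 12000, hbIR (settled) = 11000, "
        "max-jOPS = 9000, critical-jOPS = 3000")
match = re.match(result_pattern, line)
print(float(match.group("max")), float(match.group("critical")))  # 9000.0 3000.0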
Example 2
 def rules(self, out, benchmarks, bmSuiteArgs):
     return [
       mx_benchmark.StdOutRule(
         r"====== (?P<benchmark>[a-zA-Z0-9_]+) \((?P<benchgroup>[a-zA-Z0-9_]+)\), iteration (?P<iteration>[0-9]+) completed \((?P<value>[0-9]+(.[0-9]*)?) ms\) ======",
         {
           "benchmark": ("<benchmark>", str),
           "vm": "jvmci",
           "config.name": "default",
           "metric.name": "warmup",
           "metric.value": ("<value>", float),
           "metric.unit": "ms",
           "metric.type": "numeric",
           "metric.score-function": "id",
           "metric.better": "lower",
           "metric.iteration": ("<iteration>", int),
         }
       ),
       mx_benchmark.StdOutRule(
         r"====== (?P<benchmark>[a-zA-Z0-9_]+) \((?P<benchgroup>[a-zA-Z0-9_]+)\), final iteration completed \((?P<value>[0-9]+(.[0-9]*)?) ms\) ======",
         {
           "benchmark": ("<benchmark>", str),
           "vm": "jvmci",
           "config.name": "default",
           "metric.name": "final-time",
           "metric.value": ("<value>", float),
           "metric.unit": "ms",
           "metric.type": "numeric",
           "metric.score-function": "id",
           "metric.better": "lower",
           "metric.iteration": 0,
         }
       )
     ]
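A quick way to check the warmup pattern (with the decimal point escaped as above) is to run it over a hypothetical iteration line:

import re

pattern = (r"====== (?P<benchmark>[a-zA-Z0-9_]+) \((?P<benchgroup>[a-zA-Z0-9_]+)\), "
           r"iteration (?P<iteration>[0-9]+) completed \((?P<value>[0-9]+(\.[0-9]*)?) ms\) ======")
line = "====== scrabble (functional), iteration 3 completed (1523.77 ms) ======"  # hypothetical line
m = re.search(pattern, line)
print(m.group("benchmark"), int(m.group("iteration")), float(m.group("value")))  # scrabble 3 1523.77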
Example 3
 def rules(self, out, benchmarks, bmSuiteArgs):
     runArgs = self.postprocessRunArgs(benchmarks[0], self.runArgs(bmSuiteArgs))
     if runArgs is None:
         return []
     totalIterations = int(runArgs[runArgs.index("-n") + 1])
     return [
       mx_benchmark.StdOutRule(
         r"===== " + re.escape(self.daCapoSuiteTitle()) + " (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====", # pylint: disable=line-too-long
         {
           "benchmark": ("<benchmark>", str),
           "bench-suite": self.benchSuiteName(),
           "vm": "jvmci",
           "config.name": "default",
           "config.vm-flags": self.shorten_vm_flags(self.vmArgs(bmSuiteArgs)),
           "metric.name": "final-time",
           "metric.value": ("<time>", int),
           "metric.unit": "ms",
           "metric.type": "numeric",
           "metric.score-function": "id",
           "metric.better": "lower",
           "metric.iteration": 0
         }
       ),
       mx_benchmark.StdOutRule(
         r"===== " + re.escape(self.daCapoSuiteTitle()) + " (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====", # pylint: disable=line-too-long
         {
           "benchmark": ("<benchmark>", str),
           "bench-suite": self.benchSuiteName(),
           "vm": "jvmci",
           "config.name": "default",
           "config.vm-flags": self.shorten_vm_flags(self.vmArgs(bmSuiteArgs)),
           "metric.name": "warmup",
           "metric.value": ("<time>", int),
           "metric.unit": "ms",
           "metric.type": "numeric",
           "metric.score-function": "id",
           "metric.better": "lower",
           "metric.iteration": totalIterations - 1
         }
       ),
       mx_benchmark.StdOutRule(
         r"===== " + re.escape(self.daCapoSuiteTitle()) + " (?P<benchmark>[a-zA-Z0-9_]+) completed warmup [0-9]+ in (?P<time>[0-9]+) msec =====", # pylint: disable=line-too-long
         {
           "benchmark": ("<benchmark>", str),
           "bench-suite": self.benchSuiteName(),
           "vm": "jvmci",
           "config.name": "default",
           "config.vm-flags": self.shorten_vm_flags(self.vmArgs(bmSuiteArgs)),
           "metric.name": "warmup",
           "metric.value": ("<time>", int),
           "metric.unit": "ms",
           "metric.type": "numeric",
           "metric.score-function": "id",
           "metric.better": "lower",
           "metric.iteration": ("$iteration", int)
         }
       )
     ]
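The iteration count is read straight out of the post-processed run arguments, which is what lets the second rule attribute the PASSED line to the last iteration. A minimal sketch of that lookup, with hypothetical arguments:

# Hypothetical run arguments; "-n" selects the number of iterations.
runArgs = ["-n", "10"]
totalIterations = int(runArgs[runArgs.index("-n") + 1])
assert totalIterations == 10
# The PASSED line is then reported twice: as "final-time" at iteration 0
# and as "warmup" at iteration totalIterations - 1.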
Example 4
    def rules(self, output, benchmarks, bmSuiteArgs):
        rules = []
        if benchmarks and len(benchmarks) == 1:
            # Captures output generated by -XX:+PrintNMethodStatistics
            rules.append(mx_benchmark.StdOutRule(
                r"Statistics for (?P<methods>[0-9]+) bytecoded nmethods for JVMCI:\n total in heap  = (?P<value>[0-9]+)",
                {
                    "benchmark": benchmarks[0],
                    "vm": "jvmci",
                    "metric.name": "code-size",
                    "metric.value": ("<value>", int),
                    "metric.unit": "B",
                    "metric.type": "numeric",
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                })
            )

            # Captures output generated by -XX:+CITime
            rules.append(mx_benchmark.StdOutRule(
                r"C1 {speed: (?P<value>[-+]?\d*\.\d+|\d+) bytes/s;",
                {
                    "benchmark": benchmarks[0],
                    "vm": "jvmci",
                    "metric.name": "baseline-tier-throughput",
                    "metric.value": ("<value>", float),
                    "metric.unit": "B/s",
                    "metric.type": "numeric",
                    "metric.score-function": "id",
                    "metric.better": "higher",
                    "metric.iteration": 0,
                })
            )
            rules.append(mx_benchmark.StdOutRule(
                r"(C2|JVMCI|JVMCI-native) {speed: (?P<value>[-+]?\d*\.\d+|\d+) bytes/s;",
                {
                    "benchmark": benchmarks[0],
                    "vm": "jvmci",
                    "metric.name": "top-tier-throughput",
                    "metric.value": ("<value>", float),
                    "metric.unit": "B/s",
                    "metric.type": "numeric",
                    "metric.score-function": "id",
                    "metric.better": "higher",
                    "metric.iteration": 0,
                })
            )
        return rules
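The -XX:+CITime rules can be sanity-checked against a line shaped like the compiler speed summary; the line below is hypothetical but matches the pattern the rule uses.

import re

line = "C1 {speed: 1234567.891 bytes/s;"  # hypothetical -XX:+CITime fragment
m = re.search(r"C1 {speed: (?P<value>[-+]?\d*\.\d+|\d+) bytes/s;", line)
print(float(m.group("value")))  # 1234567.891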
Example 5
 def polybenchRules(self, output, benchmarks, bmSuiteArgs):
     rules = [
         mx_benchmark.StdOutRule(
             r"\[(?P<name>.*)\] iteration ([0-9]*): (?P<value>.*) (?P<unit>.*)",
             {
                 "bench-suite": "csuite",
                 "benchmark": benchmarks[0],
                 "metric.better": "lower",
                 "metric.name": "warmup",
                 "metric.unit": ("<unit>", str),
                 "metric.value": ("<value>", float),
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.iteration": ("$iteration", int),
             }),
         PolybenchExcludeWarmupRule(
             r"\[(?P<name>.*)\] iteration (?P<iteration>[0-9]*): (?P<value>.*) (?P<unit>.*)",
             {
                 "bench-suite": "csuite",
                 "benchmark": benchmarks[0],
                 "metric.better": "lower",
                 "metric.name": "time",
                 "metric.unit": ("<unit>", str),
                 "metric.value": ("<value>", float),
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.iteration": ("<iteration>", int),
             },
             startPattern=r"::: Running :::")
     ]
     return rules
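Both rules key off the same "[name] iteration N: value unit" lines; the first tags every iteration as warmup, while PolybenchExcludeWarmupRule (defined elsewhere) only starts collecting after its startPattern. A matching check with a hypothetical line:

import re

line = "[interpreter/fibonacci] iteration 7: 12.5 ms"  # hypothetical polybench output
m = re.search(r"\[(?P<name>.*)\] iteration ([0-9]*): (?P<value>.*) (?P<unit>.*)", line)
print(m.group("name"), float(m.group("value")), m.group("unit"))  # interpreter/fibonacci 12.5 ms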
Example 6
 def rules(self, out, benchmarks, bmSuiteArgs):
     super_rules = super(ScalaDaCapoWarmupBenchmarkSuite,
                         self).rules(out, benchmarks, bmSuiteArgs)
     return super_rules + [
         mx_benchmark.StdOutRule(
             r"===== " + re.escape(self.daCapoSuiteTitle()) +
             " (?P<benchmark>[a-zA-Z0-9_]+) walltime [0-9]+ : (?P<time>[0-9]+) msec =====",  # pylint: disable=line-too-long
              {
                  "benchmark": ("<benchmark>", str),
                  "bench-suite": self.benchSuiteName(),
                  "vm": "jvmci",
                  "config.name": "default",
                  "config.vm-flags": self.shorten_vm_flags(self.vmArgs(bmSuiteArgs)),
                  "metric.name": "walltime",
                  "metric.value": ("<time>", int),
                  "metric.unit": "ms",
                  "metric.type": "numeric",
                  "metric.score-function": "id",
                  "metric.better": "lower",
                  "metric.iteration": ("$iteration", int)
              })
     ]
Example 7
 def rules(self, out, benchmarks, bmSuiteArgs):
     runArgs = self.postprocessRunArgs(benchmarks[0],
                                       self.runArgs(bmSuiteArgs))
     if runArgs is None:
         return []
     totalIterations = int(runArgs[runArgs.index("-n") + 1])
     return [
         mx_benchmark.StdOutRule(
             r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====",  # pylint: disable=line-too-long
             {
                 "benchmark": ("<benchmark>", str),
                 "metric.name": "time",
                 "metric.value": ("<time>", int),
                 "metric.unit": "ms",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.iteration": 0
             }),
         mx_benchmark.StdOutRule(
             r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====",  # pylint: disable=line-too-long
             {
                 "benchmark": ("<benchmark>", str),
                 "metric.name": "warmup",
                 "metric.value": ("<time>", int),
                 "metric.unit": "ms",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.iteration": totalIterations - 1
             }),
         mx_benchmark.StdOutRule(
             r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) completed warmup [0-9]+ in (?P<time>[0-9]+) msec =====",  # pylint: disable=line-too-long
             {
                 "benchmark": ("<benchmark>", str),
                 "metric.name": "warmup",
                 "metric.value": ("<time>", int),
                 "metric.unit": "ms",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.iteration": ("$iteration", int)
             })
     ]
Example 8
 def rules(self, out, benchmarks, bmSuiteArgs):
     return [
         mx_benchmark.StdOutRule(self.benchHigherScoreRegex(), {
             "benchmark": ("<benchmark>", str),
             "metric.name": "time",
             "metric.type": "numeric",
             "metric.value": ("<score>", float),
             "metric.score-function": "id",
             "metric.better": "lower",
             "metric.iteration": 0,
         }),
     ]
Example 9
 def rules(self, output, benchmarks, bmSuiteArgs):
     return [
         mx_benchmark.StdOutRule(
             r"^First response received in (?P<startup>\d*[.,]?\d*) ms", {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "startup",
                 "metric.value": ("<startup>", float),
                 "metric.unit": "ms",
                 "metric.better": "lower",
             })
     ]
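A hypothetical first-response line shows what the startup rule extracts:

import re

line = "First response received in 235.4 ms"  # hypothetical startup log line
m = re.match(r"^First response received in (?P<startup>\d*[.,]?\d*) ms", line)
print(float(m.group("startup")))  # 235.4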
Example 10
 def rules(self, output, benchmarks, bmSuiteArgs):
     return [
         mx_benchmark.StdOutRule(
             r"\[(?P<name>.*)\] after run: (?P<value>.*) (?P<unit>.*)", {
                 "benchmark": ("<name>", str),
                 "metric.better": "lower",
                 "metric.name": "time",
                 "metric.unit": ("<unit>", str),
                 "metric.value": ("<value>", float),
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.iteration": 0,
             })
     ]
Example 11
 def rules(self, out, benchmarks, bmSuiteArgs):
     # Example of wrk output:
     # "Requests/sec:   5453.61"
     return [
         mx_benchmark.StdOutRule(
             r"^Requests/sec:\s*(?P<throughput>\d*[.,]?\d*)$", {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "throughput",
                 "metric.value": ("<throughput>", float),
                 "metric.unit": "op/s",
                 "metric.better": "higher",
             })
     ]
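Running the pattern over the sample line from the comment confirms the capture:

import re

line = "Requests/sec:   5453.61"  # the wrk sample quoted in the comment above
m = re.match(r"^Requests/sec:\s*(?P<throughput>\d*[.,]?\d*)$", line)
print(float(m.group("throughput")))  # 5453.61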
Example 12
 def rules(self, out, benchmarks, bmSuiteArgs):
     return [
         mx_benchmark.StdOutRule(
             r'Passed:(\s+)(?P<count>[\d]+)',
             {
                 "benchmark": ("llvm-unit-tests", str),
                 # TODO: it's a borrowed metric name, a new one should be registered
                 "metric.name": "jck-passed",
                 "metric.type": "numeric",
                 "metric.value": ("<count>", int),
                 "metric.score-function": "id",
                 "metric.better": "higher",
                 "metric.unit": "#"
             }),
         mx_benchmark.StdOutRule(
             r'Failed:(\s+)(?P<count>[\d]+)',
             {
                 "benchmark": ("llvm-unit-tests", str),
                 # TODO: it's a borrowed metric name, a new one should be registered
                 "metric.name": "jck-failed",
                 "metric.type": "numeric",
                 "metric.value": ("<count>", int),
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.unit": "#"
             }),
         mx_benchmark.StdOutRule(
             r'Testing Time:(\s+)(?P<time>\d+(?:\.\d+)?)+s', {
                 "benchmark": ("llvm-unit-tests", str),
                 "metric.name": "time",
                 "metric.type": "numeric",
                 "metric.value": ("<time>", float),
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.unit": "s"
             })
     ]
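The three rules parse the summary block that lit prints at the end of a run; here is a quick check against hypothetical summary lines:

import re

summary = "Testing Time: 42.17s\n  Passed: 1532\n  Failed: 3\n"  # hypothetical lit summary
passed = re.search(r'Passed:(\s+)(?P<count>[\d]+)', summary)
failed = re.search(r'Failed:(\s+)(?P<count>[\d]+)', summary)
timing = re.search(r'Testing Time:(\s+)(?P<time>\d+(?:\.\d+)?)+s', summary)
print(int(passed.group("count")), int(failed.group("count")), float(timing.group("time")))  # 1532 3 42.17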
Example 13
 def rules(self, out, benchmarks, bmSuiteArgs):
     assert len(benchmarks) == 1
     return [
         mx_benchmark.StdOutRule(r'^Hundred thousand prime numbers in (?P<time>[0-9]+) ms\n$', {
             "bench-suite": self.name(),
             "benchmark": (benchmarks[0], str),
             "metric.name": "time",
             "metric.type": "numeric",
             "metric.unit": "ms",
             "metric.value": ("<time>", int),
             "metric.score-function": "id",
             "metric.better": "lower",
             "metric.iteration": ("$iteration", int),
         })
     ]
Example 14
 def rules(self, out, benchmarks, bmSuiteArgs):
     return [
         mx_benchmark.StdOutRule(
             r"^Score on (?P<benchmark>[a-zA-Z0-9\._]+): (?P<score>[0-9]+((,|\.)[0-9]+)?) ops/m$",  # pylint: disable=line-too-long
             {
                 "benchmark": ("<benchmark>", str),
                 "metric.name": "throughput",
                 "metric.value": ("<score>", float),
                 "metric.unit": "op/min",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "higher",
                 "metric.iteration": 0
             })
     ]
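A hypothetical score line shows what the throughput rule reports:

import re

line = "Score on compiler.compiler: 1456.21 ops/m"  # hypothetical score line
m = re.match(r"^Score on (?P<benchmark>[a-zA-Z0-9\._]+): (?P<score>[0-9]+((,|\.)[0-9]+)?) ops/m$", line)
print(m.group("benchmark"), float(m.group("score")))  # compiler.compiler 1456.21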
Example 15
 def rules(self, output, benchmarks, bmSuiteArgs):
     suite, benchmark = self.parse_suite_benchmark(bmSuiteArgs)
     return [
         mx_benchmark.StdOutRule(
             r"ops/sec = (?P<throughput>[0-9]+.[0-9]+)", {
                 "benchmark": suite + "/" + benchmark,
                 "vm": self.config_name(),
                 "metric.name": "throughput",
                 "metric.value": ("<throughput>", float),
                 "metric.unit": "ops/s",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "higher",
                 "metric.iteration": 0,
             })
     ]
Example 16
 def rules(self, out, benchmarks, bm_suite_args):
     return [
         # We collect all our measures as "warmup"s. `AveragingBenchmarkMixin.addAverageAcrossLatestResults` then
         # takes care of creating one final "memory" point which is the average of the last N points, where N is
         # obtained from `AveragingBenchmarkMixin.getExtraIterationCount`.
         mx_benchmark.StdOutRule(r"(?P<path>.*): (warmup )?iteration\[(?P<iteration>.*)\]: (?P<value>.*) MB", {
             "benchmark": ("<path>", str),
             "metric.better": "lower",
             "metric.name": "warmup",
             "metric.unit": "MB",
             "metric.value": ("<value>", float),
             "metric.type": "numeric",
             "metric.score-function": "id",
             "metric.iteration": ("<iteration>", int)
         })
     ]
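A minimal sketch of the averaging idea the comment describes (not the mixin's actual implementation): collect every reported iteration as a "warmup" point, then average the last N values into a single summary point.

# Hypothetical per-iteration values produced by the rule above.
values = [310.0, 265.0, 241.0, 240.5, 239.8]
extra_iterations = 3  # assumed result of getExtraIterationCount()
latest = values[-extra_iterations:]
memory_point = {"metric.name": "memory", "metric.value": sum(latest) / len(latest)}
print(memory_point)  # {'metric.name': 'memory', 'metric.value': ~240.43}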
Example 17
 def rules(self, out, benchmarks, bmSuiteArgs):
     arg = int(self.benchmarksIterations()[benchmarks[0]])
     return [
         mx_benchmark.StdOutRule(
             r"^(?P<benchmark>[a-zA-Z0-9\.\-]+): (?P<time>[0-9]+(\.[0-9]+)?$)",  # pylint: disable=line-too-long
             {
                 "benchmark": ("<benchmark>", str),
                 "metric.name": "time",
                 "metric.value": ("<time>", float),
                 "metric.unit": "s",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.arg": arg,
             }),
     ]
Example 18
 def rules(self, out, benchmarks, bmSuiteArgs):
     # Example of wrk2 output:
     # " 50.000%  3.24ms"
     return [
         mx_benchmark.StdOutRule(
             r"^\s*(?P<percentile>\d*[.,]?\d*)%\s+(?P<latency>\d*[.,]?\d*)ms$",
             {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "sample-time",
                 "metric.value": ("<latency>", float),
                 "metric.unit": "ms",
                 "metric.better": "lower",
                 "metric.percentile": ("<percentile>", float),
             })
     ]
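Applied to the sample line from the comment, the rule yields one datapoint per percentile row:

import re

line = " 50.000%  3.24ms"  # the wrk2 sample quoted in the comment above
m = re.match(r"^\s*(?P<percentile>\d*[.,]?\d*)%\s+(?P<latency>\d*[.,]?\d*)ms$", line)
print(float(m.group("percentile")), float(m.group("latency")))  # 50.0 3.24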
Example 19
 def rules(self, output, benchmarks, bmSuiteArgs):
     return [
         mx_benchmark.StdOutRule(
             r"The executed image size for benchmark (?P<bench_suite>[a-zA-Z0-9_\-]+):(?P<benchmark>[a-zA-Z0-9_\-]+) is (?P<value>[0-9]+) B",
             {
                 "bench-suite": ("<bench_suite>", str),
                 "benchmark": ("<benchmark>", str),
                 "vm": "svm",
                 "metric.name": "binary-size",
                 "metric.value": ("<value>", int),
                 "metric.unit": "B",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.iteration": 0,
             })
     ]
Example 20
 def rules(self, out, benchmarks, bmSuiteArgs):
     return [
         mx_benchmark.StdOutRule(
             r"^Valid run, Score is  (?P<score>[0-9]+)$",  # pylint: disable=line-too-long
             {
                 "benchmark": "default",
                 "vm": "jvmci",
                 "config.name": "default",
                 "metric.name": "throughput",
                 "metric.value": ("<score>", float),
                 "metric.unit": "bops",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "higher",
                 "metric.iteration": 0
             })
     ]
Example 21
 def rules(self, out, benchmarks, bmSuiteArgs):
     # Example of jmeter output:
     # "summary =     70 in 00:00:01 =   47.6/s Avg:    12 Min:     3 Max:   592 Err:     0 (0.00%)"
     return [
         mx_benchmark.StdOutRule(
             r"^summary \+\s+(?P<requests>[0-9]+) in (?P<hours>\d+):(?P<minutes>\d\d):(?P<seconds>\d\d) =\s+(?P<throughput>\d*[.,]?\d*)/s Avg:\s+(?P<avg>\d+) Min:\s+(?P<min>\d+) Max:\s+(?P<max>\d+) Err:\s+(?P<errors>\d+) \((?P<errpct>\d*[.,]?\d*)\%\)",  # pylint: disable=line-too-long
             {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "warmup",
                 "metric.value": ("<throughput>", float),
                 "metric.unit": "op/s",
                 "metric.better": "higher",
                 "metric.iteration": ("$iteration", int),
                 "warnings": ("<errors>", str),
             })
     ]
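Note that the pattern matches JMeter's incremental "summary +" lines, not the cumulative "summary =" line quoted in the comment. A hypothetical incremental line for checking the capture:

import re

line = "summary +     70 in 00:00:01 =   47.6/s Avg:    12 Min:     3 Max:   592 Err:     0 (0.00%)"
m = re.match(r"^summary \+\s+(?P<requests>[0-9]+) in (?P<hours>\d+):(?P<minutes>\d\d):(?P<seconds>\d\d) "
             r"=\s+(?P<throughput>\d*[.,]?\d*)/s Avg:\s+(?P<avg>\d+) Min:\s+(?P<min>\d+) "
             r"Max:\s+(?P<max>\d+) Err:\s+(?P<errors>\d+) \((?P<errpct>\d*[.,]?\d*)\%\)", line)
print(float(m.group("throughput")), int(m.group("errors")))  # 47.6 0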
Example 22
    def rules(self, output, benchmarks, bmSuiteArgs):
        class NativeImageTimeToInt(object):
            def __call__(self, *args, **kwargs):
                return int(float(args[0].replace(',', '')))

        return [
            mx_benchmark.StdOutRule(r'^\[(?P<benchmark>\S+?):[0-9]+\][ ]+\[total\]:[ ]+(?P<time>[0-9,.]+?) ms', {
                "bench-suite": self.name(),
                "benchmark": ("<benchmark>", str),
                "metric.name": "time",
                "metric.type": "numeric",
                "metric.unit": "ms",
                "metric.value": ("<time>", NativeImageTimeToInt()),
                "metric.score-function": "id",
                "metric.better": "lower",
                "metric.iteration": 0,
            })
        ]
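The NativeImageTimeToInt helper exists because the reported times may contain thousands separators; the conversion boils down to:

# Same conversion as NativeImageTimeToInt: strip separators, then truncate to an int.
raw = "1,234.56"  # hypothetical "[total]" time in ms
print(int(float(raw.replace(',', ''))))  # 1234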
Example 23
 def rules(self, output, benchmarks, bmSuiteArgs):
     if benchmarks and len(benchmarks) == 1:
         return [
             mx_benchmark.StdOutRule(
                 # r"Statistics for (?P<methods>[0-9]+) bytecoded nmethods for JVMCI:\n total in heap  = (?P<value>[0-9]+)",
                 r"Statistics for (?P<methods>[0-9]+) bytecoded nmethods for JVMCI:\n total in heap  = (?P<value>[0-9]+)",
                 {
                     "benchmark": benchmarks[0],
                     "vm": "jvmci",
                     "metric.name": "code-size",
                     "metric.value": ("<value>", int),
                     "metric.unit": "B",
                     "metric.type": "numeric",
                     "metric.score-function": "id",
                     "metric.better": "lower",
                     "metric.iteration": 0,
                 })
         ]
     return []
Example 24
 def rules(self, out, benchmarks, bmSuiteArgs):
     benchmark = self.benchmarksType() + "." + benchmarks[0][0]
     arg = benchmarks[0][1]
     return [
         mx_benchmark.StdOutRule(
             r"^(?P<benchmark>[a-zA-Z0-9\.\-]+): (?P<time>[0-9]+(\.[0-9]+)?$)",  # pylint: disable=line-too-long
             {
                 "benchmark": "".join(benchmark),
                 "metric.name": "time",
                 "peak": ("<time>", float),
                 "python.params": "".join(arg),
                 "metric.value": ("<time>", float),
                 "metric.unit": "s",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.arg": " ".join(arg),
             }),
     ]
Example 25
 def rules(self, out, benchmarks, bmSuiteArgs):
     return [
         WasmJMHJsonRule(mx_benchmark.JMHBenchmarkSuiteBase.jmh_result_file, self.benchSuiteName(bmSuiteArgs)),
         mx_benchmark.StdOutRule(
             r"Iteration (?P<iteration>[0-9]+), result = -?[0-9]+, sec = ([0-9]+\.[0-9]+), ops / sec = (?P<value>([0-9]+\.[0-9]+))", # pylint: disable=line-too-long
             {
                 "benchmark": self.getBenchmarkName(bmSuiteArgs),
                 "bench-suite": self.benchSuiteName(bmSuiteArgs),
                 "vm": self.name(),
                 "config.name": "default",
                 "metric.name": "throughput",
                 "metric.value": ("<value>", float),
                 "metric.unit": "op/s",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "higher",
                 "metric.iteration": ("<iteration>", int)
             }
         )
     ]
Example 26
 def rules(self, out, benchmarks, bmSuiteArgs):
     suite_name = self.name()
     if benchmarks and len(benchmarks) == 1:
         suite_name = suite_name + "-single"
     return [
         mx_benchmark.StdOutRule(
             r"^Score on (?P<benchmark>[a-zA-Z0-9\._]+): (?P<score>[0-9]+((,|\.)[0-9]+)?) ops/m$",  # pylint: disable=line-too-long
             {
                 "benchmark": ("<benchmark>", str),
                 "bench-suite": suite_name,
                 "vm": "jvmci",
                 "config.name": "default",
                 "metric.name": "throughput",
                 "metric.value": ("<score>", float),
                 "metric.unit": "op/min",
                 "metric.type": "numeric",
                 "metric.score-function": "id",
                 "metric.better": "higher",
                 "metric.iteration": 0
             })
     ]
Example 27
 def rules(self, out, benchmarks, bmSuiteArgs):
     return [
         SulongBenchmarkRule(
             r'^first [\d]+ warmup iterations (?P<benchmark>[\S]+):(?P<line>([ ,]+(?:\d+(?:\.\d+)?))+)',
             {
                 "benchmark": ("<benchmark>", str),
                 "metric.name": "warmup",
                 "metric.type": "numeric",
                 "metric.value": ("<score>", int),
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.unit": "us",
                 "metric.iteration": ("<iteration>", int),
             }),
         SulongBenchmarkRule(
             r'^last [\d]+ iterations (?P<benchmark>[\S]+):(?P<line>([ ,]+(?:\d+(?:\.\d+)?))+)',
             {
                 "benchmark": ("<benchmark>", str),
                 "metric.name": "time",
                 "metric.type": "numeric",
                 "metric.value": ("<score>", int),
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.unit": "us",
                 "metric.iteration": ("<iteration>", int),
             }),
         mx_benchmark.StdOutRule(
             r'^Pure-startup \(microseconds\) (?P<benchmark>[\S]+): (?P<score>\d+)',
             {
                 "benchmark": ("<benchmark>", str),
                 "metric.name": "pure-startup",
                 "metric.type": "numeric",
                 "metric.value": ("<score>", int),
                 "metric.score-function": "id",
                 "metric.better": "lower",
                 "metric.unit": "us",
                 "metric.iteration": ("0", int),
             }),
     ]
Example 28
    def rules(self, output, benchmarks, bmSuiteArgs):
        class NativeImageTimeToInt(object):
            def __call__(self, *args, **kwargs):
                return int(float(args[0].replace(',', '')))

        class NativeImageHexToInt(object):
            def __call__(self, *args, **kwargs):
                return int(args[0], 16)

        return [
            mx_benchmark.StdOutRule(
                r"The executed image size for benchmark (?P<bench_suite>[a-zA-Z0-9_\-]+):(?P<benchmark>[a-zA-Z0-9_\-]+) is (?P<value>[0-9]+) B",
                {
                    "bench-suite": ("<bench_suite>", str),
                    "benchmark": ("<benchmark>", str),
                    "vm": "svm",
                    "metric.name": "binary-size",
                    "metric.value": ("<value>", int),
                    "metric.unit": "B",
                    "metric.type": "numeric",
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                }),
            mx_benchmark.StdOutRule(
                r"The (?P<type>[a-zA-Z0-9_\-]+) configuration size for benchmark (?P<bench_suite>[a-zA-Z0-9_\-]+):(?P<benchmark>[a-zA-Z0-9_\-]+) is (?P<value>[0-9]+) B",
                {
                    "bench-suite": ("<bench_suite>", str),
                    "benchmark": ("<benchmark>", str),
                    "vm": "svm",
                    "metric.name": "config-size",
                    "metric.value": ("<value>", int),
                    "metric.unit": "B",
                    "metric.type": "numeric",
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                    "metric.object": ("<type>", str)
                }),
            mx_benchmark.StdOutRule(
                r'^\[\S+:[0-9]+\][ ]+\[total\]:[ ]+(?P<time>[0-9,.]+?) ms', {
                    "benchmark": benchmarks[0],
                    "metric.name": "compile-time",
                    "metric.type": "numeric",
                    "metric.unit": "ms",
                    "metric.value": ("<time>", NativeImageTimeToInt()),
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                    "metric.object": "total",
                }),
            mx_benchmark.StdOutRule(
                r'^\[\S+:[0-9]+\][ ]+(?P<phase>\w+?):[ ]+(?P<time>[0-9,.]+?) ms',
                {
                    "benchmark": benchmarks[0],
                    "metric.name": "compile-time",
                    "metric.type": "numeric",
                    "metric.unit": "ms",
                    "metric.value": ("<time>", NativeImageTimeToInt()),
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                    "metric.object": ("<phase>", str),
                }),
            mx_benchmark.StdOutRule(
                r'^[ ]*[0-9]+[ ]+.(?P<section>[a-zA-Z0-9._-]+?)[ ]+(?P<size>[0-9a-f]+?)[ ]+',
                {
                    "benchmark": benchmarks[0],
                    "metric.name": "binary-section-size",
                    "metric.type": "numeric",
                    "metric.unit": "B",
                    "metric.value": ("<size>", NativeImageHexToInt()),
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                    "metric.object": ("<section>", str),
                }),
            mx_benchmark.JsonStdOutFileRule(
                r'^# Printing analysis results stats to: (?P<path>\S+?)$',
                'path', {
                    "benchmark": benchmarks[0],
                    "metric.name": "analysis-stats",
                    "metric.type": "numeric",
                    "metric.unit": "#",
                    "metric.value": ("<total_reachable_types>", int),
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                    "metric.object": "reachable-types",
                }, ['total_reachable_types']),
            mx_benchmark.JsonStdOutFileRule(
                r'^# Printing analysis results stats to: (?P<path>\S+?)$',
                'path', {
                    "benchmark": benchmarks[0],
                    "metric.name": "analysis-stats",
                    "metric.type": "numeric",
                    "metric.unit": "#",
                    "metric.value": ("<total_reachable_methods>", int),
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                    "metric.object": "reachable-methods",
                }, ['total_reachable_methods']),
            mx_benchmark.JsonStdOutFileRule(
                r'^# Printing analysis results stats to: (?P<path>\S+?)$',
                'path', {
                    "benchmark": benchmarks[0],
                    "metric.name": "analysis-stats",
                    "metric.type": "numeric",
                    "metric.unit": "#",
                    "metric.value": ("<total_reachable_fields>", int),
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                    "metric.object": "reachable-fields",
                }, ['total_reachable_fields']),
            mx_benchmark.JsonStdOutFileRule(
                r'^# Printing analysis results stats to: (?P<path>\S+?)$',
                'path', {
                    "benchmark": benchmarks[0],
                    "metric.name": "analysis-stats",
                    "metric.type": "numeric",
                    "metric.unit": "B",
                    "metric.value": ("<total_memory_bytes>", int),
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": 0,
                    "metric.object": "memory"
                }, ['total_memory_bytes'])
        ]
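The two converter helpers can be checked in isolation; NativeImageHexToInt handles the hexadecimal sizes printed in the binary-section listing:

# Same conversions as the helper classes above.
print(int(float("12,345.6".replace(',', ''))))  # NativeImageTimeToInt -> 12345
print(int("c0ffee", 16))                        # NativeImageHexToInt  -> 12648430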
Example 29
 def rules(self, out, benchmarks, bmSuiteArgs):
     # Example of wrk output:
     # "Requests/sec:   5453.61"
     return [
         mx_benchmark.StdOutRule(
             r"^startup-throughput Requests/sec:\s*(?P<throughput>\d*[.,]?\d*)\s*$",
             {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "startup-throughput",
                 "metric.value": ("<throughput>", float),
                 "metric.unit": "op/s",
                 "metric.better": "higher",
             }),
         mx_benchmark.StdOutRule(
             r"^peak-throughput Requests/sec:\s*(?P<throughput>\d*[.,]?\d*)\s*$",
             {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "peak-throughput",
                 "metric.value": ("<throughput>", float),
                 "metric.unit": "op/s",
                 "metric.better": "higher",
             }),
         mx_benchmark.StdOutRule(
             r"^throughput-for-peak-latency Requests/sec:\s*(?P<throughput>\d*[.,]?\d*)\s*$",
             {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "throughput-for-peak-latency",
                 "metric.value": ("<throughput>", float),
                 "metric.unit": "op/s",
                 "metric.better": "higher",
             }),
         mx_benchmark.StdOutRule(
             r"^startup-latency-co\s+(?P<percentile>\d*[.,]?\d*)%\s+(?P<latency>\d*[.,]?\d*)(?P<unit>ms)\s*$",
             {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "startup-latency-co",
                 "metric.value": ("<latency>", float),
                 "metric.unit": ("ms", str),
                 "metric.better": "lower",
                 "metric.percentile": ("<percentile>", float),
             }),
         mx_benchmark.StdOutRule(
             r"^peak-latency-co\s+(?P<percentile>\d*[.,]?\d*)%\s+(?P<latency>\d*[.,]?\d*)(?P<unit>ms)\s*$",
             {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "peak-latency-co",
                 "metric.value": ("<latency>", float),
                 "metric.unit": ("ms", str),
                 "metric.better": "lower",
                 "metric.percentile": ("<percentile>", float),
             }),
         mx_benchmark.StdOutRule(
             r"^peak-latency\s+(?P<percentile>\d*[.,]?\d*)%\s+(?P<latency>\d*[.,]?\d*)(?P<unit>ms)\s*$",
             {
                 "benchmark": benchmarks[0],
                 "bench-suite": self.benchSuiteName(),
                 "metric.name": "peak-latency",
                 "metric.value": ("<latency>", float),
                 "metric.unit": ("ms", str),
                 "metric.better": "lower",
                 "metric.percentile": ("<percentile>", float),
             })
     ] + super(BaseWrkBenchmarkSuite, self).rules(out, benchmarks,
                                                  bmSuiteArgs)