Example #1
def getSearchPathOption(lib_args=None):
    if lib_args is None:
        lib_args = ['-lgmp', '-lgfortran']

    lib_names = []

    lib_aliases = {
        '-lc': ['libc.so.6', 'libc.dylib'],
        '-lstdc++': ['libstdc++.so.6', 'libstdc++.6.dylib'],
        '-lgmp': ['libgmp.so.10', 'libgmp.10.dylib'],
        '-lgfortran': ['libgfortran.so.3', 'libgfortran.3.dylib'],
        '-lpcre': ['libpcre.so.3', 'libpcre.dylib']
    }
    osStr = mx.get_os()
    # dict.get() returns None for unsupported platforms instead of raising KeyError
    index = {'linux': 0, 'darwin': 1}.get(osStr)
    if index is None:
        mx.log_error("{0} not supported!".format(osStr))

    for lib_arg in ['-lc', '-lstdc++'] + lib_args:
        if lib_arg in lib_aliases:
            lib_arg = lib_aliases[lib_arg][index]
        else:
            lib_arg = lib_arg[2:]
        lib_names.append(lib_arg)

    libpath = join(
        mx.project('com.oracle.truffle.llvm.test.native').getOutput(), 'bin')
    for path, _, files in os.walk(libpath):
        for f in files:
            if f.endswith('.so'):
                lib_names.append(join(path, f))

    return '-Dsulong.DynamicNativeLibraryPath=' + ':'.join(lib_names)
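A minimal sketch of how the returned option might be consumed, assuming the standard mx.run_java helper; the extra arguments are made up for illustration:

# Hypothetical usage: prepend the generated system property to a JVM launch.
vm_args = [getSearchPathOption(), '-version']
mx.run_java(vm_args)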
Example #2
def gate(args, tasks):
    with Task('Vm: Basic GraalVM Tests', tasks, tags=[VmGateTasks.graal]) as t:
        if t:
            _java = join(mx_vm.graalvm_output(), 'bin', 'java')

            _out = mx.OutputCapture()
            if mx.run([_java, '-XX:+JVMCIPrintProperties'], nonZeroIsFatal=False, out=_out, err=_out):
                mx.log_error(_out.data)
                mx.abort('The GraalVM image is not built with a JVMCI-enabled JDK, it misses `-XX:+JVMCIPrintProperties`.')

            _out = subprocess.check_output([_java, '-version'], stderr=subprocess.STDOUT)
            if args.strict_mode:
                # A full open-source build should be built with an open-source JDK
                _version_regex = _openjdk_version_regex
            else:
                # Allow Oracle JDK in non-strict mode as it is common on developer machines
                _version_regex = _anyjdk_version_regex
            match = _version_regex.match(_out)
            if match is None:
                if args.strict_mode and _anyjdk_version_regex.match(_out):
                    mx.abort("In --strict-mode, GraalVM must be built with OpenJDK")
                else:
                    mx.abort('Unexpected version string:\n{}Does not match:\n{}'.format(_out, _version_regex.pattern))
            elif match.group('graalvm_version') != _suite.release_version():
                mx.abort("Wrong GraalVM version in -version string: got '{}', expected '{}'".format(match.group('graalvm_version'), _suite.release_version()))

    if mx_vm.has_component('js'):
        with Task('Vm: Graal.js tests', tasks, tags=[VmGateTasks.graal_js]) as t:
            if t:
                pass

    gate_sulong(tasks)
    gate_ruby(tasks)
Example #3
def ideclean(args):
    """remove all IDE project configurations"""
    def rm(path):
        if exists(path):
            os.remove(path)

    for s in mx.suites() + [mx._mx_suite]:
        rm(join(s.get_mx_output_dir(), 'eclipse-config.zip'))
        rm(join(s.get_mx_output_dir(), 'netbeans-config.zip'))
        shutil.rmtree(join(s.dir, '.idea'), ignore_errors=True)

    for p in mx.projects() + mx._mx_suite.projects:
        if not p.isJavaProject():
            continue

        shutil.rmtree(join(p.dir, '.settings'), ignore_errors=True)
        shutil.rmtree(join(p.dir, '.externalToolBuilders'), ignore_errors=True)
        shutil.rmtree(join(p.dir, 'nbproject'), ignore_errors=True)
        rm(join(p.dir, '.classpath'))
        rm(join(p.dir, '.checkstyle'))
        rm(join(p.dir, '.project'))
        rm(join(p.dir, '.factorypath'))
        rm(join(p.dir, p.name + '.iml'))
        rm(join(p.dir, 'build.xml'))
        rm(join(p.dir, 'eclipse-build.xml'))
        try:
            rm(join(p.dir, p.name + '.jar'))
        except OSError:
            mx.log_error("Error removing {0}".format(p.name + '.jar'))

    for d in mx._dists.values():
        if not d.isJARDistribution():
            continue
        if d.get_ide_project_dir():
            shutil.rmtree(d.get_ide_project_dir(), ignore_errors=True)
Example #4
def printStats(self):
    passed_len = len(self.passed)
    failed_len = len(self.failed)
    failed_references_len = len(self.failed_references)
    total_len = failed_len + passed_len

    mx.log()

    if failed_references_len:
        mx.log_error('{0} compiled reference Tests failed!'.format(
            failed_references_len))
        for x, ref in enumerate(self.failed_references):
            mx.log_error('{0}) {1}'.format(x, ref))
        mx.log()

    if failed_len != 0:
        mx.log_error('Failed {0} of {1} Tests!'.format(
            failed_len, total_len))
        for x, name in enumerate(self.failed):
            mx.log_error('{0}) {1}'.format(x, name))
    elif total_len == 0:
        mx.log_error(
            'There is something odd with the testsuite, {0} Tests executed!'
            .format(total_len))
    else:
        mx.log('Passed all {0} Tests!'.format(total_len))
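printStats only reads three list attributes from its instance. A minimal sketch of the expected state, with the attribute names taken from the method body above:

class TestResults(object):
    def __init__(self):
        self.passed = []             # names of passing tests
        self.failed = []             # names of failing tests
        self.failed_references = []  # reference tests that failed to compile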
Example #5
def pullLLVMBinaries(args=None):
    """downloads the LLVM binaries"""
    toolDir = join(_toolDir, "llvm")
    mx.ensure_dir_exists(toolDir)
    osStr = mx.get_os()
    arch = mx.get_arch()
    if osStr == 'windows':
        mx.log_error('windows currently only supported with cygwin!')
        return
    elif osStr == 'linux':
        if arch == 'amd64':
            urls = [
                'https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/clang+llvm-3.2-x86_64-linux-ubuntu-12.04.tar.gz'
            ]
        else:
            urls = [
                'https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/clang+llvm-3.2-x86-linux-ubuntu-12.04.tar.gz'
            ]
    elif osStr == 'darwin':
        urls = [
            'https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/clang+llvm-3.2-x86_64-apple-darwin11.tar.gz'
        ]
    elif osStr == 'cygwin':
        urls = [
            'https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/clang+llvm-3.2-x86-mingw32-EXPERIMENTAL.tar.gz'
        ]
    else:
        mx.log_error("{0} {1} not supported!".format(osStr, arch))
        return
    localPath = pullsuite(toolDir, urls)
    tar(localPath, toolDir, stripLevels=1)
    os.remove(localPath)
Example #6
def clangformat(args=None):
    """ Runs clang-format on C/C++ files in native projects of the primary suite """
    parser = ArgumentParser(prog='mx clangformat')
    parser.add_argument(
        '--with-projects',
        action='store_true',
        help='check native projects. Defaults to true unless a path is specified.')
    parser.add_argument('paths',
                        metavar='path',
                        nargs='*',
                        help='check given paths')
    args = parser.parse_args(args)
    paths = [(p, "<cmd-line-argument>") for p in args.paths]

    if not paths or args.with_projects:
        paths += [(p.dir, p.name) for p in mx.projects(limit_to_primary=True)
                  if p.isNativeProject() and getattr(p, "clangFormat", True)]

    error = False
    for f, reason in paths:
        if not checkCFiles(f, reason):
            error = True
    if error:
        mx.log_error("found formatting errors!")
        sys.exit(-1)
Example #7
def interpreter(self):
    try:
        return subprocess.check_output("which %s" %
                                       JythonVm.JYTHON_INTERPRETER,
                                       shell=True).decode().strip()
    except (OSError, subprocess.CalledProcessError) as e:
        # with shell=True a missing jython surfaces as CalledProcessError,
        # not OSError, so catch both
        mx.log_error(e)
        mx.abort("Error when executing `which jython`!\n")
Example #8
def checkNoHttp(args=None):
    """checks that https is used instead of http in Travis and the mx script"""
    for f in httpCheckFiles:
        with open(f) as checked_file:
            # enumerate from 1 so reported line numbers match editors
            for line_number, line in enumerate(checked_file, 1):
                if "http" + chr(58) + "//" in line:
                    mx.log_error("http" + chr(58) + "// in line " + str(line_number) + " of " + str(f) + " could be a security issue! please change to https://")
                    sys.exit(-1)
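The chr(58) indirection keeps the literal URL prefix out of this script's own source, so the self-check does not flag itself:

# chr(58) is ':', so "http" + chr(58) + "//" reconstructs the forbidden
# prefix at runtime without it ever appearing verbatim in the file.
assert chr(58) == ':'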
Example #9
def checkCFiles(targetDir):
    error = False
    for path, _, files in os.walk(targetDir):
        for f in files:
            if f.endswith(('.c', '.cpp', '.h', '.hpp')):
                if not checkCFile(os.path.join(path, f)):
                    error = True
    if error:
        mx.log_error("found formatting errors!")
        sys.exit(-1)
Example #10
def filterLines(self, lines):
    data = []
    for line in lines:
        try:
            data.append(float(line))
        except ValueError:
            mx.log_error(line)
    if len(data) % 3 != 0:
        raise AssertionError("Number of values not a multiple of 3")
    return data
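filterLines expects the benchmark output to be flat triples of numbers, one value per line; runBenchmark in Example #25 below demultiplexes the result with n % 3. An illustration of the expected input shape (values are made up):

lines = ['1', '0.25', '4.0',   # iteration, elapsed seconds, ips
         '2', '0.50', '4.1',
         'optimised away']     # non-numeric lines are logged and skipped
# filterLines(lines) would return [1.0, 0.25, 4.0, 2.0, 0.5, 4.1]; the
# AssertionError fires only if the numeric count is not a multiple of 3.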
Example #11
def checkCFiles(targetDir):
    error = False
    for path, _, files in os.walk(targetDir):
        for f in files:
            if f.endswith(('.c', '.cpp', '.h')):
                if not checkCFile(os.path.join(path, f)):
                    error = True
    if error:
        mx.log_error("found formatting errors!")
        sys.exit(-1)
Example #12
def interpreter(self):
    try:
        return subprocess.check_output("which %s" %
                                       JythonVm.JYTHON_INTERPRETER,
                                       shell=True).decode().strip()
    except Exception as e:  # pylint: disable=broad-except
        mx.log_error(e)
        mx.abort(
            "`jython` is neither on the path, nor is {} set!\n".format(
                ENV_JYTHON_JAR))
Example #13
def gate_body(args, tasks):
    with Task('Vm: Basic GraalVM Tests', tasks, tags=[VmGateTasks.compiler]) as t:
        if t and mx_vm.has_component('Graal compiler'):
            _java = join(mx_vm.graalvm_output(), 'bin', 'java')

            _out = mx.OutputCapture()
            if mx.run([_java, '-XX:+JVMCIPrintProperties'], nonZeroIsFatal=False, out=_out, err=_out):
                mx.log_error(_out.data)
                mx.abort('The GraalVM image is not built with a JVMCI-enabled JDK, it misses `-XX:+JVMCIPrintProperties`.')

            _out = subprocess.check_output([_java, '-version'], stderr=subprocess.STDOUT)
            if args.strict_mode:
                # A full open-source build should be built with an open-source JDK
                _version_regex = _openjdk_version_regex
            else:
                # Allow Oracle JDK in non-strict mode as it is common on developer machines
                _version_regex = _anyjdk_version_regex
            match = _version_regex.match(_out)
            if match is None:
                if args.strict_mode and _anyjdk_version_regex.match(_out):
                    mx.abort("In --strict-mode, GraalVM must be built with OpenJDK")
                else:
                    mx.abort('Unexpected version string:\n{}Does not match:\n{}'.format(_out, _version_regex.pattern))
            elif match.group('graalvm_version') != _suite.release_version():
                mx.abort("Wrong GraalVM version in -version string: got '{}', expected '{}'".format(match.group('graalvm_version'), _suite.release_version()))

    with Task('Vm: Sulong tests', tasks, tags=[VmGateTasks.sulong]) as t:
        if t and mx_vm.has_component('Sulong', fatalIfMissing=True):
            pass

    with Task('Vm: Graal.js tests', tasks, tags=[VmGateTasks.graal_js]) as t:
        if t and mx_vm.has_component('Graal.js', fatalIfMissing=True):
            pass

    with Task('Vm: Graal.nodejs tests', tasks, tags=[VmGateTasks.graal_nodejs]) as t:
        if t and mx_vm.has_component('Graal.nodejs', fatalIfMissing=True):
            pass

    with Task('Vm: TruffleRuby tests', tasks, tags=[VmGateTasks.truffleruby]) as t:
        if t and mx_vm.has_component('TruffleRuby', fatalIfMissing=True):
            pass

    with Task('Vm: FastR tests', tasks, tags=[VmGateTasks.fastr]) as t:
        if t and mx_vm.has_component('FastR', fatalIfMissing=True):
            pass

    with Task('Vm: Graal.Python tests', tasks, tags=[VmGateTasks.graalpython]) as t:
        if t and mx_vm.has_component('Graal.Python', fatalIfMissing=True):
            pass

    gate_sulong(tasks)
    gate_ruby(tasks)
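A hypothetical shape for the two version regexes the gate consults; the real patterns are defined elsewhere in the suite. The only property gate_body relies on is the named group graalvm_version:

import re
# Illustrative only -- not the suite's actual patterns.
_openjdk_version_regex = re.compile(
    r'openjdk version .*GraalVM (?P<graalvm_version>\d+(\.\d+)+)', re.DOTALL)
_anyjdk_version_regex = re.compile(
    r'(openjdk|java) version .*GraalVM (?P<graalvm_version>\d+(\.\d+)+)', re.DOTALL)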
Example #14
def run_llvm_reduce(nrmutations, input_bc, output_ll):
    reduce_out = mx.OutputCapture()
    try:
        args = [input_bc,
                "-ignore_remaining_args=1",
                "-mtriple", "x86_64-unknown-linux-gnu",
                "-nrmutations", str(nrmutations),
                "-seed", str(rand.randint(0, 10000000)),
                "-o", output_ll]
        _run_fuzz_tool("llvm-reduce", args, out=reduce_out, err=reduce_out)
    except SystemExit as se:
        mx.log_error(reduce_out.data)
        mx.abort("Error executing llvm-reduce: {}".format(se))
Example #15
def run_cmake(cmdline, silent, *args, **kwargs):
    log_error = kwargs.pop("log_error", False)
    if mx._opts.verbose:
        mx.run(["cmake"] + cmdline, *args, **kwargs)
    else:
        with open(os.devnull, 'w') as fnull:
            err = mx.OutputCapture() if silent else None
            try:
                mx.run(["cmake"] + cmdline, out=fnull, err=err, *args, **kwargs)
            except:
                if log_error and err and err.data:
                    mx.log_error(err.data)
                raise
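An illustrative call of the wrapper above; the cmake arguments are made up:

# Quiet configure step that still surfaces cmake's stderr on failure.
run_cmake(['-DCMAKE_BUILD_TYPE=Release', '.'], silent=True, log_error=True)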
Example #16
def check_filename_length(args):
    """check that all file name lengths are short enough for eCryptfs"""
    # For eCryptfs, see https://bugs.launchpad.net/ecryptfs/+bug/344878
    parser = ArgumentParser(prog="mx check-filename-length", description="Check file name length")
    parser.parse_known_args(args)
    max_length = 143
    too_long = []
    for _, _, filenames in os.walk('.'):
        for filename in filenames:
            if len(filename) > max_length:
                too_long.append(filename)
    if too_long:
        mx.log_error("The following file names are too long for eCryptfs: ")
        for x in too_long:
            mx.log_error(x)
        mx.abort("File names that are too long where found. Ensure all file names are under %d characters long." % max_length)
Example #18
def mdlCheck(args=None):
    """runs mdl on all .md files in the projects and root directory"""
    error = False
    for mdlCheckPath in mdlCheckDirectories:
        for path, _, files in os.walk(mdlCheckPath):
            for f in files:
                if f.endswith('.md') and not any(path.startswith(exclude) for exclude in mdlCheckExcludeDirectories):
                    absPath = os.path.join(path, f)
                    # pass arguments as a list to avoid shell-quoting issues in paths
                    mdlCheckCommand = ['mdl', '-r~MD026,~MD002,~MD029,~MD032,~MD033,~MD003,~MD001', absPath]
                    try:
                        subprocess.check_output(mdlCheckCommand, stderr=subprocess.STDOUT)
                    except subprocess.CalledProcessError as e:
                        mx.log_error(e) # prints command and return value
                        mx.log_error(e.output) # prints process output
                        error = True
    if error:
        exit(-1)
Example #19
    def print_commands_on_failure():
        sys.stdout.flush()
        sys.stderr.flush()

        mx.log_error('\nThe sequence of mx commands that were executed until the failure follows:\n')
        for command, command_args, kwargs in all_commands:
            mx.log_error(command_in_gate_message(command, command_args, kwargs))

        mx.log_error('\nIf the previous sequence is incomplete or some commands were executed programmatically use:\n')
        mx.log_error('mx' + shell_quoted_args(_mx_args + _mx_command_and_args) + '\n')

        sys.stderr.flush()
Example #20
def runTool(self, args, errorMsg=None, verbose=None, **kwargs):
    f = None  # ensure f is bound even if the try block exits early
    try:
        if not mx.get_opts().verbose and not verbose:
            f = open(os.devnull, 'w')
            ret = mx.run(args, out=f, err=f, **kwargs)
        else:
            ret = mx.run(args, **kwargs)
    except SystemExit:
        ret = -1
        if errorMsg is None:
            mx.log_error()
            mx.log_error('Error: Cannot run {}'.format(args))
        else:
            mx.log_error()
            mx.log_error('Error: {}'.format(errorMsg))
            mx.log_error(' '.join(args))
    if f is not None:
        f.close()
    return ret
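A typical call shape for runTool from within the owning class; the command line is made up:

# Runs quietly unless mx is verbose; on failure logs the friendly message
# followed by the command that was attempted.
ret = self.runTool(['clang', '-c', 'test.c'], errorMsg='compiling test.c failed')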
Example #21
def runIRBuilderTest38(vmArgs):
    """test ir-writer with llvm 3.8 bitcode files (see -h or --help)"""
    vmArgs, otherArgs = mx_sulong.truffle_extract_VM_args(vmArgs)
    parser = argparse.ArgumentParser(
        description="Compiles all or selected test suites.")
    parser.add_argument('suite',
                        nargs='*',
                        help=' '.join(irBuilderTests38.keys()),
                        default=irBuilderTests38.keys())
    parser.add_argument('--skip-compilation',
                        help='skip suite compilation',
                        action='store_true')  # TODO: makefile
    parsedArgs = parser.parse_args(otherArgs)

    # test if we have the required tools installed
    LlvmAS_38.find_tool()
    LlvmLLI_38.find_tool()

    returnCode = 0
    for testSuiteName in parsedArgs.suite:
        suite = irBuilderTests38[testSuiteName]
        # runs the test suite
        if parsedArgs.skip_compilation is False:
            mx_sulong.mx_testsuites.compileSuite([suite[0]])
        try:
            mx_sulong.mx_testsuites.run(
                vmArgs + suite[3] + ['-Dpolyglot.irwriter.LLVMVersion=3.8'],
                suite[1], [])
        except KeyboardInterrupt:
            sys.exit(-1)
        except:
            mx.log_error("unexpected exception thrown, continue...")

        testSuite = IRGeneratorSuite(LlvmAS_38, LlvmLLI_38)
        testSuite.run(suite[2])
        if not testSuite.wasSuccessfull():
            returnCode = 1

    return returnCode
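irBuilderTests38 is indexed positionally above; an assumed entry shape inferred from those indices (all names hypothetical):

irBuilderTests38 = {
    # name: (compile target, run target, test directory, extra VM args)
    'sulong38': ('SULONG_IRBUILDER_TESTS', 'irbuilder38', 'cache/tests/irbuilder38', []),
}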
Example #22
def pullLLVMBinaries(args=None):
    """downloads the LLVM binaries"""
    toolDir = join(_toolDir, "llvm")
    mx.ensure_dir_exists(toolDir)
    osStr = mx.get_os()
    arch = mx.get_arch()
    if osStr == 'windows':
        mx.log_error('windows currently only supported with cygwin!')
        return
    elif osStr == 'linux':
        if arch == 'amd64':
            urls = ['https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/clang+llvm-3.2-x86_64-linux-ubuntu-12.04.tar.gz']
        else:
            urls = ['https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/clang+llvm-3.2-x86-linux-ubuntu-12.04.tar.gz']
    elif osStr == 'darwin':
        urls = ['https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/clang+llvm-3.2-x86_64-apple-darwin11.tar.gz']
    elif osStr == 'cygwin':
        urls = ['https://lafo.ssw.uni-linz.ac.at/pub/sulong-deps/clang+llvm-3.2-x86-mingw32-EXPERIMENTAL.tar.gz']
    else:
        mx.log_error("{0} {1} not supported!".format(osStr, arch))
        return
    localPath = pullsuite(toolDir, urls)
    tar(localPath, toolDir, stripLevels=1)
    os.remove(localPath)
Example #23
def runIRBuilderTestGen38(vmArgs):
    """create llvm-ir testcases which are then run against llvm as well as Sulong (see -h or --help)"""
    vmArgs, otherArgs = mx_sulong.truffle_extract_VM_args(vmArgs)
    parser = argparse.ArgumentParser(
        description="Compiles all or selected test suites.")
    parser.add_argument('suite',
                        nargs='*',
                        help=' '.join(irBuilderTestsGen38.keys()),
                        default=irBuilderTestsGen38.keys())
    parsedArgs = parser.parse_args(otherArgs)

    returnCode = 0
    for testSuiteName in parsedArgs.suite:
        suite = irBuilderTestsGen38[testSuiteName]
        # runs the test suite

        # remove old files inside build directory
        if os.path.isdir(suite[1]):
            for _file in os.listdir(suite[1]):
                if _file.endswith(".ll") or _file.endswith(".bc"):
                    os.remove(os.path.join(suite[1], _file))

        try:
            mx_sulong.mx_testsuites.run(
                vmArgs + ['-Dirwriter.LLVMVersion=3.8'], suite[0], [])
        except KeyboardInterrupt:
            sys.exit(-1)
        except:
            mx.log_error("unexpected exception thrown, continue...")

        testSuite = IRGeneratorBuilderSuite(LlvmAS_38, LlvmLLI_38)
        testSuite.run(suite[1])
        if not testSuite.wasSuccessfull():
            returnCode = 1

    return returnCode
Example #24
def runJalangi(args, excl="", outFile=None, tracable=True):
    """run jalangi"""
    jalangiArgs = [join(_suite.dir, "src/ch.usi.inf.nodeprof/js/jalangi.js")]

    # jalangi arg parser (accepts GraalVM '--nodeprof.' options for convenience)
    parser = ArgumentParser(prog="mx jalangi",
                            description="Run NodeProf-Jalangi")
    # analysis (multiple arguments allowed)
    parser.add_argument("--analysis",
                        "--nodeprof.Analysis",
                        help="Jalangi analysis",
                        action="append",
                        default=[])
    # options
    parser.add_argument("--debug",
                        "--nodeprof.Debug",
                        help="enable NodeProf debug logging",
                        action="store_true")
    parser.add_argument("--excl",
                        "--nodeprof.ExcludeSource",
                        help="exclude sources",
                        default="")
    parser.add_argument("--scope",
                        "--nodeprof.Scope",
                        help="instrumentation scope",
                        choices=["app", "module", "all"],
                        default="module")
    # mx-only options
    parser.add_argument("--svm",
                        help="enable SubstrateVM",
                        action="store_true")
    parser.add_argument("--trace",
                        help="enable instrumentation API tracing",
                        action="store_true")

    parsed, args = parser.parse_known_args(args)

    # process analysis args
    for analysis_arg in parsed.analysis:
        # check if analysis file exists
        analysisPath = os.path.abspath(analysis_arg)
        if not os.path.exists(analysis_arg):
            mx.log_error("analysis file " + analysis_arg + " (" +
                         analysisPath + ") does not exist")
            sys.exit(1)

    if len(parsed.analysis) == 0:
        mx.log("Warning: no Jalangi --analysis specified")

    # append analyses and unparsed args
    jalangiAnalysisArg = [
        j for i in parsed.analysis for j in ['--analysis', i]
    ] + args
    # exclude analyses by default
    excl = ','.join([i for i in parsed.excl.split(',') if i != ''] +
                    [os.path.abspath(i) for i in parsed.analysis])

    jalangiArgs = ["--nodeprof.Scope=" + parsed.scope
                   ] + (["--nodeprof.ExcludeSource=" + excl] if len(excl) else
                        []) + jalangiArgs + jalangiAnalysisArg
    _runJalangi(jalangiArgs,
                outFile=outFile,
                svm=parsed.svm,
                debug=parsed.debug,
                trace=(tracable and parsed.trace))
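An illustrative invocation of runJalangi; the analysis file and program are made up and must exist on disk, since missing analysis files abort with an error above:

# Equivalent to: mx jalangi --analysis trace.js --scope app program.js
runJalangi(['--analysis', 'trace.js', '--scope', 'app', 'program.js'])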
Example #25
def runBenchmark(self, benchmark, bmSuiteArgs):
    directory = self.directory()
    if directory is None:
        directory, benchmark = benchmark.split('/')

    arguments = ['benchmark']
    if self.config()['kind'] == 'simple':
        arguments.extend(['--simple', '--iterations', '--elapsed'])
        time = self.time()
        if isinstance(time, dict):
            if benchmark in time:
                time = str(time[benchmark])
            else:
                time = str(time['default'])
        else:
            time = str(self.time())
        arguments.extend(['--time', time])
    elif self.config()['kind'] == 'fixed-iterations':
        iterations_config = self.config()['iterations'][benchmark]
        fixed_iterations = sorted(iterations_config.keys())
        fixed_iterations_arg = ','.join([str(i) for i in fixed_iterations])
        arguments.extend(['--iterations', '--elapsed', '--ips'])
        arguments.extend(['--fixed-iterations'])
        arguments.extend([fixed_iterations_arg])
        if iterations_config != {1: 'single-shot'}:
            # single-shot benchmarks use subprocesses so startup is already included
            arguments.extend(['--start-time', 'START_TIME_SET_BY_JT_BENCHMARK'])
    else:
        raise AssertionError("Unknown benchmark kind: " + self.config()['kind'])

    if ':' in benchmark:
        benchmark_file, benchmark_name = benchmark.split(':')
        benchmark_names = [benchmark_name]
    else:
        benchmark_file = benchmark
        benchmark_names = []
    arguments.extend(['bench/' + directory + '/' + benchmark_file + '.rb'])
    arguments.extend(benchmark_names)
    arguments.extend(bmSuiteArgs)
    out = mx.OutputCapture()

    if jt(arguments, out=out, nonZeroIsFatal=False) == 0:
        mx.log(out.data)
        lines = out.data.split('\n')[1:-1]

        data = self.filterLines(lines)
        iterations = [d for n, d in enumerate(data) if n % 3 == 0]
        elapsed = [d for n, d in enumerate(data) if n % 3 == 1]
        samples = [d for n, d in enumerate(data) if n % 3 == 2]

        if lines[-1] == 'optimised away':
            return [{
                'benchmark': benchmark,
                'metric.name': 'throughput',
                'metric.value': sample,
                'metric.unit': 'op/s',
                'metric.better': 'higher',
                'metric.iteration': n,
                'extra.metric.warmedup': 'false',
                'extra.metric.elapsed-num': e
            } for n, (e, sample) in enumerate(zip(elapsed, samples))] + [{
                'benchmark': benchmark,
                'metric.name': 'throughput',
                'metric.value': 2147483647,  # arbitrary high value (--simple won't run more than this many ips)
                'metric.unit': 'op/s',
                'metric.better': 'higher',
                'metric.iteration': len(samples),
                'extra.metric.warmedup': 'true',
                # put the data point a bit beyond the last one
                'extra.metric.elapsed-num': (elapsed[-1] + 2.0) if elapsed else 2.0,
                'error': 'optimised away'
            }]
        elif self.config()['kind'] == 'fixed-iterations':
            iteration_config = self.config()['iterations'][benchmark]
            return [{
                'benchmark': benchmark,
                'metric.name': iteration_config[iteration],
                'metric.iteration': iteration,
                'metric.value': e,
                'metric.unit': 's',
                'metric.better': 'lower'
            } for n, (e, iteration) in enumerate(zip(elapsed, iterations))
              if iteration in iteration_config]
        else:
            return [{
                'benchmark': benchmark,
                'metric.name': 'throughput',
                'metric.value': sample,
                'metric.unit': 'op/s',
                'metric.better': 'higher',
                'metric.iteration': n,
                'extra.metric.warmedup': 'true' if n / float(len(samples)) >= 0.5 else 'false',
                'extra.metric.elapsed-num': e
            } for n, (e, sample) in enumerate(zip(elapsed, samples))]
    else:
        mx.log_error("ERROR:")
        mx.log_error(out.data)

        return [{
            'benchmark': benchmark,
            'metric.name': 'throughput',
            'metric.value': 0,
            'metric.unit': 'op/s',
            'metric.better': 'higher',
            'extra.metric.warmedup': 'true',
            'error': 'failed'
        }]