Example #1
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output', type=str,
                      required=True)
  parser.add_argument('--xvfb', help='start xvfb', action='store_true')

  # This argument is ignored for now.
  parser.add_argument('--isolated-script-test-chartjson-output', type=str)
  # This argument is ignored for now.
  parser.add_argument('--isolated-script-test-perf-output', type=str)
  # This argument is translated below.
  parser.add_argument('--isolated-script-test-filter', type=str)

  args, rest_args = parser.parse_known_args()

  env = os.environ.copy()
  env['CHROME_HEADLESS'] = '1'
  cmd = [sys.executable] + rest_args
  cmd += ['--write-full-results-to', args.isolated_script_test_output]
  temp_filter_file = None
  try:
    if args.isolated_script_test_filter:
      filter_list = common.extract_filter_list(args.isolated_script_test_filter)
      # Need to dump this to a file in order to use --file-list.
      temp_filter_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
      temp_filter_file.write('\n'.join(filter_list))
      temp_filter_file.close()
      cmd += ['--test-list=' + temp_filter_file.name]
    if args.xvfb:
      return xvfb.run_executable(cmd, env)
    else:
      return common.run_command(cmd, env=env)
  finally:
    if temp_filter_file:
      os.unlink(temp_filter_file.name)
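The try/finally cleanup of the temporary filter file above is a pattern several of these wrappers repeat. Below is a minimal sketch of the same idea as a reusable context manager; the helper name filter_file is hypothetical and not part of the harness:

import contextlib
import os
import tempfile


@contextlib.contextmanager
def filter_file(filter_list):
    # Writes one test name per line to a temp file and deletes it afterwards.
    tmp = tempfile.NamedTemporaryFile(mode='w', delete=False)
    try:
        tmp.write('\n'.join(filter_list))
        tmp.close()
        yield tmp.name
    finally:
        os.unlink(tmp.name)

# Usage, mirroring the command construction above:
#   with filter_file(filter_list) as path:
#     cmd += ['--test-list=' + path]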
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('executable', help='Test executable.')
    parser.add_argument('--isolated-script-test-output', type=str)
    parser.add_argument('--isolated-script-test-filter', type=str)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    # Kept for compatibility.
    # TODO(jmadill): Remove when removed from the recipes. http://crbug.com/954415
    parser.add_argument('--isolated-script-test-perf-output', type=str)

    args, extra_flags = parser.parse_known_args()

    env = os.environ.copy()

    if 'GTEST_TOTAL_SHARDS' in env:
        extra_flags += ['--shard-count=' + env['GTEST_TOTAL_SHARDS']]
        env.pop('GTEST_TOTAL_SHARDS')
    if 'GTEST_SHARD_INDEX' in env:
        extra_flags += ['--shard-index=' + env['GTEST_SHARD_INDEX']]
        env.pop('GTEST_SHARD_INDEX')

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        # Consider adding stdio control flags.
        if args.isolated_script_test_output:
            extra_flags.append('--isolated-script-test-output=%s' %
                               args.isolated_script_test_output)

        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            args.executable = '.\\%s.exe' % args.executable
        else:
            args.executable = './%s' % args.executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [args.executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

    except Exception:
        traceback.print_exc()
        rc = 1

    return rc
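The platform-specific executable path built above ('.\\name.exe' on Windows, './name' elsewhere) recurs in several later examples. A small sketch of that logic in isolation, using sys.platform directly instead of the scripts' own IsWindows() helper; the function name is illustrative:

import sys


def executable_path(name):
    # Returns a relative invocation path for a test binary in the current directory.
    if sys.platform == 'win32':
        return '.\\%s.exe' % name
    return './%s' % name

# executable_path('some_tests') yields '.\\some_tests.exe' on Windows and
# './some_tests' on other platforms.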
Example #3
def _generate_filter_args(self):
    if self._options.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            self._options.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        return ['--story-filter=' + filter_regex]
    return []
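The alternation regex built above will misfire if a story name contains regex metacharacters (a literal dot or parenthesis, for instance). An escaped variant is sketched below as an alternative, not as what the harness actually does:

import re


def filter_list_to_regex(filter_list):
    # Joins test/story names into a regex alternation, escaping metacharacters.
    return '(' + '|'.join(re.escape(name) for name in filter_list) + ')'

# filter_list_to_regex(['story.one', 'story.two']) == r'(story\.one|story\.two)'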
Example #4
def generate_test_filter_args(self, test_filter_str):
    filter_list = common.extract_filter_list(test_filter_str)
    # isolated_script_test_filter comes in like:
    #   gpu_tests.webgl_conformance_integration_test.WebGLConformanceIntegrationTest.WebglExtension_WEBGL_depth_texture  # pylint: disable=line-too-long
    # but we need to pass it to --test-filter like this:
    #   WebglExtension_WEBGL_depth_texture
    filter_list = [f.split('.')[-1] for f in filter_list]
    # Need to convert this to a valid regex.
    filter_regex = '(' + '|'.join(filter_list) + ')'
    return ['--test-filter=%s' % filter_regex]
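Every example here leans on common.extract_filter_list from Chromium's testing/scripts/common.py. Assuming the usual convention that '::' separates names (single colons can appear inside test and benchmark names), a minimal sketch looks like this:

def extract_filter_list(filter_string):
    # Splits an --isolated-script-test-filter value into individual test names.
    # '::' is assumed as the separator; the real helper lives in common.py.
    return filter_string.split('::')

# extract_filter_list('suite.TestA::suite.TestB') == ['suite.TestA', 'suite.TestB']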
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    # Remove the chartjson extra arg until this script cares about chartjson
    # results from telemetry
    index = 0
    for arg in rest_args:
        if ('--isolated-script-test-chartjson-output' in arg
                or '--isolated-script-test-perf-output' in arg):
            rest_args.pop(index)
            break
        index += 1
    if args.isolated_script_test_filter:
        # This test harness doesn't yet support reading the test list from
        # a file.
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # This harness takes the test names to run as the first arguments.
        # The first argument of rest_args is the script to run, so insert
        # the test names after that.
        rest_args = ([rest_args[0]] + filter_list + ['--exact-test-filter'] +
                     rest_args[1:])

    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]
    cmd = [sys.executable] + rest_args + sharding_args + [
        '--write-full-results-to', args.isolated_script_test_output
    ]
    if args.xvfb:
        return xvfb.run_executable(cmd, env)
    else:
        return common.run_command(cmd, env=env)
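The translation of GTEST_TOTAL_SHARDS/GTEST_SHARD_INDEX into explicit flags above is repeated by several of these wrappers, each with its own flag names (--total-shards vs. --shard-count). Below is a hedged helper capturing the shared part; the function name and the flag spellings are illustrative:

def pop_gtest_sharding_flags(env):
    # Converts swarming's GTEST_* sharding variables into flags and strips them
    # from the child environment so gtest itself does not also apply sharding.
    sharding_args = []
    total = env.pop('GTEST_TOTAL_SHARDS', None)
    index = env.pop('GTEST_SHARD_INDEX', None)
    if total is not None and index is not None:
        sharding_args = ['--total-shards=%d' % int(total),
                         '--shard-index=%d' % int(index)]
    return sharding_args

# env = os.environ.copy()
# cmd += pop_gtest_sharding_flags(env)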
Example #6
def execute_gtest_perf_test(args, rest_args):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    charts = None  # Ensure charts is defined even if an exception occurs below.
    try:
        executable = rest_args[0]
        extra_flags = []
        if len(rest_args) > 1:
            extra_flags = rest_args[1:]

        # These flags make sure that the test outputs perf metrics in the log.
        if not '--verbose' in extra_flags:
            extra_flags.append('--verbose')
        if not '--test-launcher-print-test-stdio=always' in extra_flags:
            extra_flags.append('--test-launcher-print-test-stdio=always')
        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            executable = '.\\%s.exe' % executable
        else:
            executable = './%s' % executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

            # Now get the correct json format from the stdout to write to the perf
            # results file
            results_processor = (
                generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
            charts = results_processor.GenerateJsonResults(tempfile_path)
    except Exception:
        traceback.print_exc()
        rc = 1

    valid = (rc == 0)
    failures = [] if valid else ['(entire test suite)']
    output_json = {
        'valid': valid,
        'failures': failures,
    }
    return rc, charts, output_json
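A sketch of how a caller might persist the (rc, charts, output_json) tuple returned above, assuming the usual --isolated-script-test-output and --isolated-script-test-perf-output destinations; this wrapper is illustrative, not part of the original script:

import json


def write_perf_test_results(args, rc, charts, output_json):
    # Illustrative only: writes execute_gtest_perf_test() outputs to disk.
    with open(args.isolated_script_test_output, 'w') as fp:
        json.dump(output_json, fp)
    # charts is already a JSON-encoded string from the legacy results processor.
    if charts and args.isolated_script_test_perf_output:
        with open(args.isolated_script_test_perf_output, 'w') as fp:
            fp.write(charts)
    return rc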
Example #7
def generate_test_filter_args(self, test_filter_str):
    filter_list = common.extract_filter_list(test_filter_str)
    self._temp_filter_file = tempfile.NamedTemporaryFile(
        mode='w', delete=False)
    self._temp_filter_file.write('\n'.join(filter_list))
    self._temp_filter_file.close()
    arg_name = 'test-list'
    if any(r in self.rest_args[0] for r in KNOWN_TYP_TEST_RUNNERS):
        arg_name = 'file-list'

    return ['--%s=' % arg_name + self._temp_filter_file.name]
Example #8
def _generate_filter_args(self):
  if self._options.isolated_script_test_filter:
    filter_list = common.extract_filter_list(
        self._options.isolated_script_test_filter)
    filter_arguments = [_TelemetryFilterArgument(f) for f in filter_list]
    applicable_stories = [
        f.story for f in filter_arguments if f.benchmark == self.benchmark]
    # Need to convert this to a valid regex.
    filter_regex = '(' + '|'.join(applicable_stories) + ')'
    return ['--story-filter=' + filter_regex]
  return []
Example #9
def _generate_filter_args(self):
  if self._options.isolated_script_test_filter:
    filter_list = common.extract_filter_list(
        self._options.isolated_script_test_filter)
    return ['--gtest_filter=' + ':'.join(filter_list)]
  return []
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    args, rest_args = parser.parse_known_args()

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                                 build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'

    try:
        rc = 0
        try:
            executable = rest_args[0]
            extra_flags = []
            if len(rest_args) > 1:
                extra_flags = rest_args[1:]

            # These flags make sure that the test outputs perf metrics in the log.
            if not '--verbose' in extra_flags:
                extra_flags.append('--verbose')
            if not '--test-launcher-print-test-stdio=always' in extra_flags:
                extra_flags.append('--test-launcher-print-test-stdio=always')
            if args.isolated_script_test_filter:
                filter_list = common.extract_filter_list(
                    args.isolated_script_test_filter)
                extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

            if IsWindows():
                executable = '.\\%s.exe' % executable
            else:
                executable = './%s' % executable
            with common.temporary_file() as tempfile_path:
                env['CHROME_HEADLESS'] = '1'
                rc = common.run_command_with_output([executable] + extra_flags,
                                                    env=env,
                                                    stdoutfile=tempfile_path)
                # Now get the correct json format from the stdout to write to the
                # perf results file
                results_processor = (generate_legacy_perf_dashboard_json.
                                     LegacyResultsProcessor())
                charts = results_processor.GenerateJsonResults(tempfile_path)
                # TODO(eakuefner): Make isolated_script_test_perf_output mandatory
                # after flipping flag in swarming.
                if args.isolated_script_test_perf_output:
                    filename = args.isolated_script_test_perf_output
                else:
                    filename = args.isolated_script_test_chartjson_output
                # Write the returned encoded JSON to the charts output file.
                with open(filename, 'w') as f:
                    f.write(charts)
        except Exception:
            traceback.print_exc()
            rc = 1

        valid = (rc == 0)
        failures = [] if valid else ['(entire test suite)']
        with open(args.isolated_script_test_output, 'w') as fp:
            json.dump({
                'valid': valid,
                'failures': failures,
            }, fp)

        return rc

    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
Example #11
def run_benchmark(args, rest_args, histogram_results):
    """  Run benchmark with args.

  Args:
    args: the option object resulted from parsing commandline args required for
      IsolatedScriptTest contract (see
      https://cs.chromium.org/chromium/build/scripts/slave/recipe_modules/chromium_tests/steps.py?rcl=d31f256fb860701e6dc02544f2beffe4e17c9b92&l=1639).
    rest_args: the args (list of strings) for running Telemetry benchmark.
    histogram_results: a boolean describes whether to output histograms format
      for the benchmark.

  Returns: a tuple of (rc, perf_results, json_test_results, benchmark_log)
    rc: the return code of benchmark
    perf_results: json object contains the perf test results
    json_test_results: json object contains the Pass/Fail data of the benchmark.
    benchmark_log: string contains the stdout/stderr of the benchmark run.
  """
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    tempfile_dir = tempfile.mkdtemp('telemetry')
    benchmark_log = ''
    stdoutfile = os.path.join(tempfile_dir, 'benchmark_log.txt')
    valid = True
    num_failures = 0
    perf_results = None
    json_test_results = None

    results = None
    cmd_args = rest_args
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        cmd_args = cmd_args + ['--story-filter=' + filter_regex]
    try:
        cmd = [sys.executable] + cmd_args + [
            '--output-dir',
            tempfile_dir,
            '--output-format=json-test-results',
        ]
        if args.xvfb:
            rc = xvfb.run_executable(cmd, env=env, stdoutfile=stdoutfile)
        else:
            rc = test_env.run_command_with_output(cmd,
                                                  env=env,
                                                  stdoutfile=stdoutfile)

        with open(stdoutfile) as f:
            benchmark_log = f.read()

        # If we have also output chartjson, read it in and return it.
        # results-chart.json is the file name output by telemetry when the
        # chartjson output format is included
        tempfile_name = None
        if histogram_results:
            tempfile_name = os.path.join(tempfile_dir, 'histograms.json')
        else:
            tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')

        if tempfile_name is not None:
            with open(tempfile_name) as f:
                perf_results = json.load(f)

        # test-results.json is the file name output by telemetry when the
        # json-test-results format is included
        tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
        with open(tempfile_name) as f:
            json_test_results = json.load(f)
        num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
        valid = bool(rc == 0 or num_failures != 0)

    except Exception:
        traceback.print_exc()
        if results:
            print('results, which possibly caused exception: %s' %
                  json.dumps(results, indent=2))
        valid = False
    finally:
        shutil.rmtree(tempfile_dir)

    if not valid and num_failures == 0:
        if rc == 0:
            rc = 1  # Signal an abnormal exit.

    return rc, perf_results, json_test_results, benchmark_log
Example #12
def run_benchmark(args, rest_args):
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    tempfile_dir = tempfile.mkdtemp('telemetry')
    valid = True
    num_failures = 0
    histogram_results_present = 'histograms' in args.output_format
    chartjson_results_present = 'chartjson' in args.output_format
    perf_results = None
    json_test_results = None

    results = None
    cmd_args = rest_args
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        cmd_args = cmd_args + ['--story-filter=' + filter_regex]
    try:
        cmd = [sys.executable] + cmd_args + [
            '--output-dir',
            tempfile_dir,
            '--output-format=json-test-results',
        ]
        if args.xvfb:
            rc = xvfb.run_executable(cmd, env)
        else:
            rc = common.run_command(cmd, env=env)

        # If we have also output chartjson, read it in and return it.
        # results-chart.json is the file name output by telemetry when the
        # chartjson output format is included
        if histogram_results_present:
            tempfile_name = os.path.join(tempfile_dir, 'histograms.json')
        elif chartjson_results_present:
            tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')
        else:
            tempfile_name = None

        if tempfile_name is not None:
            with open(tempfile_name) as f:
                perf_results = json.load(f)

        # test-results.json is the file name output by telemetry when the
        # json-test-results format is included
        tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
        with open(tempfile_name) as f:
            json_test_results = json.load(f)
        num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
        valid = bool(rc == 0 or num_failures != 0)

    except Exception:
        traceback.print_exc()
        if results:
            print('results, which possibly caused exception: %s' %
                  json.dumps(results, indent=2))
        valid = False
    finally:
        shutil.rmtree(tempfile_dir)

    if not valid and num_failures == 0:
        if rc == 0:
            rc = 1  # Signal an abnormal exit.

    return rc, perf_results, json_test_results
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    # Remove the chartjson extra arg until this script cares about chartjson
    # results from telemetry
    index = 0
    for arg in rest_args:
        if ('--isolated-script-test-chartjson-output' in arg
                or '--isolated-script-test-perf-output' in arg):
            rest_args.pop(index)
            break
        index += 1
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        rest_args.append('--test-filter=' + filter_regex)

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                                 build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]
    try:
        valid = True
        rc = 0
        try:
            env['CHROME_HEADLESS'] = '1'
            rc = common.run_command(
                [sys.executable] + rest_args + sharding_args +
                ['--write-full-results-to', args.isolated_script_test_output],
                env=env)
        except Exception:
            traceback.print_exc()
            valid = False

        if not valid:
            failures = ['(entire test suite)']
            with open(args.isolated_script_test_output, 'w') as fp:
                json.dump({
                    'valid': valid,
                    'failures': failures,
                }, fp)

        return rc

    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
Example #14
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('executable', help='Test executable.')
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    args, extra_flags = parser.parse_known_args()

    env = os.environ.copy()

    if 'GTEST_TOTAL_SHARDS' in env:
        extra_flags += ['--shard-count=%d' % int(env['GTEST_TOTAL_SHARDS'])]
    if 'GTEST_SHARD_INDEX' in env:
        extra_flags += ['--shard-index=%d' % int(env['GTEST_SHARD_INDEX'])]

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        # Consider adding stdio control flags.
        if args.isolated_script_test_output:
            extra_flags.append('--results-file=%s' %
                               args.isolated_script_test_output)

        if args.isolated_script_test_perf_output:
            extra_flags.append('--histogram-json-file=%s' %
                               args.isolated_script_test_perf_output)

        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            args.executable = '.\\%s.exe' % args.executable
        else:
            args.executable = './%s' % args.executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [args.executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

    except Exception:
        traceback.print_exc()
        rc = 1

    return rc
Example #15
def execute_telemetry_benchmark_helper(args, rest_args, histogram_results):
    """Run benchmark with args.

  Args:
    args: the option object resulted from parsing commandline args required for
      IsolatedScriptTest contract (see
      https://cs.chromium.org/chromium/build/scripts/slave/recipe_modules/chromium_tests/steps.py?rcl=d31f256fb860701e6dc02544f2beffe4e17c9b92&l=1639).
    rest_args: the args (list of strings) for running Telemetry benchmark.
    histogram_results: a boolean describes whether to output histograms format
      for the benchmark.

  Returns: a tuple of (rc, perf_results, json_test_results, benchmark_log)
    rc: the return code of benchmark
    perf_results: json object contains the perf test results
    json_test_results: json object contains the Pass/Fail data of the benchmark.
    benchmark_log: string contains the stdout/stderr of the benchmark run.
  """
    # TODO(crbug.com/920002): These arguments cannot go into
    # run_performance_tests.py because
    # run_gtest_perf_tests.py does not yet support them. Note that ideally
    # we would use common.BaseIsolatedScriptArgsAdapter, but this will take
    # a good deal of refactoring to accomplish.
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-repeat',
                        type=int,
                        required=False)
    parser.add_argument(
        '--isolated-script-test-launcher-retry-limit',
        type=int,
        required=False,
        # Telemetry does not support retries. crbug.com/894254#c21
        choices=[0])
    parser.add_argument('--isolated-script-test-also-run-disabled-tests',
                        default=False,
                        action='store_true',
                        required=False)
    # Parse leftover args not already parsed in run_performance_tests.py or in
    # main().
    args, rest_args = parser.parse_known_args(args=rest_args, namespace=args)

    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    tempfile_dir = tempfile.mkdtemp('telemetry')
    benchmark_log = ''
    stdoutfile = os.path.join(tempfile_dir, 'benchmark_log.txt')
    valid = True
    num_failures = 0
    perf_results = None
    json_test_results = None

    results = None
    cmd_args = rest_args
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        cmd_args.append('--story-filter=' + filter_regex)
    if args.isolated_script_test_repeat:
        cmd_args.append('--pageset-repeat=' +
                        str(args.isolated_script_test_repeat))
    if args.isolated_script_test_also_run_disabled_tests:
        cmd_args.append('--also-run-disabled-tests')
    cmd_args.append('--output-dir=' + tempfile_dir)
    cmd_args.append('--output-format=json-test-results')
    cmd = [sys.executable] + cmd_args
    rc = 1  # Set default returncode in case there is an exception.
    try:
        if args.xvfb:
            rc = xvfb.run_executable(cmd, env=env, stdoutfile=stdoutfile)
        else:
            rc = test_env.run_command_with_output(cmd,
                                                  env=env,
                                                  stdoutfile=stdoutfile)

        with open(stdoutfile) as f:
            benchmark_log = f.read()

        # If we have also output chartjson, read it in and return it.
        # results-chart.json is the file name output by telemetry when the
        # chartjson output format is included
        tempfile_name = None
        if histogram_results:
            tempfile_name = os.path.join(tempfile_dir, 'histograms.json')
        else:
            tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')

        if tempfile_name is not None:
            with open(tempfile_name) as f:
                perf_results = json.load(f)

        # test-results.json is the file name output by telemetry when the
        # json-test-results format is included
        tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
        with open(tempfile_name) as f:
            json_test_results = json.load(f)
        num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
        valid = bool(rc == 0 or num_failures != 0)

    except Exception:
        traceback.print_exc()
        if results:
            print('results, which possibly caused exception: %s' %
                  json.dumps(results, indent=2))
        valid = False
    finally:
        # Add ignore_errors=True because otherwise rmtree may fail when leaked
        # test processes are still holding open handles to files under
        # |tempfile_dir|. For example, see crbug.com/865896
        shutil.rmtree(tempfile_dir, ignore_errors=True)

    if not valid and num_failures == 0:
        if rc == 0:
            rc = 1  # Signal an abnormal exit.

    return rc, perf_results, json_test_results, benchmark_log
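Finally, a sketch of what a caller could do with the four values returned above; the output paths come from the standard IsolatedScriptTest arguments, and the function below is illustrative rather than part of the scripts shown here:

import json


def write_benchmark_outputs(args, rc, perf_results, json_test_results, benchmark_log):
    # Illustrative only: persists execute_telemetry_benchmark_helper() results.
    if perf_results is not None and args.isolated_script_test_perf_output:
        with open(args.isolated_script_test_perf_output, 'w') as fp:
            json.dump(perf_results, fp)
    if json_test_results is not None:
        with open(args.isolated_script_test_output, 'w') as fp:
            json.dump(json_test_results, fp)
    # benchmark_log could additionally be echoed or archived for debugging.
    return rc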