Example #1
    def run_test(self):
        self.parse_args()
        cmd = self.generate_isolated_script_cmd()

        env = os.environ.copy()

        # Assume we want to set up the sandbox environment variables all the
        # time; doing so is harmless on non-Linux platforms and is needed
        # all the time on Linux.
        env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
        valid = True
        rc = 0
        try:
            env['CHROME_HEADLESS'] = '1'
            if self.options.xvfb:
                return xvfb.run_executable(cmd, env)
            else:
                return run_command(cmd, env=env)

        except Exception:
            rc = 1
            traceback.print_exc()
            valid = False

        if not valid:
            failures = ['(entire test suite)']
            with open(self.options.isolated_script_test_output, 'w') as fp:
                json.dump({
                    'valid': valid,
                    'failures': failures,
                }, fp)

        return rc
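
The failure record written above is a small JSON document with 'valid' and 'failures' keys. A hedged sketch of a downstream reader for that file; the function name and the way the path is passed in are illustrative assumptions, not part of the original script:

import json

# Hypothetical reader for the minimal result file written when the launch fails.
def suite_ran_cleanly(isolated_script_test_output_path):
    with open(isolated_script_test_output_path) as fp:
        data = json.load(fp)
    # The wrapper above writes this file only when the run raised, so a record
    # with valid=False or non-empty failures marks the whole suite as failed.
    return data.get('valid', False) and not data.get('failures')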
Example #2
def run_wrapper(args, cmd, env, stdoutfile=None):
    if args.xvfb:
        return xvfb.run_executable(cmd, env, stdoutfile=stdoutfile)
    else:
        return test_env.run_command_with_output(cmd,
                                                env=env,
                                                stdoutfile=stdoutfile)
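
A minimal sketch of driving run_wrapper above from the command line, assuming it lives in the same module as the Chromium testing/scripts helpers it calls (xvfb, test_env); the flag name and the pass-through of leftover arguments are illustrative assumptions:

import argparse
import os
import sys

# Hypothetical driver; real callers build cmd and env from their own options.
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest = parser.parse_known_args()

    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'
    # Everything after the known flags is treated as the command to run.
    return run_wrapper(args, rest, env)


if __name__ == '__main__':
    sys.exit(main())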
Example #3
    def run_test(self):
        self.parse_args()
        cmd = self.generate_isolated_script_cmd()

        env = os.environ.copy()

        # Assume we want to set up the sandbox environment variables all the
        # time; doing so is harmless on non-Linux platforms and is needed
        # all the time on Linux.
        env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
        valid = True
        try:
            env['CHROME_HEADLESS'] = '1'
            print('Running command: %s\nwith env: %r' % (' '.join(cmd), env))
            if self.options.xvfb:
                exit_code = xvfb.run_executable(cmd, env)
            else:
                exit_code = test_env.run_command(cmd, env=env)
            print('Command returned exit code %d' % exit_code)
            return exit_code
        except Exception:
            traceback.print_exc()
            valid = False
        finally:
            self.clean_up_after_test_run()

        if not valid:
            failures = ['(entire test suite)']
            with open(self.options.isolated_script_test_output, 'w') as fp:
                json.dump({
                    'valid': valid,
                    'failures': failures,
                }, fp)

        return 1
Example #4
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output', type=str,
                      required=True)
  parser.add_argument('--xvfb', help='start xvfb', action='store_true')

  # This argument is ignored for now.
  parser.add_argument('--isolated-script-test-chartjson-output', type=str)
  # This argument is ignored for now.
  parser.add_argument('--isolated-script-test-perf-output', type=str)
  # This argument is translated below.
  parser.add_argument('--isolated-script-test-filter', type=str)

  args, rest_args = parser.parse_known_args()

  env = os.environ.copy()
  env['CHROME_HEADLESS'] = '1'
  cmd = [sys.executable] + rest_args
  cmd += ['--write-full-results-to', args.isolated_script_test_output]
  temp_filter_file = None
  try:
    if args.isolated_script_test_filter:
      filter_list = common.extract_filter_list(args.isolated_script_test_filter)
      # Need to dump the filter to a file in order to pass it via --test-list.
      temp_filter_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
      temp_filter_file.write('\n'.join(filter_list))
      temp_filter_file.close()
      cmd += ['--test-list=' + temp_filter_file.name]
    if args.xvfb:
      return xvfb.run_executable(cmd, env)
    else:
      return common.run_command(cmd, env=env)
  finally:
    if temp_filter_file:
      os.unlink(temp_filter_file.name)
Example #5
def execute_telemetry_benchmark(
    command_generator, output_paths, use_xvfb=False):
  start = time.time()

  env = os.environ.copy()
  env['CHROME_HEADLESS'] = '1'
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

  return_code = 1
  temp_dir = tempfile.mkdtemp('telemetry')
  try:
    command = command_generator.generate(temp_dir)
    if use_xvfb:
      # When running with xvfb, we currently output both to stdout and to the
      # file. It would be better to only output to the file to keep the logs
      # clean.
      return_code = xvfb.run_executable(
          command, env=env, stdoutfile=output_paths.logs)
    else:
      with open(output_paths.logs, 'w') as handle:
        return_code = test_env.run_command_output_to_handle(
            command, handle, env=env)
    expected_results_filename = os.path.join(temp_dir, 'test-results.json')
    if os.path.exists(expected_results_filename):
      shutil.move(expected_results_filename, output_paths.test_results)
    else:
      common.write_interrupted_test_results_to(output_paths.test_results, start)
    expected_perf_filename = os.path.join(temp_dir, 'histograms.json')
    shutil.move(expected_perf_filename, output_paths.perf_results)

    csv_file_path = os.path.join(temp_dir, 'results.csv')
    if os.path.isfile(csv_file_path):
      shutil.move(csv_file_path, output_paths.csv_perf_results)
  except Exception:
    print('The following exception may have prevented the code from '
          'outputting structured test results and perf results output:')
    print(traceback.format_exc())
  finally:
    # Add ignore_errors=True because otherwise rmtree may fail when leaky test
    # processes are still holding open handles to files under |temp_dir|.
    # For example, see crbug.com/865896.
    shutil.rmtree(temp_dir, ignore_errors=True)

  print_duration('executing benchmark %s' % command_generator.benchmark, start)

  # Telemetry sets exit code to -1 to indicate that no stories were run. This
  # becomes 255 on linux because linux doesn't support -1 so it does modulo:
  # -1 % 256 == 255.
  # TODO(crbug.com/1019139): Make 111 be the exit code that means
  # "no stories were run".
  if return_code in (111, -1, 255):
    print ('Exit code %s indicates that no stories were run, so we are marking '
           'this as a success.' % return_code)
    return 0
  if return_code:
    return return_code
  return 0
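
The output_paths argument used above (and in several later examples) is only read through a handful of attributes. A hypothetical stand-in with just those fields, assuming the real script computes the paths under its isolated output directory:

import collections

# Hypothetical stand-in; attribute names mirror the accesses in these examples.
OutputPaths = collections.namedtuple(
    'OutputPaths',
    ['name', 'benchmark_path', 'logs', 'perf_results', 'csv_perf_results',
     'test_results'])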
Example #6
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    env['CHROME_HEADLESS'] = '1'

    return_code = 0
    try:
        command = command_generator.generate()
        if use_xvfb:
            return_code = xvfb.run_executable(command,
                                              env,
                                              stdoutfile=output_paths.logs)
        else:
            return_code = test_env.run_command_with_output(
                command, env=env, stdoutfile=output_paths.logs)
        # Get the correct json format from the stdout to write to the perf
        # results file.
        results_processor = generate_legacy_perf_dashboard_json.\
            LegacyResultsProcessor()
        graph_json_string = results_processor.GenerateJsonResults(
            output_paths.logs)
        with open(output_paths.perf_results, 'w') as fh:
            fh.write(graph_json_string)
    except Exception:
        traceback.print_exc()
        return_code = 1
    write_legacy_test_results(return_code, output_paths.test_results)
    return return_code
Example #7
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output', type=str)
  args, _ = parser.parse_known_args()

  if sys.platform == 'win32':
    exe = os.path.join('.', 'flatbuffers_unittests.exe')
  else:
    exe = os.path.join('.', 'flatbuffers_unittests')

  env = os.environ.copy()
  failures = []
  with common.temporary_file() as tempfile_path:
    rc = xvfb.run_executable([exe], env, stdoutfile=tempfile_path)

    # The flatbuffer tests do not really conform to anything parsable, except
    # that they will succeed with "ALL TESTS PASSED".
    with open(tempfile_path) as f:
      output = f.read()
      if output != "ALL TESTS PASSED\n":
        failures = [output]

  if args.isolated_script_test_output:
    with open(args.isolated_script_test_output, 'w') as fp:
      json.dump({'valid': True, 'failures': failures}, fp)

  return rc
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('executable', help='Test executable.')
    parser.add_argument('--isolated-script-test-output', type=str)
    parser.add_argument('--isolated-script-test-filter', type=str)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    # Kept for compatibility.
    # TODO(jmadill): Remove when removed from the recipes. http://crbug.com/954415
    parser.add_argument('--isolated-script-test-perf-output', type=str)

    args, extra_flags = parser.parse_known_args()

    env = os.environ.copy()

    if 'GTEST_TOTAL_SHARDS' in env:
        extra_flags += ['--shard-count=' + env['GTEST_TOTAL_SHARDS']]
        env.pop('GTEST_TOTAL_SHARDS')
    if 'GTEST_SHARD_INDEX' in env:
        extra_flags += ['--shard-index=' + env['GTEST_SHARD_INDEX']]
        env.pop('GTEST_SHARD_INDEX')

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        # Consider adding stdio control flags.
        if args.isolated_script_test_output:
            extra_flags.append('--isolated-script-test-output=%s' %
                               args.isolated_script_test_output)

        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            args.executable = '.\\%s.exe' % args.executable
        else:
            args.executable = './%s' % args.executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [args.executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

    except Exception:
        traceback.print_exc()
        rc = 1

    return rc
Example #9
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    env['CHROME_HEADLESS'] = '1'

    return_code = 0
    try:
        command = command_generator.generate(output_paths.benchmark_path)
        if use_xvfb:
            # When running with xvfb, we currently output both to stdout and to the
            # file. It would be better to only output to the file to keep the logs
            # clean.
            return_code = xvfb.run_executable(command,
                                              env,
                                              stdoutfile=output_paths.logs)
        else:
            with open(output_paths.logs, 'w') as handle:
                try:
                    return_code = test_env.run_command_output_to_handle(
                        command, handle, env=env)
                except OSError as e:
                    print(
                        'Command to run gtest perf test %s failed with an OSError: %s'
                        % (output_paths.name, e))
                    return_code = 1
        if (not os.path.exists(output_paths.perf_results)
                and os.path.exists(output_paths.logs)):
            # Get the correct json format from the stdout to write to the perf
            # results file if gtest does not generate one.
            results_processor = generate_legacy_perf_dashboard_json.\
                LegacyResultsProcessor()
            graph_json_string = results_processor.GenerateJsonResults(
                output_paths.logs)
            with open(output_paths.perf_results, 'w') as fh:
                fh.write(graph_json_string)
    except Exception:
        traceback.print_exc()
        return_code = 1
    if os.path.exists(output_paths.perf_results):
        if command_generator.executable_name in GTEST_CONVERSION_WHITELIST:
            with path_util.SysPath(path_util.GetTracingDir()):
                # pylint: disable=no-name-in-module
                from tracing.value import gtest_json_converter
                # pylint: enable=no-name-in-module
            gtest_json_converter.ConvertGtestJsonFile(
                output_paths.perf_results)
    else:
        print('ERROR: gtest perf test %s did not generate perf output' %
              output_paths.name)
        return_code = 1
    write_simple_test_results(return_code, output_paths.test_results,
                              output_paths.name)
    return return_code
Example #10
def execute_telemetry_benchmark(command_generator,
                                output_paths,
                                use_xvfb=False):
    start = time.time()

    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    return_code = 1
    temp_dir = tempfile.mkdtemp('telemetry')
    try:
        command = command_generator.generate(temp_dir)
        if use_xvfb:
            # When running with xvfb, we currently output both to stdout and to the
            # file. It would be better to only output to the file to keep the logs
            # clean.
            return_code = xvfb.run_executable(command,
                                              env=env,
                                              stdoutfile=output_paths.logs)
        else:
            return_code = test_env.run_command_with_output(
                command, env=env, stdoutfile=output_paths.logs)
        expected_results_filename = os.path.join(temp_dir, 'test-results.json')
        if os.path.exists(expected_results_filename):
            shutil.move(expected_results_filename, output_paths.test_results)
        else:
            common.write_interrupted_test_results_to(output_paths.test_results,
                                                     start)
        expected_perf_filename = os.path.join(temp_dir, 'histograms.json')
        shutil.move(expected_perf_filename, output_paths.perf_results)

        csv_file_path = os.path.join(temp_dir, 'results.csv')
        if os.path.isfile(csv_file_path):
            shutil.move(csv_file_path, output_paths.csv_perf_results)
    except Exception:
        print('The following exception may have prevented the code from '
              'outputting structured test results and perf results output:')
        print(traceback.format_exc())
    finally:
        # Add ignore_errors=True because otherwise rmtree may fail when leaky
        # test processes are still holding open handles to files under
        # |temp_dir|. For example, see crbug.com/865896.
        shutil.rmtree(temp_dir, ignore_errors=True)

    print_duration('executing benchmark %s' % command_generator.benchmark,
                   start)

    if return_code:
        return return_code
    return 0
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    # Remove the chartjson extra arg until this script cares about chartjson
    # results from telemetry
    index = 0
    for arg in rest_args:
        if ('--isolated-script-test-chartjson-output' in arg
                or '--isolated-script-test-perf-output' in arg):
            rest_args.pop(index)
            break
        index += 1
    if args.isolated_script_test_filter:
        # This test harness doesn't yet support reading the test list from
        # a file.
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # This harness takes the test names to run as the first arguments.
        # The first argument of rest_args is the script to run, so insert
        # the test names after that.
        rest_args = ([rest_args[0]] + filter_list + ['--exact-test-filter'] +
                     rest_args[1:])

    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]
    cmd = [sys.executable] + rest_args + sharding_args + [
        '--write-full-results-to', args.isolated_script_test_output
    ]
    if args.xvfb:
        return xvfb.run_executable(cmd, env)
    else:
        return common.run_command(cmd, env=env)
Example #12
def execute_gtest_perf_test(args, rest_args):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    charts = None  # Ensure charts is defined even if the run fails early.
    try:
        executable = rest_args[0]
        extra_flags = []
        if len(rest_args) > 1:
            extra_flags = rest_args[1:]

        # These flags make sure that the test outputs perf metrics in the log.
        if '--verbose' not in extra_flags:
            extra_flags.append('--verbose')
        if '--test-launcher-print-test-stdio=always' not in extra_flags:
            extra_flags.append('--test-launcher-print-test-stdio=always')
        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            executable = '.\\%s.exe' % executable
        else:
            executable = './%s' % executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

            # Now get the correct json format from the stdout to write to the perf
            # results file
            results_processor = (
                generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
            charts = results_processor.GenerateJsonResults(tempfile_path)
    except Exception:
        traceback.print_exc()
        rc = 1

    valid = (rc == 0)
    failures = [] if valid else ['(entire test suite)']
    output_json = {
        'valid': valid,
        'failures': failures,
    }
    return rc, charts, output_json
Example #13
def _run_and_get_output(args, cmd, env):
    lines = []
    logging.debug(' '.join(cmd))
    with common.temporary_file() as tempfile_path:
        if args.xvfb:
            exit_code = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
        else:
            exit_code = run_command_with_output(cmd, env=env,
                                                stdoutfile=tempfile_path,
                                                log=True)
        with open(tempfile_path) as f:
            for line in f:
                lines.append(line.strip())
    return exit_code, lines
Example #14
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)

    args = parser.parse_args(argv)

    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    if sys.platform == 'win32':
        exe = os.path.join('.', 'content_shell.exe')
    elif sys.platform == 'darwin':
        exe = os.path.join('.', 'Content Shell.app', 'Contents', 'MacOS',
                           'Content Shell')
    else:
        exe = os.path.join('.', 'content_shell')

    with common.temporary_file() as tempfile_path:
        env['CHROME_HEADLESS'] = '1'
        rc = xvfb.run_executable([
            sys.executable,
            os.path.join(common.SRC_DIR, 'content', 'shell', 'tools',
                         'breakpad_integration_test.py'), '--verbose',
            '--build-dir', '.', '--binary', exe, '--json', tempfile_path
        ], env)

        with open(tempfile_path) as f:
            failures = json.load(f)

    with open(args.isolated_script_test_output, 'w') as fp:
        json.dump({
            'valid': True,
            'failures': failures,
        }, fp)

    return rc
Example #15
def main(argv):
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', type=str,
      required=True)
  parser.add_argument(
      '--isolated-script-test-perf-output', type=str,
      required=False)

  args = parser.parse_args(argv)

  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

  if sys.platform == 'win32':
    exe = os.path.join('.', 'content_shell.exe')
  elif sys.platform == 'darwin':
    exe = os.path.join('.', 'Content Shell.app', 'Contents', 'MacOS',
                       'Content Shell')
  else:
    exe = os.path.join('.', 'content_shell')

  with common.temporary_file() as tempfile_path:
    rc = xvfb.run_executable([
        sys.executable,
        os.path.join(common.SRC_DIR, 'content', 'shell', 'tools',
                     'breakpad_integration_test.py'),
        '--verbose',
        '--build-dir', '.',
        '--binary', exe,
        '--json', tempfile_path
    ], env)

    with open(tempfile_path) as f:
      failures = json.load(f)

  with open(args.isolated_script_test_output, 'w') as fp:
    json.dump({
        'valid': True,
        'failures': failures,
    }, fp)

  return rc
Example #16
def _run_and_get_output(args, cmd, env):
    lines = []
    with common.temporary_file() as tempfile_path:
        if args.xvfb:
            ret = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
        else:
            ret = test_env.run_command_with_output(cmd,
                                                   env=env,
                                                   stdoutfile=tempfile_path)
        if ret:
            logging.error('Error running test suite.')
            return None
        with open(tempfile_path) as f:
            for line in f:
                lines.append(line.strip())
    return lines
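
A hedged usage sketch for _run_and_get_output above; the binary name and its flag are placeholders, and the args namespace only needs the xvfb attribute that the helper reads:

import argparse
import logging
import os

# Hypothetical caller; './some_test' and '--gtest_list_tests' are placeholders.
def list_tests():
    parser = argparse.ArgumentParser()
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, _ = parser.parse_known_args()

    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'
    lines = _run_and_get_output(args, ['./some_test', '--gtest_list_tests'], env)
    if lines is None:
        logging.error('Could not collect test output.')
        return 1
    for line in lines:
        print(line)
    return 0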
Example #17
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output', type=str,
                      required=True)
  parser.add_argument('--xvfb', help='start xvfb', action='store_true')

  # This argument is ignored for now.
  parser.add_argument('--isolated-script-test-chartjson-output', type=str)

  args, rest_args = parser.parse_known_args()

  env = os.environ
  cmd = [sys.executable] + rest_args
  cmd += ['--write-full-results-to', args.isolated_script_test_output]
  if args.xvfb:
    return xvfb.run_executable(cmd, env)
  else:
    return common.run_command(cmd, env=env)
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    # Remove the chartjson extra arg until this script cares about chartjson
    # results from telemetry
    index = 0
    for arg in rest_args:
        if ('--isolated-script-test-chartjson-output' in arg
                or '--isolated-script-test-perf-output' in arg):
            rest_args.pop(index)
            break
        index += 1

    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    env = os.environ.copy()
    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]
    cmd = [sys.executable] + rest_args + sharding_args + [
        '--write-full-results-to', args.isolated_script_test_output
    ]
    if args.xvfb:
        return xvfb.run_executable(cmd, env)
    else:
        return common.run_command(cmd, env=env)
Example #19
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  # Remove the chartjson extra arg until this script cares about chartjson
  # results from telemetry
  index = 0
  for arg in rest_args:
    if ('--isolated-script-test-chartjson-output' in arg or
        '--isolated-script-test-perf-output' in arg):
      rest_args.pop(index)
      break
    index += 1

  # Compatibility with gtest-based sharding.
  total_shards = None
  shard_index = None
  env = os.environ.copy()
  if 'GTEST_TOTAL_SHARDS' in env:
    total_shards = int(env['GTEST_TOTAL_SHARDS'])
    del env['GTEST_TOTAL_SHARDS']
  if 'GTEST_SHARD_INDEX' in env:
    shard_index = int(env['GTEST_SHARD_INDEX'])
    del env['GTEST_SHARD_INDEX']
  sharding_args = []
  if total_shards is not None and shard_index is not None:
    sharding_args = [
      '--total-shards=%d' % total_shards,
      '--shard-index=%d' % shard_index
    ]
  cmd = [sys.executable] + rest_args + sharding_args + [
      '--write-full-results-to', args.isolated_script_test_output]
  if args.xvfb:
    return xvfb.run_executable(cmd, env)
  else:
    return common.run_command(cmd, env=env)
Example #20
    def run_tests(args, tests, extra_flags, env, screenshot_dir):
        for test in tests['traces']:
            with common.temporary_file() as tempfile_path:
                cmd = [
                    args.test_suite,
                    DEFAULT_TEST_PREFIX + test,
                    '--render-test-output-dir=%s' % screenshot_dir,
                    '--one-frame-only',
                ] + extra_flags

                if args.xvfb:
                    rc = xvfb.run_executable(cmd,
                                             env,
                                             stdoutfile=tempfile_path)
                else:
                    rc = test_env.run_command_with_output(
                        cmd, env=env, stdoutfile=tempfile_path)

                pass_fail = 'PASS' if rc == 0 else 'FAIL'
                result_tests[test] = {'expected': 'PASS', 'actual': pass_fail}
                results['num_failures_by_type'][pass_fail] += 1

        return results['num_failures_by_type']['FAIL'] == 0
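
run_tests above relies on result_tests, results, and DEFAULT_TEST_PREFIX defined by the enclosing script; a minimal sketch of how those structures could be initialized before calling it, with the exact shape and prefix being assumptions:

# Assumed setup; the real script derives these from its trace test list.
DEFAULT_TEST_PREFIX = 'TraceTest.'  # placeholder prefix
result_tests = {}
results = {
    'tests': result_tests,
    'num_failures_by_type': {'PASS': 0, 'FAIL': 0},
}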
Example #21
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    env['CHROME_HEADLESS'] = '1'

    return_code = 0
    try:
        command = command_generator.generate(output_paths.benchmark_path)
        if use_xvfb:
            # When running with xvfb, we currently output both to stdout and to the
            # file. It would be better to only output to the file to keep the logs
            # clean.
            return_code = xvfb.run_executable(command,
                                              env,
                                              stdoutfile=output_paths.logs)
        else:
            with open(output_paths.logs, 'w') as handle:
                return_code = test_env.run_command_output_to_handle(command,
                                                                    handle,
                                                                    env=env)
        if not os.path.exists(output_paths.perf_results):
            # Get the correct json format from the stdout to write to the perf
            # results file if gtest does not generate one.
            results_processor = generate_legacy_perf_dashboard_json.\
                LegacyResultsProcessor()
            graph_json_string = results_processor.GenerateJsonResults(
                output_paths.logs)
            with open(output_paths.perf_results, 'w') as fh:
                fh.write(graph_json_string)
    except Exception:
        traceback.print_exc()
        return_code = 1
    write_legacy_test_results(return_code, output_paths.test_results)
    return return_code
Example #22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=argparse.FileType('w'),
                        required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output', required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    tempfile_dir = tempfile.mkdtemp('telemetry')
    valid = True
    num_failures = 0
    chartjson_results_present = '--output-format=chartjson' in rest_args
    chartresults = None
    json_test_results = None

    results = None
    rc = 1  # Set default returncode in case there is an exception.
    try:
        cmd = [sys.executable] + rest_args + [
            '--output-dir',
            tempfile_dir,
            '--output-format=json-test-results',
        ]
        if args.xvfb:
            rc = xvfb.run_executable(cmd, env)
        else:
            rc = common.run_command(cmd, env=env)

        # If we have also output chartjson read it in and return it.
        # results-chart.json is the file name output by telemetry when the
        # chartjson output format is included
        if chartjson_results_present:
            chart_tempfile_name = os.path.join(tempfile_dir,
                                               'results-chart.json')
            with open(chart_tempfile_name) as f:
                chartresults = json.load(f)

        # test-results.json is the file name output by telemetry when the
        # json-test-results format is included
        tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
        with open(tempfile_name) as f:
            json_test_results = json.load(f)
        num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
        valid = bool(rc == 0 or num_failures != 0)

    except Exception:
        traceback.print_exc()
        if results:
            print('results, which possibly caused exception: %s' %
                  json.dumps(results, indent=2))
        valid = False
    finally:
        shutil.rmtree(tempfile_dir)

    if not valid and num_failures == 0:
        if rc == 0:
            rc = 1  # Signal an abnormal exit.

    if chartjson_results_present and args.isolated_script_test_chartjson_output:
        with open(args.isolated_script_test_chartjson_output, 'w') as f:
            json.dump(chartresults, f)

    json.dump(json_test_results, args.isolated_script_test_output)
    return rc
Example #23
def main(argv):
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', type=str,
      required=False)
  parser.add_argument(
      '--isolated-script-test-perf-output', type=str,
      required=False)
  parser.add_argument(
      '--isolated-script-test-filter', type=str,
      required=False)
  parser.add_argument(
      '--platform', type=str, default=sys.platform, required=False)

  args = parser.parse_args(argv)

  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

  additional_args = []
  if args.platform == 'win32':
    exe = os.path.join('.', 'content_shell.exe')
  elif args.platform == 'darwin':
    exe = os.path.join('.', 'Content Shell.app', 'Contents', 'MacOS',
                       'Content Shell')
    # The Content Shell binary does not directly link against
    # the Content Shell Framework (it is loaded at runtime). Ensure that
    # symbols are dumped for the Framework too.
    additional_args = [
        '--additional-binary',
        os.path.join('.', 'Content Shell.app', 'Contents', 'Frameworks',
                     'Content Shell Framework.framework', 'Versions',
                     'Current', 'Content Shell Framework')
    ]
  elif args.platform == 'android':
    exe = os.path.join('.', 'lib.unstripped',
                       'libcontent_shell_content_view.so')
  else:
    exe = os.path.join('.', 'content_shell')

  with common.temporary_file() as tempfile_path:
    env['CHROME_HEADLESS'] = '1'
    rc = xvfb.run_executable([
        sys.executable,
        os.path.join(common.SRC_DIR, 'content', 'shell', 'tools',
                     'breakpad_integration_test.py'),
        '--verbose',
        '--build-dir', '.',
        '--binary', exe,
        '--json', tempfile_path,
        '--platform', args.platform,
    ] + additional_args, env)

    with open(tempfile_path) as f:
      failures = json.load(f)

  with open(args.isolated_script_test_output, 'w') as fp:
    json.dump({
        'valid': True,
        'failures': failures,
    }, fp)

  return rc
Example #24
def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False,
                            is_unittest=False):
  start = time.time()

  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  env['CHROME_HEADLESS'] = '1'
  # TODO(crbug/1138988): Some gtests do not implement unit_test_launcher.cc.
  # As a result, they will not respect the arguments added by
  # _generate_shard_args() and will still use the values of GTEST_SHARD_INDEX
  # and GTEST_TOTAL_SHARDS to run part of the tests.
  # Removing those environment variables as a workaround.
  if command_generator._ignore_shard_env_vars:
    if 'GTEST_TOTAL_SHARDS' in env:
      env.pop('GTEST_TOTAL_SHARDS')
    if 'GTEST_SHARD_INDEX' in env:
      env.pop('GTEST_SHARD_INDEX')

  return_code = 0
  try:
    command = command_generator.generate(output_paths.benchmark_path)
    if use_xvfb:
      # When running with xvfb, we currently output both to stdout and to the
      # file. It would be better to only output to the file to keep the logs
      # clean.
      return_code = xvfb.run_executable(
          command, env, stdoutfile=output_paths.logs)
    else:
      with open(output_paths.logs, 'w') as handle:
        try:
          return_code = test_env.run_command_output_to_handle(
              command, handle, env=env)
        except OSError as e:
          print('Command to run gtest perf test %s failed with an OSError: %s' %
                (output_paths.name, e))
          return_code = 1
    if (not os.path.exists(output_paths.perf_results) and
        os.path.exists(output_paths.logs)):
      # Get the correct json format from the stdout to write to the perf
      # results file if gtest does not generate one.
      results_processor = generate_legacy_perf_dashboard_json.\
          LegacyResultsProcessor()
      graph_json_string = results_processor.GenerateJsonResults(
          output_paths.logs)
      with open(output_paths.perf_results, 'w') as fh:
        fh.write(graph_json_string)
  except Exception:
    traceback.print_exc()
    return_code = 1
  if os.path.exists(output_paths.perf_results):
    if command_generator.executable_name in GTEST_CONVERSION_WHITELIST:
      with path_util.SysPath(path_util.GetTracingDir()):
        # pylint: disable=no-name-in-module
        from tracing.value import gtest_json_converter
        # pylint: enable=no-name-in-module
      gtest_json_converter.ConvertGtestJsonFile(output_paths.perf_results)
  else:
    print('ERROR: gtest perf test %s did not generate perf output' %
          output_paths.name)
    return_code = 1
  write_simple_test_results(return_code, output_paths.test_results,
                            output_paths.name)
  if not is_unittest:
    upload_simple_test_results(return_code, output_paths.name)

  print_duration(
      'executing gtest %s' % command_generator.executable_name, start)

  return return_code
Example #25
def run_benchmark(args, rest_args, histogram_results):
    """  Run benchmark with args.

  Args:
    args: the option object resulted from parsing commandline args required for
      IsolatedScriptTest contract (see
      https://cs.chromium.org/chromium/build/scripts/slave/recipe_modules/chromium_tests/steps.py?rcl=d31f256fb860701e6dc02544f2beffe4e17c9b92&l=1639).
    rest_args: the args (list of strings) for running Telemetry benchmark.
    histogram_results: a boolean describes whether to output histograms format
      for the benchmark.

  Returns: a tuple of (rc, perf_results, json_test_results, benchmark_log)
    rc: the return code of benchmark
    perf_results: json object contains the perf test results
    json_test_results: json object contains the Pass/Fail data of the benchmark.
    benchmark_log: string contains the stdout/stderr of the benchmark run.
  """
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    tempfile_dir = tempfile.mkdtemp('telemetry')
    benchmark_log = ''
    stdoutfile = os.path.join(tempfile_dir, 'benchmark_log.txt')
    valid = True
    num_failures = 0
    perf_results = None
    json_test_results = None

    results = None
    cmd_args = rest_args
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        cmd_args = cmd_args + ['--story-filter=' + filter_regex]
    rc = 1  # Set default returncode in case there is an exception.
    try:
        cmd = [sys.executable] + cmd_args + [
            '--output-dir',
            tempfile_dir,
            '--output-format=json-test-results',
        ]
        if args.xvfb:
            rc = xvfb.run_executable(cmd, env=env, stdoutfile=stdoutfile)
        else:
            rc = test_env.run_command_with_output(cmd,
                                                  env=env,
                                                  stdoutfile=stdoutfile)

        with open(stdoutfile) as f:
            benchmark_log = f.read()

        # If we have also output chartjson read it in and return it.
        # results-chart.json is the file name output by telemetry when the
        # chartjson output format is included
        tempfile_name = None
        if histogram_results:
            tempfile_name = os.path.join(tempfile_dir, 'histograms.json')
        else:
            tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')

        if tempfile_name is not None:
            with open(tempfile_name) as f:
                perf_results = json.load(f)

        # test-results.json is the file name output by telemetry when the
        # json-test-results format is included
        tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
        with open(tempfile_name) as f:
            json_test_results = json.load(f)
        num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
        valid = bool(rc == 0 or num_failures != 0)

    except Exception:
        traceback.print_exc()
        if results:
            print('results, which possibly caused exception: %s' %
                  json.dumps(results, indent=2))
        valid = False
    finally:
        shutil.rmtree(tempfile_dir)

    if not valid and num_failures == 0:
        if rc == 0:
            rc = 1  # Signal an abnormal exit.

    return rc, perf_results, json_test_results, benchmark_log
Example #26
def run_benchmark(args, rest_args):
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    tempfile_dir = tempfile.mkdtemp('telemetry')
    valid = True
    num_failures = 0
    histogram_results_present = 'histograms' in args.output_format
    chartjson_results_present = 'chartjson' in args.output_format
    perf_results = None
    json_test_results = None

    results = None
    cmd_args = rest_args
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        cmd_args = cmd_args + ['--story-filter=' + filter_regex]
    rc = 1  # Set default returncode in case there is an exception.
    try:
        cmd = [sys.executable] + cmd_args + [
            '--output-dir',
            tempfile_dir,
            '--output-format=json-test-results',
        ]
        if args.xvfb:
            rc = xvfb.run_executable(cmd, env)
        else:
            rc = common.run_command(cmd, env=env)

        # If we have also output chartjson read it in and return it.
        # results-chart.json is the file name output by telemetry when the
        # chartjson output format is included
        if histogram_results_present:
            tempfile_name = os.path.join(tempfile_dir, 'histograms.json')
        elif chartjson_results_present:
            tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')
        else:
            tempfile_name = None

        if tempfile_name is not None:
            with open(tempfile_name) as f:
                perf_results = json.load(f)

        # test-results.json is the file name output by telemetry when the
        # json-test-results format is included
        tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
        with open(tempfile_name) as f:
            json_test_results = json.load(f)
        num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
        valid = bool(rc == 0 or num_failures != 0)

    except Exception:
        traceback.print_exc()
        if results:
            print('results, which possibly caused exception: %s' %
                  json.dumps(results, indent=2))
        valid = False
    finally:
        shutil.rmtree(tempfile_dir)

    if not valid and num_failures == 0:
        if rc == 0:
            rc = 1  # Signal an abnormal exit.

    return rc, perf_results, json_test_results
Example #27
def execute_telemetry_benchmark_helper(args, rest_args, histogram_results):
    """Run benchmark with args.

  Args:
    args: the options object resulting from parsing the command-line args
      required for the IsolatedScriptTest contract (see
      https://cs.chromium.org/chromium/build/scripts/slave/recipe_modules/chromium_tests/steps.py?rcl=d31f256fb860701e6dc02544f2beffe4e17c9b92&l=1639).
    rest_args: the args (list of strings) for running the Telemetry benchmark.
    histogram_results: a boolean describing whether to output histogram format
      for the benchmark.

  Returns: a tuple of (rc, perf_results, json_test_results, benchmark_log)
    rc: the return code of the benchmark
    perf_results: a json object containing the perf test results
    json_test_results: a json object containing the Pass/Fail data of the benchmark.
    benchmark_log: a string containing the stdout/stderr of the benchmark run.
  """
    # TODO(crbug.com/920002): These arguments cannot go into
    # run_performance_tests.py because
    # run_gtest_perf_tests.py does not yet support them. Note that ideally
    # we would use common.BaseIsolatedScriptArgsAdapter, but this will take
    # a good deal of refactoring to accomplish.
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-repeat',
                        type=int,
                        required=False)
    parser.add_argument(
        '--isolated-script-test-launcher-retry-limit',
        type=int,
        required=False,
        # Telemetry does not support retries. crbug.com/894254#c21
        choices=[0])
    parser.add_argument('--isolated-script-test-also-run-disabled-tests',
                        default=False,
                        action='store_true',
                        required=False)
    # Parse leftover args not already parsed in run_performance_tests.py or in
    # main().
    args, rest_args = parser.parse_known_args(args=rest_args, namespace=args)

    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    tempfile_dir = tempfile.mkdtemp('telemetry')
    benchmark_log = ''
    stdoutfile = os.path.join(tempfile_dir, 'benchmark_log.txt')
    valid = True
    num_failures = 0
    perf_results = None
    json_test_results = None

    results = None
    cmd_args = rest_args
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        cmd_args.append('--story-filter=' + filter_regex)
    if args.isolated_script_test_repeat:
        cmd_args.append('--pageset-repeat=' +
                        str(args.isolated_script_test_repeat))
    if args.isolated_script_test_also_run_disabled_tests:
        cmd_args.append('--also-run-disabled-tests')
    cmd_args.append('--output-dir=' + tempfile_dir)
    cmd_args.append('--output-format=json-test-results')
    cmd = [sys.executable] + cmd_args
    rc = 1  # Set default returncode in case there is an exception.
    try:
        if args.xvfb:
            rc = xvfb.run_executable(cmd, env=env, stdoutfile=stdoutfile)
        else:
            rc = test_env.run_command_with_output(cmd,
                                                  env=env,
                                                  stdoutfile=stdoutfile)

        with open(stdoutfile) as f:
            benchmark_log = f.read()

        # If we have also output chartjson read it in and return it.
        # results-chart.json is the file name output by telemetry when the
        # chartjson output format is included
        tempfile_name = None
        if histogram_results:
            tempfile_name = os.path.join(tempfile_dir, 'histograms.json')
        else:
            tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')

        if tempfile_name is not None:
            with open(tempfile_name) as f:
                perf_results = json.load(f)

        # test-results.json is the file name output by telemetry when the
        # json-test-results format is included
        tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
        with open(tempfile_name) as f:
            json_test_results = json.load(f)
        num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
        valid = bool(rc == 0 or num_failures != 0)

    except Exception:
        traceback.print_exc()
        if results:
            print('results, which possibly caused exception: %s' %
                  json.dumps(results, indent=2))
        valid = False
    finally:
        # Add ignore_errors=True because otherwise rmtree may fail when leaky
        # test processes are still holding open handles to files under
        # |tempfile_dir|. For example, see crbug.com/865896.
        shutil.rmtree(tempfile_dir, ignore_errors=True)

    if not valid and num_failures == 0:
        if rc == 0:
            rc = 1  # Signal an abnormal exit.

    return rc, perf_results, json_test_results, benchmark_log
Example #28
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('executable', help='Test executable.')
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    args, extra_flags = parser.parse_known_args()

    env = os.environ.copy()

    # Environment values are strings, so format them with %s.
    if 'GTEST_TOTAL_SHARDS' in env:
        extra_flags += ['--shard-count=%s' % env['GTEST_TOTAL_SHARDS']]
    if 'GTEST_SHARD_INDEX' in env:
        extra_flags += ['--shard-index=%s' % env['GTEST_SHARD_INDEX']]

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        # Consider adding stdio control flags.
        if args.isolated_script_test_output:
            extra_flags.append('--results-file=%s' %
                               args.isolated_script_test_output)

        if args.isolated_script_test_perf_output:
            extra_flags.append('--histogram-json-file=%s' %
                               args.isolated_script_test_perf_output)

        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            args.executable = '.\\%s.exe' % args.executable
        else:
            args.executable = './%s' % args.executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [args.executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

    except Exception:
        traceback.print_exc()
        rc = 1

    return rc
Example #29
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    args, rest_args = parser.parse_known_args()

    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        executable = rest_args[0]
        extra_flags = []
        if len(rest_args) > 1:
            extra_flags = rest_args[1:]

        # These flags make sure that the test outputs perf metrics in the log.
        if '--verbose' not in extra_flags:
            extra_flags.append('--verbose')
        if '--test-launcher-print-test-stdio=always' not in extra_flags:
            extra_flags.append('--test-launcher-print-test-stdio=always')
        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            executable = '.\\%s.exe' % executable
        else:
            executable = './%s' % executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

            # Now get the correct json format from the stdout to write to the perf
            # results file
            results_processor = (
                generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
            charts = results_processor.GenerateJsonResults(tempfile_path)
            # TODO(eakuefner): Make isolated_script_test_perf_output mandatory after
            # flipping flag in swarming.
            if args.isolated_script_test_perf_output:
                filename = args.isolated_script_test_perf_output
            else:
                filename = args.isolated_script_test_chartjson_output
            # Write the returned encoded json to the charts output file.
            with open(filename, 'w') as f:
                f.write(charts)
    except Exception:
        traceback.print_exc()
        rc = 1

    valid = (rc == 0)
    failures = [] if valid else ['(entire test suite)']
    with open(args.isolated_script_test_output, 'w') as fp:
        json.dump({
            'valid': valid,
            'failures': failures,
        }, fp)

    return rc
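
Taken together, these wrappers share one skeleton: copy the environment, set CHROME_HEADLESS (plus the sandbox variable where relevant), then dispatch to xvfb.run_executable or a plain runner. A condensed sketch of that pattern, assuming the xvfb and test_env helpers from Chromium's testing/scripts are importable and that CHROME_SANDBOX_ENV and CHROME_SANDBOX_PATH are defined as in the examples above:

import os

def run_isolated_cmd(cmd, use_xvfb=False, stdoutfile=None):
    # Condensed sketch of the pattern shared by the examples above.
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'
    # Harmless on non-Linux platforms, needed all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if use_xvfb:
        # Wrap the command in a virtual X server.
        return xvfb.run_executable(cmd, env, stdoutfile=stdoutfile)
    # Run directly with the same environment.
    return test_env.run_command_with_output(cmd, env=env, stdoutfile=stdoutfile)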