Example #1
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--output', required=True)
    parser.add_argument('args', nargs=argparse.REMAINDER)

    args = parser.parse_args(argv)

    passthrough_args = args.args
    if passthrough_args[0] == '--':
        passthrough_args = passthrough_args[1:]

    results = {}

    for filename in os.listdir(common.SCRIPT_DIR):
        if not filename.endswith('.py'):
            continue
        if filename in ('common.py', 'get_compile_targets.py'):
            continue

        with common.temporary_file() as tempfile_path:
            rc = common.run_command(
                [sys.executable,
                 os.path.join(common.SCRIPT_DIR, filename)] +
                passthrough_args +
                ['compile_targets', '--output', tempfile_path])
            if rc != 0:
                return rc

            with open(tempfile_path) as f:
                results[filename] = json.load(f)

    with open(args.output, 'w') as f:
        json.dump(results, f)

    return 0
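
All of the examples in this collection revolve around common.temporary_file(), whose definition is not shown here. A minimal sketch of what such a helper presumably provides, assuming it only needs to yield a writable path and remove the file afterwards (the real common.py in Chromium's testing/scripts may differ in details):

import contextlib
import os
import tempfile


@contextlib.contextmanager
def temporary_file():
    """Yield the path of a fresh temporary file and delete it on exit."""
    fd, path = tempfile.mkstemp()
    os.close(fd)  # Callers reopen the file by path, so only the path is needed.
    try:
        yield path
    finally:
        if os.path.exists(path):
            os.remove(path)

Callers hand the yielded path to a child process as a --json or --write-full-results-to destination and read it back once the process exits, which is exactly the pattern Example #1 follows.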
Example #2
def main_run(args):
    filter_tests = []
    if args.filter_file:
        filter_tests = json.load(args.filter_file)

    test_args = ['--retry-limit', '3']
    if 'android' == args.properties.get('target_platform'):
        test_args += ['--browser', 'android-chromium', '--device', 'android']
    else:
        test_args += ['--browser', args.build_config_fs.lower()]

    with common.temporary_file() as tempfile_path:
        test_args += ['--write-full-results-to', tempfile_path]
        rc = common.run_runtest(args, [
            '--test-type', 'telemetry_perf_unittests', '--run-python-script',
            os.path.join(common.SRC_DIR, 'tools', 'perf', 'run_tests')
        ] + test_args + filter_tests)

        with open(tempfile_path) as f:
            results = json.load(f)

    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']

    json.dump(
        {
            'valid':
            bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                 ((rc == 0) or failures)),
            'failures':
            failures.keys(),
        }, args.output)

    return rc
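
Most main_run wrappers below finish by writing the same summary object to args.output. The valid flag distinguishes an untrustworthy run (the harness itself crashed, signalled by an exit code above common.MAX_FAILURES_EXIT_STATUS, or a non-zero code with no recorded failures) from an ordinary run with test failures. A hedged sketch of that logic factored into a helper; the helper name and the placeholder constant are illustrative, only the expression itself comes from the examples:

import json

MAX_FAILURES_EXIT_STATUS = 101  # Illustrative placeholder; use common.MAX_FAILURES_EXIT_STATUS.


def write_summary(rc, unexpected_failures, output_fp):
    """Write the {'valid': ..., 'failures': [...]} blob these scripts emit.

    rc == 0: clean pass.
    0 < rc <= MAX_FAILURES_EXIT_STATUS: tests failed; the run is only
        considered valid if concrete failure names were collected.
    rc > MAX_FAILURES_EXIT_STATUS: the harness itself blew up -> invalid.
    """
    valid = bool(rc <= MAX_FAILURES_EXIT_STATUS and ((rc == 0) or unexpected_failures))
    json.dump({
        'valid': valid,
        'failures': list(unexpected_failures),  # list() keeps dict views JSON-serializable.
    }, output_fp)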
Example #3
def _run_tests(args, tests, extra_flags, env, screenshot_dir, results,
               test_results):
    keys = get_skia_gold_keys(args)

    with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
        gold_properties = angle_skia_gold_properties.ANGLESkiaGoldProperties(
            args)
        gold_session_manager = angle_skia_gold_session_manager.ANGLESkiaGoldSessionManager(
            skia_gold_temp_dir, gold_properties)
        gold_session = gold_session_manager.GetSkiaGoldSession(keys)

        traces = [trace.split(' ')[0] for trace in tests]
        for test in traces:

            # Apply test filter if present.
            if args.isolated_script_test_filter:
                full_name = 'angle_restricted_trace_gold_tests.%s' % test
                if not fnmatch.fnmatch(full_name,
                                       args.isolated_script_test_filter):
                    logging.info(
                        'Skipping test %s because it does not match filter %s'
                        % (full_name, args.isolated_script_test_filter))
                    continue

            with common.temporary_file() as tempfile_path:
                cmd = [
                    args.test_suite,
                    DEFAULT_TEST_PREFIX + test,
                    '--render-test-output-dir=%s' % screenshot_dir,
                    '--one-frame-only',
                    '--verbose-logging',
                ] + extra_flags

                result = None
                for iteration in range(0, args.flaky_retries + 1):
                    if result != PASS:
                        if iteration > 0:
                            logging.info('Retrying flaky test: "%s"...' % test)
                        result = PASS if run_wrapper(
                            args, cmd, env, tempfile_path) == 0 else FAIL

                artifacts = {}

                if result == PASS:
                    result = upload_test_result_to_skia_gold(
                        args, gold_session_manager, gold_session,
                        gold_properties, screenshot_dir, test, artifacts)

                expected_result = SKIP if result == SKIP else PASS
                test_results[test] = {
                    'expected': expected_result,
                    'actual': result
                }
                if result == FAIL:
                    test_results[test]['is_unexpected'] = True
                if len(artifacts) > 0:
                    test_results[test]['artifacts'] = artifacts
                results['num_failures_by_type'][result] += 1

        return results['num_failures_by_type'][FAIL] == 0
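
The retry loop above runs each trace at most args.flaky_retries + 1 times and stops re-running once a pass is observed; the "if result != PASS" guard simply turns the remaining iterations into no-ops. The same idea as a standalone sketch, assuming only a callable that returns a process exit code and string PASS/FAIL constants as in the example:

def run_with_retries(run_once, flaky_retries, test_name, pass_value='PASS', fail_value='FAIL'):
    """Return pass_value if any of (1 + flaky_retries) attempts exits with 0."""
    result = fail_value
    for attempt in range(flaky_retries + 1):
        if result == pass_value:
            break  # Equivalent to the 'if result != PASS' guard above.
        if attempt > 0:
            print('Retrying flaky test: "%s"...' % test_name)
        result = pass_value if run_once() == 0 else fail_value
    return result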
Example #4
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            'vpython',
            os.path.join(common.SRC_DIR, 'testing', 'test_env.py'),
            os.path.join(common.SRC_DIR, 'tools', 'metrics',
                         'metrics_python_tests.py'),
            '--isolated-script-test-output',
            tempfile_path,
            '--skip-set-lpac-acls=1',
        ],
                                cwd=os.path.join(common.SRC_DIR, 'out',
                                                 args.build_config_fs))

        with open(tempfile_path) as f:
            isolated_results = json.load(f)

    results = common.parse_common_test_results(isolated_results,
                                               test_separator='.')

    failures = [
        '%s: %s' % (k, v) for k, v in results['unexpected_failures'].items()
    ]
    common.record_local_script_results('metrics_python_tests', args.output,
                                       failures, True)

    return rc
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--test-launcher-summary-output',
                        type=argparse.FileType('w'),
                        required=True)
    args, rest_args = parser.parse_known_args()
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([sys.executable] + rest_args + [
            '--write-full-results-to',
            tempfile_path,
        ])
        with open(tempfile_path) as f:
            results = json.load(f)
        parsed_results = common.parse_common_test_results(results,
                                                          test_separator='.')
        failures = parsed_results['unexpected_failures']

        json.dump(
            {
                'valid':
                bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                     ((rc == 0) or failures)),
                'failures':
                failures.keys(),
            }, args.test_launcher_summary_output)

    return rc
Example #6
def get_device_info(args, failures):
  """Parses the device info for each attached device, and returns a summary
  of the device info and any mismatches.

  Returns:
    A dict indicating the result.
  """
  if not is_linux():
    return {}

  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        sys.executable,
        os.path.join(args.paths['checkout'],
                     'third_party',
                     'catapult',
                     'devil',
                     'devil',
                     'android',
                     'tools',
                     'device_status.py'),
        '--json-output', tempfile_path,
        '--blacklist-file', os.path.join(
            args.paths['checkout'], 'out', 'bad_devices.json')])

    if rc:
      failures.append('device_status')
      return {}

    with open(tempfile_path, 'r') as src:
      device_info = json.load(src)

  results = {}
  results['devices'] = sorted(v['serial'] for v in device_info)

  details = [
      v['ro.build.fingerprint'] for v in device_info if not v['blacklisted']]

  def unique_build_details(index):
    return sorted(list(set([v.split(':')[index] for v in details])))

  parsed_details = {
    'device_names': unique_build_details(0),
    'build_versions': unique_build_details(1),
    'build_types': unique_build_details(2),
  }

  for k, v in parsed_details.iteritems():
    if len(v) == 1:
      results[k] = v[0]
    else:
      results[k] = 'MISMATCH'
      results['%s_list' % k] = v
      failures.append(k)

  for v in device_info:
    if v['blacklisted']:
      failures.append('Device %s blacklisted' % v['serial'])

  return results
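
The unique_build_details helper above relies on the standard ro.build.fingerprint layout, brand/product/device:release/id/incremental:type/tags, so splitting on ':' yields the three segments that get bucketed as device names, build versions, and build types. A small illustration with made-up fingerprints (real values come from the device_status.py output):

# Illustrative fingerprints only: two devices on the same build, one other model.
details = [
    'google/deviceA/deviceA:11/BUILD.A/100:userdebug/dev-keys',
    'google/deviceA/deviceA:11/BUILD.A/100:userdebug/dev-keys',
    'google/deviceB/deviceB:11/BUILD.A/100:userdebug/dev-keys',
]

def unique_build_details(index):
    return sorted(set(v.split(':')[index] for v in details))

print(unique_build_details(0))  # Two distinct device segments -> reported as 'MISMATCH'.
print(unique_build_details(2))  # One build type segment -> reported directly.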
Example #7
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            sys.executable,
            os.path.join(common.SRC_DIR, 'build', 'check_gn_headers.py'),
            '--out-dir',
            os.path.join(args.paths['checkout'], 'out', args.build_config_fs),
            '--whitelist',
            os.path.join(common.SRC_DIR, 'build',
                         'check_gn_headers_whitelist.txt'),
            '--json',
            tempfile_path,
            '--verbose',
        ],
                                cwd=common.SRC_DIR)

        with open(tempfile_path) as f:
            failures = json.load(f)

    json.dump({
        'valid': True,
        'failures': failures,
    }, args.output)

    return rc
Example #8
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            sys.executable,
            os.path.join(common.SRC_DIR, 'third_party', 'WebKit', 'Tools',
                         'Scripts', 'test-webkitpy'),
            '--write-full-results-to',
            tempfile_path,
        ],
                                cwd=args.paths['checkout'])

        with open(tempfile_path) as f:
            results = json.load(f)

    parsed_results = common.parse_common_test_results(results)
    failures = parsed_results['unexpected_failures']

    json.dump(
        {
            'valid':
            bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                 ((rc == 0) or failures)),
            'failures':
            failures.keys(),
        }, args.output)

    return rc
Example #9
def main_run(args):
    filter_tests = []
    if args.filter_file:
        filter_tests = json.load(args.filter_file)

    perf_id = args.properties.get("perf-id")
    script_args = args.args
    if IsWindows():
        script_args[0] += ".exe"
    test_suite = script_args[0]

    with common.temporary_file() as tempfile_path:
        gtest_args = [
            "--target",
            args.build_config_fs,
            "--annotate",
            "graphing",
            "--perf-id",
            perf_id,
            "--perf-dashboard-id",
            test_suite,
            "--results-url",
            args.properties.get("results-url"),
            "--slave-name",
            args.properties.get("slavename"),
            "--builder-name",
            args.properties.get("buildername"),
            "--build-number",
            str(args.properties.get("buildnumber")),
            "--log-processor-output-file",
            tempfile_path,
            "--test-type",
            test_suite,
        ]

        if "android" == args.properties.get("target_platform"):
            gtest_args.extend(
                [
                    "--no-xvfb",
                    "--run-python-script",
                    os.path.join(args.paths["checkout"], "build", "android", "test_runner.py"),
                    "gtest",
                    "--release",
                    "--suite",
                    test_suite,
                    "--verbose",
                ]
            )
        else:
            gtest_args.extend(["--xvfb"])
            gtest_args.extend(script_args)

        rc = common.run_runtest(args, gtest_args + filter_tests)

        with open(tempfile_path) as f:
            results = json.load(f)

    json.dump({"valid": bool(rc == 0), "failures": results["failed"]}, args.output)

    return rc
Example #10
def main_run(args):
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)

  test_args = ['--retry-limit', '3']
  if 'android' == args.properties.get('target_platform'):
    test_args += ['--browser', 'android-chrome-shell', '--device', 'android']
  else:
    test_args += ['--browser', args.build_config_fs.lower()]

  with common.temporary_file() as tempfile_path:
    test_args += ['--write-full-results-to', tempfile_path]
    rc = common.run_runtest(args, [
        '--test-type', 'telemetry_perf_unittests',
        '--run-python-script',
        os.path.join(common.SRC_DIR, 'tools', 'perf', 'run_tests')
    ] + test_args + filter_tests)

    with open(tempfile_path) as f:
      results = json.load(f)

  parsed_results = common.parse_common_test_results(results, test_separator='.')
  failures = parsed_results['unexpected_failures']

  json.dump({
      'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                   ((rc == 0) or failures)),
      'failures': failures.keys(),
  }, args.output)

  return rc
Example #11
def main_run(args):
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)

  script_args = args.args
  test_suite = script_args[0]

  with common.temporary_file() as tempfile_path:
    cmd = [
        os.path.join(
            args.paths['checkout'], 'build', 'android', 'test_runner.py'),
        'gtest',
        '--release' if 'release' in args.build_config_fs.lower() else '--debug',
        '--suite', test_suite,
        '--verbose',
        '--flakiness-dashboard-server=http://test-results.appspot.com',
        '--json-results-file', tempfile_path,
    ]
    if filter_tests:
      cmd.extend(['--gtest-filter', ':'.join(filter_tests)])

    rc = common.run_command(cmd)

    with open(tempfile_path) as f:
      results = json.load(f)

  parsed_results = common.parse_gtest_test_results(results)

  json.dump({
      'valid': True,
      'failures': parsed_results['failures'],
  }, args.output)

  return rc
Example #12
def main_run(args):
    filter_tests = []
    if args.filter_file:
        filter_tests = json.load(args.filter_file)

    perf_id = args.properties.get('perf-id')
    script_args = args.args
    test_suite = script_args[0]
    if IsWindows():
        script_args[0] += '.exe'

    with common.temporary_file() as tempfile_path:
        gtest_args = [
            '--target',
            args.build_config_fs,
            '--annotate',
            'graphing',
            '--perf-id',
            perf_id,
            '--perf-dashboard-id',
            test_suite,
            '--results-url',
            args.properties.get('results-url'),
            '--slave-name',
            args.properties.get('slavename'),
            '--builder-name',
            args.properties.get('buildername'),
            '--build-number',
            str(args.properties.get('buildnumber')),
            '--log-processor-output-file',
            tempfile_path,
            '--test-type',
            test_suite,
        ]

        if 'android' == args.properties.get('target_platform'):
            gtest_args.extend([
                '--no-xvfb',
                '--run-python-script',
                os.path.join(args.paths['checkout'], 'out',
                             args.build_config_fs, 'bin',
                             'run_%s' % test_suite),
                '--verbose',
            ])
            gtest_args.extend(script_args[1:])
        else:
            gtest_args.extend(['--xvfb'])
            gtest_args.extend(script_args)

        rc = common.run_runtest(args, gtest_args + filter_tests)

        with open(tempfile_path) as f:
            results = json.load(f)

    json.dump({
        'valid': bool(rc == 0),
        'failures': results['failed'],
    }, args.output)

    return rc
Example #13
def main_run(args):
    typ_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir,
                     'third_party', 'catapult', 'third_party', 'typ'))
    _AddToPathIfNeeded(typ_path)
    import typ

    top_level_dir = os.path.join(common.SRC_DIR, 'headless', 'lib', 'browser',
                                 'devtools_api')
    with common.temporary_file() as tempfile_path:
        rc = typ.main(argv=[],
                      top_level_dir=top_level_dir,
                      write_full_results_to=tempfile_path,
                      coverage_source=[top_level_dir])

        with open(tempfile_path) as f:
            results = json.load(f)

    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']

    json.dump(
        {
            'valid':
            bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                 ((rc == 0) or failures)),
            'failures':
            failures.keys(),
        }, args.output)

    return rc
Example #14
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--output", required=True)
    parser.add_argument("args", nargs=argparse.REMAINDER)

    args = parser.parse_args(argv)

    passthrough_args = args.args
    if passthrough_args[0] == "--":
        passthrough_args = passthrough_args[1:]

    results = {}

    for filename in os.listdir(common.SCRIPT_DIR):
        if not filename.endswith(".py"):
            continue
        if filename in ("common.py", "get_compile_targets.py"):
            continue

        with common.temporary_file() as tempfile_path:
            rc = common.run_command(
                [sys.executable, os.path.join(common.SCRIPT_DIR, filename)]
                + passthrough_args
                + ["compile_targets", "--output", tempfile_path]
            )
            if rc != 0:
                return rc

            with open(tempfile_path) as f:
                results[filename] = json.load(f)

    with open(args.output, "w") as f:
        json.dump(results, f)

    return 0
Example #15
def main_run(args):
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)

  with common.temporary_file() as tempfile_path:
    rc = common.run_runtest(args, [
        '--annotate', 'gtest',
        '--test-type', 'telemetry_unittests',
        '--run-python-script',
        os.path.join(common.SRC_DIR, 'tools', 'telemetry', 'run_tests'),
        '--browser', args.build_config_fs.lower(),
        '--retry-limit', '3',
        '--write-full-results-to', tempfile_path,
    ] + filter_tests)

    with open(tempfile_path) as f:
      results = json.load(f)

  parsed_results = common.parse_common_test_results(results, test_separator='.')
  failures = parsed_results['unexpected_failures']

  json.dump({
      'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                   ((rc == 0) or failures)),
      'failures': failures.keys(),
  }, args.output)

  return rc
Example #16
def main_run(args):
    runner = os.path.join(common.SRC_DIR, 'mojo', 'tools', 'apptest_runner.py')
    build_dir = os.path.join(common.SRC_DIR, 'out', args.build_config_fs)

    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            runner, build_dir, '--verbose', '--write-full-results-to',
            tempfile_path
        ])
        with open(tempfile_path) as f:
            results = json.load(f)

    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']

    json.dump(
        {
            'valid':
            bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                 ((rc == 0) or failures)),
            'failures':
            failures.keys(),
        }, args.output)

    return rc
Example #17
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--isolated-script-test-output', type=str)
  args, _ = parser.parse_known_args()

  if sys.platform == 'win32':
    exe = os.path.join('.', 'flatbuffers_unittests.exe')
  else:
    exe = os.path.join('.', 'flatbuffers_unittests')

  env = os.environ.copy()
  failures = []
  with common.temporary_file() as tempfile_path:
    rc = xvfb.run_executable([exe], env, stdoutfile=tempfile_path)

    # The flatbuffer tests do not really conform to anything parsable, except
    # that they will succeed with "ALL TESTS PASSED".
    with open(tempfile_path) as f:
      output = f.read()
      if output != "ALL TESTS PASSED\n":
        failures = [output]

  if args.isolated_script_test_output:
    with open(args.isolated_script_test_output, 'w') as fp:
      json.dump({'valid': True,'failures': failures}, fp)

  return rc
Example #18
def main_run(args):
    filter_tests = []
    if args.filter_file:
        filter_tests = json.load(args.filter_file)

    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            sys.executable,
            os.path.join(common.SRC_DIR, 'chrome', 'test',
                         'nacl_test_injection',
                         'buildbot_nacl_integration.py'),
            '--mode',
            args.build_config_fs,
            '--json_build_results_output_file',
            tempfile_path,
        ] + filter_tests)

        with open(tempfile_path) as f:
            results = json.load(f)

    json.dump({
        'valid': True,
        'failures': [f['raw_name'] for f in results],
    }, args.output)

    return rc
Example #19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--isolated-script-test-output", type=argparse.FileType("w"), required=True)
    parser.add_argument("--xvfb", help="Start xvfb.", action="store_true")
    args, rest_args = parser.parse_known_args()
    xvfb_proc = None
    openbox_proc = None
    env = os.environ.copy()
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc = xvfb.start_xvfb(env=env, build_dir=".")
        assert xvfb_proc and openbox_proc, "Failed to start xvfb"
    try:
        with common.temporary_file() as tempfile_path:
            rc = common.run_command([sys.executable] + rest_args + ["--write-full-results-to", tempfile_path], env=env)
            with open(tempfile_path) as f:
                results = json.load(f)
            parsed_results = common.parse_common_test_results(results, test_separator=".")
            failures = parsed_results["unexpected_failures"]

            json.dump(
                {
                    "valid": bool(rc <= common.MAX_FAILURES_EXIT_STATUS and ((rc == 0) or failures)),
                    "failures": failures.keys(),
                },
                args.isolated_script_test_output,
            )

        return rc
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
Example #20
def main_run(args):
  typ_path = os.path.abspath(os.path.join(
      os.path.dirname(__file__), os.path.pardir, os.path.pardir,
      'third_party', 'typ'))
  _AddToPathIfNeeded(typ_path)
  import typ

  top_level_dir = os.path.join(
      common.SRC_DIR, 'headless', 'lib', 'browser', 'devtools_api')
  with common.temporary_file() as tempfile_path:
    rc = typ.main(
        argv=[],
        top_level_dir=top_level_dir,
        write_full_results_to=tempfile_path,
        coverage_source=[top_level_dir])

    with open(tempfile_path) as f:
      results = json.load(f)

  parsed_results = common.parse_common_test_results(results, test_separator='.')
  failures = parsed_results['unexpected_failures']

  json.dump({
      'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                   ((rc == 0) or failures)),
      'failures': failures.keys(),
  }, args.output)

  return rc
Example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('executable', help='Test executable.')
    parser.add_argument('--isolated-script-test-output', type=str)
    parser.add_argument('--isolated-script-test-filter', type=str)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    # Kept for compatibility.
    # TODO(jmadill): Remove when removed from the recipes. http://crbug.com/954415
    parser.add_argument('--isolated-script-test-perf-output', type=str)

    args, extra_flags = parser.parse_known_args()

    env = os.environ.copy()

    if 'GTEST_TOTAL_SHARDS' in env:
        extra_flags += ['--shard-count=' + env['GTEST_TOTAL_SHARDS']]
        env.pop('GTEST_TOTAL_SHARDS')
    if 'GTEST_SHARD_INDEX' in env:
        extra_flags += ['--shard-index=' + env['GTEST_SHARD_INDEX']]
        env.pop('GTEST_SHARD_INDEX')

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        # Consider adding stdio control flags.
        if args.isolated_script_test_output:
            extra_flags.append('--isolated-script-test-output=%s' %
                               args.isolated_script_test_output)

        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            args.executable = '.\\%s.exe' % args.executable
        else:
            args.executable = './%s' % args.executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [args.executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

    except Exception:
        traceback.print_exc()
        rc = 1

    return rc
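
Example #21 above maps the gtest sharding environment (GTEST_TOTAL_SHARDS, GTEST_SHARD_INDEX) onto explicit --shard-count/--shard-index flags and then removes the variables, presumably so the test binary does not additionally shard itself on the environment. A minimal sketch of that translation; note that Example #25 below only emits its flags when both values are present, while the wrapper above forwards each one independently, so this sketch takes the stricter form:

import os


def sharding_flags(env, count_flag='--shard-count', index_flag='--shard-index'):
    """Pop gtest sharding variables from env and return equivalent flags."""
    total = env.pop('GTEST_TOTAL_SHARDS', None)
    index = env.pop('GTEST_SHARD_INDEX', None)
    if total is None or index is None:
        return []  # Not running under a sharded swarming task.
    return ['%s=%s' % (count_flag, total), '%s=%s' % (index_flag, index)]


env = os.environ.copy()
extra_flags = sharding_flags(env)  # Append to the test command line.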
Example #22
def execute_gtest_perf_test(args, rest_args):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    charts = None  # Ensure 'charts' is defined even if the run fails before results are parsed.
    try:
        executable = rest_args[0]
        extra_flags = []
        if len(rest_args) > 1:
            extra_flags = rest_args[1:]

        # These flags make sure that the test outputs perf metrics in the log.
        if '--verbose' not in extra_flags:
            extra_flags.append('--verbose')
        if '--test-launcher-print-test-stdio=always' not in extra_flags:
            extra_flags.append('--test-launcher-print-test-stdio=always')
        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            executable = '.\\%s.exe' % executable
        else:
            executable = './%s' % executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

            # Now get the correct json format from the stdout to write to the perf
            # results file
            results_processor = (
                generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
            charts = results_processor.GenerateJsonResults(tempfile_path)
    except Exception:
        traceback.print_exc()
        rc = 1

    valid = (rc == 0)
    failures = [] if valid else ['(entire test suite)']
    output_json = {
        'valid': valid,
        'failures': failures,
    }
    return rc, charts, output_json
Example #23
def _run_and_get_output(args, cmd, env):
    lines = []
    logging.debug(' '.join(cmd))
    with common.temporary_file() as tempfile_path:
        if args.xvfb:
            exit_code = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
        else:
            exit_code = run_command_with_output(cmd, env=env, stdoutfile=tempfile_path, log=True)
        with open(tempfile_path) as f:
            for line in f:
                lines.append(line.strip())
    return exit_code, lines
Example #24
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_runtest(args, [
            '--test-type', 'sizes', '--run-python-script',
            os.path.join(common.SRC_DIR, 'infra', 'scripts', 'legacy',
                         'scripts', 'slave', 'chromium', 'sizes.py'), '--json',
            tempfile_path
        ])
        with open(tempfile_path) as f:
            results = json.load(f)

    with open(
            os.path.join(common.SRC_DIR, 'tools', 'perf_expectations',
                         'perf_expectations.json')) as f:
        perf_expectations = json.load(f)

    prefix = args.args[0]

    valid = (rc == 0)
    failures = []

    for name, result in results.iteritems():
        fqtn = '%s/%s/%s' % (prefix, name, result['identifier'])
        if fqtn not in perf_expectations:
            continue

        if perf_expectations[fqtn]['type'] != 'absolute':
            print 'ERROR: perf expectation %r is not yet supported' % fqtn
            valid = False
            continue

        actual = result['value']
        expected = perf_expectations[fqtn]['regress']
        better = perf_expectations[fqtn]['better']
        check_result = ((actual <= expected) if better == 'lower' else
                        (actual >= expected))

        if not check_result:
            failures.append(fqtn)
            print 'FAILED %s: actual %s, expected %s, better %s' % (
                fqtn, actual, expected, better)

    json.dump({
        'valid': valid,
        'failures': failures,
    }, args.output)

    # sizes.py itself doesn't fail on regressions.
    if failures and rc == 0:
        rc = 1

    return rc
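
In the sizes check above, 'regress' acts as a threshold in the direction named by 'better': when lower is better the measured value must stay at or below the threshold, otherwise it must stay at or above it. A small walk-through with made-up numbers (the key and values are illustrative, not taken from a real perf_expectations.json):

perf_expectations = {
    'linux-release/chrome-si/initial_vm': {
        'type': 'absolute', 'better': 'lower', 'regress': 1000,
    },
}

def within_expectation(fqtn, actual):
    exp = perf_expectations[fqtn]
    if exp['better'] == 'lower':
        return actual <= exp['regress']   # Growing past the threshold is a regression.
    return actual >= exp['regress']       # Shrinking below the threshold is a regression.

print(within_expectation('linux-release/chrome-si/initial_vm', 990))   # True: within budget.
print(within_expectation('linux-release/chrome-si/initial_vm', 1010))  # False: would be appended to failures.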
Example #25
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=argparse.FileType('w'),
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  # Compatibility with gtest-based sharding.
  total_shards = None
  shard_index = None
  if 'GTEST_TOTAL_SHARDS' in env:
    total_shards = int(env['GTEST_TOTAL_SHARDS'])
    del env['GTEST_TOTAL_SHARDS']
  if 'GTEST_SHARD_INDEX' in env:
    shard_index = int(env['GTEST_SHARD_INDEX'])
    del env['GTEST_SHARD_INDEX']
  sharding_args = []
  if total_shards is not None and shard_index is not None:
    sharding_args = [
      '--total-shards=%d' % total_shards,
      '--shard-index=%d' % shard_index
    ]
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command([sys.executable] + rest_args + sharding_args + [
        '--write-full-results-to', tempfile_path,
      ], env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator='.')
      failures = parsed_results['unexpected_failures']

      json.dump({
          'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                        ((rc == 0) or failures)),
          'failures': failures.keys(),
      }, args.isolated_script_test_output)

    return rc
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
Example #26
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_runtest(args, [
        '--test-type', 'sizes',
        '--run-python-script',
        os.path.join(
            common.SRC_DIR, 'infra', 'scripts', 'legacy', 'scripts', 'slave',
            'chromium', 'sizes.py'),
        '--json', tempfile_path])
    with open(tempfile_path) as f:
      results = json.load(f)

  with open(os.path.join(common.SRC_DIR, 'tools', 'perf_expectations',
                         'perf_expectations.json')) as f:
    perf_expectations = json.load(f)

  prefix = args.args[0]

  valid = (rc == 0)
  failures = []

  for name, result in results.iteritems():
    fqtn = '%s/%s/%s' % (prefix, name, result['identifier'])
    if fqtn not in perf_expectations:
      continue

    if perf_expectations[fqtn]['type'] != 'absolute':
      print 'ERROR: perf expectation %r is not yet supported' % fqtn
      valid = False
      continue

    actual = result['value']
    expected = perf_expectations[fqtn]['regress']
    better = perf_expectations[fqtn]['better']
    check_result = ((actual <= expected) if better == 'lower'
                    else (actual >= expected))

    if not check_result:
      failures.append(fqtn)
      print 'FAILED %s: actual %s, expected %s, better %s' % (
          fqtn, actual, expected, better)

  json.dump({
      'valid': valid,
      'failures': failures,
  }, args.output)

  # sizes.py itself doesn't fail on regressions.
  if failures and rc == 0:
    rc = 1

  return rc
Example #27
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)

    args = parser.parse_args(argv)

    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    if sys.platform == 'win32':
        exe = os.path.join('.', 'content_shell.exe')
    elif sys.platform == 'darwin':
        exe = os.path.join('.', 'Content Shell.app', 'Contents', 'MacOS',
                           'Content Shell')
    else:
        exe = os.path.join('.', 'content_shell')

    with common.temporary_file() as tempfile_path:
        env['CHROME_HEADLESS'] = '1'
        rc = xvfb.run_executable([
            sys.executable,
            os.path.join(common.SRC_DIR, 'content', 'shell', 'tools',
                         'breakpad_integration_test.py'), '--verbose',
            '--build-dir', '.', '--binary', exe, '--json', tempfile_path
        ], env)

        with open(tempfile_path) as f:
            failures = json.load(f)

    with open(args.isolated_script_test_output, 'w') as fp:
        json.dump({
            'valid': True,
            'failures': failures,
        }, fp)

    return rc
Example #28
def main_run(args):
    if not sys.platform.startswith('win'):
        json.dump(
            {
                'valid': False,
                'failures': ['This script should only be called on win32.'],
            }, args.output)

    with common.temporary_file() as tempfile_path:
        syzyasan_integration_test_res = common.run_integration_test(
            SYZYASAN_INTEGRATION_TEST, ['--log-to-json', tempfile_path],
            tempfile_path, args.output)

    return syzyasan_integration_test_res
Example #29
def main_run(args):
  if not sys.platform.startswith('win'):
    json.dump({
        'valid': False,
        'failures': ['This script should only be called on win32.'],
    }, args.output)

  with common.temporary_file() as tempfile_path:
    kasko_integration_test_res = common.run_integration_test(
        KASKO_INTEGRATION_TEST,
        ['--log-to-json', tempfile_path],
        tempfile_path, args.output)

  return kasko_integration_test_res
Example #30
def main_run(args):
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)

  perf_id = args.properties.get('perf-id')
  script_args = args.args
  test_suite = script_args[0]
  if IsWindows():
    script_args[0] += '.exe'

  with common.temporary_file() as tempfile_path:
    gtest_args = [
          '--target', args.build_config_fs,
          '--annotate', 'graphing',
          '--perf-id', perf_id,
          '--perf-dashboard-id', test_suite,
          '--results-url', args.properties.get('results-url'),
          '--slave-name', args.properties.get('slavename'),
          '--builder-name', args.properties.get('buildername'),
          '--build-number', str(args.properties.get('buildnumber')),
          '--log-processor-output-file', tempfile_path,
          '--test-type', test_suite,
    ]

    if 'android' == args.properties.get('target_platform'):
      gtest_args.extend([
          '--no-xvfb',
          '--run-python-script', os.path.join(
              args.paths['checkout'], 'out', args.build_config_fs, 'bin',
              'run_%s' % test_suite),
          '--verbose',
      ])
      gtest_args.extend(script_args[1:])
    else:
      gtest_args.extend(['--xvfb'])
      gtest_args.extend(script_args)

    rc = common.run_runtest(args, gtest_args + filter_tests)

    with open(tempfile_path) as f:
      results = json.load(f)

  json.dump({
      'valid': bool(rc == 0),
      'failures': results['failed'],
  }, args.output)

  return rc
Example #31
def main(argv):
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', type=str,
      required=True)
  parser.add_argument(
      '--isolated-script-test-perf-output', type=str,
      required=False)

  args = parser.parse_args(argv)

  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

  if sys.platform == 'win32':
    exe = os.path.join('.', 'content_shell.exe')
  elif sys.platform == 'darwin':
    exe = os.path.join('.', 'Content Shell.app', 'Contents', 'MacOS',
                       'Content Shell')
  else:
    exe = os.path.join('.', 'content_shell')

  with common.temporary_file() as tempfile_path:
    rc = xvfb.run_executable([
        sys.executable,
        os.path.join(common.SRC_DIR, 'content', 'shell', 'tools',
                     'breakpad_integration_test.py'),
        '--verbose',
        '--build-dir', '.',
        '--binary', exe,
        '--json', tempfile_path
    ], env)

    with open(tempfile_path) as f:
      failures = json.load(f)

  with open(args.isolated_script_test_output, 'w') as fp:
    json.dump({
        'valid': True,
        'failures': failures,
    }, fp)

  return rc
Example #32
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            sys.executable,
            os.path.join(common.SRC_DIR, 'third_party', 'blink', 'tools',
                         'lint_test_expectations.py'), '--json', tempfile_path
        ])

        with open(tempfile_path) as f:
            failures = json.load(f)

    common.record_local_script_results('blink_lint_expectations', args.output,
                                       failures, True)

    return rc
Example #33
def _run_and_get_output(args, cmd, env):
    lines = []
    with common.temporary_file() as tempfile_path:
        if args.xvfb:
            ret = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
        else:
            ret = test_env.run_command_with_output(cmd,
                                                   env=env,
                                                   stdoutfile=tempfile_path)
        if ret:
            logging.error('Error running test suite.')
            return None
        with open(tempfile_path) as f:
            for line in f:
                lines.append(line.strip())
    return lines
Example #34
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            os.path.join(common.SRC_DIR, 'android_webview', 'tools',
                         'webview_licenses.py'), 'scan', '--json',
            tempfile_path
        ])

        with open(tempfile_path) as f:
            results = json.load(f)

    json.dump({
        'valid': True,
        'failures': results,
    }, args.output)

    return rc
Example #35
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            sys.executable,
            os.path.join(common.SRC_DIR, 'third_party', 'blink', 'tools',
                         'lint_test_expectations.py'), '--json', tempfile_path
        ])

        with open(tempfile_path) as f:
            failures = json.load(f)

    json.dump({
        'valid': True,
        'failures': failures,
    }, args.output)

    return rc
Example #36
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command(
            [
                sys.executable,
                os.path.join(common.SRC_DIR, "third_party", "WebKit", "Tools", "Scripts", "lint-test-expectations"),
                "--json",
                tempfile_path,
            ]
        )

        with open(tempfile_path) as f:
            failures = json.load(f)

    json.dump({"valid": True, "failures": failures}, args.output)

    return rc
Example #37
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        os.path.join(common.SRC_DIR, 'android_webview', 'tools',
                     'webview_licenses.py'),
        'scan',
        '--json', tempfile_path
    ])

    with open(tempfile_path) as f:
      results = json.load(f)

  json.dump({
      'valid': True,
      'failures': results,
  }, args.output)

  return rc
Example #38
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        sys.executable,
        os.path.join(common.SRC_DIR, 'tools', 'checkbins', 'checkbins.py'),
        '--verbose',
        '--json', tempfile_path,
        os.path.join(args.paths['checkout'], 'out', args.build_config_fs),
    ])

    with open(tempfile_path) as f:
      checkbins_results = json.load(f)

  json.dump({
      'valid': True,
      'failures': checkbins_results,
  }, args.output)

  return rc
Example #39
def main_run(args):
    print(sys.executable)
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            with_python3(),
            os.path.join(common.SRC_DIR, 'tools', 'checkbins', 'checkbins.py'),
            '--verbose',
            '--json',
            tempfile_path,
            os.path.join(args.paths['checkout'], 'out', args.build_config_fs),
        ])

        with open(tempfile_path) as f:
            checkbins_results = json.load(f)

    common.record_local_script_results('checkbins', args.output,
                                       checkbins_results, True)

    return rc
Example #40
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        os.path.join(common.SRC_DIR, 'tools', 'checkperms', 'checkperms.py'),
        '--root', args.paths['checkout'],
        '--json', tempfile_path
    ])

    with open(tempfile_path) as f:
      checkperms_results = json.load(f)

  result_set = set()
  for result in checkperms_results:
    result_set.add((result['rel_path'], result['error']))

  failures = ['%s: %s' % (r[0], r[1]) for r in result_set]
  common.record_local_script_results(
      'checkperms', args.output, failures, True)

  return rc
Example #41
def main_run(args):
  runner = os.path.join(common.SRC_DIR, 'mojo', 'tools', 'apptest_runner.py')
  build_dir = os.path.join(common.SRC_DIR, 'out', args.build_config_fs)

  with common.temporary_file() as tempfile_path:
    rc = common.run_command([runner, build_dir, '--verbose',
                             '--write-full-results-to', tempfile_path])
    with open(tempfile_path) as f:
      results = json.load(f)

  parsed_results = common.parse_common_test_results(results, test_separator='.')
  failures = parsed_results['unexpected_failures']

  json.dump({
      'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                   ((rc == 0) or failures)),
      'failures': failures.keys(),
  }, args.output)

  return rc
Example #42
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        os.path.join(common.SRC_DIR, 'buildtools', 'checkdeps', 'checkdeps.py'),
        '--json', tempfile_path
    ])

    with open(tempfile_path) as f:
      checkdeps_results = json.load(f)

  result_set = set()
  for result in checkdeps_results:
    for violation in result['violations']:
      result_set.add((result['dependee_path'], violation['include_path']))

  json.dump({
      'valid': True,
      'failures': ['%s: %s' % (r[0], r[1]) for r in result_set],
  }, args.output)

  return rc
Example #43
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        os.path.join(common.SRC_DIR, 'tools', 'checkperms', 'checkperms.py'),
        '--root', args.paths['checkout'],
        '--json', tempfile_path
    ])

    with open(tempfile_path) as f:
      checkperms_results = json.load(f)

  result_set = set()
  for result in checkperms_results:
    result_set.add((result['rel_path'], result['error']))

  json.dump({
      'valid': True,
      'failures': ['%s: %s' % (r[0], r[1]) for r in result_set],
  }, args.output)

  return rc
Example #44
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_command([
            os.path.join(common.SRC_DIR, 'buildtools', 'checkdeps',
                         'checkdeps.py'), '--json', tempfile_path
        ])

        with open(tempfile_path) as f:
            checkdeps_results = json.load(f)

    result_set = set()
    for result in checkdeps_results:
        for violation in result['violations']:
            result_set.add(
                (result['dependee_path'], violation['include_path']))

    failures = ['%s: %s' % (r[0], r[1]) for r in result_set]
    common.record_local_script_results('checkdeps', args.output, failures,
                                       True)

    return rc
Example #45
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        os.path.join(common.SRC_DIR, 'tools', 'checkperms', 'checkperms.py'),
        '--root', args.paths['checkout'],
        '--json', tempfile_path
    ])

    with open(tempfile_path) as f:
      checkperms_results = json.load(f)

  result_set = set()
  for result in checkperms_results:
    result_set.add((result['rel_path'], result['error']))

  json.dump({
      'valid': True,
      'failures': ['%s: %s' % (r[0], r[1]) for r in result_set],
  }, args.output)

  return rc
Example #46
    def run_tests(args, tests, extra_flags, env, screenshot_dir):
        keys = get_skia_gold_keys(args)

        with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
            gold_properties = angle_skia_gold_properties.ANGLESkiaGoldProperties(
                args)
            gold_session_manager = angle_skia_gold_session_manager.ANGLESkiaGoldSessionManager(
                skia_gold_temp_dir, gold_properties)
            gold_session = gold_session_manager.GetSkiaGoldSession(keys)

            for test in tests['traces']:
                with common.temporary_file() as tempfile_path:
                    cmd = [
                        args.test_suite,
                        DEFAULT_TEST_PREFIX + test,
                        '--render-test-output-dir=%s' % screenshot_dir,
                        '--one-frame-only',
                        '--verbose-logging',
                    ] + extra_flags

                    result = PASS if run_wrapper(args, cmd, env,
                                                 tempfile_path) == 0 else FAIL

                    artifacts = {}

                    if result == PASS:
                        result = upload_test_result_to_skia_gold(
                            args, gold_session_manager, gold_session,
                            gold_properties, screenshot_dir, test, artifacts)

                    expected_result = SKIP if result == SKIP else PASS
                    result_tests[test] = {
                        'expected': expected_result,
                        'actual': result
                    }
                    if len(artifacts) > 0:
                        result_tests[test]['artifacts'] = artifacts
                    results['num_failures_by_type'][result] += 1

            return results['num_failures_by_type'][FAIL] == 0
Example #47
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        sys.executable,
        os.path.join(common.SRC_DIR, 'third_party', 'WebKit',
                     'Tools', 'Scripts', 'test-webkitpy'),
        '--write-full-results-to', tempfile_path,
    ], cwd=args.paths['checkout'])

    with open(tempfile_path) as f:
      results = json.load(f)

  parsed_results = common.parse_common_test_results(results)
  failures = parsed_results['unexpected_failures']

  json.dump({
      'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                   ((rc == 0) or failures)),
      'failures': failures.keys(),
  }, args.output)

  return rc
Example #48
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([
        sys.executable,
        os.path.join(common.SRC_DIR, 'build', 'check_gn_headers.py'),
        '--out-dir',
        os.path.join(args.paths['checkout'], 'out', args.build_config_fs),
        '--whitelist',
        os.path.join(common.SRC_DIR, 'build', 'check_gn_headers_whitelist.txt'),
        '--json', tempfile_path,
        '--verbose',
    ], cwd=common.SRC_DIR)

    with open(tempfile_path) as f:
      failures = json.load(f)

  json.dump({
      'valid': True,
      'failures': failures,
  }, args.output)

  return rc
Example #49
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=argparse.FileType('w'),
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command([sys.executable] + rest_args + [
        '--write-full-results-to', tempfile_path,
      ], env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator='.')
      failures = parsed_results['unexpected_failures']

      json.dump({
          'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                        ((rc == 0) or failures)),
          'failures': failures.keys(),
      }, args.isolated_script_test_output)

    return rc
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
Example #50
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
    '--isolated-script-test-output',
    type=argparse.FileType('w'),
    required=True)
  args, rest_args = parser.parse_known_args()
  with common.temporary_file() as tempfile_path:
    rc = common.run_command([sys.executable] + rest_args + [
      '--write-full-results-to', tempfile_path,
    ])
    with open(tempfile_path) as f:
      results = json.load(f)
    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']

    json.dump({
        'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                      ((rc == 0) or failures)),
        'failures': failures.keys(),
    }, args.isolated_script_test_output)

  return rc
Example #51
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', type=str,
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

  args, rest_args = parser.parse_known_args()

  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'

  try:
    valid = True
    rc = 0
    try:
      executable = rest_args[0]
      if IsWindows():
        executable = '.\\%s.exe' % executable
      else:
        executable = './%s' % executable
      with common.temporary_file() as tempfile_path:
        valid = (common.run_command_with_output([executable],
            env=env, stdoutfile=tempfile_path) == 0)

        # Now get the correct json format from the stdout to write to the
        # perf results file
        results_processor = (
            generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
        charts = results_processor.GenerateJsonResults(tempfile_path)
        # Write the returned encoded json to the charts output file.
        with open(args.isolated_script_test_chartjson_output, 'w') as f:
          f.write(charts)
    except Exception:
      traceback.print_exc()
      valid = False

    failures = [] if valid else ['(entire test suite)']
    with open(args.isolated_script_test_output, 'w') as fp:
      json.dump({
          'valid': valid,
          'failures': failures,
      }, fp)

    return rc

  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)