Example #1
def run_server(port):
    app = tornado.web.Application([
        (r"/ping", PingHandler),
        (r"/", ScrapeHandler),
        (r"/scrape", ScrapeHandler),
    ])
    app.listen(port)
    xvfb.start_xvfb()
    tornado.ioloop.IOLoop.current().start()
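
A minimal sketch of how run_server might be invoked from the command line; the --port flag, its default, and the __main__ guard are illustrative assumptions, not part of the original snippet.

if __name__ == '__main__':
    cli = argparse.ArgumentParser()
    cli.add_argument('--port', type=int, default=8001)  # hypothetical flag
    run_server(cli.parse_args().port)
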
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  # Compatibility with gtest-based sharding.
  total_shards = None
  shard_index = None
  if 'GTEST_TOTAL_SHARDS' in env:
    total_shards = int(env['GTEST_TOTAL_SHARDS'])
    del env['GTEST_TOTAL_SHARDS']
  if 'GTEST_SHARD_INDEX' in env:
    shard_index = int(env['GTEST_SHARD_INDEX'])
    del env['GTEST_SHARD_INDEX']
  sharding_args = []
  if total_shards is not None and shard_index is not None:
    sharding_args = [
      '--total-shards=%d' % total_shards,
      '--shard-index=%d' % shard_index
    ]
  try:
    valid = True
    rc = 0
    try:
      rc = common.run_command([sys.executable] + rest_args + sharding_args + [
        '--write-abbreviated-json-results-to', args.isolated_script_test_output,
      ], env=env)
      valid = (rc == 0)
    except Exception:
      traceback.print_exc()
      valid = False

    if not valid:
      failures = ['(entire test suite)']
      with open(args.isolated_script_test_output, 'w') as fp:
        json.dump({
            'valid': valid,
            'failures': failures,
        }, fp)

    return rc

  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
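
Every runner in this section hand-writes the same start_xvfb()/kill() bracketing. A hypothetical context-manager sketch of that pattern, assuming the three-process xvfb API used above (and that xvfb.kill() tolerates None, as the finally blocks here rely on):

from contextlib import contextmanager

@contextmanager
def xvfb_session(env, build_dir='.'):
    # Hypothetical helper, not part of the original scripts: brackets the
    # start_xvfb()/kill() pattern so callers get cleanup even on exceptions.
    xvfb_proc = openbox_proc = xcompmgr_proc = None
    try:
        if xvfb.should_start_xvfb(env):
            xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(
                env=env, build_dir=build_dir)
            assert xvfb_proc and openbox_proc and xcompmgr_proc, \
                'Failed to start xvfb'
        yield
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
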
Example #3
def _run_test_with_xvfb(config, shell, args, apptest):
  '''Run the test with xvfb; return the output or raise an exception.'''
  env = os.environ.copy()
  if (config.target_os != Config.OS_LINUX or '--gtest_list_tests' in args
      or not xvfb.should_start_xvfb(env)):
    return _run_test_with_timeout(config, shell, args, apptest, env)

  try:
    # Simply prepending xvfb.py to the command line precludes direct control of
    # test subprocesses, and prevents easily getting output when tests time out.
    xvfb_proc = None
    openbox_proc = None
    global XVFB_DISPLAY_ID
    display_string = ':' + str(XVFB_DISPLAY_ID)
    (xvfb_proc, openbox_proc) = xvfb.start_xvfb(env, Paths(config).build_dir,
                                                display=display_string)
    XVFB_DISPLAY_ID = (XVFB_DISPLAY_ID + 1) % 50000
    if not xvfb_proc or not xvfb_proc.pid:
      raise Exception('Xvfb failed to start; aborting test run.')
    if not openbox_proc or not openbox_proc.pid:
      raise Exception('Openbox failed to start; aborting test run.')
    logging.getLogger().debug('Running Xvfb %s (pid %d) and Openbox (pid %d).' %
                              (display_string, xvfb_proc.pid, openbox_proc.pid))
    return _run_test_with_timeout(config, shell, args, apptest, env)
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--isolated-script-test-output", type=argparse.FileType("w"), required=True)
    parser.add_argument("--xvfb", help="Start xvfb.", action="store_true")
    args, rest_args = parser.parse_known_args()
    xvfb_proc = None
    openbox_proc = None
    env = os.environ.copy()
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc = xvfb.start_xvfb(env=env, build_dir=".")
        assert xvfb_proc and openbox_proc, "Failed to start xvfb"
    try:
        with common.temporary_file() as tempfile_path:
            rc = common.run_command([sys.executable] + rest_args + ["--write-full-results-to", tempfile_path], env=env)
            with open(tempfile_path) as f:
                results = json.load(f)
            parsed_results = common.parse_common_test_results(results, test_separator=".")
            failures = parsed_results["unexpected_failures"]

            json.dump(
                {
                    "valid": bool(rc <= common.MAX_FAILURES_EXIT_STATUS and ((rc == 0) or failures)),
                    "failures": failures.keys(),
                },
                args.isolated_script_test_output,
            )

        return rc
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
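
All of these wrappers converge on the same abbreviated output contract: a JSON object with a boolean 'valid' flag and a list of failure names. A minimal consumer sketch (the file name is illustrative):

import json

with open('output.json') as f:  # the --isolated-script-test-output path
    summary = json.load(f)
if not summary['valid']:
    print('invalid run; failures: %s' % ', '.join(summary['failures']))
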
Example #5
def _run_test_with_xvfb(config, shell, args, apptest):
    """Run the test with xvfb; return the output or raise an exception."""
    env = os.environ.copy()
    # Make sure gtest doesn't try to add color to the output. Color is done via
    # escape sequences which confuses the code that searches the gtest output.
    env["GTEST_COLOR"] = "no"
    if config.target_os != Config.OS_LINUX or "--gtest_list_tests" in args or not xvfb.should_start_xvfb(env):
        return _run_test_with_timeout(config, shell, args, apptest, env)

    try:
        # Simply prepending xvfb.py to the command line precludes direct control of
        # test subprocesses, and prevents easily getting output when tests time out.
        xvfb_proc = None
        openbox_proc = None
        global XVFB_DISPLAY_ID
        display_string = ":" + str(XVFB_DISPLAY_ID)
        (xvfb_proc, openbox_proc) = xvfb.start_xvfb(env, Paths(config).build_dir, display=display_string)
        XVFB_DISPLAY_ID = (XVFB_DISPLAY_ID + 1) % 50000
        if not xvfb_proc or not xvfb_proc.pid:
            raise Exception("Xvfb failed to start; aborting test run.")
        if not openbox_proc or not openbox_proc.pid:
            raise Exception("Openbox failed to start; aborting test run.")
        logging.getLogger().debug(
            "Running Xvfb %s (pid %d) and Openbox (pid %d)." % (display_string, xvfb_proc.pid, openbox_proc.pid)
        )
        return _run_test_with_timeout(config, shell, args, apptest, env)
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
Example #6
def _run_test_with_xvfb(config, shell, args, apptest):
    '''Run the test with xvfb; return the output or raise an exception.'''
    env = os.environ.copy()
    if (config.target_os != Config.OS_LINUX or '--gtest_list_tests' in args
            or not xvfb.should_start_xvfb(env)):
        return _run_test_with_timeout(config, shell, args, apptest, env)

    try:
        # Simply prepending xvfb.py to the command line precludes direct control of
        # test subprocesses, and prevents easily getting output when tests time out.
        xvfb_proc = None
        openbox_proc = None
        global XVFB_DISPLAY_ID
        display_string = ':' + str(XVFB_DISPLAY_ID)
        (xvfb_proc, openbox_proc) = xvfb.start_xvfb(env,
                                                    Paths(config).build_dir,
                                                    display=display_string)
        XVFB_DISPLAY_ID = (XVFB_DISPLAY_ID + 1) % 50000
        if not xvfb_proc or not xvfb_proc.pid:
            raise Exception('Xvfb failed to start; aborting test run.')
        if not openbox_proc or not openbox_proc.pid:
            raise Exception('Openbox failed to start; aborting test run.')
        logging.getLogger().debug(
            'Running Xvfb %s (pid %d) and Openbox (pid %d).' %
            (display_string, xvfb_proc.pid, openbox_proc.pid))
        return _run_test_with_timeout(config, shell, args, apptest, env)
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
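
The global XVFB_DISPLAY_ID counter above rotates display numbers but is not safe if tests ever run concurrently in threads. A hypothetical lock-guarded variant of the same rotation scheme:

import itertools
import threading

_display_counter = itertools.count()
_display_lock = threading.Lock()

def next_display_string():
    # Same ':N' rotation as above, modulo 50000, but serialized by a lock
    # so concurrent callers never hand out the same display. Hypothetical.
    with _display_lock:
        return ':%d' % (next(_display_counter) % 50000)
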
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', type=str,
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

  args, rest_args = parser.parse_known_args()

  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'

  try:
    valid = True
    rc = 0
    try:
      executable = rest_args[0]
      if IsWindows():
        executable = '.\\%s.exe' % executable
      else:
        executable = './%s' % executable

      rc = common.run_command_with_output([executable] + [
        '--write-abbreviated-json-results-to', args.isolated_script_test_output,
      ], env=env, stdoutfile=args.isolated_script_test_chartjson_output)

      # Now get the correct json format from the stdout to write to the
      # perf results file
    except Exception:
      traceback.print_exc()
      valid = False

    if not valid:
      failures = ['(entire test suite)']
      with open(args.isolated_script_test_output, 'w') as fp:
        json.dump({
            'valid': valid,
            'failures': failures,
        }, fp)

    return rc

  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
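
The Windows/POSIX executable fixup above recurs in several perf runners. A sketch of an equivalent helper, assuming sys.platform is an acceptable stand-in for the IsWindows() helper these scripts rely on:

import sys

def local_executable(name):
    # Prefix with the current directory so the binary resolves locally
    # rather than via PATH; Windows binaries also carry an .exe suffix.
    if sys.platform == 'win32':
        return '.\\%s.exe' % name
    return './%s' % name
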
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=argparse.FileType('w'),
                        required=True)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                                 build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
    try:
        tempfile_dir = tempfile.mkdtemp('telemetry')
        valid = True
        failures = []
        try:
            rc = common.run_command(
                [sys.executable] + rest_args +
                ['--output-dir', tempfile_dir, '--output-format=json'],
                env=env)
            tempfile_name = os.path.join(tempfile_dir, 'results.json')
            with open(tempfile_name) as f:
                results = json.load(f)
            for value in results['per_page_values']:
                if value['type'] == 'failure':
                    failures.append(results['pages'][str(
                        value['page_id'])]['name'])
            valid = bool(rc == 0 or failures)
        except Exception:
            traceback.print_exc()
            valid = False
        finally:
            shutil.rmtree(tempfile_dir)

        if not valid and not failures:
            failures = ['(entire test suite)']
            if rc == 0:
                rc = 1  # Signal an abnormal exit.

        json.dump({
            'valid': valid,
            'failures': failures,
        }, args.isolated_script_test_output)
        return rc

    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=argparse.FileType('w'),
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  try:
    tempfile_dir = tempfile.mkdtemp('telemetry')
    valid = True
    failures = []
    try:
      rc = common.run_command([sys.executable] + rest_args + [
        '--output-dir', tempfile_dir,
        '--output-format=json'
      ], env=env)
      tempfile_name = os.path.join(tempfile_dir, 'results.json')
      with open(tempfile_name) as f:
        results = json.load(f)
      for value in results['per_page_values']:
        if value['type'] == 'failure':
          failures.append(results['pages'][str(value['page_id'])]['name'])
      valid = bool(rc == 0 or failures)
    except Exception:
      traceback.print_exc()
      valid = False
    finally:
      shutil.rmtree(tempfile_dir)

    if not valid and not failures:
      failures = ['(entire test suite)']
      if rc == 0:
        rc = 1  # Signal an abnormal exit.

    json.dump({
        'valid': valid,
        'failures': failures,
    }, args.isolated_script_test_output)
    return rc

  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
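
From the parsing loop above one can infer the legacy Telemetry JSON shape these wrappers expect. A hand-written miniature (all values illustrative):

results = {
    'pages': {'7': {'name': 'page_one', 'url': 'http://example.com/one'}},
    'per_page_values': [
        {'type': 'failure', 'page_id': 7},
        {'type': 'scalar', 'page_id': 7},  # non-failure values are skipped
    ],
}
failures = [results['pages'][str(v['page_id'])]['name']
            for v in results['per_page_values'] if v['type'] == 'failure']
assert failures == ['page_one']
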
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=argparse.FileType('w'),
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  # Compatibility with gtest-based sharding.
  total_shards = None
  shard_index = None
  if 'GTEST_TOTAL_SHARDS' in env:
    total_shards = int(env['GTEST_TOTAL_SHARDS'])
    del env['GTEST_TOTAL_SHARDS']
  if 'GTEST_SHARD_INDEX' in env:
    shard_index = int(env['GTEST_SHARD_INDEX'])
    del env['GTEST_SHARD_INDEX']
  sharding_args = []
  if total_shards is not None and shard_index is not None:
    sharding_args = [
      '--total-shards=%d' % total_shards,
      '--shard-index=%d' % shard_index
    ]
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command([sys.executable] + rest_args + sharding_args + [
        '--write-full-results-to', tempfile_path,
      ], env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator='.')
      failures = parsed_results['unexpected_failures']

      json.dump({
          'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                        ((rc == 0) or failures)),
          'failures': list(failures.keys()),
      }, args.isolated_script_test_output)

    return rc
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
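
The GTEST_TOTAL_SHARDS/GTEST_SHARD_INDEX translation recurs verbatim across these runners; distilled into a standalone helper (the name is hypothetical):

def pop_gtest_sharding(env):
    # Consume gtest's sharding variables so the child process does not see
    # them, and return the equivalent command-line flags (or an empty list).
    total = env.pop('GTEST_TOTAL_SHARDS', None)
    index = env.pop('GTEST_SHARD_INDEX', None)
    if total is None or index is None:
        return []
    return ['--total-shards=%d' % int(total),
            '--shard-index=%d' % int(index)]
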
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    # Remove the chartjson extra arg until this script cares about chartjson
    # results from telemetry
    index = 0
    for arg in rest_args:
        if '--isolated-script-test-chartjson-output' in arg:
            rest_args.pop(index)
            break
        index += 1

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                                 build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]
    try:
        return common.run_command(
            [sys.executable] + rest_args + sharding_args +
            ['--write-full-results-to', args.isolated_script_test_output],
            env=env)
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
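
The stripping loop above matches by substring and removes a single token, so it assumes the flag arrives fused as '--isolated-script-test-chartjson-output=PATH'. Under that same assumption, a comprehension-based sketch that drops every match rather than only the first:

rest_args = [arg for arg in rest_args
             if '--isolated-script-test-chartjson-output' not in arg]
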
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  # Remove the chartjson extra arg until this script cares about chartjson
  # results from telemetry
  index = 0
  for arg in rest_args:
    if '--isolated-script-test-chartjson-output' in arg:
      rest_args.pop(index)
      break
    index += 1

  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  # Compatibility with gtest-based sharding.
  total_shards = None
  shard_index = None
  if 'GTEST_TOTAL_SHARDS' in env:
    total_shards = int(env['GTEST_TOTAL_SHARDS'])
    del env['GTEST_TOTAL_SHARDS']
  if 'GTEST_SHARD_INDEX' in env:
    shard_index = int(env['GTEST_SHARD_INDEX'])
    del env['GTEST_SHARD_INDEX']
  sharding_args = []
  if total_shards is not None and shard_index is not None:
    sharding_args = [
      '--total-shards=%d' % total_shards,
      '--shard-index=%d' % shard_index
    ]
  try:
    return common.run_command([sys.executable] + rest_args + sharding_args + [
      '--write-full-results-to', args.isolated_script_test_output], env=env)
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=argparse.FileType('w'),
                        required=True)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    xvfb_proc = None
    openbox_proc = None
    env = os.environ.copy()
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc = xvfb.start_xvfb(env=env, build_dir='.')
        assert xvfb_proc and openbox_proc, 'Failed to start xvfb'
    try:
        with common.temporary_file() as tempfile_path:
            rc = common.run_command([sys.executable] + rest_args + [
                '--write-full-results-to',
                tempfile_path,
            ],
                                    env=env)
            with open(tempfile_path) as f:
                results = json.load(f)
            parsed_results = common.parse_common_test_results(
                results, test_separator='.')
            failures = parsed_results['unexpected_failures']

            json.dump(
                {
                    'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                                  ((rc == 0) or failures)),
                    'failures': list(failures.keys()),
                }, args.isolated_script_test_output)

        return rc
    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=argparse.FileType('w'),
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command([sys.executable] + rest_args + [
        '--write-full-results-to', tempfile_path,
      ], env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator='.')
      failures = parsed_results['unexpected_failures']

      json.dump({
          'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                        ((rc == 0) or failures)),
          'failures': list(failures.keys()),
      }, args.isolated_script_test_output)

    return rc
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
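
The 'valid' predicate shared by the full-results runners combines two conditions; spelled out as a helper (name hypothetical):

def results_valid(rc, failures, max_failures_exit_status):
    # Trust the run when the harness exited within the range reserved for
    # "N tests failed" exit codes, and either exited cleanly or explained
    # its nonzero exit with recorded unexpected failures.
    return bool(rc <= max_failures_exit_status and (rc == 0 or failures))
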
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=argparse.FileType('w'),
                        required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                                 build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
    try:
        tempfile_dir = tempfile.mkdtemp('telemetry')
        valid = True
        failures = []
        chartjson_results_present = '--output-format=chartjson' in rest_args
        chartresults = None
        json_test_results_present = '--output-format=json-test-results' in rest_args
        json_test_results = None

        results = None
        try:
            rc = common.run_command(
                [sys.executable] + rest_args +
                ['--output-dir', tempfile_dir, '--output-format=json'],
                env=env)
            # If chartjson output was also requested, read it in so it can be
            # returned. results-chart.json is the file name Telemetry writes
            # when the chartjson output format is included.
            if chartjson_results_present:
                chart_tempfile_name = os.path.join(tempfile_dir,
                                                   'results-chart.json')
                with open(chart_tempfile_name) as f:
                    chartresults = json.load(f)
            # Read the chartjson results first: the benchmark may have been
            # disabled, in which case there is no results.json to parse.
            if (not chartjson_results_present
                    or chartresults.get('enabled', True)):
                tempfile_name = os.path.join(tempfile_dir, 'results.json')
                with open(tempfile_name) as f:
                    results = json.load(f)
                for value in results['per_page_values']:
                    if value['type'] == 'failure':
                        page_data = results['pages'][str(value['page_id'])]
                        name = page_data.get('name')
                        if not name:
                            name = page_data['url']

                        failures.append(name)
                valid = bool(rc == 0 or failures)

            if json_test_results_present:
                tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
                with open(tempfile_name) as f:
                    json_test_results = json.load(f)

        except Exception:
            traceback.print_exc()
            if results:
                print('results, which possibly caused exception: %s' %
                      json.dumps(results, indent=2))
            valid = False
        finally:
            shutil.rmtree(tempfile_dir)

        if not valid and not failures:
            failures = ['(entire test suite)']
            if rc == 0:
                rc = 1  # Signal an abnormal exit.

        if chartjson_results_present and args.isolated_script_test_chartjson_output:
            with open(args.isolated_script_test_chartjson_output, 'w') as f:
                json.dump(chartresults, f)

        if not json_test_results_present:
            json_test_results = {'valid': valid, 'failures': failures}

        json.dump(json_test_results, args.isolated_script_test_output)
        return rc

    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
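
Several runners share the post-processing that reconciles the exit code with the parsed results; a distilled sketch (hypothetical helper name):

def normalize_exit(rc, valid, failures):
    # An invalid run with no recorded failures is attributed to the whole
    # suite, and a clean exit code is overridden so callers still see red.
    if not valid and not failures:
        failures = ['(entire test suite)']
        if rc == 0:
            rc = 1  # Signal an abnormal exit.
    return rc, failures
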
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    # Remove the chartjson extra arg until this script cares about chartjson
    # results from telemetry
    index = 0
    for arg in rest_args:
        if '--isolated-script-test-chartjson-output' in arg:
            rest_args.pop(index)
            break
        index += 1

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                                 build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]
    try:
        valid = True
        rc = 0
        try:
            rc = common.run_command(
                [sys.executable] + rest_args + sharding_args +
                ['--write-full-results-to', args.isolated_script_test_output],
                env=env)
        except Exception:
            traceback.print_exc()
            valid = False

        if not valid:
            failures = ['(entire test suite)']
            with open(args.isolated_script_test_output, 'w') as fp:
                json.dump({
                    'valid': valid,
                    'failures': failures,
                }, fp)

        return rc

    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=argparse.FileType('w'),
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  # Remove the chartjson extra arg until this script cares about chartjson
  # results from telemetry
  index = 0
  for arg in rest_args:
    if '--isolated-script-test-chartjson-output' in arg:
      rest_args.pop(index)
      break
    index += 1

  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  # Compatibility with gtest-based sharding.
  total_shards = None
  shard_index = None
  if 'GTEST_TOTAL_SHARDS' in env:
    total_shards = int(env['GTEST_TOTAL_SHARDS'])
    del env['GTEST_TOTAL_SHARDS']
  if 'GTEST_SHARD_INDEX' in env:
    shard_index = int(env['GTEST_SHARD_INDEX'])
    del env['GTEST_SHARD_INDEX']
  sharding_args = []
  if total_shards is not None and shard_index is not None:
    sharding_args = [
      '--total-shards=%d' % total_shards,
      '--shard-index=%d' % shard_index
    ]
  try:
    with common.temporary_file() as tempfile_path:
      rc = common.run_command([sys.executable] + rest_args + sharding_args + [
        '--write-full-results-to', tempfile_path,
      ], env=env)
      with open(tempfile_path) as f:
        results = json.load(f)
      parsed_results = common.parse_common_test_results(results,
                                                        test_separator='.')
      failures = parsed_results['unexpected_failures']

      json.dump({
          'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                        ((rc == 0) or failures)),
          'failures': list(failures.keys()),
      }, args.isolated_script_test_output)

    return rc
  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', type=str,
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

  args, rest_args = parser.parse_known_args()

  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'

  try:
    valid = True
    rc = 0
    try:
      executable = rest_args[0]
      if IsWindows():
        executable = '.\\%s.exe' % executable
      else:
        executable = './%s' % executable
      with common.temporary_file() as tempfile_path:
        valid = (common.run_command_with_output([executable],
            env=env, stdoutfile=tempfile_path) == 0)

        # Now get the correct json format from the stdout to write to the
        # perf results file
        results_processor = (
            generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
        charts = results_processor.GenerateJsonResults(tempfile_path)
        # Write the returned encoded json to the charts output file
        with open(args.isolated_script_test_chartjson_output, 'w') as f:
          f.write(charts)
    except Exception:
      traceback.print_exc()
      valid = False

    failures = [] if valid else ['(entire test suite)']
    with open(args.isolated_script_test_output, 'w') as fp:
      json.dump({
          'valid': valid,
          'failures': failures,
      }, fp)

    return rc

  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    args, rest_args = parser.parse_known_args()
    # Remove the chartjson extra arg until this script cares about chartjson
    # results from telemetry
    index = 0
    for arg in rest_args:
        if ('--isolated-script-test-chartjson-output' in arg
                or '--isolated-script-test-perf-output' in arg):
            rest_args.pop(index)
            break
        index += 1
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)

        # isolated_script_test_filter comes in like:
        #   gpu_tests.webgl_conformance_integration_test.WebGLConformanceIntegrationTest.WebglExtension_WEBGL_depth_texture  # pylint: disable=line-too-long
        # but we need to pass it to --test-filter like this:
        #   WebglExtension_WEBGL_depth_texture
        filter_list = [f.split('.')[-1] for f in filter_list]

        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        rest_args.append('--test-filter=' + filter_regex)

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                                 build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
    # Compatibility with gtest-based sharding.
    total_shards = None
    shard_index = None
    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
        del env['GTEST_TOTAL_SHARDS']
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
        del env['GTEST_SHARD_INDEX']
    sharding_args = []
    if total_shards is not None and shard_index is not None:
        sharding_args = [
            '--total-shards=%d' % total_shards,
            '--shard-index=%d' % shard_index
        ]
    try:
        valid = True
        rc = 0
        try:
            env['CHROME_HEADLESS'] = '1'
            rc = common.run_command(
                [sys.executable] + rest_args + sharding_args +
                ['--write-full-results-to', args.isolated_script_test_output],
                env=env)
        except Exception:
            traceback.print_exc()
            valid = False

        if not valid:
            failures = ['(entire test suite)']
            with open(args.isolated_script_test_output, 'w') as fp:
                json.dump({
                    'valid': valid,
                    'failures': failures,
                }, fp)

        return rc

    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
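
A worked example of the filter rewriting above, using the fully qualified test name quoted in the comment:

filter_list = ['gpu_tests.webgl_conformance_integration_test.'
               'WebGLConformanceIntegrationTest.'
               'WebglExtension_WEBGL_depth_texture']
filter_list = [f.split('.')[-1] for f in filter_list]  # keep the leaf name
filter_regex = '(' + '|'.join(filter_list) + ')'
assert filter_regex == '(WebglExtension_WEBGL_depth_texture)'
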
Example #20
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    args, rest_args = parser.parse_known_args()

    xvfb_proc = None
    openbox_proc = None
    xcompmgr_proc = None
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    if args.xvfb and xvfb.should_start_xvfb(env):
        xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                                 build_dir='.')
        assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'

    try:
        rc = 0
        try:
            executable = rest_args[0]
            extra_flags = []
            if len(rest_args) > 1:
                extra_flags = rest_args[1:]

            # These flags make sure that the test outputs perf metrics in the log.
            if not '--verbose' in extra_flags:
                extra_flags.append('--verbose')
            if not '--test-launcher-print-test-stdio=always' in extra_flags:
                extra_flags.append('--test-launcher-print-test-stdio=always')
            if args.isolated_script_test_filter:
                filter_list = common.extract_filter_list(
                    args.isolated_script_test_filter)
                extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

            if IsWindows():
                executable = '.\\%s.exe' % executable
            else:
                executable = './%s' % executable
            with common.temporary_file() as tempfile_path:
                env['CHROME_HEADLESS'] = '1'
                rc = common.run_command_with_output([executable] + extra_flags,
                                                    env=env,
                                                    stdoutfile=tempfile_path)
                # Now get the correct json format from the stdout to write to the
                # perf results file
                results_processor = (generate_legacy_perf_dashboard_json.
                                     LegacyResultsProcessor())
                charts = results_processor.GenerateJsonResults(tempfile_path)
                # TODO(eakuefner): Make isolated_script_test_perf_output mandatory
                # after flipping flag in swarming.
                if args.isolated_script_test_perf_output:
                    filename = args.isolated_script_test_perf_output
                else:
                    filename = args.isolated_script_test_chartjson_output
                # Write the returned encoded json to the charts output file
                with open(filename, 'w') as f:
                    f.write(charts)
        except Exception:
            traceback.print_exc()
            rc = 1

        valid = (rc == 0)
        failures = [] if valid else ['(entire test suite)']
        with open(args.isolated_script_test_output, 'w') as fp:
            json.dump({
                'valid': valid,
                'failures': failures,
            }, fp)

        return rc

    finally:
        xvfb.kill(xvfb_proc)
        xvfb.kill(openbox_proc)
        xvfb.kill(xcompmgr_proc)
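
The flag injection above preserves caller-supplied ordering while avoiding duplicates; the same step written compactly:

for flag in ('--verbose', '--test-launcher-print-test-stdio=always'):
    if flag not in extra_flags:
        extra_flags.append(flag)
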
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=argparse.FileType('w'),
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', required=False)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  args, rest_args = parser.parse_known_args()
  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
  try:
    tempfile_dir = tempfile.mkdtemp('telemetry')
    valid = True
    failures = []
    chartjson_results_present = '--output-format=chartjson' in rest_args
    chartresults = None

    results = None
    try:
      rc = common.run_command([sys.executable] + rest_args + [
        '--output-dir', tempfile_dir,
        '--output-format=json'
      ], env=env)
      # If chartjson output was also requested, read it in so it can be
      # returned. results-chart.json is the file name Telemetry writes when
      # the chartjson output format is included.
      if chartjson_results_present:
        chart_tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')
        with open(chart_tempfile_name) as f:
          chartresults = json.load(f)
      # Read the chartjson results first: the benchmark may have been
      # disabled, in which case there is no results.json to parse.
      if (not chartjson_results_present or
          chartresults.get('enabled', True)):
        tempfile_name = os.path.join(tempfile_dir, 'results.json')
        with open(tempfile_name) as f:
          results = json.load(f)
        for value in results['per_page_values']:
          if value['type'] == 'failure':
            page_data = results['pages'][str(value['page_id'])]
            name = page_data.get('name')
            if not name:
              name = page_data['url']

            failures.append(name)
        valid = bool(rc == 0 or failures)

    except Exception:
      traceback.print_exc()
      if results:
        print('results, which possibly caused exception: %s' %
              json.dumps(results, indent=2))
      valid = False
    finally:
      shutil.rmtree(tempfile_dir)

    if not valid and not failures:
      failures = ['(entire test suite)']
      if rc == 0:
        rc = 1  # Signal an abnormal exit.

    if chartjson_results_present and args.isolated_script_test_chartjson_output:
      with open(args.isolated_script_test_chartjson_output, 'w') as f:
        json.dump(chartresults, f)

    json.dump({
        'valid': valid,
        'failures': failures
    }, args.isolated_script_test_output)
    return rc

  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)