Example #1
    def _ConstructDefaultProcessor(self):
        """Creates a LegacyResultsProcessor instance.

        Returns:
          An instance of the LegacyResultsProcessor class.
        """
        return generate_legacy_perf_dashboard_json.LegacyResultsProcessor()
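For reference, a minimal, purely illustrative sketch of how the processor built above is exercised elsewhere on this page; the log path here is hypothetical:
# Illustrative sketch, not part of the original snippet: GenerateJsonResults
# parses the perf result lines captured in a gtest log file and returns the
# chart data as an encoded JSON string (see the later examples on this page).
processor = generate_legacy_perf_dashboard_json.LegacyResultsProcessor()
charts_json = processor.GenerateJsonResults('gtest_output.log')  # hypothetical path
print(charts_json)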
Example #2
def execute_gtest_perf_test(args, rest_args):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    charts = None  # Keep charts defined even if the run below raises.
    try:
        executable = rest_args[0]
        extra_flags = []
        if len(rest_args) > 1:
            extra_flags = rest_args[1:]

        # These flags make sure that the test outputs perf metrics in the log.
        if '--verbose' not in extra_flags:
            extra_flags.append('--verbose')
        if '--test-launcher-print-test-stdio=always' not in extra_flags:
            extra_flags.append('--test-launcher-print-test-stdio=always')
        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            executable = '.\\%s.exe' % executable
        else:
            executable = './%s' % executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

            # Now get the correct JSON format from the stdout to write to the
            # perf results file.
            results_processor = (
                generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
            charts = results_processor.GenerateJsonResults(tempfile_path)
    except Exception:
        traceback.print_exc()
        rc = 1

    valid = (rc == 0)
    failures = [] if valid else ['(entire test suite)']
    output_json = {
        'valid': valid,
        'failures': failures,
    }
    return rc, charts, output_json
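The snippet above returns its results rather than writing them out. The following is a rough, illustrative sketch (not from the original code) of how execute_gtest_perf_test could be wired up, reusing the flag names and output handling seen in the later examples:
# Illustrative wrapper only; flag names are taken from the other examples.
parser = argparse.ArgumentParser()
parser.add_argument('--isolated-script-test-output', type=str, required=True)
parser.add_argument('--isolated-script-test-perf-output', type=str, required=False)
parser.add_argument('--isolated-script-test-filter', type=str, required=False)
parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
args, rest_args = parser.parse_known_args()

rc, charts, output_json = execute_gtest_perf_test(args, rest_args)

# Persist the chart JSON and the pass/fail summary, as the later examples do.
if charts and args.isolated_script_test_perf_output:
    with open(args.isolated_script_test_perf_output, 'w') as f:
        f.write(charts)
with open(args.isolated_script_test_output, 'w') as f:
    json.dump(output_json, f)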
Example #3
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=str,
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', type=str,
      required=True)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

  args, rest_args = parser.parse_known_args()

  xvfb_proc = None
  openbox_proc = None
  xcompmgr_proc = None
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
  if args.xvfb and xvfb.should_start_xvfb(env):
    xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                             build_dir='.')
    assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'

  try:
    valid = True
    rc = 0
    try:
      executable = rest_args[0]
      if IsWindows():
        executable = '.\\%s.exe' % executable
      else:
        executable = './%s' % executable
      with common.temporary_file() as tempfile_path:
        valid = (common.run_command_with_output([executable],
            env=env, stdoutfile=tempfile_path) == 0)

        # Now get the correct JSON format from the stdout to write to the
        # perf results file.
        results_processor = (
            generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
        charts = results_processor.GenerateJsonResults(tempfile_path)
        # Write the returned encoded JSON to the charts output file.
        with open(args.isolated_script_test_chartjson_output, 'w') as f:
          f.write(charts)
    except Exception:
      traceback.print_exc()
      valid = False

    failures = [] if valid else ['(entire test suite)']
    with open(args.isolated_script_test_output, 'w') as fp:
      json.dump({
          'valid': valid,
          'failures': failures,
      }, fp)

    return rc

  finally:
    xvfb.kill(xvfb_proc)
    xvfb.kill(openbox_proc)
    xvfb.kill(xcompmgr_proc)
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    args, rest_args = parser.parse_known_args()

    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        executable = rest_args[0]
        extra_flags = []
        if len(rest_args) > 1:
            extra_flags = rest_args[1:]

        # These flags make sure that the test outputs perf metrics in the log.
        if '--verbose' not in extra_flags:
            extra_flags.append('--verbose')
        if '--test-launcher-print-test-stdio=always' not in extra_flags:
            extra_flags.append('--test-launcher-print-test-stdio=always')
        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            executable = '.\\%s.exe' % executable
        else:
            executable = './%s' % executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

            # Now get the correct JSON format from the stdout to write to the
            # perf results file.
            results_processor = (
                generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
            charts = results_processor.GenerateJsonResults(tempfile_path)
            # TODO(eakuefner): Make isolated_script_test_perf_output mandatory after
            # flipping flag in swarming.
            if args.isolated_script_test_perf_output:
                filename = args.isolated_script_test_perf_output
            else:
                filename = args.isolated_script_test_chartjson_output
            # Write the returned encoded JSON to the charts output file.
            with open(filename, 'w') as f:
                f.write(charts)
    except Exception:
        traceback.print_exc()
        rc = 1

    valid = (rc == 0)
    failures = [] if valid else ['(entire test suite)']
    with open(args.isolated_script_test_output, 'w') as fp:
        json.dump({
            'valid': valid,
            'failures': failures,
        }, fp)

    return rc
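These runners are intended to be executed as standalone scripts. The snippets above omit the entry point, but it would presumably follow the usual pattern (assuming sys is imported at the top of the script):
# Illustrative entry point, not shown in the snippets above.
if __name__ == '__main__':
    sys.exit(main())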