def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    env['CHROME_HEADLESS'] = '1'

    return_code = 0
    try:
        command = command_generator.generate()
        if use_xvfb:
            return_code = xvfb.run_executable(command,
                                              env,
                                              stdoutfile=output_paths.logs)
        else:
            return_code = test_env.run_command_with_output(
                command, env=env, stdoutfile=output_paths.logs)
        # Get the correct json format from the stdout to write to the perf
        # results file.
        results_processor = (
            generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
        graph_json_string = results_processor.GenerateJsonResults(
            output_paths.logs)
        with open(output_paths.perf_results, 'w') as fh:
            fh.write(graph_json_string)
    except Exception:
        traceback.print_exc()
        return_code = 1
    write_legacy_test_results(return_code, output_paths.test_results)
    return return_code
def run_wrapper(args, cmd, env, stdoutfile=None):
    if args.xvfb:
        return xvfb.run_executable(cmd, env, stdoutfile=stdoutfile)
    else:
        return test_env.run_command_with_output(cmd,
                                                env=env,
                                                stdoutfile=stdoutfile)
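
The write_legacy_test_results helper used in execute_gtest_perf_test above is not shown in this listing. A minimal sketch, assuming the same {'valid': ..., 'failures': [...]} JSON shape that the later examples write by hand, might look like this (the exact schema is an assumption):

import json

def write_legacy_test_results(return_code, output_filepath):
    # Hypothetical sketch: mirrors the {'valid': ..., 'failures': [...]} summary
    # written by the other examples in this listing; the real helper may differ.
    valid = (return_code == 0)
    failures = [] if valid else ['(entire test suite)']
    with open(output_filepath, 'w') as fh:
        json.dump({'valid': valid, 'failures': failures}, fh)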
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('executable', help='Test executable.')
    parser.add_argument('--isolated-script-test-output', type=str)
    parser.add_argument('--isolated-script-test-filter', type=str)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    # Kept for compatibility.
    # TODO(jmadill): Remove when removed from the recipes. http://crbug.com/954415
    parser.add_argument('--isolated-script-test-perf-output', type=str)

    args, extra_flags = parser.parse_known_args()

    env = os.environ.copy()

    if 'GTEST_TOTAL_SHARDS' in env:
        extra_flags += ['--shard-count=' + env['GTEST_TOTAL_SHARDS']]
        env.pop('GTEST_TOTAL_SHARDS')
    if 'GTEST_SHARD_INDEX' in env:
        extra_flags += ['--shard-index=' + env['GTEST_SHARD_INDEX']]
        env.pop('GTEST_SHARD_INDEX')

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        # Consider adding stdio control flags.
        if args.isolated_script_test_output:
            extra_flags.append('--isolated-script-test-output=%s' %
                               args.isolated_script_test_output)

        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            args.executable = '.\\%s.exe' % args.executable
        else:
            args.executable = './%s' % args.executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [args.executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

    except Exception:
        traceback.print_exc()
        rc = 1

    return rc
Example #4
def execute_gtest_perf_test(args, rest_args):
    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    charts = None  # Stays None if the run fails before results are processed.
    try:
        executable = rest_args[0]
        extra_flags = []
        if len(rest_args) > 1:
            extra_flags = rest_args[1:]

        # These flags make sure that the test outputs perf metrics in the log.
        if '--verbose' not in extra_flags:
            extra_flags.append('--verbose')
        if '--test-launcher-print-test-stdio=always' not in extra_flags:
            extra_flags.append('--test-launcher-print-test-stdio=always')
        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            executable = '.\\%s.exe' % executable
        else:
            executable = './%s' % executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

            # Now get the correct json format from the stdout to write to the perf
            # results file
            results_processor = (
                generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
            charts = results_processor.GenerateJsonResults(tempfile_path)
    except Exception:
        traceback.print_exc()
        rc = 1

    valid = (rc == 0)
    failures = [] if valid else ['(entire test suite)']
    output_json = {
        'valid': valid,
        'failures': failures,
    }
    return rc, charts, output_json
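
A caller of this execute_gtest_perf_test variant would typically persist the returned values itself. A hedged usage sketch, assuming args was parsed with the isolated-script flags used elsewhere in this listing and that json is imported:

# Hypothetical caller: write the charts string and the pass/fail summary to the
# isolated-script output files, as the later main() example does inline.
rc, charts, output_json = execute_gtest_perf_test(args, rest_args)
if charts is not None:
    with open(args.isolated_script_test_perf_output, 'w') as f:
        f.write(charts)
with open(args.isolated_script_test_output, 'w') as f:
    json.dump(output_json, f)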
def execute_telemetry_benchmark(command_generator,
                                output_paths,
                                use_xvfb=False):
    start = time.time()

    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    return_code = 1
    temp_dir = tempfile.mkdtemp('telemetry')
    try:
        command = command_generator.generate(temp_dir)
        if use_xvfb:
            # When running with xvfb, we currently output both to stdout and to the
            # file. It would be better to only output to the file to keep the logs
            # clean.
            return_code = xvfb.run_executable(command,
                                              env=env,
                                              stdoutfile=output_paths.logs)
        else:
            return_code = test_env.run_command_with_output(
                command, env=env, stdoutfile=output_paths.logs)
        expected_results_filename = os.path.join(temp_dir, 'test-results.json')
        if os.path.exists(expected_results_filename):
            shutil.move(expected_results_filename, output_paths.test_results)
        else:
            common.write_interrupted_test_results_to(output_paths.test_results,
                                                     start)
        expected_perf_filename = os.path.join(temp_dir, 'histograms.json')
        shutil.move(expected_perf_filename, output_paths.perf_results)

        csv_file_path = os.path.join(temp_dir, 'results.csv')
        if os.path.isfile(csv_file_path):
            shutil.move(csv_file_path, output_paths.csv_perf_results)
    except Exception:
        print('The following exception may have prevented the code from '
              'outputting structured test results and perf results output:')
        print(traceback.format_exc())
    finally:
        # Pass ignore_errors=True because rmtree may otherwise fail when leaked
        # test processes are still holding open handles to files under
        # |temp_dir|. For example, see crbug.com/865896.
        shutil.rmtree(temp_dir, ignore_errors=True)

    print_duration('executing benchmark %s' % command_generator.benchmark,
                   start)

    if return_code:
        return return_code
    return 0
Example #6
def _run_and_get_output(args, cmd, env):
    lines = []
    with common.temporary_file() as tempfile_path:
        if args.xvfb:
            ret = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
        else:
            ret = test_env.run_command_with_output(cmd,
                                                   env=env,
                                                   stdoutfile=tempfile_path)
        if ret:
            logging.error('Error running test suite.')
            return None
        with open(tempfile_path) as f:
            for line in f:
                lines.append(line.strip())
    return lines
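
A hedged usage sketch for _run_and_get_output; the command and flag below are illustrative, not taken from the original:

# Hypothetical caller: capture the suite's stdout lines and drop blank ones.
cmd = [args.test_suite, '--list-tests']
lines = _run_and_get_output(args, cmd, env)
if lines is not None:
    test_names = [line for line in lines if line]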
Example #7
    def run_tests(args, tests, extra_flags, env, screenshot_dir):
        for test in tests['traces']:
            with common.temporary_file() as tempfile_path:
                cmd = [
                    args.test_suite,
                    DEFAULT_TEST_PREFIX + test,
                    '--render-test-output-dir=%s' % screenshot_dir,
                    '--one-frame-only',
                ] + extra_flags

                if args.xvfb:
                    rc = xvfb.run_executable(cmd,
                                             env,
                                             stdoutfile=tempfile_path)
                else:
                    rc = test_env.run_command_with_output(
                        cmd, env=env, stdoutfile=tempfile_path)

                pass_fail = 'PASS' if rc == 0 else 'FAIL'
                result_tests[test] = {'expected': 'PASS', 'actual': pass_fail}
                results['num_failures_by_type'][pass_fail] += 1

        return results['num_failures_by_type']['FAIL'] == 0
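
The run_tests snippet above refers to results and result_tests from an enclosing scope that is not shown. A plausible initialization, assuming the 'num_failures_by_type' and per-test 'expected'/'actual' layout the snippet updates, would be:

# Hypothetical enclosing-scope setup for run_tests(); names and layout are
# assumptions inferred from how the snippet uses them.
results = {
    'tests': {},
    'num_failures_by_type': {'PASS': 0, 'FAIL': 0},
}
result_tests = results['tests']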
Example #8
def run_benchmark(args, rest_args, histogram_results):
    """  Run benchmark with args.

  Args:
    args: the option object resulted from parsing commandline args required for
      IsolatedScriptTest contract (see
      https://cs.chromium.org/chromium/build/scripts/slave/recipe_modules/chromium_tests/steps.py?rcl=d31f256fb860701e6dc02544f2beffe4e17c9b92&l=1639).
    rest_args: the args (list of strings) for running Telemetry benchmark.
    histogram_results: a boolean describes whether to output histograms format
      for the benchmark.

  Returns: a tuple of (rc, perf_results, json_test_results, benchmark_log)
    rc: the return code of benchmark
    perf_results: json object contains the perf test results
    json_test_results: json object contains the Pass/Fail data of the benchmark.
    benchmark_log: string contains the stdout/stderr of the benchmark run.
  """
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    tempfile_dir = tempfile.mkdtemp('telemetry')
    benchmark_log = ''
    stdoutfile = os.path.join(tempfile_dir, 'benchmark_log.txt')
    valid = True
    num_failures = 0
    perf_results = None
    json_test_results = None

    results = None
    cmd_args = rest_args
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        cmd_args = cmd_args + ['--story-filter=' + filter_regex]
    try:
        cmd = [sys.executable] + cmd_args + [
            '--output-dir',
            tempfile_dir,
            '--output-format=json-test-results',
        ]
        if args.xvfb:
            rc = xvfb.run_executable(cmd, env=env, stdoutfile=stdoutfile)
        else:
            rc = test_env.run_command_with_output(cmd,
                                                  env=env,
                                                  stdoutfile=stdoutfile)

        with open(stdoutfile) as f:
            benchmark_log = f.read()

        # If we have also output chartjson read it in and return it.
        # results-chart.json is the file name output by telemetry when the
        # chartjson output format is included
        tempfile_name = None
        if histogram_results:
            tempfile_name = os.path.join(tempfile_dir, 'histograms.json')
        else:
            tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')

        if tempfile_name is not None:
            with open(tempfile_name) as f:
                perf_results = json.load(f)

        # test-results.json is the file name output by telemetry when the
        # json-test-results format is included
        tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
        with open(tempfile_name) as f:
            json_test_results = json.load(f)
        num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
        valid = bool(rc == 0 or num_failures != 0)

    except Exception:
        traceback.print_exc()
        if results:
            print('results, which possibly caused exception: %s' %
                  json.dumps(results, indent=2))
        valid = False
    finally:
        shutil.rmtree(tempfile_dir)

    if not valid and num_failures == 0:
        if rc == 0:
            rc = 1  # Signal an abnormal exit.

    return rc, perf_results, json_test_results, benchmark_log
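
A caller might persist run_benchmark's return values like this. The output paths are illustrative, taken from the isolated-script flags parsed in the next example, and json is assumed imported:

# Hypothetical caller of run_benchmark(); writes the perf and test results
# JSON to the isolated-script output files.
rc, perf_results, json_test_results, benchmark_log = run_benchmark(
    args, rest_args, histogram_results=True)
if perf_results is not None:
    with open(args.isolated_script_test_perf_output, 'w') as f:
        json.dump(perf_results, f)
if json_test_results is not None:
    with open(args.isolated_script_test_output, 'w') as f:
        json.dump(json_test_results, f)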
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    args, rest_args = parser.parse_known_args()

    env = os.environ.copy()
    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        executable = rest_args[0]
        extra_flags = []
        if len(rest_args) > 1:
            extra_flags = rest_args[1:]

        # These flags make sure that the test outputs perf metrics in the log.
        if '--verbose' not in extra_flags:
            extra_flags.append('--verbose')
        if '--test-launcher-print-test-stdio=always' not in extra_flags:
            extra_flags.append('--test-launcher-print-test-stdio=always')
        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            executable = '.\\%s.exe' % executable
        else:
            executable = './%s' % executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

            # Now get the correct json format from the stdout to write to the perf
            # results file
            results_processor = (
                generate_legacy_perf_dashboard_json.LegacyResultsProcessor())
            charts = results_processor.GenerateJsonResults(tempfile_path)
            # TODO(eakuefner): Make isolated_script_test_perf_output mandatory after
            # flipping flag in swarming.
            if args.isolated_script_test_perf_output:
                filename = args.isolated_script_test_perf_output
            else:
                filename = args.isolated_script_test_chartjson_output
            # Write the returned encoded json to the charts output file.
            with open(filename, 'w') as f:
                f.write(charts)
    except Exception:
        traceback.print_exc()
        rc = 1

    valid = (rc == 0)
    failures = [] if valid else ['(entire test suite)']
    with open(args.isolated_script_test_output, 'w') as fp:
        json.dump({
            'valid': valid,
            'failures': failures,
        }, fp)

    return rc
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('executable', help='Test executable.')
    parser.add_argument('--isolated-script-test-output',
                        type=str,
                        required=True)
    parser.add_argument('--isolated-script-test-perf-output',
                        type=str,
                        required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')

    args, extra_flags = parser.parse_known_args()

    env = os.environ.copy()

    # total_shards = None
    # shard_index = None
    if 'GTEST_TOTAL_SHARDS' in env:
        extra_flags += ['--shard-count=%s' % env['GTEST_TOTAL_SHARDS']]
    if 'GTEST_SHARD_INDEX' in env:
        extra_flags += ['--shard-index=%s' % env['GTEST_SHARD_INDEX']]

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

    rc = 0
    try:
        # Consider adding stdio control flags.
        if args.isolated_script_test_output:
            extra_flags.append('--results-file=%s' %
                               args.isolated_script_test_output)

        if args.isolated_script_test_perf_output:
            extra_flags.append('--histogram-json-file=%s' %
                               args.isolated_script_test_perf_output)

        if args.isolated_script_test_filter:
            filter_list = common.extract_filter_list(
                args.isolated_script_test_filter)
            extra_flags.append('--gtest_filter=' + ':'.join(filter_list))

        if IsWindows():
            args.executable = '.\\%s.exe' % args.executable
        else:
            args.executable = './%s' % args.executable
        with common.temporary_file() as tempfile_path:
            env['CHROME_HEADLESS'] = '1'
            cmd = [args.executable] + extra_flags

            if args.xvfb:
                rc = xvfb.run_executable(cmd, env, stdoutfile=tempfile_path)
            else:
                rc = test_env.run_command_with_output(cmd,
                                                      env=env,
                                                      stdoutfile=tempfile_path)

    except Exception:
        traceback.print_exc()
        rc = 1

    return rc
Example #11
def execute_telemetry_benchmark_helper(args, rest_args, histogram_results):
    """Run benchmark with args.

  Args:
    args: the option object resulted from parsing commandline args required for
      IsolatedScriptTest contract (see
      https://cs.chromium.org/chromium/build/scripts/slave/recipe_modules/chromium_tests/steps.py?rcl=d31f256fb860701e6dc02544f2beffe4e17c9b92&l=1639).
    rest_args: the args (list of strings) for running Telemetry benchmark.
    histogram_results: a boolean describes whether to output histograms format
      for the benchmark.

  Returns: a tuple of (rc, perf_results, json_test_results, benchmark_log)
    rc: the return code of benchmark
    perf_results: json object contains the perf test results
    json_test_results: json object contains the Pass/Fail data of the benchmark.
    benchmark_log: string contains the stdout/stderr of the benchmark run.
  """
    # TODO(crbug.com/920002): These arguments cannot go into
    # run_performance_tests.py because
    # run_gtest_perf_tests.py does not yet support them. Note that ideally
    # we would use common.BaseIsolatedScriptArgsAdapter, but this will take
    # a good deal of refactoring to accomplish.
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-repeat',
                        type=int,
                        required=False)
    parser.add_argument(
        '--isolated-script-test-launcher-retry-limit',
        type=int,
        required=False,
        # Telemetry does not support retries. crbug.com/894254#c21
        choices=[0])
    parser.add_argument('--isolated-script-test-also-run-disabled-tests',
                        default=False,
                        action='store_true',
                        required=False)
    # Parse leftover args not already parsed in run_performance_tests.py or in
    # main().
    args, rest_args = parser.parse_known_args(args=rest_args, namespace=args)

    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    tempfile_dir = tempfile.mkdtemp('telemetry')
    benchmark_log = ''
    stdoutfile = os.path.join(tempfile_dir, 'benchmark_log.txt')
    valid = True
    num_failures = 0
    perf_results = None
    json_test_results = None

    results = None
    cmd_args = rest_args
    if args.isolated_script_test_filter:
        filter_list = common.extract_filter_list(
            args.isolated_script_test_filter)
        # Need to convert this to a valid regex.
        filter_regex = '(' + '|'.join(filter_list) + ')'
        cmd_args.append('--story-filter=' + filter_regex)
    if args.isolated_script_test_repeat:
        cmd_args.append('--pageset-repeat=' +
                        str(args.isolated_script_test_repeat))
    if args.isolated_script_test_also_run_disabled_tests:
        cmd_args.append('--also-run-disabled-tests')
    cmd_args.append('--output-dir=' + tempfile_dir)
    cmd_args.append('--output-format=json-test-results')
    cmd = [sys.executable] + cmd_args
    rc = 1  # Set default returncode in case there is an exception.
    try:
        if args.xvfb:
            rc = xvfb.run_executable(cmd, env=env, stdoutfile=stdoutfile)
        else:
            rc = test_env.run_command_with_output(cmd,
                                                  env=env,
                                                  stdoutfile=stdoutfile)

        with open(stdoutfile) as f:
            benchmark_log = f.read()

        # If we have also output chartjson read it in and return it.
        # results-chart.json is the file name output by telemetry when the
        # chartjson output format is included
        tempfile_name = None
        if histogram_results:
            tempfile_name = os.path.join(tempfile_dir, 'histograms.json')
        else:
            tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')

        if tempfile_name is not None:
            with open(tempfile_name) as f:
                perf_results = json.load(f)

        # test-results.json is the file name output by telemetry when the
        # json-test-results format is included
        tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
        with open(tempfile_name) as f:
            json_test_results = json.load(f)
        num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
        valid = bool(rc == 0 or num_failures != 0)

    except Exception:
        traceback.print_exc()
        if results:
            print('results, which possibly caused exception: %s' %
                  json.dumps(results, indent=2))
        valid = False
    finally:
        # Pass ignore_errors=True because rmtree may otherwise fail when leaked
        # test processes are still holding open handles to files under
        # |tempfile_dir|. For example, see crbug.com/865896.
        shutil.rmtree(tempfile_dir, ignore_errors=True)

    if not valid and num_failures == 0:
        if rc == 0:
            rc = 1  # Signal an abnormal exit.

    return rc, perf_results, json_test_results, benchmark_log