def execute_telemetry_benchmark(
    command_generator, output_paths, use_xvfb=False):
  start = time.time()

  env = os.environ.copy()
  env['CHROME_HEADLESS'] = '1'
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH

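  # Assume failure until the benchmark command actually runs and reports an
  # exit code of its own.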
  return_code = 1
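  # Scratch directory for Telemetry's intermediate output (test-results.json,
  # histograms.json and, optionally, results.csv); removed in the finally
  # block below.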
  temp_dir = tempfile.mkdtemp('telemetry')
  try:
    command = command_generator.generate(temp_dir)
    if use_xvfb:
      # When running with xvfb, we currently output both to stdout and to the
      # file. It would be better to only output to the file to keep the logs
      # clean.
      return_code = xvfb.run_executable(
          command, env=env, stdoutfile=output_paths.logs)
    else:
      with open(output_paths.logs, 'w') as handle:
        return_code = test_env.run_command_output_to_handle(
            command, handle, env=env)
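    # Telemetry writes structured test results into |temp_dir|. If the file is
    # missing (e.g. the run was interrupted), record an interrupted result
    # instead.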
    expected_results_filename = os.path.join(temp_dir, 'test-results.json')
    if os.path.exists(expected_results_filename):
      shutil.move(expected_results_filename, output_paths.test_results)
    else:
      common.write_interrupted_test_results_to(
          output_paths.test_results, start)
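    # histograms.json is expected to exist for every run; shutil.move raises
    # if it is missing, and the except clause below logs that.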
    expected_perf_filename = os.path.join(temp_dir, 'histograms.json')
    shutil.move(expected_perf_filename, output_paths.perf_results)

    csv_file_path = os.path.join(temp_dir, 'results.csv')
    if os.path.isfile(csv_file_path):
      shutil.move(csv_file_path, output_paths.csv_perf_results)
  except Exception:
    print('The following exception may have prevented the code from '
          'outputting structured test results and perf results:')
    print(traceback.format_exc())
  finally:
    # Add ignore_errors=True because otherwise rmtree may fail due to leaky
    # test processes that are still holding open handles to files under
    # |temp_dir|. For example, see crbug.com/865896.
    shutil.rmtree(temp_dir, ignore_errors=True)

  print_duration('executing benchmark %s' % command_generator.benchmark, start)

  # Telemetry sets exit code to -1 to indicate that no stories were run. This
  # becomes 255 on Linux because Linux doesn't support -1, so it does modulo:
  # -1 % 256 == 255.
  # TODO(crbug.com/1019139): Make 111 be the exit code that means
  # "no stories were run".
  if return_code in (111, -1, 255):
    print('Exit code %s indicates that no stories were run, so we are marking '
          'this as a success.' % return_code)
    return 0
  if return_code:
    return return_code
  return 0
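
# Illustrative sketch of the duck-typed interfaces this function expects. The
# names and command below are hypothetical stand-ins, not the real generator
# classes used by the harness:
#
#   class ExampleCommandGenerator(object):
#     benchmark = 'speedometer2'  # only used in log/duration messages
#
#     def generate(self, output_dir):
#       # Must return the argv list to run. The command is expected to write
#       # test-results.json and histograms.json into |output_dir|.
#       return ['./run_benchmark', self.benchmark, '--output-dir', output_dir]
#
#   class ExampleOutputPaths(object):
#     logs = '/tmp/out/benchmark_log.txt'
#     test_results = '/tmp/out/test_results.json'
#     perf_results = '/tmp/out/perf_results.json'
#     csv_perf_results = '/tmp/out/perf_results.csv'
#
#   return_code = execute_telemetry_benchmark(
#       ExampleCommandGenerator(), ExampleOutputPaths(), use_xvfb=False)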