Пример #1
0
def main():
  """Entry point: sets up the environment and launches Chrome.

  Returns:
    0 on success (used as the process exit code).
  """
  signal_util.setup()

  OPTIONS.parse_configure_file()

  parsed_args = launch_chrome_options.parse_args(sys.argv)
  logging_util.setup(
      level=logging.DEBUG if parsed_args.verbose else logging.INFO)

  _prepare_chrome_user_data_dir(parsed_args)
  global _CHROME_PID_PATH
  _CHROME_PID_PATH = os.path.join(_USER_DATA_DIR, 'chrome.pid')

  # If there is an X server at :0.0 and GPU is enabled, set it as the
  # current display.
  if parsed_args.display:
    os.environ['DISPLAY'] = parsed_args.display

  os.chdir(_ROOT_DIR)

  if not parsed_args.remote:
    _kill_running_chrome()

  if parsed_args.run_ninja:
    build_common.run_ninja()

  ld_library_path = os.environ.get('LD_LIBRARY_PATH')
  lib_paths = ld_library_path.split(':') if ld_library_path else []
  lib_paths.append(build_common.get_load_library_path())
  # Add the directory of the chrome binary so that .so files in the directory
  # can be loaded. This is needed for loading libudev.so.0.
  # TODO(crbug.com/375609): Remove the hack once it becomes no longer needed.
  lib_paths.append(os.path.dirname(_get_chrome_path(parsed_args)))
  os.environ['LD_LIBRARY_PATH'] = ':'.join(lib_paths)
  launch_chrome_util.set_environment_for_chrome()

  # Merged: the APK existence check and CRX preparation were previously
  # guarded by two back-to-back identical is_running_on_remote_host() tests.
  if not platform_util.is_running_on_remote_host():
    _check_apk_existence(parsed_args)
    prep_launch_chrome.prepare_crx(parsed_args)
  prep_launch_chrome.remove_crx_at_exit_if_needed(parsed_args)

  if parsed_args.remote:
    remote_executor.launch_remote_chrome(parsed_args, sys.argv[1:])
  else:
    platform_util.assert_machine(OPTIONS.target())
    _check_crx_existence(parsed_args)
    _run_chrome_iterations(parsed_args)

  return 0
Пример #2
0
def main(raw_args):
  """Parses the command line and dispatches to the requested run mode."""
  args = _process_args(raw_args)

  if args.run_ninja:
    build_common.run_ninja()

  # Collect the drivers once per requested repetition.
  test_driver_list = []
  for _ in xrange(args.repeat_runs):
    test_driver_list.extend(_get_test_driver_list(args))

  if args.plan_report:
    suite_results.initialize(test_driver_list, args, False)
    drivers_by_name = sorted(test_driver_list, key=lambda d: d.name)
    suite_results.report_expected_results(
        d.scoreboard for d in drivers_by_name)
    return 0
  if args.list:
    pretty_print_tests(args)
    return 0
  if args.remote:
    return _run_suites_and_output_results_remote(args, raw_args)
  return _run_suites_and_output_results_local(test_driver_list, args)
Пример #3
0
def main(raw_args):
    """Processes arguments, optionally builds, then runs the selected mode."""
    args = _process_args(raw_args)

    if args.run_ninja:
        build_common.run_ninja()

    # One batch of drivers per requested repeat run.
    test_driver_list = []
    for _ in xrange(args.repeat_runs):
        test_driver_list.extend(_get_test_driver_list(args))

    if args.plan_report:
        suite_results.initialize(test_driver_list, args, False)
        ordered = sorted(test_driver_list, key=lambda d: d.name)
        suite_results.report_expected_results(
            d.scoreboard for d in ordered)
        return 0
    if args.list:
        pretty_print_tests(args)
        return 0
    if args.remote:
        return _run_suites_and_output_results_remote(args, raw_args)
    return _run_suites_and_output_results_local(test_driver_list, args)
Пример #4
0
def handle_compare(parsed_args):
  """The entry point for compare command.

  Runs the perf test alternately against the stashed (control) tree and the
  current (experiment) tree, then prints medians plus bootstrap confidence
  intervals for the per-metric differences.

  Args:
    parsed_args: An argparse.Namespace object.
  """
  expt_root = get_abs_arc_root()
  ctrl_root = get_abs_stash_root()

  # The control tree must have been saved by the "stash" subcommand first.
  if not os.path.exists(ctrl_root):
    sys.exit('%s not found; run "interleaved_perftest.py stash" first to save '
             'control binaries' % ctrl_root)

  check_current_configure_options(parsed_args)

  ctrl_options = load_configure_options(ctrl_root)
  expt_options = load_configure_options(expt_root)

  logging.info('iterations: %d', parsed_args.iterations)
  logging.info('ctrl_options: %s', ctrl_options)
  logging.info('expt_options: %s', expt_options)

  if parsed_args.run_ninja:
    build_common.run_ninja()

  # Forward verbosity and extra launch-chrome flags to both runners.
  launch_chrome_opts = []
  if parsed_args.verbose:
    launch_chrome_opts.append('--verbose')
  launch_chrome_opts.extend(parsed_args.launch_chrome_opt)

  # Distinct instance ids so the two runners do not collide with each other.
  ctrl_runner = InteractivePerfTestRunner(
      arc_root=ctrl_root,
      remote=parsed_args.remote,
      launch_chrome_opts=launch_chrome_opts,
      instance_id=0)
  expt_runner = InteractivePerfTestRunner(
      arc_root=expt_root,
      remote=parsed_args.remote,
      launch_chrome_opts=launch_chrome_opts,
      instance_id=1)

  with contextlib.closing(ctrl_runner), contextlib.closing(expt_runner):
    ctrl_runner.start()
    expt_runner.start()

    # Maps metric name -> list of samples accumulated across iterations.
    ctrl_perfs = collections.defaultdict(list)
    expt_perfs = collections.defaultdict(list)

    def do_ctrl():
      merge_perfs(ctrl_perfs, ctrl_runner.run())

    def do_expt():
      merge_perfs(expt_perfs, expt_runner.run())

    for iteration in xrange(parsed_args.iterations):
      print
      print '=================================== iteration %d/%d' % (
          iteration + 1, parsed_args.iterations)
      # Randomize ctrl/expt order within each iteration to cancel
      # ordering bias.
      for do in random.sample((do_ctrl, do_expt), 2):
        do()

  print
  print 'VRAWPERF_CTRL=%r' % dict(ctrl_perfs)  # Convert from defaultdict.
  print 'VRAWPERF_EXPT=%r' % dict(expt_perfs)  # Convert from defaultdict.
  print
  print 'PERF=runs=%d CI=%d%%' % (
      parsed_args.iterations, parsed_args.confidence_level)
  if expt_options == ctrl_options:
    print '     configure_opts=%s' % expt_options
  else:
    print '     configure_opts=%s (vs. %s)' % (expt_options, ctrl_options)
  print '     launch_chrome_opts=%s' % ' '.join(parsed_args.launch_chrome_opt)

  def _print_metric(prefix, key, unit, frac_digits=0):
    # Prints ctrl/expt medians for |key| and the bootstrap CI of their
    # difference, tagged [++]/[--] when the CI excludes zero.
    def format_frac(k, sign=False):
      # Builds a format such as '%+.1f' and appends the unit suffix.
      format_string = '%'
      if sign:
        format_string += '+'
      format_string += '.%d' % frac_digits
      format_string += 'f'
      return (format_string % k) + unit
    ctrl_sample = ctrl_perfs[key]
    expt_sample = expt_perfs[key]
    ctrl_median = statistics.compute_median(ctrl_sample)
    expt_median = statistics.compute_median(expt_sample)
    diff_estimate_lower, diff_estimate_upper = (
        bootstrap_estimation(
            ctrl_sample, expt_sample,
            statistics.compute_median,
            parsed_args.confidence_level))
    if diff_estimate_upper < 0:
      significance = '[--]'
    elif diff_estimate_lower > 0:
      significance = '[++]'
    else:
      significance = '[not sgfnt.]'
    print '     %s: ctrl=%s, expt=%s, diffCI=(%s,%s) %s' % (
        prefix,
        format_frac(ctrl_median),
        format_frac(expt_median),
        format_frac(diff_estimate_lower, sign=True),
        format_frac(diff_estimate_upper, sign=True),
        significance)

  _print_metric('boot', 'boot_time_ms', 'ms')
  _print_metric('  preEmbed', 'pre_embed_time_ms', 'ms')
  _print_metric('  pluginLoad', 'plugin_load_time_ms', 'ms')
  _print_metric('  onResume', 'on_resume_time_ms', 'ms')
  _print_metric('virt', 'app_virt_mem', 'MB', frac_digits=1)
  _print_metric('res', 'app_res_mem', 'MB', frac_digits=1)
  _print_metric('pdirt', 'app_pdirt_mem', 'MB', frac_digits=1)

  print '     (see go/arcipt for how to interpret these numbers)'
Пример #5
0
def handle_stash(parsed_args):
  """The entry point for stash command.

  Copies the current arc tree (minus excluded build artifacts) into the
  stash directory with rsync, for later use as the control binaries by the
  compare command.

  Args:
    parsed_args: An argparse.Namespace object.
  """
  arc_root = get_abs_arc_root()
  stash_root = get_abs_stash_root()

  check_current_configure_options(parsed_args)

  options = load_configure_options(arc_root)
  logging.info('options: %s', options)
  if parsed_args.run_ninja:
    build_common.run_ninja()

  # See FILTER RULES section in rsync manpages for syntax.
  rules_text = """
  # No git repo.
  - .git/
  # Artifacts for the target arch and common.
  + /{out}/target/{target}/runtime/
  + /{out}/target/{target}/unittest_info/
  - /{out}/target/{target}/*
  + /{out}/target/{target}/
  + /{out}/target/common/
  - /{out}/target/*
  - /{out}/staging/
  # No internal-apks build artifacts.
  - /{out}/gms-core-build/
  - /{out}/google-contacts-sync-adapter-build/
  + /{out}/
  + /src/
  # aapt etc.
  + /third_party/android-sdk/
  # ninja etc.
  + /third_party/tools/ninja/
  + /third_party/tools/crosutils/mod_for_test_scripts/ssh_keys/
  - /third_party/tools/crosutils/mod_for_test_scripts/*
  + /third_party/tools/crosutils/mod_for_test_scripts/
  - /third_party/tools/crosutils/*
  + /third_party/tools/crosutils/
  - /third_party/tools/*
  + /third_party/tools/
  - /third_party/*
  + /third_party/
  + /launch_chrome
  - /*
  """.format(
      out=build_common.OUT_DIR,
      target=build_common.get_target_dir_name())

  # Drop comment and blank lines; the rest become rsync filter rules.
  rules = []
  for line in rules_text.strip().splitlines():
    line = line.strip()
    if line and not line.startswith('#'):
      rules.append(line)

  # --delete-excluded removes previously-stashed files that are now excluded.
  args = ['rsync', '-a', '--delete', '--delete-excluded', '--copy-links']
  if parsed_args.verbose:
    args.append('-v')
  args.extend(['--filter=%s' % rule for rule in rules])
  # A trailing dot is required to make rsync work as we expect.
  args.extend([os.path.join(arc_root, '.'), stash_root])

  logging.info(
      'running rsync to copy the arc tree to %s. please be patient...',
      stash_root)
  subprocess.check_call(args)

  logging.info('stashed the arc tree at %s.', stash_root)
Пример #6
0
def handle_compare(parsed_args):
    """The entry point for compare command.

  Runs the perf test alternately against the stashed (control) tree and the
  current (experiment) tree, then prints medians plus bootstrap confidence
  intervals for the per-metric differences.

  Args:
    parsed_args: An argparse.Namespace object.
  """
    expt_root = get_abs_arc_root()
    ctrl_root = get_abs_stash_root()

    # The control tree must have been saved by the "stash" subcommand first.
    if not os.path.exists(ctrl_root):
        sys.exit(
            '%s not found; run "interleaved_perftest.py stash" first to save '
            'control binaries' % ctrl_root)

    check_current_configure_options(parsed_args)

    ctrl_options = load_configure_options(ctrl_root)
    expt_options = load_configure_options(expt_root)

    logging.info('iterations: %d', parsed_args.iterations)
    logging.info('ctrl_options: %s', ctrl_options)
    logging.info('expt_options: %s', expt_options)

    if parsed_args.run_ninja:
        build_common.run_ninja()

    # Forward verbosity and extra launch-chrome flags to both runners.
    launch_chrome_opts = []
    if parsed_args.verbose:
        launch_chrome_opts.append('--verbose')
    launch_chrome_opts.extend(parsed_args.launch_chrome_opt)

    # Distinct instance ids so the two runners do not collide with each other.
    ctrl_runner = InteractivePerfTestRunner(
        arc_root=ctrl_root,
        remote=parsed_args.remote,
        launch_chrome_opts=launch_chrome_opts,
        instance_id=0)
    expt_runner = InteractivePerfTestRunner(
        arc_root=expt_root,
        remote=parsed_args.remote,
        launch_chrome_opts=launch_chrome_opts,
        instance_id=1)

    with contextlib.closing(ctrl_runner), contextlib.closing(expt_runner):
        ctrl_runner.start()
        expt_runner.start()

        # Maps metric name -> list of samples accumulated across iterations.
        ctrl_perfs = collections.defaultdict(list)
        expt_perfs = collections.defaultdict(list)

        def do_ctrl():
            merge_perfs(ctrl_perfs, ctrl_runner.run())

        def do_expt():
            merge_perfs(expt_perfs, expt_runner.run())

        for iteration in xrange(parsed_args.iterations):
            print
            print '=================================== iteration %d/%d' % (
                iteration + 1, parsed_args.iterations)
            # Randomize ctrl/expt order within each iteration to cancel
            # ordering bias.
            for do in random.sample((do_ctrl, do_expt), 2):
                do()

    print
    print 'VRAWPERF_CTRL=%r' % dict(ctrl_perfs)  # Convert from defaultdict.
    print 'VRAWPERF_EXPT=%r' % dict(expt_perfs)  # Convert from defaultdict.
    print
    print 'PERF=runs=%d CI=%d%%' % (parsed_args.iterations,
                                    parsed_args.confidence_level)
    if expt_options == ctrl_options:
        print '     configure_opts=%s' % expt_options
    else:
        print '     configure_opts=%s (vs. %s)' % (expt_options, ctrl_options)
    print '     launch_chrome_opts=%s' % ' '.join(
        parsed_args.launch_chrome_opt)

    def _print_metric(prefix, key, unit, frac_digits=0):
        # Prints ctrl/expt medians for |key| and the bootstrap CI of their
        # difference, tagged [++]/[--] when the CI excludes zero.
        def format_frac(k, sign=False):
            # Builds a format such as '%+.1f' and appends the unit suffix.
            format_string = '%'
            if sign:
                format_string += '+'
            format_string += '.%d' % frac_digits
            format_string += 'f'
            return (format_string % k) + unit

        ctrl_sample = ctrl_perfs[key]
        expt_sample = expt_perfs[key]
        ctrl_median = statistics.compute_median(ctrl_sample)
        expt_median = statistics.compute_median(expt_sample)
        diff_estimate_lower, diff_estimate_upper = (bootstrap_estimation(
            ctrl_sample, expt_sample, statistics.compute_median,
            parsed_args.confidence_level))
        if diff_estimate_upper < 0:
            significance = '[--]'
        elif diff_estimate_lower > 0:
            significance = '[++]'
        else:
            significance = '[not sgfnt.]'
        print '     %s: ctrl=%s, expt=%s, diffCI=(%s,%s) %s' % (
            prefix, format_frac(ctrl_median), format_frac(expt_median),
            format_frac(diff_estimate_lower, sign=True),
            format_frac(diff_estimate_upper, sign=True), significance)

    _print_metric('boot', 'boot_time_ms', 'ms')
    _print_metric('  preEmbed', 'pre_embed_time_ms', 'ms')
    _print_metric('  pluginLoad', 'plugin_load_time_ms', 'ms')
    _print_metric('  onResume', 'on_resume_time_ms', 'ms')
    _print_metric('virt', 'app_virt_mem', 'MB', frac_digits=1)
    _print_metric('res', 'app_res_mem', 'MB', frac_digits=1)
    _print_metric('pdirt', 'app_pdirt_mem', 'MB', frac_digits=1)

    print '     (see go/arcipt for how to interpret these numbers)'
Пример #7
0
def handle_stash(parsed_args):
    """The entry point for stash command.

  Copies the current arc tree (minus excluded build artifacts) into the
  stash directory with rsync, for later use as the control binaries by the
  compare command.

  Args:
    parsed_args: An argparse.Namespace object.
  """
    arc_root = get_abs_arc_root()
    stash_root = get_abs_stash_root()

    check_current_configure_options(parsed_args)

    options = load_configure_options(arc_root)
    logging.info('options: %s', options)
    if parsed_args.run_ninja:
        build_common.run_ninja()

    # See FILTER RULES section in rsync manpages for syntax.
    rules_text = """
  # No git repo.
  - .git/
  # Artifacts for the target arch and common.
  + /{out}/target/{target}/runtime/
  + /{out}/target/{target}/unittest_info/
  - /{out}/target/{target}/*
  + /{out}/target/{target}/
  + /{out}/target/common/
  - /{out}/target/*
  - /{out}/staging/
  # No internal-apks build artifacts.
  - /{out}/gms-core-build/
  - /{out}/google-contacts-sync-adapter-build/
  + /{out}/
  + /src/
  # aapt etc.
  + /third_party/android-sdk/
  # ninja etc.
  + /third_party/tools/ninja/
  + /third_party/tools/crosutils/mod_for_test_scripts/ssh_keys/
  - /third_party/tools/crosutils/mod_for_test_scripts/*
  + /third_party/tools/crosutils/mod_for_test_scripts/
  - /third_party/tools/crosutils/*
  + /third_party/tools/crosutils/
  - /third_party/tools/*
  + /third_party/tools/
  - /third_party/*
  + /third_party/
  + /launch_chrome
  - /*
  """.format(out=build_common.OUT_DIR,
             target=build_common.get_target_dir_name())

    # Drop comment and blank lines; the rest become rsync filter rules.
    rules = []
    for line in rules_text.strip().splitlines():
        line = line.strip()
        if line and not line.startswith('#'):
            rules.append(line)

    # --delete-excluded removes previously-stashed files that are now
    # excluded.
    args = ['rsync', '-a', '--delete', '--delete-excluded', '--copy-links']
    if parsed_args.verbose:
        args.append('-v')
    args.extend(['--filter=%s' % rule for rule in rules])
    # A trailing dot is required to make rsync work as we expect.
    args.extend([os.path.join(arc_root, '.'), stash_root])

    logging.info(
        'running rsync to copy the arc tree to %s. please be patient...',
        stash_root)
    subprocess.check_call(args)

    logging.info('stashed the arc tree at %s.', stash_root)
Пример #8
0
                      default=True, dest='run_ninja',
                      help='Do not run ninja before archiving test bundle.')
  parser.add_argument('-o', '--output',
                      default=build_common.get_test_bundle_name(),
                      help=('The name of the test bundle to be created.'))
  return parser.parse_args()


if __name__ == '__main__':
  # Script entry point: builds ARC, prepares the integration-test suites,
  # then collects the file paths to archive into the test bundle.
  OPTIONS.parse_configure_file()

  # Prepare all the files needed to run integration tests.
  parsed_args = _parse_args()
  # Build arc runtime.
  if parsed_args.run_ninja:
    build_common.run_ninja()

  logging.basicConfig(level=logging.WARNING)
  integration_tests_args = _get_integration_tests_args(parsed_args.jobs)
  assert run_integration_tests.prepare_suites(integration_tests_args)

  # Prepare art.901-perf for perf vm tests. The test is marked as a LARGE test.
  integration_tests_args.include_patterns = ['art.901-perf:*']
  integration_tests_args.include_large = True
  assert run_integration_tests.prepare_suites(integration_tests_args)

  # Archive all the files needed to run integration tests into a zip file.
  paths = _get_archived_file_paths()
  if OPTIONS.is_debug_info_enabled():
    paths = _convert_to_stripped_paths(paths)
  print 'Creating %s' % parsed_args.output
  # NOTE(review): the actual archive creation appears to continue beyond
  # this excerpt — confirm against the full script.
Пример #9
0
                        help='Do not run ninja before archiving test bundle.')
    parser.add_argument('-o',
                        '--output',
                        default=build_common.get_test_bundle_name(),
                        help=('The name of the test bundle to be created.'))
    return parser.parse_args()


if __name__ == '__main__':
    # Script entry point: builds ARC, prepares the integration-test suites,
    # then collects the file paths to archive into the test bundle.
    OPTIONS.parse_configure_file()

    # Prepare all the files needed to run integration tests.
    parsed_args = _parse_args()
    # Build arc runtime.
    if parsed_args.run_ninja:
        build_common.run_ninja()

    logging.basicConfig(level=logging.WARNING)
    integration_tests_args = _get_integration_tests_args(parsed_args.jobs)
    assert run_integration_tests.prepare_suites(integration_tests_args)

    # Prepare art.901-perf for perf vm tests. The test is marked as a LARGE test.
    integration_tests_args.include_patterns = ['art.901-perf:*']
    integration_tests_args.include_large = True
    assert run_integration_tests.prepare_suites(integration_tests_args)

    # Archive all the files needed to run integration tests into a zip file.
    paths = _get_archived_file_paths()
    if OPTIONS.is_debug_info_enabled():
        paths = _convert_to_stripped_paths(paths)
    print 'Creating %s' % parsed_args.output
    # NOTE(review): the actual archive creation appears to continue beyond
    # this excerpt — confirm against the full script.