Example #1
def main(args):
    host = typ.Host()
    runner = typ.Runner(host)
    parser = ArgumentParser(host)
    parser.prog = os.path.basename(sys.argv[0])
    parser.description = __doc__
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    runner.parse_args(
        parser=parser,
        argv=args,
        isolate=['installer_test.*'],  # InstallerTest must be serialized.
        top_level_dir=CUR_DIR,
        retry_limit=3,  # Retry failures by default since the tests are flaky.
    )
    if parser.exit_status is not None:
        return parser.exit_status

    # Stuff args into environment vars for use by child procs.
    _prepare_env_for_subprocesses(parser, runner.args)

    try:
        return runner.run()[0]
    except KeyboardInterrupt:
        print("interrupted, exiting", file=host.stderr)
        return 130
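
The skeleton shared by most of these examples is small: construct a typ.Runner, hand it an argument parser via parse_args, return early if the parser already decided on an exit status (e.g. for --help or a parse error), then call run() and use the first element of its result as the process exit code. A minimal sketch of that pattern, with no project-specific flags or environment setup:

import sys

import typ


def main(argv):
    # Bare-bones version of the pattern above: parse typ's own flags,
    # honor an early exit, then run the suite.
    runner = typ.Runner()
    parser = typ.ArgumentParser()
    runner.parse_args(parser, argv)
    if parser.exit_status is not None:
        return parser.exit_status

    # run() returns a tuple whose first element is the exit code.
    return runner.run()[0]


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))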
Example #2
def main(argv):
    """Runs Blink bindings IDL compiler on test IDL files and compares the
    results with reference files.

    Please execute the script whenever changes are made to the compiler
    (this is automatically done as a presubmit script),
    and submit changes to the test results in the same patch.
    This makes it easier to track and review changes in generated code.
    """

    argument_parser = create_argument_parser()

    # First, run bindings unit tests.
    runner = typ.Runner()
    runner.parse_args(argument_parser, argv[1:])
    if argument_parser.exit_status is not None:
        return argument_parser.exit_status

    args = runner.args
    args.top_level_dir = path_finder.get_bindings_scripts_dir()
    if not args.skip_unit_tests:
        return_code, _, _ = runner.run()
        if return_code != 0:
            return return_code

    # Now run the bindings end-to-end tests.
    if args.skip_reference_tests:
        return 0

    return run_bindings_tests(args.reset_results, args.verbose,
                              args.suppress_diff)
Example #3
  def Run(self, args):
    runner = typ.Runner()
    if self.stream:
      runner.host.stdout = self.stream

    if args.no_browser:
      possible_browser = None
      platform = platform_module.GetHostPlatform()
    else:
      possible_browser = browser_finder.FindBrowser(args)
      platform = possible_browser.platform

    # Telemetry seems to overload the system if we run one test per core,
    # so we scale things back a fair amount. Many of the telemetry tests
    # are long-running, so there's a limit to how much parallelism we
    # can effectively use for now anyway.
    #
    # It should be possible to handle multiple devices if we adjust the
    # browser_finder code properly, but for now we only handle one on ChromeOS.
    if platform.GetOSName() == 'chromeos':
      runner.args.jobs = 1
    elif platform.GetOSName() == 'android':
      runner.args.jobs = len(device_finder.GetDevicesMatchingOptions(args))
      print('Running tests with %d Android device(s).' % runner.args.jobs)
    elif platform.GetOSVersionName() == 'xp':
      # For an undiagnosed reason, XP falls over with more parallelism.
      # See crbug.com/388256
      runner.args.jobs = max(int(args.jobs) // 4, 1)
    else:
      runner.args.jobs = max(int(args.jobs) // 2, 1)

    runner.args.metadata = args.metadata
    runner.args.passthrough = args.passthrough
    runner.args.path = args.path
    runner.args.retry_limit = args.retry_limit
    runner.args.test_results_server = args.test_results_server
    runner.args.test_type = args.test_type
    runner.args.top_level_dir = args.top_level_dir
    runner.args.write_full_results_to = args.write_full_results_to
    runner.args.write_trace_to = args.write_trace_to
    runner.args.list_only = args.list_only

    runner.args.path.append(util.GetUnittestDataDir())

    # Always print out this info to make debugging easier.
    runner.args.timing = True
    runner.args.verbose = 3

    runner.classifier = GetClassifier(args, possible_browser)
    runner.context = args
    runner.setup_fn = _SetUpProcess
    runner.teardown_fn = _TearDownProcess
    runner.win_multiprocessing = typ.WinMultiprocessing.importable
    try:
      ret, _, _ = runner.run()
    except KeyboardInterrupt:
      print("interrupted, exiting", file=sys.stderr)
      ret = 130
    return ret
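
The parallelism comment above recurs in several of these examples; the heuristic it describes boils down to a few cases. A sketch of that decision as a standalone helper (the function name and the host-only fallback are illustrative, not part of typ or telemetry):

import os


def pick_job_count(os_name, os_version_name, requested_jobs, num_devices=0):
    # Mirrors the scaling above: ChromeOS gets a single job, Android gets one
    # job per attached device, Windows XP gets a quarter of the requested jobs
    # (crbug.com/388256), and everything else gets half, never less than one.
    if os_name == 'chromeos':
        return 1
    if os_name == 'android':
        return max(num_devices, 1)
    if os_version_name == 'xp':
        return max(int(requested_jobs) // 4, 1)
    return max(int(requested_jobs) // 2, 1)


# For example, starting from one job per core and scaling back:
jobs = pick_job_count('linux', None, os.cpu_count() or 1)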
Example #4
def main():
    parser = typ.ArgumentParser()
    parser.add_argument('--build-dir',
                        help='Specifies chromium build directory.')
    parser.add_argument('--target-gen-dir')

    runner = typ.Runner()

    # Set this when using context that will be passed to tests.
    runner.win_multiprocessing = typ.WinMultiprocessing.importable

    runner.parse_args(parser,
                      argv=None,
                      tests=[path_finder.get_integration_tests_dir()])

    # Set up the logging level.
    if runner.args.verbose > 1:
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(level=level)

    # Copy the dynamically generated updater version_info.py from the
    # target gen directory to
    # //chrome/updater/test/integration_tests/updater so that
    # it can be imported as a module during test runs.
    target_gen_dir_abs_path = os.path.abspath(runner.args.target_gen_dir)
    version_file_path = os.path.join(target_gen_dir_abs_path, 'gen', 'chrome',
                                     'updater', 'version_info.py')
    if os.path.exists(version_file_path):
        dest = os.path.join(path_finder.get_integration_tests_dir(), 'updater',
                            'version_info.py')
        copy_file(version_file_path, dest)
    else:
        logging.error('File not found: %s', version_file_path)
        return -1

    # Copy the dynamically generated updater branding_info.py from the
    # target gen directory to
    # //chrome/updater/test/integration_tests/updater so that
    # it can be imported as a module during test runs.
    branding_file_path = os.path.join(target_gen_dir_abs_path, 'gen', 'chrome',
                                      'updater', 'branding_info.py')
    if os.path.exists(branding_file_path):
        dest = os.path.join(path_finder.get_integration_tests_dir(), 'updater',
                            'branding_info.py')
        copy_file(branding_file_path, dest)
    else:
        logging.error('File not found: %s', branding_file_path)
        return -2

    runner.context = Context(runner.args.build_dir)
    return runner.run()[0]
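
Example #4 shows that flags added to typ.ArgumentParser() come back as attributes of runner.args after parse_args(), next to typ's built-in options. A stripped-down sketch of just that round trip, with a hypothetical --data-dir flag standing in for the updater-specific ones:

import sys

import typ


def main():
    parser = typ.ArgumentParser()
    # Hypothetical project-specific flag; after parse_args() it is available
    # as runner.args.data_dir, alongside built-ins such as runner.args.verbose.
    parser.add_argument('--data-dir', help='Directory containing test data.')

    runner = typ.Runner()
    runner.parse_args(parser, argv=None)
    if parser.exit_status is not None:
        return parser.exit_status

    print('data dir: %s, verbosity: %d' % (runner.args.data_dir,
                                           runner.args.verbose))
    return runner.run()[0]


if __name__ == '__main__':
    sys.exit(main())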
Example #5
def main(argv):
    argument_parser = create_argument_parser()

    runner = typ.Runner()
    runner.parse_args(argument_parser, argv[1:])
    runner.args.top_level_dirs = [os.path.dirname(__file__)]
    runner.context = runner.args

    # Needs to be set to enable customizing runner.context
    runner.win_multiprocessing = typ.WinMultiprocessing.importable

    return_code, _, _ = runner.run()
    return return_code
Example #6
def main(argv):
    devil_chromium.Initialize()
    argument_parser = create_argument_parser()

    runner = typ.Runner()
    runner.parse_args(argument_parser, argv[1:])
    if argument_parser.exit_status is not None:
        return argument_parser.exit_status
    runner.args.top_level_dirs = [os.path.dirname(__file__)]
    runner.context = runner.args

    # Needs to be set to enable customizing runner.context
    runner.win_multiprocessing = typ.WinMultiprocessing.importable

    return_code, _, _ = runner.run()
    return return_code
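
Examples #5 and #6 pair the same two settings: whenever runner.context is customized, runner.win_multiprocessing is set to typ.WinMultiprocessing.importable (the comments above only say it "needs to be set"; presumably this tells typ that the module can be re-imported by worker processes so the context can reach them). A minimal sketch of that pairing:

import os

import typ


def main(argv):
    runner = typ.Runner()
    runner.parse_args(typ.ArgumentParser(), argv[1:])
    runner.args.top_level_dirs = [os.path.dirname(__file__)]

    # As in the examples above: customizing runner.context goes together with
    # marking the runner module as importable for multiprocessing workers.
    runner.context = runner.args
    runner.win_multiprocessing = typ.WinMultiprocessing.importable

    return runner.run()[0]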
Example #7
    def Run(self, args):
        possible_browser = browser_finder.FindBrowser(args)

        runner = typ.Runner()
        if self.stream:
            runner.host.stdout = self.stream

        # Telemetry seems to overload the system if we run one test per core,
        # so we scale things back a fair amount. Many of the telemetry tests
        # are long-running, so there's a limit to how much parallelism we
        # can effectively use for now anyway.
        #
        # It should be possible to handle multiple devices if we adjust
        # the browser_finder code properly, but for now we only handle the one
        # on Android and ChromeOS.
        if possible_browser.platform.GetOSName() in ('android', 'chromeos'):
            runner.args.jobs = 1
        else:
            runner.args.jobs = max(int(args.jobs) // 4, 1)

        runner.args.metadata = args.metadata
        runner.args.passthrough = args.passthrough
        runner.args.path = args.path
        runner.args.retry_limit = args.retry_limit
        runner.args.test_results_server = args.test_results_server
        runner.args.test_type = args.test_type
        runner.args.timing = args.timing
        runner.args.top_level_dir = args.top_level_dir
        runner.args.verbose = args.verbosity
        runner.args.write_full_results_to = args.write_full_results_to
        runner.args.write_trace_to = args.write_trace_to

        runner.args.path.append(util.GetUnittestDataDir())

        runner.classifier = GetClassifier(args, possible_browser)
        runner.context = args
        runner.setup_fn = _SetUpProcess
        runner.teardown_fn = _TearDownProcess
        runner.win_multiprocessing = typ.WinMultiprocessing.importable
        try:
            ret, _, _ = runner.run()
        except KeyboardInterrupt:
            print("interrupted, exiting", file=sys.stderr)
            ret = 130
        return ret
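
Nearly every Run/RunTests variant here ends with the same try/except tail: run the suite and translate a KeyboardInterrupt into exit code 130 (128 + SIGINT). That tail could be factored into a small helper; a sketch, with an illustrative helper name:

import sys


def run_and_handle_interrupt(runner):
    # Shared tail of the examples above: runner.run() returns a tuple whose
    # first element is the exit code; Ctrl-C becomes the conventional 130.
    try:
        ret, _, _ = runner.run()
    except KeyboardInterrupt:
        print("interrupted, exiting", file=sys.stderr)
        ret = 130
    return ret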
Example #8
def main(argv):
    argument_parser = create_argument_parser()

    # Run bindings unit tests.
    runner = typ.Runner()
    runner.parse_args(argument_parser, argv[1:])
    if argument_parser.exit_status is not None:
        return argument_parser.exit_status

    args = runner.args
    args.top_level_dirs = [
        path_finder.get_bindings_scripts_dir(),
        path_finder.get_build_scripts_dir(),
    ]
    if not args.skip_unit_tests:
        return_code, _, _ = runner.run()
        if return_code != 0:
            return return_code

    return 0
Example #9
def RunTests(args):
  parser = _CreateTestArgParsers()
  try:
    options, extra_args = parser.parse_known_args(args)
  except arg_parser._Bailout:
    PrintTelemetryHelp()
    return parser.exit_status
  binary_manager.InitDependencyManager(options.client_configs)
  for start_dir in options.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir,
        options.top_level_dir,
        base_class=serially_executed_browser_test_case.
        SeriallyExecutedBrowserTestCase)
    browser_test_classes = modules_to_classes.values()

  _ValidateDistinctNames(browser_test_classes)

  test_class = None
  for cl in browser_test_classes:
    if cl.Name() == options.test:
      test_class = cl
      break

  if not test_class:
    print('Cannot find test class with name matching %s' % options.test)
    print('Available tests: %s' % '\n'.join(
        cl.Name() for cl in browser_test_classes))
    return 1

  test_class._typ_runner = typ_runner = typ.Runner()

  # Create test context.
  typ_runner.context = browser_test_context.TypTestContext()
  for c in options.client_configs:
    typ_runner.context.client_configs.append(c)
  typ_runner.context.finder_options = ProcessCommandLineOptions(
      test_class, options, extra_args)
  typ_runner.context.test_class = test_class
  typ_runner.context.expectations_files = options.expectations_files
  test_times = None
  if options.read_abbreviated_json_results_from:
    with open(options.read_abbreviated_json_results_from, 'r') as f:
      abbr_results = json.load(f)
      test_times = abbr_results.get('times')

  # Set up the typ.Runner instance.
  typ_runner.args.all = options.all
  typ_runner.args.expectations_files = options.expectations_files
  typ_runner.args.jobs = options.jobs
  typ_runner.args.list_only = options.list_only
  typ_runner.args.metadata = options.metadata
  typ_runner.args.passthrough = options.passthrough
  typ_runner.args.path = options.path
  typ_runner.args.quiet = options.quiet
  typ_runner.args.repeat = options.repeat
  typ_runner.args.repository_absolute_path = options.repository_absolute_path
  typ_runner.args.retry_limit = options.retry_limit
  typ_runner.args.retry_only_retry_on_failure_tests = (
      options.retry_only_retry_on_failure_tests)
  typ_runner.args.skip = options.skip
  typ_runner.args.suffixes = TEST_SUFFIXES
  typ_runner.args.tags = options.tags
  typ_runner.args.test_name_prefix = options.test_name_prefix
  typ_runner.args.test_filter = options.test_filter
  typ_runner.args.test_results_server = options.test_results_server
  typ_runner.args.test_type = options.test_type
  typ_runner.args.top_level_dir = options.top_level_dir
  typ_runner.args.write_full_results_to = options.write_full_results_to
  typ_runner.args.write_trace_to = options.write_trace_to
  typ_runner.args.disable_resultsink = options.disable_resultsink

  typ_runner.classifier = _GetClassifier(typ_runner)
  typ_runner.path_delimiter = test_class.GetJSONResultsDelimiter()
  typ_runner.setup_fn = _SetUpProcess
  typ_runner.teardown_fn = _TearDownProcess

  tests_to_run = LoadTestCasesToBeRun(
      test_class=test_class, finder_options=typ_runner.context.finder_options,
      filter_tests_after_sharding=options.filter_tests_after_sharding,
      total_shards=options.total_shards, shard_index=options.shard_index,
      test_times=test_times,
      debug_shard_distributions=options.debug_shard_distributions,
      typ_runner=typ_runner)
  for t in tests_to_run:
    typ_runner.context.test_case_ids_to_run.add(t.id())
  typ_runner.context.Freeze()
  browser_test_context._global_test_context = typ_runner.context

  # Several class-level variables are set for GPU tests when
  # LoadTestCasesToBeRun is called. Functions like ExpectationsFiles and
  # GenerateTags, which use these variables, should be called after
  # LoadTestCasesToBeRun.

  test_class_expectations_files = test_class.ExpectationsFiles()
  # All file paths in test_class_expectations_files must be absolute.
  assert all(os.path.isabs(path) for path in test_class_expectations_files)
  typ_runner.args.expectations_files.extend(
      test_class_expectations_files)
  typ_runner.args.ignored_tags.extend(test_class.IgnoredTags())

  # Since sharding is handled by the browser_test_runner harness, which passes
  # browser_test_context.test_case_ids_to_run to the subprocesses to indicate
  # which test cases to run, we explicitly disable typ's own sharding.
  typ_runner.args.total_shards = 1
  typ_runner.args.shard_index = 0

  typ_runner.args.timing = True
  typ_runner.args.verbose = options.verbose
  typ_runner.win_multiprocessing = typ.WinMultiprocessing.importable

  try:
    ret, _, _ = typ_runner.run()
  except KeyboardInterrupt:
    print("interrupted, exiting", file=sys.stderr)
    ret = 130
  return ret
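
Example #9 (like #11 and #12) turns off typ's built-in sharding because the browser_test_runner harness has already decided which test cases each shard runs, and it passes that decision to the subprocesses through the context. A sketch of that handoff, assuming typ_runner.context is the TypTestContext built earlier in the example and selected_test_ids is the harness's already-sharded list:

def restrict_to_preselected_tests(typ_runner, selected_test_ids):
    # The harness has already sharded, so typ is forced to act as the only
    # shard and limited to the explicitly selected test cases.
    typ_runner.args.total_shards = 1
    typ_runner.args.shard_index = 0
    for test_id in selected_test_ids:
        typ_runner.context.test_case_ids_to_run.add(test_id)
    typ_runner.context.Freeze()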
Example #10
  def Run(self, args):
    runner = typ.Runner()
    if self.stream:
      runner.host.stdout = self.stream

    if args.no_browser:
      possible_browser = None
      platform = platform_module.GetHostPlatform()
    else:
      possible_browser = browser_finder.FindBrowser(args)
      platform = possible_browser.platform

    fetch_reference_chrome_binary = False
    # Fetch all binaries needed by telemetry before we run the benchmark.
    if possible_browser and possible_browser.browser_type == 'reference':
      fetch_reference_chrome_binary = True
    binary_manager.FetchBinaryDependencies(
        platform, args.client_configs, fetch_reference_chrome_binary)

    # Telemetry seems to overload the system if we run one test per core,
    # so we scale things back a fair amount. Many of the telemetry tests
    # are long-running, so there's a limit to how much parallelism we
    # can effectively use for now anyway.
    #
    # It should be possible to handle multiple devices if we adjust the
    # browser_finder code properly, but for now we only handle one on ChromeOS.
    if platform.GetOSName() == 'chromeos':
      runner.args.jobs = 1
    elif platform.GetOSName() == 'android':
      android_devs = android_device.FindAllAvailableDevices(args)
      runner.args.jobs = len(android_devs)
      if runner.args.jobs == 0:
        raise RuntimeError("No Android device found")
      print('Running tests with %d Android device(s).' % runner.args.jobs)
    elif platform.GetOSVersionName() == 'xp':
      # For an undiagnosed reason, XP falls over with more parallelism.
      # See crbug.com/388256
      runner.args.jobs = max(int(args.jobs) // 4, 1)
    else:
      runner.args.jobs = max(int(args.jobs) // 2, 1)

    runner.args.skip = args.skip
    runner.args.metadata = args.metadata
    runner.args.passthrough = args.passthrough
    runner.args.path = args.path
    runner.args.retry_limit = args.retry_limit
    runner.args.test_results_server = args.test_results_server
    runner.args.test_type = args.test_type
    runner.args.top_level_dirs = args.top_level_dirs
    runner.args.write_full_results_to = args.write_full_results_to
    runner.args.write_trace_to = args.write_trace_to
    runner.args.repeat = args.repeat
    runner.args.list_only = args.list_only
    runner.args.shard_index = args.shard_index
    runner.args.total_shards = args.total_shards

    runner.args.path.append(util.GetUnittestDataDir())

    # Standard verbosity will only emit output on test failure. Higher verbosity
    # levels spam the output with logging, making it very difficult to figure
    # out what's going on when digging into test failures.
    runner.args.timing = True
    runner.args.verbose = 1

    runner.classifier = GetClassifier(args, possible_browser)
    runner.context = args
    runner.setup_fn = _SetUpProcess
    runner.teardown_fn = _TearDownProcess
    runner.win_multiprocessing = typ.WinMultiprocessing.importable
    try:
      ret, _, _ = runner.run()
    except KeyboardInterrupt:
      print("interrupted, exiting", file=sys.stderr)
      ret = 130
    return ret
Example #11
def RunTests(args):
    parser = _CreateTestArgParsers()
    try:
        options, extra_args = parser.parse_known_args(args)
    except arg_parser._Bailout:
        return parser.exit_status
    binary_manager.InitDependencyManager(options.client_configs)

    for start_dir in options.start_dirs:
        modules_to_classes = discover.DiscoverClasses(
            start_dir,
            options.top_level_dir,
            base_class=serially_executed_browser_test_case.
            SeriallyExecutedBrowserTestCase)
        browser_test_classes = modules_to_classes.values()

    _ValidateDistinctNames(browser_test_classes)

    test_class = None
    for cl in browser_test_classes:
        if cl.Name() == options.test:
            test_class = cl
            break

    if not test_class:
        print('Cannot find test class with name matching %s' % options.test)
        print('Available tests: %s' % '\n'.join(cl.Name()
                                                for cl in browser_test_classes))
        return 1

    # Create test context.
    context = browser_test_context.TypTestContext()
    for c in options.client_configs:
        context.client_configs.append(c)
    context.finder_options = ProcessCommandLineOptions(test_class, options,
                                                       extra_args)
    context.test_class = test_class
    test_times = None
    if options.read_abbreviated_json_results_from:
        with open(options.read_abbreviated_json_results_from, 'r') as f:
            abbr_results = json.load(f)
            test_times = abbr_results.get('times')
    tests_to_run = LoadTestCasesToBeRun(
        test_class=test_class,
        finder_options=context.finder_options,
        filter_regex_str=options.test_filter,
        filter_tests_after_sharding=options.filter_tests_after_sharding,
        total_shards=options.total_shards,
        shard_index=options.shard_index,
        test_times=test_times,
        debug_shard_distributions=options.debug_shard_distributions)
    for t in tests_to_run:
        context.test_case_ids_to_run.add(t.id())
    context.Freeze()
    browser_test_context._global_test_context = context

    # Set up the typ runner.
    runner = typ.Runner()

    runner.context = context
    runner.setup_fn = _SetUpProcess
    runner.teardown_fn = _TearDownProcess

    runner.args.jobs = options.jobs
    runner.args.metadata = options.metadata
    runner.args.passthrough = options.passthrough
    runner.args.path = options.path
    runner.args.retry_limit = options.retry_limit
    runner.args.test_results_server = options.test_results_server
    runner.args.test_type = options.test_type
    runner.args.top_level_dir = options.top_level_dir
    runner.args.write_full_results_to = options.write_full_results_to
    runner.args.write_trace_to = options.write_trace_to
    runner.args.list_only = options.list_only
    runner.classifier = _GetClassifier(options)

    runner.args.suffixes = TEST_SUFFIXES

    # Since sharding is handled by the browser_test_runner harness, which
    # passes browser_test_context.test_case_ids_to_run to the subprocesses to
    # indicate which test cases to run, we explicitly disable typ's own
    # sharding.
    runner.args.total_shards = 1
    runner.args.shard_index = 0

    runner.args.timing = True
    runner.args.verbose = options.verbose
    runner.win_multiprocessing = typ.WinMultiprocessing.importable
    try:
        ret, _, _ = runner.run()
    except KeyboardInterrupt:
        print("interrupted, exiting", file=sys.stderr)
        ret = 130
    return ret
Example #12
def RunTests(args):
  parser = _CreateTestArgParsers()
  try:
    options, extra_args = parser.parse_known_args(args)
  except arg_parser._Bailout:
    PrintTelemetryHelp()
    return parser.exit_status
  binary_manager.InitDependencyManager(options.client_configs)

  not_using_typ_expectation = False
  if options.expectations_files:
    parser.error('--expectation-files flag is not supported yet.')
  else:
    not_using_typ_expectation = True

  for start_dir in options.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir,
        options.top_level_dir,
        base_class=serially_executed_browser_test_case.
        SeriallyExecutedBrowserTestCase)
    browser_test_classes = modules_to_classes.values()

  _ValidateDistinctNames(browser_test_classes)

  test_class = None
  for cl in browser_test_classes:
    if cl.Name() == options.test:
      test_class = cl
      break

  if not test_class:
    print('Cannot find test class with name matching %s' % options.test)
    print('Available tests: %s' % '\n'.join(
        cl.Name() for cl in browser_test_classes))
    return 1

  # Create test context.
  context = browser_test_context.TypTestContext()
  for c in options.client_configs:
    context.client_configs.append(c)
  context.finder_options = ProcessCommandLineOptions(
      test_class, options, extra_args)
  context.test_class = test_class
  test_times = None
  if options.read_abbreviated_json_results_from:
    with open(options.read_abbreviated_json_results_from, 'r') as f:
      abbr_results = json.load(f)
      test_times = abbr_results.get('times')
  tests_to_run = LoadTestCasesToBeRun(
      test_class=test_class, finder_options=context.finder_options,
      filter_regex_str=options.test_filter,
      filter_tests_after_sharding=options.filter_tests_after_sharding,
      total_shards=options.total_shards, shard_index=options.shard_index,
      test_times=test_times,
      debug_shard_distributions=options.debug_shard_distributions)
  for t in tests_to_run:
    context.test_case_ids_to_run.add(t.id())
  context.Freeze()
  browser_test_context._global_test_context = context

  # Set up the typ runner.
  runner = typ.Runner()

  runner.context = context
  runner.setup_fn = _SetUpProcess
  runner.teardown_fn = _TearDownProcess

  runner.args.jobs = options.jobs
  runner.args.metadata = options.metadata
  runner.args.passthrough = options.passthrough
  runner.args.path = options.path
  runner.args.repeat = options.repeat
  runner.args.retry_limit = options.retry_limit
  runner.args.test_results_server = options.test_results_server
  runner.args.test_type = options.test_type
  runner.args.top_level_dir = options.top_level_dir
  runner.args.write_full_results_to = options.write_full_results_to
  runner.args.write_trace_to = options.write_trace_to
  runner.args.list_only = options.list_only
  runner.classifier = _GetClassifier(options)

  runner.args.suffixes = TEST_SUFFIXES

  # Since sharding is handled by the browser_test_runner harness, which passes
  # browser_test_context.test_case_ids_to_run to the subprocesses to indicate
  # which test cases to run, we explicitly disable typ's own sharding.
  runner.args.total_shards = 1
  runner.args.shard_index = 0

  runner.args.timing = True
  runner.args.verbose = options.verbose
  runner.win_multiprocessing = typ.WinMultiprocessing.importable
  try:
    ret, _, _ = runner.run()
  except KeyboardInterrupt:
    print("interrupted, exiting", file=sys.stderr)
    ret = 130
  finally:
    if (options.write_full_results_to and
        os.path.exists(options.write_full_results_to) and
        not_using_typ_expectation):
      # Set the expectation of all skipped tests to Skip to keep the test
      # behavior the same as when typ didn't support test expectations
      # (also see crbug.com/904019 for why this workaround is needed).
      # TODO(crbug.com/698902): remove this once gpu tests are converted to
      # use typ's expectations.
      _SetSkippedTestExpectationsToSkip(options.write_full_results_to)
  return ret