def testLoadAllTestsInModule(self):
  context = browser_test_context.TypTestContext()
  context.finder_options = options_for_unittests.GetCopy()
  context.test_class = Algebra
  context.test_case_ids_to_run.add(
      'telemetry.testing.browser_test_runner_unittest.Algebra.TestNumber')
  context.test_case_ids_to_run.add(
      'telemetry.testing.browser_test_runner_unittest.Algebra.testOne')
  context.Freeze()
  browser_test_context._global_test_context = context
  try:
    # This must not invoke GenerateTestCases of the ErrorneousGeometric class;
    # doing so would raise an exception.
    tests = serially_executed_browser_test_case.LoadAllTestsInModule(
        sys.modules[__name__])
    self.assertEqual(
        sorted([t.id() for t in tests]),
        ['telemetry.testing.browser_test_runner_unittest.Algebra.TestNumber',
         'telemetry.testing.browser_test_runner_unittest.Algebra.testOne'])
  finally:
    browser_test_context._global_test_context = None
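For context, discovery in these examples operates on subclasses of SeriallyExecutedBrowserTestCase. A minimal sketch of what a class such as Algebra might look like is below; the GenerateTestCases_* naming, the method signatures, and the test bodies are illustrative assumptions, not the actual contents of the unittest module.

class Algebra(
    serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):

  @classmethod
  def GenerateTestCases_Simple(cls, options):
    # Each yielded (test_name, args) pair is assumed to become a dynamically
    # generated test case that runs Simple(*args).
    del options
    yield 'testOne', (1, 1)

  def Simple(self, x, y):
    self.assertEqual(x, y)

  def TestNumber(self):
    # A statically defined test method.
    self.assertEqual(0, 0)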
Example #2
def RunTests(args):
  parser = _CreateTestArgParsers()
  try:
    options, extra_args = parser.parse_known_args(args)
  except arg_parser._Bailout:
    PrintTelemetryHelp()
    return parser.exit_status
  binary_manager.InitDependencyManager(options.client_configs)
  # Collect the test classes discovered under every start directory.
  browser_test_classes = []
  for start_dir in options.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir,
        options.top_level_dir,
        base_class=serially_executed_browser_test_case.
        SeriallyExecutedBrowserTestCase)
    browser_test_classes.extend(modules_to_classes.values())

  _ValidateDistinctNames(browser_test_classes)

  test_class = None
  for cl in browser_test_classes:
    if cl.Name() == options.test:
      test_class = cl
      break

  if not test_class:
    print('Cannot find test class with name matching %s' % options.test)
    print('Available tests: %s' % '\n'.join(
        cl.Name() for cl in browser_test_classes))
    return 1

  test_class._typ_runner = typ_runner = typ.Runner()

  # Create test context.
  typ_runner.context = browser_test_context.TypTestContext()
  for c in options.client_configs:
    typ_runner.context.client_configs.append(c)
  typ_runner.context.finder_options = ProcessCommandLineOptions(
      test_class, options, extra_args)
  typ_runner.context.test_class = test_class
  typ_runner.context.expectations_files = options.expectations_files
  test_times = None
  if options.read_abbreviated_json_results_from:
    with open(options.read_abbreviated_json_results_from, 'r') as f:
      abbr_results = json.load(f)
      test_times = abbr_results.get('times')

  # Setup typ.Runner instance.
  typ_runner.args.all = options.all
  typ_runner.args.expectations_files = options.expectations_files
  typ_runner.args.jobs = options.jobs
  typ_runner.args.list_only = options.list_only
  typ_runner.args.metadata = options.metadata
  typ_runner.args.passthrough = options.passthrough
  typ_runner.args.path = options.path
  typ_runner.args.quiet = options.quiet
  typ_runner.args.repeat = options.repeat
  typ_runner.args.repository_absolute_path = options.repository_absolute_path
  typ_runner.args.retry_limit = options.retry_limit
  typ_runner.args.retry_only_retry_on_failure_tests = (
      options.retry_only_retry_on_failure_tests)
  typ_runner.args.skip = options.skip
  typ_runner.args.suffixes = TEST_SUFFIXES
  typ_runner.args.tags = options.tags
  typ_runner.args.test_name_prefix = options.test_name_prefix
  typ_runner.args.test_filter = options.test_filter
  typ_runner.args.test_results_server = options.test_results_server
  typ_runner.args.test_type = options.test_type
  typ_runner.args.top_level_dir = options.top_level_dir
  typ_runner.args.write_full_results_to = options.write_full_results_to
  typ_runner.args.write_trace_to = options.write_trace_to
  typ_runner.args.disable_resultsink = options.disable_resultsink

  typ_runner.classifier = _GetClassifier(typ_runner)
  typ_runner.path_delimiter = test_class.GetJSONResultsDelimiter()
  typ_runner.setup_fn = _SetUpProcess
  typ_runner.teardown_fn = _TearDownProcess

  tests_to_run = LoadTestCasesToBeRun(
      test_class=test_class, finder_options=typ_runner.context.finder_options,
      filter_tests_after_sharding=options.filter_tests_after_sharding,
      total_shards=options.total_shards, shard_index=options.shard_index,
      test_times=test_times,
      debug_shard_distributions=options.debug_shard_distributions,
      typ_runner=typ_runner)
  for t in tests_to_run:
    typ_runner.context.test_case_ids_to_run.add(t.id())
  typ_runner.context.Freeze()
  browser_test_context._global_test_context = typ_runner.context

  # Several class-level variables are set for GPU tests when
  # LoadTestCasesToBeRun is called. Functions like ExpectationsFiles and
  # GenerateTags, which use these variables, must be called after
  # LoadTestCasesToBeRun.

  test_class_expectations_files = test_class.ExpectationsFiles()
  # All file paths in test_class_expectations_files must be absolute.
  assert all(os.path.isabs(path) for path in test_class_expectations_files)
  typ_runner.args.expectations_files.extend(
      test_class_expectations_files)
  typ_runner.args.ignored_tags.extend(test_class.IgnoredTags())

  # Sharding is handled by the browser_test_runner harness, which passes
  # browser_test_context.test_case_ids_to_run to each subprocess to indicate
  # which test cases to run, so typ's own sharding logic is explicitly
  # disabled here.
  typ_runner.args.total_shards = 1
  typ_runner.args.shard_index = 0

  typ_runner.args.timing = True
  typ_runner.args.verbose = options.verbose
  typ_runner.win_multiprocessing = typ.WinMultiprocessing.importable

  try:
    ret, _, _ = typ_runner.run()
  except KeyboardInterrupt:
    print("interrupted, exiting", file=sys.stderr)
    ret = 130
  return ret
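A minimal sketch of how RunTests could be wired up as an entry point; this __main__ block is an illustrative assumption and is not part of the original module.

import sys

if __name__ == '__main__':
  # Forward argv (the test suite selection plus any extra browser/typ flags)
  # straight to RunTests and exit with its status code.
  sys.exit(RunTests(sys.argv[1:]))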
Example #3
def RunTests(args):
    parser = _CreateTestArgParsers()
    try:
        options, extra_args = parser.parse_known_args(args)
    except arg_parser._Bailout:
        return parser.exit_status
    binary_manager.InitDependencyManager(options.client_configs)

    # Collect the test classes discovered under every start directory.
    browser_test_classes = []
    for start_dir in options.start_dirs:
        modules_to_classes = discover.DiscoverClasses(
            start_dir,
            options.top_level_dir,
            base_class=serially_executed_browser_test_case.
            SeriallyExecutedBrowserTestCase)
        browser_test_classes.extend(modules_to_classes.values())

    _ValidateDistinctNames(browser_test_classes)

    test_class = None
    for cl in browser_test_classes:
        if cl.Name() == options.test:
            test_class = cl
            break

    if not test_class:
        print('Cannot find test class with name matching %s' % options.test)
        print('Available tests: %s' % '\n'.join(
            cl.Name() for cl in browser_test_classes))
        return 1

    # Create test context.
    context = browser_test_context.TypTestContext()
    for c in options.client_configs:
        context.client_configs.append(c)
    context.finder_options = ProcessCommandLineOptions(test_class, options,
                                                       extra_args)
    context.test_class = test_class
    test_times = None
    if options.read_abbreviated_json_results_from:
        with open(options.read_abbreviated_json_results_from, 'r') as f:
            abbr_results = json.load(f)
            test_times = abbr_results.get('times')
    tests_to_run = LoadTestCasesToBeRun(
        test_class=test_class,
        finder_options=context.finder_options,
        filter_regex_str=options.test_filter,
        filter_tests_after_sharding=options.filter_tests_after_sharding,
        total_shards=options.total_shards,
        shard_index=options.shard_index,
        test_times=test_times,
        debug_shard_distributions=options.debug_shard_distributions)
    for t in tests_to_run:
        context.test_case_ids_to_run.add(t.id())
    context.Freeze()
    browser_test_context._global_test_context = context

    # Setup typ runner.
    runner = typ.Runner()

    runner.context = context
    runner.setup_fn = _SetUpProcess
    runner.teardown_fn = _TearDownProcess

    runner.args.jobs = options.jobs
    runner.args.metadata = options.metadata
    runner.args.passthrough = options.passthrough
    runner.args.path = options.path
    runner.args.retry_limit = options.retry_limit
    runner.args.test_results_server = options.test_results_server
    runner.args.test_type = options.test_type
    runner.args.top_level_dir = options.top_level_dir
    runner.args.write_full_results_to = options.write_full_results_to
    runner.args.write_trace_to = options.write_trace_to
    runner.args.list_only = options.list_only
    runner.classifier = _GetClassifier(options)

    runner.args.suffixes = TEST_SUFFIXES

    # Sharding is handled by the browser_test_runner harness, which passes
    # browser_test_context.test_case_ids_to_run to each subprocess to indicate
    # which test cases to run, so typ's own sharding logic is explicitly
    # disabled here.
    runner.args.total_shards = 1
    runner.args.shard_index = 0

    runner.args.timing = True
    runner.args.verbose = options.verbose
    runner.win_multiprocessing = typ.WinMultiprocessing.importable
    try:
        ret, _, _ = runner.run()
    except KeyboardInterrupt:
        print("interrupted, exiting", file=sys.stderr)
        ret = 130
    return ret
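The RunTests variants can seed LoadTestCasesToBeRun with prior timings through read_abbreviated_json_results_from, presumably to balance shard distributions. A minimal sketch of the file shape this code relies on is below; only the 'times' key is actually read, and the test ids and units shown are assumptions.

example_abbreviated_results = {
    'times': {
        # Full test ids mapped to prior run durations (assumed seconds).
        'suite_module.SuiteClass.testFast': 0.4,
        'suite_module.SuiteClass.testSlow': 12.7,
    },
}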
Example #4
def RunTests(args):
  parser = _CreateTestArgParsers()
  try:
    options, extra_args = parser.parse_known_args(args)
  except arg_parser._Bailout:
    PrintTelemetryHelp()
    return parser.exit_status
  binary_manager.InitDependencyManager(options.client_configs)

  not_using_typ_expectation = False
  if options.expectations_files:
    parser.error('--expectations-files flag is not supported yet.')
  else:
    not_using_typ_expectation = True

  # Collect the test classes discovered under every start directory.
  browser_test_classes = []
  for start_dir in options.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir,
        options.top_level_dir,
        base_class=serially_executed_browser_test_case.
        SeriallyExecutedBrowserTestCase)
    browser_test_classes.extend(modules_to_classes.values())

  _ValidateDistinctNames(browser_test_classes)

  test_class = None
  for cl in browser_test_classes:
    if cl.Name() == options.test:
      test_class = cl
      break

  if not test_class:
    print('Cannot find test class with name matching %s' % options.test)
    print('Available tests: %s' % '\n'.join(
        cl.Name() for cl in browser_test_classes))
    return 1

  # Create test context.
  context = browser_test_context.TypTestContext()
  for c in options.client_configs:
    context.client_configs.append(c)
  context.finder_options = ProcessCommandLineOptions(
      test_class, options, extra_args)
  context.test_class = test_class
  test_times = None
  if options.read_abbreviated_json_results_from:
    with open(options.read_abbreviated_json_results_from, 'r') as f:
      abbr_results = json.load(f)
      test_times = abbr_results.get('times')
  tests_to_run = LoadTestCasesToBeRun(
      test_class=test_class, finder_options=context.finder_options,
      filter_regex_str=options.test_filter,
      filter_tests_after_sharding=options.filter_tests_after_sharding,
      total_shards=options.total_shards, shard_index=options.shard_index,
      test_times=test_times,
      debug_shard_distributions=options.debug_shard_distributions)
  for t in tests_to_run:
    context.test_case_ids_to_run.add(t.id())
  context.Freeze()
  browser_test_context._global_test_context = context

  # Setup typ runner.
  runner = typ.Runner()

  runner.context = context
  runner.setup_fn = _SetUpProcess
  runner.teardown_fn = _TearDownProcess

  runner.args.jobs = options.jobs
  runner.args.metadata = options.metadata
  runner.args.passthrough = options.passthrough
  runner.args.path = options.path
  runner.args.repeat = options.repeat
  runner.args.retry_limit = options.retry_limit
  runner.args.test_results_server = options.test_results_server
  runner.args.test_type = options.test_type
  runner.args.top_level_dir = options.top_level_dir
  runner.args.write_full_results_to = options.write_full_results_to
  runner.args.write_trace_to = options.write_trace_to
  runner.args.list_only = options.list_only
  runner.classifier = _GetClassifier(options)

  runner.args.suffixes = TEST_SUFFIXES

  # Sharding is handled by the browser_test_runner harness, which passes
  # browser_test_context.test_case_ids_to_run to each subprocess to indicate
  # which test cases to run, so typ's own sharding logic is explicitly
  # disabled here.
  runner.args.total_shards = 1
  runner.args.shard_index = 0

  runner.args.timing = True
  runner.args.verbose = options.verbose
  runner.win_multiprocessing = typ.WinMultiprocessing.importable
  try:
    ret, _, _ = runner.run()
  except KeyboardInterrupt:
    print("interrupted, exiting", file=sys.stderr)
    ret = 130
  finally:
    if (options.write_full_results_to and
        os.path.exists(options.write_full_results_to) and
        not_using_typ_expectation):
      # Set the expectation of all skipped tests to SKIP to keep the test
      # behavior the same as when typ did not support test expectations
      # (see crbug.com/904019 for why this workaround is needed).
      # TODO(crbug.com/698902): Remove this once GPU tests are converted to
      # use typ's expectations.
      _SetSkippedTestExpectationsToSkip(options.write_full_results_to)
  return ret
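_SetSkippedTestExpectationsToSkip itself is not shown in this example. Below is a hedged sketch of what such a post-processing pass might look like, assuming the full results file uses a nested 'tests' trie in the Chromium JSON test results layout; the helper name and the traversal are illustrative stand-ins, not the actual implementation.

def _SketchSetSkippedTestExpectationsToSkip(full_results_path):
  # Illustrative stand-in: mark every skipped test as expected to skip so the
  # skip is not reported as an unexpected result.
  with open(full_results_path) as f:
    results = json.load(f)

  def _Visit(node):
    if 'actual' in node:
      # Leaf node holding one test's result; 'actual' is assumed to be a
      # space-separated list of result strings.
      if 'SKIP' in node['actual'].split():
        node['expected'] = 'SKIP'
    else:
      # Interior node: recurse into child suites and tests.
      for child in node.values():
        _Visit(child)

  _Visit(results.get('tests', {}))
  with open(full_results_path, 'w') as f:
    json.dump(results, f)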