def _RunIntegrationTest(self, test_args):
    """Runs an integration and asserts fail/success/skip expectations.

    Args:
      test_args: A _IntegrationTestArgs instance to use.
    """
    config = chromium_config.ChromiumConfig(
        top_level_dir=path_util.GetGpuTestDir(),
        benchmark_dirs=[
            os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
        ])

    with binary_manager.TemporarilyReplaceBinaryManager(None), \
         tempfile_ext.NamedTemporaryDirectory() as temp_dir:
      test_results_path = os.path.join(temp_dir, 'test_results.json')
      test_state_path = os.path.join(temp_dir, 'test_state.json')
      # We process the ChromiumConfig instance to get the argument list and
      # pass it directly to run_browser_tests.RunTests. Calling
      # browser_test_runner.Run instead would spawn another subprocess, which
      # is less efficient.
      args = browser_test_runner.ProcessConfig(config, [
          test_args.test_name,
          '--write-full-results-to=%s' % test_results_path,
          '--test-state-json-path=%s' % test_state_path
      ] + test_args.additional_args)
      run_browser_tests.RunTests(args)
      with open(test_results_path) as f:
        self._test_result = json.load(f)
      with open(test_state_path) as f:
        self._test_state = json.load(f)
      actual_successes, actual_failures, actual_skips = (_ExtractTestResults(
          self._test_result))
      self.assertEqual(set(actual_failures), set(test_args.failures))
      self.assertEqual(set(actual_successes), set(test_args.successes))
      self.assertEqual(set(actual_skips), set(test_args.skips))
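A minimal usage sketch (not from the source; the test and result names are hypothetical), assuming _IntegrationTestArgs takes the test name in its constructor and exposes the remaining fields read above (additional_args, failures, successes, skips) as plain attributes:

    # Hypothetical values; only the attribute names come from the helper above.
    test_args = _IntegrationTestArgs('simple_integration_unittest')
    test_args.additional_args = ['--retry-limit=2']
    test_args.successes = ['namespace.expected_pass']
    test_args.failures = ['namespace.unexpected_failure']
    test_args.skips = ['namespace.skipped_test']
    self._RunIntegrationTest(test_args)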
 def _RunGpuIntegrationTests(self, test_name, extra_args=None):
   extra_args = extra_args or []
   unittest_config = chromium_config.ChromiumConfig(
       top_level_dir=path_util.GetGpuTestDir(),
       benchmark_dirs=[
           os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
       ])
   with binary_manager.TemporarilyReplaceBinaryManager(None), \
        mock.patch.object(gpu_project_config, 'CONFIG', unittest_config):
     # TODO(crbug.com/1103792): Using NamedTemporaryFile() as a context
     # manager causes Windows bots to fail. Once the issue is fixed in
     # tempfile_ext.NamedTemporaryFile(), add it to the context managers that
     # open this with block and remove the try/finally below.
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     try:
       test_argv = [
           test_name, '--write-full-results-to=%s' % temp_file.name
       ] + extra_args
       processed_args = run_gpu_integration_test.ProcessArgs(test_argv)
       telemetry_args = browser_test_runner.ProcessConfig(
           unittest_config, processed_args)
       run_browser_tests.RunTests(telemetry_args)
       with open(temp_file.name) as f:
         self._test_result = json.load(f)
     finally:
       temp_file.close()
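For reference, a rough sketch of the shape the TODO above envisions once crbug.com/1103792 is fixed. It assumes tempfile_ext.NamedTemporaryFile() is a context manager that yields a file object whose name can be written to and reopened on Windows, which is only what the comment implies, not a documented API:

    with binary_manager.TemporarilyReplaceBinaryManager(None), \
         mock.patch.object(gpu_project_config, 'CONFIG', unittest_config), \
         tempfile_ext.NamedTemporaryFile() as temp_file:
      # Exact handle handling depends on the fixed helper; the key point is
      # that no manual try/finally cleanup would be needed.
      temp_file.close()
      test_argv = [
          test_name, '--write-full-results-to=%s' % temp_file.name
      ] + extra_args
      processed_args = run_gpu_integration_test.ProcessArgs(test_argv)
      telemetry_args = browser_test_runner.ProcessConfig(
          unittest_config, processed_args)
      run_browser_tests.RunTests(telemetry_args)
      with open(temp_file.name) as f:
        self._test_result = json.load(f)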
 def _RunTest(self,
              test_filter,
              expected_failures,
              expected_successes,
              expected_skips=None,
              test_name='SimpleTest',
              expectations='',
              tags=None,
              extra_args=None):
     expected_skips = expected_skips or []
     tags = tags or []
     extra_args = extra_args or []
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     if expectations:
          expectations_file = tempfile.NamedTemporaryFile(
              mode='w', delete=False)  # Text mode so write() accepts a str.
         expectations_file.write(expectations)
         expectations_file.close()
         extra_args.extend(['-X', expectations_file.name] +
                           ['-x=%s' % tag for tag in tags])
     args = ([
         test_name,
         '--write-full-results-to=%s' % temp_file_name,
         '--test-filter=%s' % test_filter,
         # We don't want the underlying tests to report their results to
         # ResultDB.
         '--disable-resultsink',
     ] + extra_args)
     try:
         args = browser_test_runner.ProcessConfig(config, args)
         with binary_manager.TemporarilyReplaceBinaryManager(None):
             run_browser_tests.RunTests(args)
         with open(temp_file_name) as f:
             self._test_result = json.load(f)
         (actual_successes, actual_failures,
          actual_skips) = self._ExtractTestResults(self._test_result)
          self.assertEqual(set(actual_failures), set(expected_failures))
          self.assertEqual(set(actual_successes), set(expected_successes))
          self.assertEqual(set(actual_skips), set(expected_skips))
     finally:
         os.remove(temp_file_name)
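A usage sketch with hypothetical test names, showing how the expectations and tags parameters above end up as a temporary expectations file passed via -X together with -x tag arguments:

    # Which bucket a test lands in depends on the expectations given and on
    # _ExtractTestResults; the names here are purely illustrative.
    self._RunTest(
        test_filter='FailingTest',
        expected_failures=['browser_tests.simple_test.FailingTest'],
        expected_successes=[],
        expectations=('# tags: [ linux ]\n'
                      '# results: [ RetryOnFailure ]\n'
                      'crbug.com/123 [ linux ] '
                      'browser_tests.simple_test.FailingTest '
                      '[ RetryOnFailure ]\n'),
        tags=['linux'])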
 def BaseShardingTest(self,
                      total_shards,
                      shard_index,
                      failures,
                      successes,
                      opt_abbr_input_json_file=None,
                      opt_test_filter='',
                      opt_filter_tests_after_sharding=False,
                      opt_test_name_prefix=''):
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     opt_args = []
     if opt_abbr_input_json_file:
         opt_args += [
             '--read-abbreviated-json-results-from=%s' %
             opt_abbr_input_json_file
         ]
     if opt_test_filter:
         opt_args += ['--test-filter=%s' % opt_test_filter]
     if opt_filter_tests_after_sharding:
         opt_args += ['--filter-tests-after-sharding']
     if opt_test_name_prefix:
         opt_args += ['--test-name-prefix=%s' % opt_test_name_prefix]
     args = ([
         'SimpleShardingTest',
         '--write-full-results-to=%s' % temp_file_name,
         '--total-shards=%d' % total_shards,
         '--shard-index=%d' % shard_index
     ] + opt_args)
     try:
         args = browser_test_runner.ProcessConfig(config, args)
         with binary_manager.TemporarilyReplaceBinaryManager(None):
             run_browser_tests.RunTests(args)
         with open(temp_file_name) as f:
             test_result = json.load(f)
         (actual_successes, actual_failures,
          _) = self._ExtractTestResults(test_result)
          self.assertEqual(set(actual_failures), set(failures))
          self.assertEqual(set(actual_successes), set(successes))
     finally:
         os.remove(temp_file_name)
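A usage sketch (hypothetical test names): with two shards, each shard is expected to report only its own subset of SimpleShardingTest's tests:

    self.BaseShardingTest(
        total_shards=2,
        shard_index=0,
        failures=[],
        successes=['browser_tests.simple_sharding_test.Test1',
                   'browser_tests.simple_sharding_test.Test3'])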
Example #5
  def _RunTest(
      self, expected_failures, expected_successes, expected_skips,
      expected_return_code=0, test_name='', extra_args=None, no_browser=True):
    extra_args = extra_args or []
    config = project_config.ProjectConfig(
        top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
        client_configs=[],
        benchmark_dirs=[
            os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
    )
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.close()
    temp_file_name = temp_file.name
    try:
      passed_args = []
      if test_name:
        passed_args.append(test_name)
      if no_browser:
        passed_args.append('--no-browser')
      passed_args.append('--write-full-results-to=%s' % temp_file_name)
      args = unittest_runner.ProcessConfig(config, passed_args + extra_args)
      test_runner = run_tests.RunTestsCommand()
      with binary_manager.TemporarilyReplaceBinaryManager(None):
        ret = test_runner.main(args=args)
      assert ret == expected_return_code, (
          'actual return code %d does not equal the expected return code %d' %
          (ret, expected_return_code))
      with open(temp_file_name) as f:
        self._test_result = json.load(f)
      (actual_successes,
       actual_failures,
       actual_skips) = self._ExtractTestResults(self._test_result)

      # Keep the asserts below: without them we could miss tests that run
      # when they are not supposed to.
      self.assertEqual(set(actual_failures), set(expected_failures))
      self.assertEqual(set(actual_successes), set(expected_successes))
      self.assertEqual(set(actual_skips), set(expected_skips))
    finally:
      os.remove(temp_file_name)
    return actual_failures, actual_successes, actual_skips
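A usage sketch (hypothetical names) exercising the --no-browser path and the return-code check; a run containing unexpected failures would typically be paired with a non-zero expected_return_code:

    # Hypothetical test module; the returned tuples can be inspected further
    # by the caller.
    failures, successes, skips = self._RunTest(
        expected_failures=['unit_tests_test.example.failing_test'],
        expected_successes=['unit_tests_test.example.passing_test'],
        expected_skips=[],
        expected_return_code=1,
        test_name='example')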
Example #6
 def _RunTestsWithExpectationsFile(
     self, full_test_name, expectations, test_tags='foo', extra_args=None,
     expected_exit_code=0):
   extra_args = extra_args or []
   test_expectations = (('# tags: [ foo bar mac ]\n'
                         '# results: [ {expectations} ]\n'
                         'crbug.com/123 [ {tags} ] {test} [ {expectations} ]')
                        .format(expectations=expectations, tags=test_tags,
                                test=full_test_name))
   expectations_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
   expectations_file.write(test_expectations)
   results = tempfile.NamedTemporaryFile(delete=False)
   results.close()
   expectations_file.close()
   config = project_config.ProjectConfig(
       top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
       client_configs=[],
       expectations_files=[expectations_file.name],
       benchmark_dirs=[
           os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
   )
   try:
     passed_args = ([full_test_name, '--no-browser',
                     ('--write-full-results-to=%s' % results.name)] +
                    ['--tag=%s' % tag for tag in test_tags.split()])
     args = unittest_runner.ProcessConfig(config, passed_args + extra_args)
     test_runner = run_tests.RunTestsCommand()
     with binary_manager.TemporarilyReplaceBinaryManager(None):
       ret = test_runner.main(args=args)
     self.assertEqual(ret, expected_exit_code)
     with open(results.name) as f:
       self._test_result = json.load(f)
   finally:
     os.remove(expectations_file.name)
     os.remove(results.name)
   return self._test_result
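For illustration, a hypothetical call such as _RunTestsWithExpectationsFile('a.b.FlakyTest', 'RetryOnFailure', test_tags='foo') would render the format string above into a temporary expectations file containing:

    # tags: [ foo bar mac ]
    # results: [ RetryOnFailure ]
    crbug.com/123 [ foo ] a.b.FlakyTest [ RetryOnFailure ]

and pass --tag=foo alongside --no-browser and the results path.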