def _RunIntegrationTest(self, test_args):
    """Runs an integration and asserts fail/success/skip expectations.

    Args:
      test_args: A _IntegrationTestArgs instance to use.
    """
    config = chromium_config.ChromiumConfig(
        top_level_dir=path_util.GetGpuTestDir(),
        benchmark_dirs=[
            os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
        ])

    with binary_manager.TemporarilyReplaceBinaryManager(None), \
         tempfile_ext.NamedTemporaryDirectory() as temp_dir:
      test_results_path = os.path.join(temp_dir, 'test_results.json')
      test_state_path = os.path.join(temp_dir, 'test_state.json')
      # Process the ChromiumConfig instance into an argument list and pass
      # it directly to run_browser_tests.RunTests. Calling
      # browser_test_runner.Run instead would spawn another subprocess,
      # which is less efficient.
      args = browser_test_runner.ProcessConfig(config, [
          test_args.test_name,
          '--write-full-results-to=%s' % test_results_path,
          '--test-state-json-path=%s' % test_state_path
      ] + test_args.additional_args)
      run_browser_tests.RunTests(args)
      with open(test_results_path) as f:
        self._test_result = json.load(f)
      with open(test_state_path) as f:
        self._test_state = json.load(f)
      actual_successes, actual_failures, actual_skips = (_ExtractTestResults(
          self._test_result))
      self.assertEqual(set(actual_failures), set(test_args.failures))
      self.assertEqual(set(actual_successes), set(test_args.successes))
      self.assertEqual(set(actual_skips), set(test_args.skips))
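These helpers rely on an _ExtractTestResults function (module-level in some
examples, a method in others) that is not shown here. It walks the Chromium
JSON "full results" format, in which 'tests' is a trie of test-name
components whose leaf dicts carry an 'actual' result string. A minimal
sketch under those assumptions (the real helper in the source tree may
differ, e.g. in how it treats retried results):

def _ExtractTestResults(test_result):
  delimiter = test_result['path_delimiter']
  successes = []
  failures = []
  skips = []

  def _Recurse(node, name):
    if 'actual' in node:
      # Leaf node: classify the test by its recorded result.
      if node['actual'] == 'PASS':
        successes.append(name)
      elif node['actual'] == 'SKIP':
        skips.append(name)
      else:
        failures.append(name)
    else:
      for part, child in node.items():
        _Recurse(child, name + delimiter + part if name else part)

  _Recurse(test_result['tests'], '')
  return successes, failures, skips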
 def _RunGpuIntegrationTests(self, test_name, extra_args=None):
   extra_args = extra_args or []
   unittest_config = chromium_config.ChromiumConfig(
       top_level_dir=path_util.GetGpuTestDir(),
       benchmark_dirs=[
           os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
       ])
   with binary_manager.TemporarilyReplaceBinaryManager(None), \
        mock.patch.object(gpu_project_config, 'CONFIG', unittest_config):
     # TODO(crbug.com/1103792): Using NamedTemporaryFile() as a context
     # manager causes Windows bots to fail. Once that issue is fixed in
     # tempfile_ext.NamedTemporaryFile(), add it to the context managers
     # entered by the with block above and remove the try/finally below.
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     try:
       test_argv = [
           test_name, '--write-full-results-to=%s' % temp_file.name
       ] + extra_args
       processed_args = run_gpu_integration_test.ProcessArgs(test_argv)
       telemetry_args = browser_test_runner.ProcessConfig(
           unittest_config, processed_args)
       run_browser_tests.RunTests(telemetry_args)
       with open(temp_file.name) as f:
         self._test_result = json.load(f)
     finally:
       # delete=False: the file must be removed explicitly.
       os.remove(temp_file.name)
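A hypothetical call site for the helper above; the test name is
illustrative, and 'num_failures_by_type' is the summary dict defined by the
JSON full-results format:

def testIntegrationTestsAllPass(self):  # hypothetical
  # 'simple_integration_unittest' is an illustrative test name.
  self._RunGpuIntegrationTests('simple_integration_unittest')
  # _RunGpuIntegrationTests leaves the parsed JSON on self._test_result.
  self.assertEqual(
      self._test_result['num_failures_by_type'].get('FAIL', 0), 0)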
Example #3
 def _RunGpuIntegrationTests(self, test_name, extra_args=None):
     extra_args = extra_args or []
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     test_argv = [test_name,
                  '--write-full-results-to=%s' % temp_file.name
                  ] + extra_args
     unittest_config = chromium_config.ChromiumConfig(
         top_level_dir=path_util.GetGpuTestDir(),
         benchmark_dirs=[
             os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
         ])
     old_manager = binary_manager._binary_manager
     with mock.patch.object(gpu_project_config, 'CONFIG', unittest_config):
         processed_args = run_gpu_integration_test.ProcessArgs(test_argv)
         telemetry_args = browser_test_runner.ProcessConfig(
             unittest_config, processed_args)
         try:
             binary_manager._binary_manager = None
             run_browser_tests.RunTests(telemetry_args)
             with open(temp_file.name) as f:
                 self._test_result = json.load(f)
         finally:
             binary_manager._binary_manager = old_manager
             # delete=False: remove the file explicitly.
             os.remove(temp_file.name)
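Example #3 is an older variant of the previous helper: it saves and
restores the module-private binary_manager._binary_manager by hand, which
is exactly what the TemporarilyReplaceBinaryManager context manager used
above packages up. Judging from that usage, the context manager amounts to
roughly this reconstruction (a sketch, not the actual catapult code;
binary_manager is the telemetry module imported by the examples above):

import contextlib

@contextlib.contextmanager
def TemporarilyReplaceBinaryManager(manager):
  # Swap in a replacement binary manager and restore the old one on exit,
  # even if the body raises.
  old_manager = binary_manager._binary_manager
  try:
    binary_manager._binary_manager = manager
    yield
  finally:
    binary_manager._binary_manager = old_manager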
 def _RunTest(self,
              test_filter,
              expected_failures,
              expected_successes,
              expected_skips=None,
              test_name='SimpleTest',
              expectations='',
              tags=None,
              extra_args=None):
     expected_skips = expected_skips or []
     tags = tags or []
     extra_args = extra_args or []
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     if expectations:
         # Open in text mode so writing a str works under Python 3.
         expectations_file = tempfile.NamedTemporaryFile(
             mode='w', delete=False)
         expectations_file.write(expectations)
         expectations_file.close()
         extra_args.extend(['-X', expectations_file.name] +
                           ['-x=%s' % tag for tag in tags])
     args = ([
         test_name,
         '--write-full-results-to=%s' % temp_file_name,
         '--test-filter=%s' % test_filter,
         # We don't want the underlying tests to report their results to
         # ResultDB.
         '--disable-resultsink',
     ] + extra_args)
     try:
         args = browser_test_runner.ProcessConfig(config, args)
         with binary_manager.TemporarilyReplaceBinaryManager(None):
             run_browser_tests.RunTests(args)
         with open(temp_file_name) as f:
             self._test_result = json.load(f)
         (actual_successes, actual_failures,
          actual_skips) = self._ExtractTestResults(self._test_result)
         self.assertEqual(set(actual_failures), set(expected_failures))
         self.assertEqual(set(actual_successes), set(expected_successes))
         self.assertEqual(set(actual_skips), set(expected_skips))
     finally:
         os.remove(temp_file_name)
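A hypothetical call showing how the expectations plumbing above fits
together; the expectations text uses the typ test-expectations format
('# tags:' and '# results:' headers, then one pattern per line), and the
test names are illustrative:

# Hypothetical call; all names here are illustrative.
self._RunTest(
    test_filter='*',
    expected_failures=['browser_tests.simple_numeric_test.TestFails'],
    expected_successes=['browser_tests.simple_numeric_test.TestPasses'],
    expectations=('# tags: [ linux ]\n'
                  '# results: [ RetryOnFailure ]\n'
                  'crbug.com/123 [ linux ] browser_tests.* '
                  '[ RetryOnFailure ]\n'),
    tags=['linux'])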
 def BaseShardingTest(self,
                      total_shards,
                      shard_index,
                      failures,
                      successes,
                      opt_abbr_input_json_file=None,
                      opt_test_filter='',
                      opt_filter_tests_after_sharding=False,
                      opt_test_name_prefix=''):
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     opt_args = []
     if opt_abbr_input_json_file:
         opt_args += [
             '--read-abbreviated-json-results-from=%s' %
             opt_abbr_input_json_file
         ]
     if opt_test_filter:
         opt_args += ['--test-filter=%s' % opt_test_filter]
     if opt_filter_tests_after_sharding:
         opt_args += ['--filter-tests-after-sharding']
     if opt_test_name_prefix:
         opt_args += ['--test-name-prefix=%s' % opt_test_name_prefix]
     args = ([
         'SimpleShardingTest',
         '--write-full-results-to=%s' % temp_file_name,
         '--total-shards=%d' % total_shards,
         '--shard-index=%d' % shard_index
     ] + opt_args)
     try:
         args = browser_test_runner.ProcessConfig(config, args)
         with binary_manager.TemporarilyReplaceBinaryManager(None):
             run_browser_tests.RunTests(args)
         with open(temp_file_name) as f:
             test_result = json.load(f)
         (actual_successes, actual_failures,
          _) = self._ExtractTestResults(test_result)
         self.assertEqual(set(actual_failures), set(failures))
         self.assertEqual(set(actual_successes), set(successes))
     finally:
         os.remove(temp_file_name)
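A hypothetical pair of calls: with two shards, each shard should run a
disjoint subset of SimpleShardingTest, so the expected successes differ by
shard_index (test names illustrative):

def testTwoWaySharding(self):  # hypothetical
  self.BaseShardingTest(total_shards=2, shard_index=0, failures=[],
                        successes=['browser_tests.sharding_test.Test1'])
  self.BaseShardingTest(total_shards=2, shard_index=1, failures=[],
                        successes=['browser_tests.sharding_test.Test2'])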
Example #6
 def _RunIntegrationTest(self, test_name, failures, successes, skips,
                         additional_args):
     # pylint: disable=too-many-locals
     config = chromium_config.ChromiumConfig(
         top_level_dir=path_util.GetGpuTestDir(),
         benchmark_dirs=[
             os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
         ])
     temp_dir = tempfile.mkdtemp()
     test_results_path = os.path.join(temp_dir, 'test_results.json')
     test_state_path = os.path.join(temp_dir, 'test_state.json')
     old_manager = binary_manager._binary_manager
     try:
         # TODO(crbug.com/1099856): Fix the telemetry binary_manager API so
         # that we don't need to access its private global variable.
         binary_manager._binary_manager = None
         # Process the ChromiumConfig instance into an argument list and
         # pass it directly to run_browser_tests.RunTests. Calling
         # browser_test_runner.Run instead would spawn another subprocess,
         # which is less efficient.
         args = browser_test_runner.ProcessConfig(config, [
             test_name,
             '--write-full-results-to=%s' % test_results_path,
             '--test-state-json-path=%s' % test_state_path
         ] + additional_args)
         run_browser_tests.RunTests(args)
         with open(test_results_path) as f:
             self._test_result = json.load(f)
         with open(test_state_path) as f:
             self._test_state = json.load(f)
         actual_successes, actual_failures, actual_skips = (
             _ExtractTestResults(self._test_result))
         self.assertEqual(set(actual_failures), set(failures))
         self.assertEqual(set(actual_successes), set(successes))
         self.assertEqual(set(actual_skips), set(skips))
     finally:
         binary_manager._binary_manager = old_manager
         shutil.rmtree(temp_dir)
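Example #6 pairs tempfile.mkdtemp() with shutil.rmtree() in a finally
block, where Example #1 gets the same effect from the
tempfile_ext.NamedTemporaryDirectory context manager. Using only the
standard library, the equivalent pattern is (a sketch):

import os
import tempfile

with tempfile.TemporaryDirectory() as temp_dir:
    test_results_path = os.path.join(temp_dir, 'test_results.json')
    # ... run the tests and read the results back ...
# The directory and everything in it are removed when the block exits.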