Example #1
def main():
    rest_args = sys.argv[1:]
    FailIfScreenLockedOnMac()
    parser = argparse.ArgumentParser(description='Extra argument parser',
                                     add_help=False)

    rest_args_filtered = ProcessArgs(rest_args, parser)

    retval = browser_test_runner.Run(gpu_project_config.CONFIG,
                                     rest_args_filtered)

    # We're not relying on argparse to print the help in the normal way, because
    # we need the help output from both the argument parser here and the argument
    # parser in browser_test_runner.
    if '--help' in rest_args:
        print('\n\nCommand line arguments handled by run_gpu_integration_test:')
        parser.print_help()
        return retval

    # This duplicates an argument of browser_test_runner.
    parser.add_argument(
        '--write-full-results-to',
        metavar='FILENAME',
        action='store',
        help=('If specified, writes the full results to that path.'))

    option, _ = parser.parse_known_args(rest_args)

    # Postprocess the outputted JSON to add test arguments.
    if option.write_run_test_arguments and option.write_full_results_to:
        PostprocessJSON(option.write_full_results_to, rest_args)
    return retval
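
PostprocessJSON is called above but never shown in this snippet. A minimal sketch of the "add test arguments" behaviour described by the comment, assuming the full-results file is a single JSON object; the run_test_arguments key name is a hypothetical choice, not taken from the original script:

import json

def PostprocessJSON(results_path, args):
    # Sketch only: record the test script arguments in the results file, as
    # the comment above describes. 'run_test_arguments' is a hypothetical key.
    with open(results_path) as f:
        results = json.load(f)
    results['run_test_arguments'] = args
    with open(results_path, 'w') as f:
        json.dump(results, f, indent=2)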
Example #2
 def baseTest(self, mockInitDependencyManager, test_filter,
              failures, successes):
   options = browser_test_runner.TestRunOptions()
   options.verbosity = 0
   config = project_config.ProjectConfig(
       top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
       client_configs=['a', 'b', 'c'],
       benchmark_dirs=[
           os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
   )
   temp_file = tempfile.NamedTemporaryFile(delete=False)
   temp_file.close()
   temp_file_name = temp_file.name
   try:
     browser_test_runner.Run(
         config, options,
         ['SimpleTest',
          '--write-abbreviated-json-results-to=%s' % temp_file_name,
          '--test-filter=%s' % test_filter])
     mockInitDependencyManager.assert_called_with(['a', 'b', 'c'])
     with open(temp_file_name) as f:
       test_result = json.load(f)
     self.assertEquals(test_result['failures'], failures)
     self.assertEquals(test_result['successes'], successes)
     self.assertEquals(test_result['valid'], True)
   finally:
     os.remove(temp_file_name)
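
In the surrounding suite the mockInitDependencyManager argument is supplied by a mock.patch decorator on the test method; a hypothetical caller of the helper above could look like the following (the filter and the expected name lists are illustrative placeholders, not results from the real browser_tests examples):

 def testFilterMatchingSingleTest(self, mockInitDependencyManager):
   # Illustrative only: the filter and the expected lists are placeholders.
   self.baseTest(
       mockInitDependencyManager,
       test_filter='add_1_and_2',
       failures=['browser_tests.simple_numeric_test.SimpleTest.add_1_and_2'],
       successes=[])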
Example #3
 def testSimpleIntegrationUnittest(self, mockInitDependencyManager):
     options = browser_test_runner.TestRunOptions()
     # Suppress printing out information for passing tests.
     options.verbosity = 0
     config = gpu_project_config.CONFIG
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     try:
         browser_test_runner.Run(config, options, [
             'simple_integration_unittest',
             '--write-abbreviated-json-results-to=%s' % temp_file_name
         ])
         with open(temp_file_name) as f:
             test_result = json.load(f)
         self.assertEquals(test_result['failures'],
                           ['unexpected_error', 'unexpected_failure'])
         self.assertEquals(test_result['successes'],
                           ['expected_failure', 'expected_flaky'])
         self.assertEquals(test_result['valid'], True)
         # It might be nice to be more precise about the order of operations
         # with these browser restarts, but this is at least a start.
         self.assertEquals(SimpleIntegrationUnittest._num_browser_starts, 5)
     finally:
         os.remove(temp_file_name)
Example #4
 def testJsonOutputFormat(self, mockInitDependencyManager):
     options = browser_test_runner.TestRunOptions()
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=['a', 'b', 'c'],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     try:
         browser_test_runner.Run(config, options, [
             'SimpleTest',
             '--write-abbreviated-json-results-to=%s' % temp_file_name
         ])
         mockInitDependencyManager.assert_called_with(['a', 'b', 'c'])
         with open(temp_file_name) as f:
             test_result = json.load(f)
         self.assertEquals(test_result['failures'], [
             'browser_tests.simple_numeric_test.SimpleTest.multiplier_simple_2',
             'browser_tests.simple_numeric_test.SimpleTest.add_1_and_2',
             'browser_tests.simple_numeric_test.SimpleTest.add_7_and_3',
             'browser_tests.simple_numeric_test.SimpleTest.testSimple'
         ])
         self.assertEquals(test_result['valid'], True)
     finally:
         os.remove(temp_file_name)
Example #5
 def _RunIntegrationTest(self, test_name, failures, successes, skips,
                         additional_args):
     config = chromium_config.ChromiumConfig(
         top_level_dir=path_util.GetGpuTestDir(),
         benchmark_dirs=[
             os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
         ])
     temp_dir = tempfile.mkdtemp()
     test_results_path = os.path.join(temp_dir, 'test_results.json')
     test_state_path = os.path.join(temp_dir, 'test_state.json')
     try:
         browser_test_runner.Run(config, [
             test_name,
             '--write-full-results-to=%s' % test_results_path,
             '--test-state-json-path=%s' % test_state_path
         ] + additional_args)
         with open(test_results_path) as f:
             self._test_result = json.load(f)
         with open(test_state_path) as f:
             self._test_state = json.load(f)
         actual_successes, actual_failures, actual_skips = (
             _ExtractTestResults(self._test_result))
         self.assertEquals(set(actual_failures), set(failures))
         self.assertEquals(set(actual_successes), set(successes))
         self.assertEquals(set(actual_skips), set(skips))
     finally:
         shutil.rmtree(temp_dir)
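
_ExtractTestResults is used here and in several later examples but never defined. A sketch of one possible implementation, assuming the file written by --write-full-results-to follows the Chromium JSON test results format (a nested 'tests' trie whose leaves carry 'actual' and 'expected' status strings); retry handling is deliberately simplified:

def _ExtractTestResults(test_result):
    # Sketch only: walk the nested 'tests' trie and bucket each leaf by its
    # final actual status (PASS, SKIP, anything else counts as a failure).
    successes, failures, skips = [], [], []

    def _Walk(node, path):
        if 'actual' in node and 'expected' in node:
            name = '.'.join(path)
            status = node['actual'].split()[-1]
            if status == 'SKIP':
                skips.append(name)
            elif status == 'PASS':
                successes.append(name)
            else:
                failures.append(name)
        else:
            for key, child in node.items():
                _Walk(child, path + [key])

    _Walk(test_result.get('tests', {}), [])
    return successes, failures, skips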
Example #6
 def baseShardingTest(self, total_shards, shard_index, failures, successes):
   options = browser_test_runner.TestRunOptions()
   options.verbosity = 0
   config = project_config.ProjectConfig(
       top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
       client_configs=['a', 'b', 'c'],
       benchmark_dirs=[
           os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
   )
   temp_file = tempfile.NamedTemporaryFile(delete=False)
   temp_file.close()
   temp_file_name = temp_file.name
   try:
     browser_test_runner.Run(
         config, options,
         ['SimpleShardingTest',
          '--write-abbreviated-json-results-to=%s' % temp_file_name,
          '--total-shards=%d' % total_shards,
          '--shard-index=%d' % shard_index])
     with open(temp_file_name) as f:
       test_result = json.load(f)
     self.assertEquals(test_result['failures'], failures)
     self.assertEquals(test_result['successes'], successes)
     self.assertEquals(test_result['valid'], True)
   finally:
     os.remove(temp_file_name)
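
To illustrate what --total-shards and --shard-index mean, two calls to the helper above should together cover the whole suite, with each test running on exactly one shard. The concrete test names below are placeholders; which test lands on which shard depends on the runner's ordering:

 def testShardsPartitionSuite(self, mockInitDependencyManager):
   # Illustrative only: the expected per-shard lists are placeholders.
   shard0 = ['browser_tests.simple_sharding_test.SimpleShardingTest.Test1']
   shard1 = ['browser_tests.simple_sharding_test.SimpleShardingTest.Test2']
   self.baseShardingTest(2, 0, failures=[], successes=shard0)
   self.baseShardingTest(2, 1, failures=[], successes=shard1)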
Example #7
 def _RunTest(self,
              test_filter,
              expected_failures,
              expected_successes,
              expected_skips=None,
              test_name='SimpleTest',
              extra_args=None):
     expected_skips = expected_skips or []
     extra_args = extra_args or []
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     try:
         browser_test_runner.Run(config, [
             test_name,
             '--write-full-results-to=%s' % temp_file_name,
             '--test-filter=%s' % test_filter
         ] + extra_args)
         with open(temp_file_name) as f:
             self._test_result = json.load(f)
         (actual_successes, actual_failures,
          actual_skips) = self._ExtractTestResults(self._test_result)
         self.assertEquals(set(actual_failures), set(expected_failures))
         self.assertEquals(set(actual_successes), set(expected_successes))
         self.assertEquals(set(actual_skips), set(expected_skips))
     finally:
         os.remove(temp_file_name)
Example #8
def main():
    options = browser_test_runner.TestRunOptions()
    config = project_config.ProjectConfig(
        top_level_dir=os.path.dirname(__file__),
        benchmark_dirs=[
            os.path.join(os.path.dirname(__file__), 'browser_tests')
        ])
    return browser_test_runner.Run(config, options, sys.argv[1:])
Example #9
def main():
    FailIfScreenLockedOnMac()
    rest_args = sys.argv[1:]
    parser = argparse.ArgumentParser(description='Extra argument parser',
                                     add_help=False)

    parser.add_argument(
        '--write-run-test-arguments',
        action='store_true',
        help=('Write the test script arguments to the results file.'))
    option, rest_args_filtered = parser.parse_known_args(rest_args)

    parser.add_argument('test', nargs='*', type=str, help=argparse.SUPPRESS)
    option, _ = parser.parse_known_args(rest_args_filtered)

    if option.test:
        test_class = FindTestCase(option.test[0])
    else:
        test_class = None

    if test_class:
        rest_args_filtered.extend([
            '--test-name-prefix=%s.%s.' %
            (test_class.__module__, test_class.__name__)
        ])

    if not any(arg.startswith('--retry-limit') for arg in rest_args_filtered):
        if '--retry-only-retry-on-failure-tests' not in rest_args_filtered:
            rest_args_filtered.append('--retry-only-retry-on-failure-tests')
        rest_args_filtered.append('--retry-limit=2')
    rest_args_filtered.extend(
        ['--repository-absolute-path',
         path_util.GetChromiumSrcDir()])

    retval = browser_test_runner.Run(gpu_project_config.CONFIG,
                                     rest_args_filtered)

    # We're not relying on argparse to print the help in the normal way, because
    # we need the help output from both the argument parser here and the argument
    # parser in browser_test_runner.
    if '--help' in rest_args:
        print('\n\nCommand line arguments handled by run_gpu_integration_test:')
        parser.print_help()
        return retval

    # This duplicates an argument of browser_test_runner.
    parser.add_argument(
        '--write-full-results-to',
        metavar='FILENAME',
        action='store',
        help=('If specified, writes the full results to that path.'))

    option, _ = parser.parse_known_args(rest_args)

    # Postprocess the outputted JSON to add test arguments.
    if option.write_run_test_arguments and option.write_full_results_to:
        PostprocessJSON(option.write_full_results_to, rest_args)
    return retval
Example #10
def main():
    rest_args = sys.argv[1:]
    retval = browser_test_runner.Run(gpu_project_config.CONFIG, rest_args)
    # Postprocess the outputted JSON to trim all of the prefixes from
    # the test names, to keep them as similar to the old form as
    # possible -- and keep them from getting crazily long.
    parser = argparse.ArgumentParser(description='Temporary argument parser')
    parser.add_argument('--write-abbreviated-json-results-to',
                        metavar='FILENAME',
                        action='store',
                        help=('Full path for json results'))
    option, _ = parser.parse_known_args(rest_args)
    if option.write_abbreviated_json_results_to:
        PostprocessJSON(option.write_abbreviated_json_results_to, rest_args)
    return retval
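
In this older variant PostprocessJSON does the prefix trimming that the comment describes. A sketch, assuming the results file holds flat 'successes' and 'failures' name lists (the structure the test examples above read back) and that keeping the final class.test components is the intended abbreviation:

import json

def PostprocessJSON(json_path, rest_args):
    # Sketch only: shorten each test name to its last two dotted components.
    # How much prefix to strip is an assumption.
    del rest_args  # Unused in this sketch; kept to match the call site.

    def _Trim(name):
        return '.'.join(name.split('.')[-2:])

    with open(json_path) as f:
        results = json.load(f)
    for key in ('successes', 'failures'):
        if key in results:
            results[key] = [_Trim(name) for name in results[key]]
    with open(json_path, 'w') as f:
        json.dump(results, f, indent=2)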
Example #11
 def _RunBrowserTest(self,
                     modulename,
                     classname,
                     test_name,
                     expectation='Pass',
                     test_tags='foo',
                     extra_args=None,
                     expected_exit_code=0,
                     include_expectations=True):
     extra_args = extra_args or []
     if include_expectations:
         expectations = ('# tags: [ foo bar mac ]\n'
                         'crbug.com/123 [ %s ] '
                         'browser_tests.%s.%s.%s'
                         ' [ %s ]')
         expectations = expectations % (test_tags, modulename, classname,
                                        test_name, expectation)
          expectations_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
         expectations_file.write(expectations)
         expectations_file.close()
         expectations_file_paths = [expectations_file.name]
     else:
         expectations_file_paths = []
     results = tempfile.NamedTemporaryFile(delete=False)
     results.close()
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         expectations_files=expectations_file_paths,
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     try:
         ret = browser_test_runner.Run(config, [
             '%s' % classname,
             ('--write-full-results-to=%s' % results.name),
             ('--test-filter=.*%s.*' % test_name)
         ] + extra_args)
         self.assertEqual(ret, expected_exit_code)
         with open(results.name) as f:
             test_result = json.load(f)
     finally:
         if expectations_file_paths:
             os.remove(expectations_file.name)
         os.remove(results.name)
     return test_result
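
A hypothetical caller for the helper above that uses its return value; the module, class and test names come from other examples in this listing, and the assertion is illustrative:

 def testFailureExpectationStillPasses(self):
     # Illustrative only: apply a Failure expectation to one of the example
     # tests and inspect the full-results JSON returned by the helper.
     test_result = self._RunBrowserTest(
         'simple_numeric_test', 'SimpleTest', 'testSimple',
         expectation='Failure')
     self.assertIn('tests', test_result)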
Example #12
 def BaseShardingTest(self,
                      total_shards,
                      shard_index,
                      failures,
                      successes,
                      opt_abbr_input_json_file=None,
                      opt_test_filter='',
                      opt_filter_tests_after_sharding=False,
                      opt_test_name_prefix=''):
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     opt_args = []
     if opt_abbr_input_json_file:
         opt_args += [
             '--read-abbreviated-json-results-from=%s' %
             opt_abbr_input_json_file
         ]
     if opt_test_filter:
         opt_args += ['--test-filter=%s' % opt_test_filter]
     if opt_filter_tests_after_sharding:
         opt_args += ['--filter-tests-after-sharding']
     if opt_test_name_prefix:
         opt_args += ['--test-name-prefix=%s' % opt_test_name_prefix]
     try:
         browser_test_runner.Run(config, [
             'SimpleShardingTest',
             '--write-full-results-to=%s' % temp_file_name,
             '--total-shards=%d' % total_shards,
             '--shard-index=%d' % shard_index
         ] + opt_args)
         with open(temp_file_name) as f:
             test_result = json.load(f)
         (actual_successes, actual_failures,
          _) = self._ExtractTestResults(test_result)
         self.assertEquals(set(actual_failures), set(failures))
         self.assertEquals(set(actual_successes), set(successes))
     finally:
         os.remove(temp_file_name)
Example #13
def main():
    FailIfScreenLockedOnMac()
    rest_args = sys.argv[1:]
    parser = argparse.ArgumentParser(description='Extra argument parser',
                                     add_help=False)

    parser.add_argument(
        '--write-run-test-arguments',
        action='store_true',
        help=('Write the test script arguments to the results file.'))
    option, rest_args_filtered = parser.parse_known_args(rest_args)

    parser.add_argument('test', type=str, help='Name of the test suite to run')
    option, _ = parser.parse_known_args(rest_args_filtered)

    test_class = FindTestCase(option)
    assert test_class
    rest_args_filtered.extend([
        '--test-name-prefix=%s.%s.' %
        (test_class.__module__, test_class.__name__)
    ])

    retval = browser_test_runner.Run(gpu_project_config.CONFIG,
                                     rest_args_filtered)

    # We're not relying on argparse to print the help in the normal way, because
    # we need the help output from both the argument parser here and the argument
    # parser in browser_test_runner.
    if '--help' in rest_args:
        print('\n\nCommand line arguments handled by run_gpu_integration_test:')
        parser.print_help()
        return retval

    # This duplicates an argument of browser_test_runner.
    parser.add_argument(
        '--write-full-results-to',
        metavar='FILENAME',
        action='store',
        help=('If specified, writes the full results to that path.'))

    option, _ = parser.parse_known_args(rest_args)

    # Postprocess the outputted JSON to add test arguments.
    if option.write_run_test_arguments and option.write_full_results_to:
        PostprocessJSON(option.write_full_results_to, rest_args)
    return retval
Example #14
  def _RunIntegrationTest(self, test_name, failures, successes):
    options = browser_test_runner.TestRunOptions()
    # Suppress printing out information for passing tests.
    options.verbosity = 0
    config = gpu_project_config.CONFIG
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.close()
    temp_file_name = temp_file.name
    try:
      browser_test_runner.Run(
          config, options,
          [test_name,
           '--write-abbreviated-json-results-to=%s' % temp_file_name])
      with open(temp_file_name) as f:
        test_result = json.load(f)
      self.assertEquals(test_result['failures'], failures)
      self.assertEquals(test_result['successes'], successes)
      self.assertEquals(test_result['valid'], True)

    finally:
      os.remove(temp_file_name)
Example #15
 def _RunBrowserTest(self,
                     modulename,
                     classname,
                     test_name,
                     expectation,
                     test_tags='foo'):
     expectations = ('# tags: [ foo bar mac ]\n'
                     'crbug.com/123 [ %s ] '
                     'browser_tests.%s.%s.%s'
                     ' [ %s ]')
     expectations = expectations % (test_tags, modulename, classname,
                                    test_name, expectation)
      expectations_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
     expectations_file.write(expectations)
     results = tempfile.NamedTemporaryFile(delete=False)
     results.close()
     expectations_file.close()
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         expectations_files=[expectations_file.name],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     try:
         browser_test_runner.Run(config, [
             '%s' % classname,
             '--write-full-results-to=%s' % results.name,
             '--test-filter=.*%s.*' % test_name
         ])
         with open(results.name) as f:
             test_result = json.load(f)
     finally:
         os.remove(expectations_file.name)
         os.remove(results.name)
     return test_result
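
For reference, a call such as self._RunBrowserTest('simple_numeric_test', 'SimpleTest', 'testSimple', 'Failure') renders the temporary expectations file above as the following two lines (the concrete names are illustrative, taken from other examples in this listing):

# tags: [ foo bar mac ]
crbug.com/123 [ foo ] browser_tests.simple_numeric_test.SimpleTest.testSimple [ Failure ]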
Example #16
def main(args):
    parser = argparse.ArgumentParser(description='Extra argument parser',
                                     add_help=False)
    parser.add_argument(
        '--output-directory',
        action='store',
        default=None,
        help='Sets the CHROMIUM_OUTPUT_DIR environment variable')
    known_options, rest_args = parser.parse_known_args(args)

    constants.SetOutputDirectory(
        os.path.realpath(known_options.output_directory or os.getcwd()))

    config = chromium_config.ChromiumConfig(
        top_level_dir=os.path.dirname(__file__),
        benchmark_dirs=[os.path.dirname(__file__)])

    ret_val = browser_test_runner.Run(config, rest_args)
    if '--help' in rest_args or '-h' in rest_args:
        print('\n\nCommand line arguments used in '
              'run_webview_component_smoketest.py')
        parser.print_help()

    return ret_val
Example #17
def main():
    options = browser_test_runner.TestRunOptions()
    return browser_test_runner.Run(gpu_project_config.CONFIG, options,
                                   sys.argv[1:])
Example #18
def main(args):
    config = chromium_config.ChromiumConfig(
        top_level_dir=os.path.dirname(__file__),
        benchmark_dirs=[os.path.dirname(__file__)])
    return browser_test_runner.Run(config, args)
Example #19
def main():
    retval = browser_test_runner.Run(cast_project_config.CONFIG, sys.argv)
    return retval