  def baseShardingTest(self, total_shards, shard_index, failures, successes):
    options = browser_test_runner.TestRunOptions()
    options.verbosity = 0
    config = project_config.ProjectConfig(
        top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
        client_configs=['a', 'b', 'c'],
        benchmark_dirs=[
            os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
    )
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.close()
    temp_file_name = temp_file.name
    try:
      browser_test_runner.Run(
          config, options,
          ['SimpleShardingTest',
           '--write-abbreviated-json-results-to=%s' % temp_file_name,
           '--total-shards=%d' % total_shards,
           '--shard-index=%d' % shard_index])
      with open(temp_file_name) as f:
        test_result = json.load(f)
      self.assertEquals(test_result['failures'], failures)
      self.assertEquals(test_result['successes'], successes)
      self.assertEquals(test_result['valid'], True)
    finally:
      os.remove(temp_file_name)
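A hypothetical caller, sketching how a sharded run might be asserted; the shard split and the test names below are illustrative, not taken from the real suite:

  def testSimpleShardingFirstShard(self):
    # Shard 0 of 2 should see only its slice of SimpleShardingTest's cases
    # (hypothetical names, for illustration only).
    self.baseShardingTest(
        total_shards=2, shard_index=0,
        failures=[],
        successes=[
            'browser_tests.simple_sharding_test.SimpleShardingTest.Test1',
            'browser_tests.simple_sharding_test.SimpleShardingTest.Test2'])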
  def baseTest(self, mockInitDependencyManager, test_filter,
               failures, successes):
    options = browser_test_runner.TestRunOptions()
    options.verbosity = 0
    config = project_config.ProjectConfig(
        top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
        client_configs=['a', 'b', 'c'],
        benchmark_dirs=[
            os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
    )
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.close()
    temp_file_name = temp_file.name
    try:
      browser_test_runner.Run(
          config, options,
          ['SimpleTest',
           '--write-abbreviated-json-results-to=%s' % temp_file_name,
           '--test-filter=%s' % test_filter])
      mockInitDependencyManager.assert_called_with(['a', 'b', 'c'])
      with open(temp_file_name) as f:
        test_result = json.load(f)
      self.assertEquals(test_result['failures'], failures)
      self.assertEquals(test_result['successes'], successes)
      self.assertEquals(test_result['valid'], True)
    finally:
      os.remove(temp_file_name)
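The mockInitDependencyManager parameter implies the calling test method is wrapped with mock.patch; a minimal sketch of such a caller, assuming the patch target is Telemetry's binary_manager.InitDependencyManager (an assumption suggested by the assert_called_with above, not confirmed by this excerpt) and assuming mock is imported at module scope:

  @mock.patch('telemetry.internal.util.binary_manager.InitDependencyManager')
  def testFilteredRun(self, mockInitDependencyManager):
    # Filter and expected results are illustrative only.
    self.baseTest(
        mockInitDependencyManager, 'add_1_and_2',
        failures=['browser_tests.simple_numeric_test.SimpleTest.add_1_and_2'],
        successes=[])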
Example #3
 def baseTest(self,
              failures,
              successes,
              skips,
              test_name='',
              extra_args=None):
     extra_args = extra_args or []
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     try:
         passed_args = [
             test_name, '--no-browser',
             ('--write-full-results-to=%s' % temp_file_name)
         ]
         ret = unittest_runner.Run(config,
                                   passed_args=passed_args + extra_args)
         self.assertEquals(ret, 0)
         with open(temp_file_name) as f:
             self._test_result = json.load(f)
         (actual_successes, actual_failures,
          actual_skips) = self._ExtractTestResults(self._test_result)
         self.assertEquals(set(actual_failures), set(failures))
         self.assertEquals(set(actual_successes), set(successes))
         self.assertEquals(set(actual_skips), set(skips))
     finally:
         os.remove(temp_file_name)
 def _RunTest(self,
              test_filter,
              expected_failures,
              expected_successes,
              expected_skips=None,
              test_name='SimpleTest',
              extra_args=None):
     expected_skips = expected_skips or []
     extra_args = extra_args or []
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     try:
         browser_test_runner.Run(config, [
             test_name,
             '--write-full-results-to=%s' % temp_file_name,
             '--test-filter=%s' % test_filter
         ] + extra_args)
         with open(temp_file_name) as f:
             self._test_result = json.load(f)
         (actual_successes, actual_failures,
          actual_skips) = self._ExtractTestResults(self._test_result)
         self.assertEquals(set(actual_failures), set(expected_failures))
         self.assertEquals(set(actual_successes), set(expected_successes))
         self.assertEquals(set(actual_skips), set(expected_skips))
     finally:
         os.remove(temp_file_name)
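A hypothetical invocation of this helper; the filter and the expected failure are illustrative (the same test appears among the failures in the JSON-output example below):

 def testFilterSelectsSingleTest(self):
     self._RunTest(
         test_filter='add_1_and_2',
         expected_failures=[
             'browser_tests.simple_numeric_test.SimpleTest.add_1_and_2'],
         expected_successes=[])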
 def testJsonOutputFormat(self, mockInitDependencyManager):
     options = browser_test_runner.TestRunOptions()
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=['a', 'b', 'c'],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     try:
         browser_test_runner.Run(config, options, [
             'SimpleTest',
             '--write-abbreviated-json-results-to=%s' % temp_file_name
         ])
         mockInitDependencyManager.assert_called_with(['a', 'b', 'c'])
         with open(temp_file_name) as f:
             test_result = json.load(f)
         self.assertEquals(test_result['failures'], [
             'browser_tests.simple_numeric_test.SimpleTest.multiplier_simple_2',
             'browser_tests.simple_numeric_test.SimpleTest.add_1_and_2',
             'browser_tests.simple_numeric_test.SimpleTest.add_7_and_3',
             'browser_tests.simple_numeric_test.SimpleTest.testSimple'
         ])
         self.assertEquals(test_result['valid'], True)
     finally:
         os.remove(temp_file_name)
def main():
    options = browser_test_runner.TestRunOptions()
    config = project_config.ProjectConfig(
        top_level_dir=os.path.dirname(__file__),
        benchmark_dirs=[
            os.path.join(os.path.dirname(__file__), 'browser_tests')
        ])
    return browser_test_runner.Run(config, options, sys.argv[1:])
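A standard entry-point guard (assumed here, not shown in the excerpt) would hand control to main() and propagate its return value as the process exit status:

if __name__ == '__main__':
    sys.exit(main())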
Example #7
def main():
    constants.SetBuildType(BUILD_TYPE)
    # Install APK
    device = GetDevice()
    device.EnableRoot()
    device.Install(APP_APK)
    # Start USB reverse tethering.
    # Port map is ignored for tethering; must create one to placate assertions.
    named_port_pair_map = {
        'http': (forwarders.PortPair(0, 0)),
        'https': None,
        'dns': None
    }
    port_pairs = forwarders.PortPairs(**named_port_pair_map)
    forwarder = GetForwarderFactory(device).Create(port_pairs)
    # Start HTTP server.
    http_server_doc_root = GenerateHttpTestResources()
    config_file = tempfile.NamedTemporaryFile()
    http_server = lighttpd_server.LighttpdServer(
        http_server_doc_root,
        port=HTTP_PORT,
        base_config_path=config_file.name)
    GenerateLighttpdConfig(config_file, http_server_doc_root, http_server)
    assert http_server.StartupHttpServer()
    config_file.close()
    # Start QUIC server.
    quic_server_doc_root = GenerateQuicTestResources(device)
    quic_server = QuicServer(quic_server_doc_root)
    quic_server.StartupQuicServer(device)
    # Launch Telemetry's benchmark_runner on CronetPerfTestBenchmark.
    # Pointing benchmark_runner at this file's directory lets it open this file
    # and discover the CronetPerfTestBenchmark class to run.
    top_level_dir = os.path.dirname(os.path.realpath(__file__))
    # The perf config file is required to continue using dependencies on the
    # Chromium checkout in Telemetry.
    perf_config_file = os.path.join(REPOSITORY_ROOT, 'tools', 'perf', 'core',
                                    'binary_dependencies.json')
    with open(perf_config_file, "w") as config_file:
        config_file.write('{"config_type": "BaseConfig"}')
    runner_config = project_config.ProjectConfig(
        top_level_dir=top_level_dir,
        benchmark_dirs=[top_level_dir],
        client_configs=[perf_config_file],
        default_chrome_root=REPOSITORY_ROOT)
    sys.argv.insert(1, 'run')
    sys.argv.insert(2, 'run.CronetPerfTestBenchmark')
    sys.argv.insert(3, '--android-rndis')
    benchmark_runner.main(runner_config)
    # Shutdown.
    quic_server.ShutdownQuicServer()
    shutil.rmtree(quic_server_doc_root)
    http_server.ShutdownHttpServer()
    shutil.rmtree(http_server_doc_root)
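Note that the shutdown and rmtree calls above only run if benchmark_runner.main returns normally. A defensive sketch of the same teardown wrapped in try/finally, using the objects defined earlier in main (this variant is not part of the original):

    try:
        benchmark_runner.main(runner_config)
    finally:
        # Always tear the servers down and delete the generated doc roots,
        # even when the benchmark run raises.
        quic_server.ShutdownQuicServer()
        shutil.rmtree(quic_server_doc_root)
        http_server.ShutdownHttpServer()
        shutil.rmtree(http_server_doc_root)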
 def _RunTest(self,
              test_filter,
              expected_failures,
              expected_successes,
              expected_skips=None,
              test_name='SimpleTest',
              expectations='',
              tags=None,
              extra_args=None):
     expected_skips = expected_skips or []
     tags = tags or []
     extra_args = extra_args or []
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     if expectations:
         expectations_file = tempfile.NamedTemporaryFile(delete=False)
         expectations_file.write(expectations)
         expectations_file.close()
         extra_args.extend(['-X', expectations_file.name] +
                           ['-x=%s' % tag for tag in tags])
     args = ([
         test_name,
         '--write-full-results-to=%s' % temp_file_name,
         '--test-filter=%s' % test_filter,
         # We don't want the underlying tests to report their results to
         # ResultDB.
         '--disable-resultsink',
     ] + extra_args)
     try:
         args = browser_test_runner.ProcessConfig(config, args)
         with binary_manager.TemporarilyReplaceBinaryManager(None):
             run_browser_tests.RunTests(args)
         with open(temp_file_name) as f:
             self._test_result = json.load(f)
         (actual_successes, actual_failures,
          actual_skips) = self._ExtractTestResults(self._test_result)
         self.assertEquals(set(actual_failures), set(expected_failures))
         self.assertEquals(set(actual_successes), set(expected_successes))
         self.assertEquals(set(actual_skips), set(expected_skips))
     finally:
         os.remove(temp_file_name)
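A hypothetical caller of this helper, showing how an expectations string and tags flow into the '-X'/'-x' arguments above; the test name is taken from the JSON-output example earlier on this page, and the Skip outcome is illustrative:

 def testSkipViaExpectations(self):
     self._RunTest(
         test_filter='add_1_and_2',
         expected_failures=[],
         expected_successes=[],
         expected_skips=[
             'browser_tests.simple_numeric_test.SimpleTest.add_1_and_2'],
         expectations=('# tags: [ foo bar mac ]\n'
                       '# results: [ Skip ]\n'
                       'crbug.com/123 [ foo ] '
                       'browser_tests.simple_numeric_test.SimpleTest'
                       '.add_1_and_2 [ Skip ]'),
         tags=['foo'])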
 def BaseShardingTest(self,
                      total_shards,
                      shard_index,
                      failures,
                      successes,
                      opt_abbr_input_json_file=None,
                      opt_test_filter='',
                      opt_filter_tests_after_sharding=False,
                      opt_test_name_prefix=''):
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     temp_file_name = temp_file.name
     opt_args = []
     if opt_abbr_input_json_file:
         opt_args += [
             '--read-abbreviated-json-results-from=%s' %
             opt_abbr_input_json_file
         ]
     if opt_test_filter:
         opt_args += ['--test-filter=%s' % opt_test_filter]
     if opt_filter_tests_after_sharding:
         opt_args += ['--filter-tests-after-sharding']
     if opt_test_name_prefix:
         opt_args += ['--test-name-prefix=%s' % opt_test_name_prefix]
     args = ([
         'SimpleShardingTest',
         '--write-full-results-to=%s' % temp_file_name,
         '--total-shards=%d' % total_shards,
         '--shard-index=%d' % shard_index
     ] + opt_args)
     try:
         args = browser_test_runner.ProcessConfig(config, args)
         with binary_manager.TemporarilyReplaceBinaryManager(None):
             run_browser_tests.RunTests(args)
         with open(temp_file_name) as f:
             test_result = json.load(f)
         (actual_successes, actual_failures,
          _) = self._ExtractTestResults(test_result)
         self.assertEquals(set(actual_failures), set(failures))
         self.assertEquals(set(actual_successes), set(successes))
     finally:
         os.remove(temp_file_name)
 def _RunBrowserTest(self,
                     modulename,
                     classname,
                     test_name,
                     expectation='Pass',
                     test_tags='foo',
                     extra_args=None,
                     expected_exit_code=0,
                     include_expectations=True):
     extra_args = extra_args or []
     if include_expectations:
         expectations = ('# tags: [ foo bar mac ]\n'
                         'crbug.com/123 [ %s ] '
                         'browser_tests.%s.%s.%s'
                         ' [ %s ]')
         expectations = expectations % (test_tags, modulename, classname,
                                        test_name, expectation)
         expectations_file = tempfile.NamedTemporaryFile(delete=False)
         expectations_file.write(expectations)
         expectations_file.close()
         expectations_file_paths = [expectations_file.name]
     else:
         expectations_file_paths = []
     results = tempfile.NamedTemporaryFile(delete=False)
     results.close()
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         expectations_files=expectations_file_paths,
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     try:
         ret = browser_test_runner.Run(config, [
             '%s' % classname,
             ('--write-full-results-to=%s' % results.name),
             ('--test-filter=.*%s.*' % test_name)
         ] + extra_args)
         self.assertEqual(ret, expected_exit_code)
         with open(results.name) as f:
             test_result = json.load(f)
     finally:
         if expectations_file_paths:
             os.remove(expectations_file.name)
         os.remove(results.name)
     return test_result
Example #11
    def _RunTest(self,
                 expected_failures,
                 expected_successes,
                 expected_skips,
                 expected_return_code=0,
                 test_name='',
                 extra_args=None,
                 no_browser=True):
        extra_args = extra_args or []
        config = project_config.ProjectConfig(
            top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
            client_configs=[],
            benchmark_dirs=[
                os.path.join(util.GetTelemetryDir(), 'examples',
                             'browser_tests')
            ])
        temp_file = tempfile.NamedTemporaryFile(delete=False)
        temp_file.close()
        temp_file_name = temp_file.name
        try:
            passed_args = []
            if test_name:
                passed_args.append(test_name)
            if no_browser:
                passed_args.append('--no-browser')
            passed_args.append('--write-full-results-to=%s' % temp_file_name)
            ret = unittest_runner.Run(config,
                                      passed_args=passed_args + extra_args)
            assert ret == expected_return_code, (
                'actual return code %d does not equal the expected return code %d'
                % (ret, expected_return_code))
            with open(temp_file_name) as f:
                self._test_result = json.load(f)
            (actual_successes, actual_failures,
             actual_skips) = self._ExtractTestResults(self._test_result)

            # Keep the asserts below; without them we could miss tests that run
            # when they are not supposed to.
            self.assertEquals(set(actual_failures), set(expected_failures))
            self.assertEquals(set(actual_successes), set(expected_successes))
            self.assertEquals(set(actual_skips), set(expected_skips))
        finally:
            os.remove(temp_file_name)
        return actual_failures, actual_successes, actual_skips
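Unlike the earlier helpers, this one returns the extracted result sets, so a caller can make additional assertions on top of the set-equality checks already performed. A hypothetical example (the test name is invented for illustration):

    def testReturnedResultSets(self):
        # Hypothetical test name and expectations, for illustration only.
        failures, successes, skips = self._RunTest(
            expected_failures=[],
            expected_successes=['example_module.ExampleTest.test_pass'],
            expected_skips=[],
            test_name='example_module.ExampleTest.test_pass')
        # The helper has already asserted set equality; the returned lists can
        # still be used for extra checks such as counts.
        self.assertEqual(len(successes), 1)
        self.assertEqual(len(failures), 0)
        self.assertEqual(len(skips), 0)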
Example #12
 def _RunTestsWithExpectationsFile(self,
                                   full_test_name,
                                   expectations,
                                   test_tags='foo',
                                   extra_args=None,
                                   expected_exit_code=0):
     extra_args = extra_args or []
     test_expectations = (
         ('# tags: [ foo bar mac ]\n'
          '# results: [ {expectations} ]\n'
          'crbug.com/123 [ {tags} ] {test} [ {expectations} ]').format(
              expectations=expectations,
              tags=test_tags,
              test=full_test_name))
     expectations_file = tempfile.NamedTemporaryFile(delete=False)
     expectations_file.write(test_expectations)
     results = tempfile.NamedTemporaryFile(delete=False)
     results.close()
     expectations_file.close()
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         expectations_files=[expectations_file.name],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     try:
         passed_args = ([
             full_test_name, '--no-browser',
             ('--write-full-results-to=%s' % results.name)
         ] + ['--tag=%s' % tag for tag in test_tags.split()])
         ret = unittest_runner.Run(config,
                                   passed_args=passed_args + extra_args)
         self.assertEqual(ret, expected_exit_code)
         with open(results.name) as f:
             self._test_result = json.load(f)
     finally:
         os.remove(expectations_file.name)
         os.remove(results.name)
     return self._test_result
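For a concrete call such as self._RunTestsWithExpectationsFile('browser_tests.simple_numeric_test.SimpleTest.add_1_and_2', 'Failure'), the temporary expectations file written above would contain the following three lines (derived directly from the format string; the test name is taken from the JSON-output example earlier on this page), and unittest_runner.Run would then be invoked with '--tag=foo' plus the '--write-full-results-to' path:

# tags: [ foo bar mac ]
# results: [ Failure ]
crbug.com/123 [ foo ] browser_tests.simple_numeric_test.SimpleTest.add_1_and_2 [ Failure ]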
Example #13
    def setUp(self):
        # TODO(crbug.com/981349): Ideally parsing args should not have any side
        # effects; for now we need to mock out calls to set up logging and binary
        # manager.
        mock.patch('telemetry.command_line.parser.logging').start()
        mock.patch('telemetry.command_line.parser.binary_manager').start()

        mock.patch.object(argparse.ArgumentParser,
                          'exit',
                          side_effect=ParserExit).start()
        mock.patch.object(optparse.OptionParser,
                          'exit',
                          side_effect=ParserExit).start()
        self._argparse_error = mock.patch.object(
            argparse.ArgumentParser, 'error', side_effect=ParserError).start()
        self._optparse_error = mock.patch.object(
            optparse.OptionParser, 'error', side_effect=ParserError).start()

        examples_dir = os.path.join(util.GetTelemetryDir(), 'examples')
        self.config = project_config.ProjectConfig(
            top_level_dir=examples_dir,
            benchmark_dirs=[os.path.join(examples_dir, 'benchmarks')])
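The patches above are started with .start() and never stopped in this excerpt; the original class presumably stops them in tearDown. A minimal sketch of that counterpart:

    def tearDown(self):
        # Undo every patch started via mock.patch(...).start() in setUp.
        mock.patch.stopall()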
Example #14
 def _RunBrowserTest(self,
                     modulename,
                     classname,
                     test_name,
                     expectation,
                     test_tags='foo'):
     expectations = ('# tags: [ foo bar mac ]\n'
                     'crbug.com/123 [ %s ] '
                     'browser_tests.%s.%s.%s'
                     ' [ %s ]')
     expectations = expectations % (test_tags, modulename, classname,
                                    test_name, expectation)
     expectations_file = tempfile.NamedTemporaryFile(delete=False)
     expectations_file.write(expectations)
     results = tempfile.NamedTemporaryFile(delete=False)
     results.close()
     expectations_file.close()
     config = project_config.ProjectConfig(
         top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
         client_configs=[],
         expectations_files=[expectations_file.name],
         benchmark_dirs=[
             os.path.join(util.GetTelemetryDir(), 'examples',
                          'browser_tests')
         ])
     try:
         browser_test_runner.Run(config, [
             '%s' % classname,
             '--write-full-results-to=%s' % results.name,
             '--test-filter=.*%s.*' % test_name
         ])
         with open(results.name) as f:
             test_result = json.load(f)
     finally:
         os.remove(expectations_file.name)
         os.remove(results.name)
     return test_result
 def setUp(self):
     top_level_dir = os.path.dirname(__file__)
     self.config = project_config.ProjectConfig(
         top_level_dir=top_level_dir,
         benchmark_dirs=[os.path.join(top_level_dir, 'testdata')])