def testJsonOutputFormat(self, mockInitDependencyManager):
  options = browser_test_runner.TestRunOptions()
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=['a', 'b', 'c'],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  temp_file = tempfile.NamedTemporaryFile(delete=False)
  temp_file.close()
  temp_file_name = temp_file.name
  try:
    browser_test_runner.Run(config, options, [
        'SimpleTest',
        '--write-abbreviated-json-results-to=%s' % temp_file_name
    ])
    mockInitDependencyManager.assert_called_with(['a', 'b', 'c'])
    with open(temp_file_name) as f:
      test_result = json.load(f)
    self.assertEquals(test_result['failures'], [
        'browser_tests.simple_numeric_test.SimpleTest.multiplier_simple_2',
        'browser_tests.simple_numeric_test.SimpleTest.add_1_and_2',
        'browser_tests.simple_numeric_test.SimpleTest.add_7_and_3',
        'browser_tests.simple_numeric_test.SimpleTest.testSimple'
    ])
    self.assertEquals(test_result['valid'], True)
  finally:
    os.remove(temp_file_name)

def baseTest(self, failures, successes, skips, test_name='',
             extra_args=None):
  extra_args = extra_args or []
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=[],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  temp_file = tempfile.NamedTemporaryFile(delete=False)
  temp_file.close()
  temp_file_name = temp_file.name
  try:
    passed_args = [
        test_name, '--no-browser',
        ('--write-full-results-to=%s' % temp_file_name)
    ]
    ret = unittest_runner.Run(config, passed_args=passed_args + extra_args)
    self.assertEquals(ret, 0)
    with open(temp_file_name) as f:
      self._test_result = json.load(f)
    (actual_successes,
     actual_failures,
     actual_skips) = self._ExtractTestResults(self._test_result)
    self.assertEquals(set(actual_failures), set(failures))
    self.assertEquals(set(actual_successes), set(successes))
    self.assertEquals(set(actual_skips), set(skips))
  finally:
    os.remove(temp_file_name)

def _RunTest(self, test_filter, expected_failures, expected_successes,
             expected_skips=None, test_name='SimpleTest', extra_args=None):
  expected_skips = expected_skips or []
  extra_args = extra_args or []
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=[],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  temp_file = tempfile.NamedTemporaryFile(delete=False)
  temp_file.close()
  temp_file_name = temp_file.name
  try:
    browser_test_runner.Run(config, [
        test_name,
        '--write-full-results-to=%s' % temp_file_name,
        '--test-filter=%s' % test_filter
    ] + extra_args)
    with open(temp_file_name) as f:
      self._test_result = json.load(f)
    (actual_successes,
     actual_failures,
     actual_skips) = self._ExtractTestResults(self._test_result)
    self.assertEquals(set(actual_failures), set(expected_failures))
    self.assertEquals(set(actual_successes), set(expected_successes))
    self.assertEquals(set(actual_skips), set(expected_skips))
  finally:
    os.remove(temp_file_name)

def FindDependencies(paths, options):
  # Verify arguments.
  for path in paths:
    if not os.path.exists(path):
      raise ValueError('Path does not exist: %s' % path)

  dependencies = path_set.PathSet()

  # Including __init__.py will include Telemetry and its dependencies.
  # If the user doesn't pass any arguments, we just have Telemetry.
  dependencies |= FindPythonDependencies(os.path.realpath(
      os.path.join(util.GetTelemetryDir(), 'telemetry', '__init__.py')))
  dependencies |= FindBootstrapDependencies(util.GetTelemetryDir())

  # Add dependencies.
  for path in paths:
    base_dir = os.path.dirname(os.path.realpath(path))

    dependencies.add(base_dir)
    dependencies |= FindBootstrapDependencies(base_dir)
    dependencies |= FindPythonDependencies(path)
    if options.include_page_set_data:
      dependencies |= FindPageSetDependencies(base_dir)

  # Remove excluded files.
  dependencies -= FindExcludedFiles(set(dependencies), options)

  return dependencies

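# Hedged usage sketch (not part of the original module): FindDependencies
# only needs `options` to expose the attributes it reads, such as
# include_page_set_data above; FindExcludedFiles may read more. The argparse
# wiring below is purely illustrative, not this module's real CLI.
import argparse

def _ExampleFindDependencies(argv):
  parser = argparse.ArgumentParser()
  parser.add_argument('paths', nargs='+')
  parser.add_argument('--include-page-set-data', action='store_true')
  options = parser.parse_args(argv)
  # argparse maps --include-page-set-data to options.include_page_set_data.
  return FindDependencies(options.paths, options)
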
def baseShardingTest(self, total_shards, shard_index, failures, successes):
  options = browser_test_runner.TestRunOptions()
  options.verbosity = 0
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=['a', 'b', 'c'],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  temp_file = tempfile.NamedTemporaryFile(delete=False)
  temp_file.close()
  temp_file_name = temp_file.name
  try:
    browser_test_runner.Run(
        config, options,
        ['SimpleShardingTest',
         '--write-abbreviated-json-results-to=%s' % temp_file_name,
         '--total-shards=%d' % total_shards,
         '--shard-index=%d' % shard_index])
    with open(temp_file_name) as f:
      test_result = json.load(f)
    self.assertEquals(test_result['failures'], failures)
    self.assertEquals(test_result['successes'], successes)
    self.assertEquals(test_result['valid'], True)
  finally:
    os.remove(temp_file_name)

def baseTest(self, mockInitDependencyManager, test_filter, failures,
             successes):
  options = browser_test_runner.TestRunOptions()
  options.verbosity = 0
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=['a', 'b', 'c'],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  temp_file = tempfile.NamedTemporaryFile(delete=False)
  temp_file.close()
  temp_file_name = temp_file.name
  try:
    browser_test_runner.Run(
        config, options,
        ['SimpleTest',
         '--write-abbreviated-json-results-to=%s' % temp_file_name,
         '--test-filter=%s' % test_filter])
    mockInitDependencyManager.assert_called_with(['a', 'b', 'c'])
    with open(temp_file_name) as f:
      test_result = json.load(f)
    self.assertEquals(test_result['failures'], failures)
    self.assertEquals(test_result['successes'], successes)
    self.assertEquals(test_result['valid'], True)
  finally:
    os.remove(temp_file_name)

def testIsPageSetFile(self):
  top_10_ps_dir = os.path.join(util.GetChromiumSrcDir(),
                               'tools/perf/page_sets/top_10.py')
  test_ps_dir = os.path.join(util.GetTelemetryDir(),
                             'unittest_data/test_page_set.py')
  page_set_dir = os.path.join(util.GetTelemetryDir(),
                              'telemetry/page/page_set.py')
  self.assertTrue(discover.IsPageSetFile(top_10_ps_dir))
  self.assertFalse(discover.IsPageSetFile(test_ps_dir))
  self.assertFalse(discover.IsPageSetFile(page_set_dir))

def _GetEnabledTests(self, browser_type, os_name, os_version_name,
                     supports_tab_control):
  runner = run_tests.typ.Runner()
  host = runner.host
  runner.top_level_dir = util.GetTelemetryDir()
  runner.args.tests = [
      host.join(util.GetTelemetryDir(), 'telemetry', 'unittest_util',
                'disabled_cases.py')
  ]
  possible_browser = MockPossibleBrowser(
      browser_type, os_name, os_version_name, supports_tab_control)
  runner.classifier = run_tests.GetClassifier(MockArgs(), possible_browser)
  _, test_set = runner.find_tests(runner.args)
  return set(test.name.split('.')[-1] for test in test_set.parallel_tests)

def _InstallIpfw(self):
  ipfw_bin = os.path.join(util.GetTelemetryDir(), 'bin', 'ipfw')
  ipfw_mod = os.path.join(util.GetTelemetryDir(), 'bin', 'ipfw_mod.ko')

  try:
    changed = cloud_storage.GetIfChanged(
        cloud_storage.INTERNAL_BUCKET, ipfw_bin)
    changed |= cloud_storage.GetIfChanged(
        cloud_storage.INTERNAL_BUCKET, ipfw_mod)
  except cloud_storage.CloudStorageError as e:
    logging.error(e)
    logging.error('You may proceed by manually installing dummynet. See: '
                  'http://info.iet.unipi.it/~luigi/dummynet/')
    sys.exit(1)

def _RunTest(self, test_filter, expected_failures, expected_successes,
             expected_skips=None, test_name='SimpleTest', expectations='',
             tags=None, extra_args=None):
  expected_skips = expected_skips or []
  tags = tags or []
  extra_args = extra_args or []
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=[],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  temp_file = tempfile.NamedTemporaryFile(delete=False)
  temp_file.close()
  temp_file_name = temp_file.name
  if expectations:
    expectations_file = tempfile.NamedTemporaryFile(delete=False)
    expectations_file.write(expectations)
    expectations_file.close()
    extra_args.extend(['-X', expectations_file.name] +
                      ['-x=%s' % tag for tag in tags])
  args = ([
      test_name,
      '--write-full-results-to=%s' % temp_file_name,
      '--test-filter=%s' % test_filter,
      # We don't want the underlying tests to report their results to
      # ResultDB.
      '--disable-resultsink',
  ] + extra_args)
  try:
    args = browser_test_runner.ProcessConfig(config, args)
    with binary_manager.TemporarilyReplaceBinaryManager(None):
      run_browser_tests.RunTests(args)
    with open(temp_file_name) as f:
      self._test_result = json.load(f)
    (actual_successes,
     actual_failures,
     actual_skips) = self._ExtractTestResults(self._test_result)
    self.assertEquals(set(actual_failures), set(expected_failures))
    self.assertEquals(set(actual_successes), set(expected_successes))
    self.assertEquals(set(actual_skips), set(expected_skips))
  finally:
    os.remove(temp_file_name)

def BaseShardingTest(self, total_shards, shard_index, failures, successes,
                     opt_abbr_input_json_file=None, opt_test_filter='',
                     opt_filter_tests_after_sharding=False,
                     opt_test_name_prefix=''):
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=[],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  temp_file = tempfile.NamedTemporaryFile(delete=False)
  temp_file.close()
  temp_file_name = temp_file.name
  opt_args = []
  if opt_abbr_input_json_file:
    opt_args += [
        '--read-abbreviated-json-results-from=%s' % opt_abbr_input_json_file
    ]
  if opt_test_filter:
    opt_args += ['--test-filter=%s' % opt_test_filter]
  if opt_filter_tests_after_sharding:
    opt_args += ['--filter-tests-after-sharding']
  if opt_test_name_prefix:
    opt_args += ['--test-name-prefix=%s' % opt_test_name_prefix]
  args = ([
      'SimpleShardingTest',
      '--write-full-results-to=%s' % temp_file_name,
      '--total-shards=%d' % total_shards,
      '--shard-index=%d' % shard_index
  ] + opt_args)
  try:
    args = browser_test_runner.ProcessConfig(config, args)
    with binary_manager.TemporarilyReplaceBinaryManager(None):
      run_browser_tests.RunTests(args)
    with open(temp_file_name) as f:
      test_result = json.load(f)
    (actual_successes,
     actual_failures, _) = self._ExtractTestResults(test_result)
    self.assertEquals(set(actual_failures), set(failures))
    self.assertEquals(set(actual_successes), set(successes))
  finally:
    os.remove(temp_file_name)

def Run(project_config, no_browser=False,
        disable_cloud_storage_io_during_test=False):
  args = sys.argv[1:]
  assert '--top-level-dir' not in args, (
      'Top level directory for running tests should be specified through '
      'the instance of telemetry.project_config.ProjectConfig.')
  assert '--client-config' not in args, (
      'Client config file to be used for telemetry should be specified '
      'through the instance of telemetry.project_config.ProjectConfig.')
  assert project_config.top_level_dir, 'Must specify top level dir for project'
  args.extend(['--top-level-dir', project_config.top_level_dir])
  for c in project_config.client_configs:
    args.extend(['--client-config', c])
  if no_browser and '--no-browser' not in args:
    args.extend(['--no-browser'])
  if project_config.default_chrome_root and '--chrome-root' not in args:
    args.extend(['--chrome-root', project_config.default_chrome_root])
  if disable_cloud_storage_io_during_test:
    args.extend(['--disable-cloud-storage-io'])

  env = os.environ.copy()
  telemetry_dir = util.GetTelemetryDir()
  if 'PYTHONPATH' in env:
    env['PYTHONPATH'] = os.pathsep.join([env['PYTHONPATH'], telemetry_dir])
  else:
    env['PYTHONPATH'] = telemetry_dir

  path_to_run_tests = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                   'run_tests.py')
  return subprocess.call([sys.executable, path_to_run_tests] + args, env=env)

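# Minimal invocation sketch, reusing the ProjectConfig shape seen in the
# tests above. This assumes ProjectConfig defaults the remaining attributes
# Run reads (e.g. default_chrome_root); the 'examples' directory is used
# purely for illustration.
if __name__ == '__main__':
  example_config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=[])
  # Run forwards sys.argv[1:] plus the config-derived flags to run_tests.py.
  sys.exit(Run(example_config, no_browser=True))
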
def Start(self, local_server_controller):
  assert self._subprocess is None
  self._local_server_controller = local_server_controller

  self.host_ip = local_server_controller.host_ip

  server_args = self.GetBackendStartupArgs()
  server_args_as_json = json.dumps(server_args)
  server_module_name = self._server_backend_class.__module__

  self._devnull = open(os.devnull, 'w')
  cmd = [
      sys.executable, '-m', __name__, 'run_backend',
      server_module_name, self._server_backend_class.__name__,
      server_args_as_json,
  ]

  env = os.environ.copy()
  env['PYTHONPATH'] = os.pathsep.join(sys.path)

  self._subprocess = subprocess.Popen(
      cmd, cwd=util.GetTelemetryDir(), env=env, stdout=subprocess.PIPE)

  named_ports = self._GetNamedPortsFromBackend()
  named_port_pair_map = {'http': None, 'https': None, 'dns': None}
  for name, port in named_ports:
    assert name in named_port_pair_map, '%s forwarding is unsupported' % name
    named_port_pair_map[name] = forwarders.PortPair(
        port, local_server_controller.GetRemotePort(port))
  self.forwarder = local_server_controller.CreateForwarder(
      forwarders.PortPairs(**named_port_pair_map))

def WarnIfMissingCredentials(self, page_set):
  num_pages_missing_login = 0
  missing_credentials = set()
  for page in page_set:
    if page.credentials and not self.CanLogin(page.credentials):
      num_pages_missing_login += 1
      missing_credentials.add(page.credentials)

  if num_pages_missing_login > 0:
    files_to_tweak = []
    if page_set.credentials_path:
      files_to_tweak.append(
          os.path.relpath(os.path.join(os.path.dirname(page_set.file_path),
                                       page_set.credentials_path)))
    files_to_tweak.append('~/.telemetry-credentials')

    example_credentials_file = os.path.join(
        util.GetTelemetryDir(), 'examples', 'credentials_example.json')

    logging.warning("""
Credentials for %s were not found. %i pages will not be tested.

To fix this, either add svn-internal to your .gclient using
http://goto/read-src-internal, or add your own credentials to:
  %s
An example credentials file you can copy from is here:
  %s\n""" % (', '.join(missing_credentials), num_pages_missing_login,
             ' or '.join(files_to_tweak), example_credentials_file))

def UpdateDependency(dependency, dep_local_path, version,
                     os_name=None, arch_name=None):
  config = os.path.join(
      util.GetTelemetryDir(), 'telemetry', 'binary_dependencies.json')

  if not os_name:
    assert not arch_name, 'arch_name is specified but not os_name'
    os_name = py_utils.GetHostOsName()
    arch_name = py_utils.GetHostArchName()
  else:
    assert arch_name, 'os_name is specified but not arch_name'

  dep_platform = '%s_%s' % (os_name, arch_name)

  c = base_config.BaseConfig(config, writable=True)
  try:
    old_version = c.GetVersion(dependency, dep_platform)
    print('Updating from version: {}'.format(old_version))
  except ValueError:
    raise RuntimeError(
        ('binary_dependencies.json entry for %s missing or invalid; please '
         'add it first! (need download_path and path_within_archive)') %
        dep_platform)

  if dep_local_path:
    c.AddCloudStorageDependencyUpdateJob(
        dependency, dep_platform, dep_local_path, version=version,
        execute_job=True)

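# Illustrative call (the dependency name, archive path, and version hash
# below are hypothetical): omitting os_name/arch_name makes UpdateDependency
# resolve the host platform via py_utils, as handled above.
def _ExampleUpdateForHost():
  UpdateDependency('example_binary', '/tmp/example_binary.zip', '0123abcd')
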
def DriverCreator():
  # Nested factory: relies on self._architecture from the enclosing scope.
  ie_driver_exe = os.path.join(
      util.GetTelemetryDir(), 'bin',
      'IEDriverServer_%s.exe' % self._architecture)
  cloud_storage.GetIfChanged(cloud_storage.PUBLIC_BUCKET, ie_driver_exe)
  return webdriver.Ie(executable_path=ie_driver_exe)

def WarnIfMissingCredentials(self, page_set):
  num_pages_missing_login = 0
  missing_credentials = set()
  for page in page_set:
    if page.credentials and not self.CanLogin(page.credentials):
      num_pages_missing_login += 1
      missing_credentials.add(page.credentials)

  if num_pages_missing_login > 0:
    files_to_tweak = []
    if page_set.credentials_path:
      files_to_tweak.append(
          os.path.relpath(os.path.join(os.path.dirname(page_set.file_path),
                                       page_set.credentials_path)))
    files_to_tweak.append('~/.telemetry-credentials')

    example_credentials_file = os.path.join(
        util.GetTelemetryDir(), 'examples', 'credentials_example.json')

    logging.warning("""
Credentials for %s were not found. %i pages will not be tested.

To fix this, either follow the instructions to authenticate to gsutil here:
http://www.chromium.org/developers/telemetry/upload_to_cloud_storage,
or add your own credentials to:
  %s
An example credentials file you can copy from is here:
  %s\n""" % (', '.join(missing_credentials), num_pages_missing_login,
             ' or '.join(files_to_tweak), example_credentials_file))

def __init__(self):
  super(PywebsocketServerBackend, self).__init__()
  self.port = 8001
  self.base_dir = os.path.relpath(
      os.path.join(path_util.GetChromiumSrcDir(),
                   'third_party', 'pywebsocket', 'src'),
      start=util.GetTelemetryDir())

def Run(project_config, no_browser=False,
        disable_cloud_storage_io_during_test=False, passed_args=None):
  args = ProcessConfig(project_config, passed_args or sys.argv[1:],
                       no_browser, disable_cloud_storage_io_during_test)
  env = os.environ.copy()
  telemetry_dir = util.GetTelemetryDir()
  if 'PYTHONPATH' in env:
    env['PYTHONPATH'] = os.pathsep.join([env['PYTHONPATH'], telemetry_dir])
  else:
    env['PYTHONPATH'] = telemetry_dir

  path_to_run_tests = os.path.join(
      os.path.abspath(os.path.dirname(__file__)), 'run_tests.py')
  exit_code = subprocess.call([sys.executable, path_to_run_tests] + args,
                              env=env)
  if exit_code:
    print('**Non-zero exit code**')
    print('If you don\'t see any error stack, this could have been a '
          'native crash. Consider installing faulthandler '
          '(https://faulthandler.readthedocs.io/) for a more useful error '
          'message.')
  return exit_code

def ListOfValuesFromListOfDicts(value_dicts, page_dict):
  """Deserializes a list of value dicts into values.

  Given a list of value dicts produced by AsDict, this method deserializes
  the dicts given a dict mapping page IDs to pages. This method performs
  memoization for deserializing a list of values efficiently, whereas
  FromDict is meant to handle one-offs.

  Args:
    value_dicts: a list of value dicts produced by AsDict() on a value
        subclass.
    page_dict: a dictionary mapping IDs to page objects.
  """
  value_dir = os.path.dirname(__file__)
  value_classes = discover.DiscoverClasses(
      value_dir, util.GetTelemetryDir(), Value, index_by_class_name=True)

  value_json_types = dict(
      (value_classes[x].GetJSONTypeName(), x) for x in value_classes)

  values = []
  for value_dict in value_dicts:
    value_class = value_classes[value_json_types[value_dict['type']]]
    assert 'FromDict' in value_class.__dict__, \
        'Subclass doesn\'t override FromDict'
    values.append(value_class.FromDict(value_dict, page_dict))

  return values

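# Round-trip sketch (assumptions: each value implements AsDict(), and each
# page carries an `id` attribute, matching the page_dict contract documented
# above).
def _ExampleValueRoundTrip(values, pages):
  value_dicts = [v.AsDict() for v in values]
  page_dict = dict((p.id, p) for p in pages)
  # Deserializing should reproduce the original list of values.
  return ListOfValuesFromListOfDicts(value_dicts, page_dict)
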
def _RunBrowserTest(self, modulename, classname, test_name,
                    expectation='Pass', test_tags='foo', extra_args=None,
                    expected_exit_code=0, include_expectations=True):
  extra_args = extra_args or []
  if include_expectations:
    expectations = ('# tags: [ foo bar mac ]\n'
                    'crbug.com/123 [ %s ] '
                    'browser_tests.%s.%s.%s'
                    ' [ %s ]')
    expectations = expectations % (test_tags, modulename, classname,
                                   test_name, expectation)
    expectations_file = tempfile.NamedTemporaryFile(delete=False)
    expectations_file.write(expectations)
    expectations_file.close()
    expectations_file_paths = [expectations_file.name]
  else:
    expectations_file_paths = []
  results = tempfile.NamedTemporaryFile(delete=False)
  results.close()
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=[],
      expectations_files=expectations_file_paths,
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  try:
    ret = browser_test_runner.Run(config, [
        '%s' % classname,
        ('--write-full-results-to=%s' % results.name),
        ('--test-filter=.*%s.*' % test_name)
    ] + extra_args)
    self.assertEqual(ret, expected_exit_code)
    with open(results.name) as f:
      test_result = json.load(f)
  finally:
    if expectations_file_paths:
      os.remove(expectations_file.name)
    os.remove(results.name)
  return test_result

def ZipDependencies(paths, dependencies, options):
  base_dir = os.path.dirname(os.path.realpath(util.GetChromiumSrcDir()))

  with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
    # Add dependencies to archive.
    for path in dependencies:
      path_in_archive = os.path.join(
          'telemetry', os.path.relpath(path, base_dir))
      zip_file.write(path, path_in_archive)

    # Add wrapper scripts for the executable paths, for ease of use.
    for path in paths:
      link_info = zipfile.ZipInfo(
          os.path.join('telemetry', os.path.basename(path)))
      link_info.create_system = 3  # Unix attributes.
      # 0o100000 marks a regular file; 0o777 is the permission bits
      # rwxrwxrwx.
      link_info.external_attr = 0o100777 << 16
      relative_path = os.path.relpath(path, base_dir)
      link_script = (
          '#!/usr/bin/env python\n\n'
          'import os\n'
          'import sys\n\n\n'
          'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
          'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
          % relative_path)
      zip_file.writestr(link_info, link_script)

    # Add gsutil to the archive, if it's available. The gsutil in
    # depot_tools is modified to allow authentication using prodaccess.
    # TODO: If there's a gsutil in telemetry/third_party/, bootstrap_deps
    # will include it. Then there will be two copies of gsutil at the same
    # location in the archive. This can be confusing for users.
    gsutil_path = os.path.realpath(cloud_storage.FindGsutil())
    if cloud_storage.SupportsProdaccess(gsutil_path):
      gsutil_base_dir = os.path.join(os.path.dirname(gsutil_path), os.pardir)
      gsutil_dependencies = path_set.PathSet()
      gsutil_dependencies.add(os.path.dirname(gsutil_path))
      # Also add modules from depot_tools that are needed by gsutil.
      gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'boto'))
      gsutil_dependencies.add(
          os.path.join(gsutil_base_dir, 'retry_decorator'))
      gsutil_dependencies -= FindExcludedFiles(
          set(gsutil_dependencies), options)

      # Also add upload.py to the archive from depot_tools, if it is
      # available. This allows us to post patches without requiring a full
      # depot_tools install. There's no real point in including upload.py if
      # we do not also have gsutil, which is why this is inside the gsutil
      # block.
      gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'upload.py'))

      for path in gsutil_dependencies:
        path_in_archive = os.path.join(
            'telemetry', os.path.relpath(util.GetTelemetryDir(), base_dir),
            'third_party', os.path.relpath(path, gsutil_base_dir))
        zip_file.write(path, path_in_archive)

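# Hedged end-to-end sketch tying the two helpers together: `options` must
# carry every attribute both functions read (include_page_set_data, zip, and
# whatever FindExcludedFiles expects), which is assumed here.
def _ExampleBundle(paths, options):
  dependencies = FindDependencies(paths, options)
  ZipDependencies(paths, dependencies, options)
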
class WebComponentsProject(trace_viewer_project.TraceViewerProject):
  telemetry_path = os.path.abspath(util.GetTelemetryDir())
  d3_path = os.path.abspath(os.path.join(telemetry_path,
                                         'third_party', 'd3'))

  def __init__(self):
    super(WebComponentsProject, self).__init__(
        [self.telemetry_path, self.d3_path])

def _RunTest(self, expected_failures, expected_successes, expected_skips,
             expected_return_code=0, test_name='', extra_args=None,
             no_browser=True):
  extra_args = extra_args or []
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=[],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  temp_file = tempfile.NamedTemporaryFile(delete=False)
  temp_file.close()
  temp_file_name = temp_file.name
  try:
    passed_args = []
    if test_name:
      passed_args.append(test_name)
    if no_browser:
      passed_args.append('--no-browser')
    passed_args.append('--write-full-results-to=%s' % temp_file_name)
    ret = unittest_runner.Run(config, passed_args=passed_args + extra_args)
    assert ret == expected_return_code, (
        'actual return code %d does not equal the expected return code %d' %
        (ret, expected_return_code))
    with open(temp_file_name) as f:
      self._test_result = json.load(f)
    (actual_successes,
     actual_failures,
     actual_skips) = self._ExtractTestResults(self._test_result)
    # Keep the asserts below: without them we may miss tests that run when
    # they are not supposed to.
    self.assertEquals(set(actual_failures), set(expected_failures))
    self.assertEquals(set(actual_successes), set(expected_successes))
    self.assertEquals(set(actual_skips), set(expected_skips))
  finally:
    os.remove(temp_file_name)
  return actual_failures, actual_successes, actual_skips

def _RunTestsWithExpectationsFile(self, full_test_name, expectations,
                                  test_tags='foo', extra_args=None,
                                  expected_exit_code=0):
  extra_args = extra_args or []
  test_expectations = (
      '# tags: [ foo bar mac ]\n'
      '# results: [ {expectations} ]\n'
      'crbug.com/123 [ {tags} ] {test} [ {expectations} ]').format(
          expectations=expectations, tags=test_tags, test=full_test_name)
  expectations_file = tempfile.NamedTemporaryFile(delete=False)
  expectations_file.write(test_expectations)
  expectations_file.close()
  results = tempfile.NamedTemporaryFile(delete=False)
  results.close()
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=[],
      expectations_files=[expectations_file.name],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  try:
    passed_args = ([
        full_test_name, '--no-browser',
        ('--write-full-results-to=%s' % results.name)
    ] + ['--tag=%s' % tag for tag in test_tags.split()])
    ret = unittest_runner.Run(config, passed_args=passed_args + extra_args)
    self.assertEqual(ret, expected_exit_code)
    with open(results.name) as f:
      self._test_result = json.load(f)
  finally:
    os.remove(expectations_file.name)
    os.remove(results.name)
  return self._test_result

def _InstallAvconv(self):
  avconv_bin = support_binaries.FindPath('avconv', self.GetOSName())
  os.environ['PATH'] += os.pathsep + os.path.join(util.GetTelemetryDir(),
                                                  'bin')

  try:
    cloud_storage.GetIfChanged(avconv_bin, cloud_storage.INTERNAL_BUCKET)
  except cloud_storage.CloudStorageError as e:
    logging.error(e)
    logging.error('You may proceed by manually installing avconv via:\n'
                  'sudo apt-get install libav-tools')
    sys.exit(1)

def _InstallAvconv(self):
  telemetry_bin_dir = os.path.join(util.GetTelemetryDir(), 'bin')
  avconv_bin = os.path.join(telemetry_bin_dir, 'avconv')
  os.environ['PATH'] += os.pathsep + telemetry_bin_dir

  try:
    cloud_storage.GetIfChanged(cloud_storage.INTERNAL_BUCKET, avconv_bin)
  except cloud_storage.CloudStorageError as e:
    logging.error(e)
    logging.error('You may proceed by manually installing avconv via:\n'
                  'sudo apt-get install libav-tools')
    sys.exit(1)

def testSkipOnlyWhenTestMatchesTestFilterWithBrowser(self):
  test_name = 'unit_tests_test.ExampleTests.test_also_fail'
  runner = run_tests.typ.Runner()
  runner.args.test_filter = test_name
  runner.args.skip.append('*fail')
  runner.top_level_dirs = [os.path.join(util.GetTelemetryDir(), 'examples')]
  possible_browser = MockPossibleBrowser('system', 'mac', 'mavericks', True)
  runner.classifier = run_tests.GetClassifier(runner, possible_browser)
  _, test_set = runner.find_tests(runner.args)
  self.assertEqual(len(test_set.tests_to_skip), 1)
  self.assertEqual(test_set.tests_to_skip[0].name, test_name)

def GetProfileDir(profile_type):
  """Given a |profile_type| (as returned by GetProfileTypes()), return the
  directory to use for that profile or None if the profile doesn't need a
  profile directory (e.g. using the browser default profile).
  """
  if profile_type in BASE_PROFILE_TYPES:
    return None

  path = os.path.join(util.GetTelemetryDir(), 'telemetry', 'internal',
                      'browser_profiles')
  assert os.path.exists(path)
  return path

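# Usage sketch: a None return means "use the browser's default profile".
# 'typical_user' is a hypothetical profile type, and the Chrome-style
# --user-data-dir flag is shown only for illustration.
def _ExampleBrowserArgs(profile_type='typical_user'):
  profile_dir = GetProfileDir(profile_type)
  if profile_dir is None:
    return []  # Let the browser pick its default profile.
  return ['--user-data-dir=%s' % profile_dir]
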
def _RunBrowserTest(self, modulename, classname, test_name, expectation,
                    test_tags='foo'):
  expectations = ('# tags: [ foo bar mac ]\n'
                  'crbug.com/123 [ %s ] '
                  'browser_tests.%s.%s.%s'
                  ' [ %s ]')
  expectations = expectations % (test_tags, modulename, classname,
                                 test_name, expectation)
  expectations_file = tempfile.NamedTemporaryFile(delete=False)
  expectations_file.write(expectations)
  expectations_file.close()
  results = tempfile.NamedTemporaryFile(delete=False)
  results.close()
  config = project_config.ProjectConfig(
      top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
      client_configs=[],
      expectations_files=[expectations_file.name],
      benchmark_dirs=[
          os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')
      ])
  try:
    browser_test_runner.Run(config, [
        '%s' % classname,
        '--write-full-results-to=%s' % results.name,
        '--test-filter=.*%s.*' % test_name
    ])
    with open(results.name) as f:
      test_result = json.load(f)
  finally:
    os.remove(expectations_file.name)
    os.remove(results.name)
  return test_result