def MainTestWrapper(options):
  """Run the full device-test sequence for this bot.

  Restarts adb, spawns a logcat monitor, optionally reboots and provisions
  the attached devices, runs the test suites selected by
  options.test_filter, then dumps the collected logcat and test reports.

  Args:
    options: options object (reboot, install, test_filter, experimental,
        target, factory_properties are read here).
  """
  # Restart adb to work around bugs, sleep to wait for usb discovery.
  RunCmd(['adb', 'kill-server'])
  RunCmd(['adb', 'start-server'])
  RunCmd(['sleep', '1'])

  # Spawn logcat monitor
  logcat_dir = os.path.join(CHROME_SRC, 'out/logcat')
  shutil.rmtree(logcat_dir, ignore_errors=True)
  SpawnCmd(['build/android/adb_logcat_monitor.py', logcat_dir])

  # Wait for logcat_monitor to pull existing logcat
  RunCmd(['sleep', '5'])

  if options.reboot:
    RebootDevices()

  # Device check and alert emails
  buildbot_report.PrintNamedStep('device_status_check')
  RunCmd(['build/android/device_status_check.py'])

  # Provision devices
  buildbot_report.PrintNamedStep('provision_devices')
  target = options.factory_properties.get('target', 'Debug')
  RunCmd(['build/android/provision_devices.py', '-t', target])

  if options.install:
    test_obj = INSTRUMENTATION_TESTS[options.install]
    InstallApk(options, test_obj, print_step=True)

  # Each test_filter keyword selects an independent group of steps.
  if 'chromedriver' in options.test_filter:
    RunChromeDriverTests()
  if 'unit' in options.test_filter:
    RunTestSuites(options, gtest_config.STABLE_TEST_SUITES)
  if 'ui' in options.test_filter:
    for test in INSTRUMENTATION_TESTS.itervalues():
      RunInstrumentationSuite(options, test)
  if 'webkit' in options.test_filter:
    RunTestSuites(options, [
        gtest_config.Apk('webkit_unit_tests'),
        gtest_config.Apk('TestWebKitAPI'),
    ])
    RunWebkitLint(options.target)
  if 'webkit_layout' in options.test_filter:
    RunWebkitLayoutTests(options)

  if options.experimental:
    RunTestSuites(options, gtest_config.EXPERIMENTAL_TEST_SUITES)
    RunBrowserTestSuite(options)

  # Print logcat, kill logcat monitor
  buildbot_report.PrintNamedStep('logcat_dump')
  RunCmd(['build/android/adb_logcat_printer.py', logcat_dir])

  # Cat each per-suite log into the build output, then delete it.
  buildbot_report.PrintNamedStep('test_report')
  for report in glob.glob(
      os.path.join(CHROME_SRC, 'out', options.target, 'test_logs', '*.log')):
    RunCmd(['cat', report])
    os.remove(report)
def RunHooks(build_type): RunCmd([SrcPath('build', 'landmines.py')]) build_path = SrcPath('out', build_type) landmine_path = os.path.join(build_path, '.landmines_triggered') clobber_env = os.environ.get('BUILDBOT_CLOBBER') if clobber_env or os.path.isfile(landmine_path): buildbot_report.PrintNamedStep('Clobber') if not clobber_env: print 'Clobbering due to triggered landmines:' with open(landmine_path) as f: print f.read() RunCmd(['rm', '-rf', build_path]) buildbot_report.PrintNamedStep('runhooks') RunCmd(['gclient', 'runhooks'], halt_on_failure=True)
def RunWebkitLayoutTests(options):
  """Run layout tests on an actual device."""
  buildbot_report.PrintNamedStep('webkit_tests')
  builder = options.build_properties.get('buildername', '')
  args = [
      '--no-show-results',
      '--no-new-test-results',
      '--full-results-html',
      '--clobber-old-results',
      '--exit-after-n-failures', '5000',
      '--exit-after-n-crashes-or-timeouts', '100',
      '--debug-rwt-logging',
      '--results-directory', '..layout-test-results',
      '--target', options.target,
      '--builder-name', builder,
      '--build-number', options.build_properties.get('buildnumber', ''),
      '--master-name', options.build_properties.get('mastername', ''),
      '--build-name', builder,
      '--platform=chromium-android',
  ]

  # Optional flags are forwarded only when present in factory_properties,
  # with underscores converted to dashes for the command line.
  for prop in ('test_results_server', 'driver_name', 'additional_drt_flag'):
    if prop in options.factory_properties:
      args += ['--%s' % prop.replace('_', '-'),
               options.factory_properties.get(prop)]

  # Each entry is a path-component tuple relative to the source root.
  for parts in options.factory_properties.get('additional_expectations_files',
                                              []):
    args.append(
        '--additional-expectations=%s' % os.path.join(CHROME_SRC, *parts))

  RunCmd(['webkit/tools/layout_tests/run_webkit_tests.py'] + args)
def RunChromeDriverTests():
  """Run all the steps for running chromedriver tests."""
  buildbot_report.PrintNamedStep('chromedriver_annotation')
  package_flag = ('--android-package=%s' %
                  constants.CHROMIUM_TEST_SHELL_PACKAGE)
  RunCmd(['chrome/test/chromedriver/run_buildbot_steps.py', package_flag])
def main():
  """Check the status of attached Android devices and report it.

  Prints a buildbot step with per-device details, flags a buildbot warning
  when any device reports errors, then checks for devices that have gone
  missing relative to the stored device list.
  """
  parser = optparse.OptionParser()
  parser.add_option('', '--out-dir',
                    help='Directory where the device path is stored',
                    default=os.path.join(os.path.dirname(__file__),
                                         '..', '..', 'out'))
  options, args = parser.parse_args()
  if args:
    parser.error('Unknown options %s' % args)
  buildbot_report.PrintNamedStep('Device Status Check')
  devices = android_commands.GetAttachedDevices()
  types, builds, reports, errors = [], [], [], []
  if devices:
    # DeviceInfo yields one (type, build, report, errors) tuple per device;
    # zip(*...) transposes them into four parallel sequences.
    types, builds, reports, errors = zip(
        *[DeviceInfo(dev) for dev in devices])
  unique_types = list(set(types))
  unique_builds = list(set(builds))
  buildbot_report.PrintMsg('Online devices: %d. Device types %s, builds %s' %
                           (len(devices), unique_types, unique_builds))
  print '\n'.join(reports)
  # Prefix each device error with its serial so the warning is actionable.
  full_errors = []
  for serial, device_errors in zip(devices, errors):
    full_errors.extend('%s: %s' % (serial, error)
                       for error in device_errors)
  if full_errors:
    buildbot_report.PrintWarning()
    print '\n'.join(full_errors)
  CheckForMissingDevices(options, devices)
def RunInstrumentationSuite(options, test):
  """Manages an invocation of run_instrumentation_tests.py.

  Installs the test's APK, then runs its instrumentation tests with flags
  derived from the build target and options.

  Args:
    options: options object (target, asan, upload_to_flakiness_server).
    test: An I_TEST namedtuple (name, test_apk, test_data,
        host_driven_root are read here).
  """
  buildbot_report.PrintNamedStep('%s_instrumentation_tests' %
                                 test.name.lower())
  InstallApk(options, test)
  args = [
      '--test-apk', test.test_apk, '--test_data', test.test_data,
      '--verbose', '-I'
  ]
  if options.target == 'Release':
    args.append('--release')
  if options.asan:
    args.append('--tool=asan')
  if options.upload_to_flakiness_server:
    args.append('--flakiness-dashboard-server=%s' %
                constants.UPSTREAM_FLAKINESS_SERVER)
  if test.host_driven_root:
    args.append('--python_test_root=%s' % test.host_driven_root)
  RunCmd(['build/android/run_instrumentation_tests.py'] + args)
def RunWebkitLint(target):
  """Lint WebKit's TestExpectation files."""
  buildbot_report.PrintNamedStep('webkit_lint')
  lint_cmd = ['webkit/tools/layout_tests/run_webkit_tests.py']
  lint_cmd += ['--lint-test-files', '--chromium', '--target', target]
  RunCmd(lint_cmd)
def ExtractBuild(options):
  """Download a previously archived build and unpack it into out/.

  Failures are reported as warnings (warning_code=1), not errors.
  """
  buildbot_report.PrintNamedStep('extract_build')
  extract_cmd = [os.path.join(SLAVE_SCRIPTS_DIR, 'extract_build.py'),
                 '--build-dir', SrcPath('build'),
                 '--build-output-dir', SrcPath('out')]
  extract_cmd.extend(bb_utils.EncodeProperties(options))
  RunCmd(extract_cmd, warning_code=1)
def ZipBuild(options):
  """Archive the build output directory, excluding bulky intermediates."""
  buildbot_report.PrintNamedStep('zip_build')
  zip_cmd = [os.path.join(SLAVE_SCRIPTS_DIR, 'zip_build.py'),
             '--src-dir', constants.DIR_SOURCE_ROOT,
             '--build-dir', SrcPath('out'),
             '--exclude-files',
             'lib.target,gen,android_webview,jingle_unittests']
  RunCmd(zip_cmd + bb_utils.EncodeProperties(options))
def Compile(build_type, args, experimental=False):
  """Compile the given targets with ninja + goma via the slave script.

  Args:
    build_type: build flavor passed as --target.
    args: list of build target names.
    experimental: when True, compile each target in its own non-flunking
        step; otherwise compile all targets in a single halting step.
  """
  base_cmd = [
      os.path.join(SLAVE_SCRIPTS_DIR, 'compile.py'),
      '--build-tool=ninja',
      '--compiler=goma',
      '--target=%s' % build_type,
      '--goma-dir=%s' % os.path.join(bb_utils.BB_BUILD_DIR, 'goma'),
  ]
  if not experimental:
    # Normal compile: one step, failure halts the build.
    buildbot_report.PrintNamedStep('compile')
    RunCmd(base_cmd + ['--build-args=%s' % ' '.join(args)],
           halt_on_failure=True)
    return
  # Experimental targets: one step each, failures do not flunk the build.
  for target_name in args:
    buildbot_report.PrintNamedStep('Experimental Compile %s' % target_name)
    RunCmd(base_cmd + ['--build-args=%s' % target_name],
           flunk_on_failure=False)
def Compile(options):
  """Run gclient hooks, then compile the configured build targets.

  Regular targets halt the build on failure; experimental targets get their
  own steps and may fail without flunking the build.
  """
  RunHooks(options.target)
  compile_cmd = [
      os.path.join(SLAVE_SCRIPTS_DIR, 'compile.py'),
      '--build-tool=ninja',
      '--compiler=goma',
      '--target=%s' % options.target,
      '--goma-dir=%s' % bb_utils.GOMA_DIR,
  ]
  buildbot_report.PrintNamedStep('compile')
  for target_name in options.build_targets.split(','):
    RunCmd(compile_cmd + ['--build-args=%s' % target_name],
           halt_on_failure=True)
  if options.experimental:
    for target_name in EXPERIMENTAL_TARGETS:
      buildbot_report.PrintNamedStep('Experimental Compile %s' % target_name)
      RunCmd(compile_cmd + ['--build-args=%s' % target_name],
             flunk_on_failure=False)
def ExtractBuild(factory_properties, build_properties):
  """Download and unpack an archived build via the slave extract script.

  Failures are reported as warnings (warning_code=1), not errors.
  """
  buildbot_report.PrintNamedStep('Download and extract build')
  extract_cmd = [os.path.join(SLAVE_SCRIPTS_DIR, 'extract_build.py')]
  extract_cmd += ['--build-dir', SrcPath('build')]
  extract_cmd += ['--build-output-dir', SrcPath('out')]
  extract_cmd += ['--factory-properties', json.dumps(factory_properties)]
  extract_cmd += ['--build-properties', json.dumps(build_properties)]
  RunCmd(extract_cmd, warning_code=1)
def FindBugs(is_release):
  """Run the findbugs diff check and the findbugs plugin tests.

  Args:
    is_release: when True, pass --release-build to both scripts.
  """
  buildbot_report.PrintNamedStep('findbugs')
  extra_args = ['--release-build'] if is_release else []
  RunCmd([SrcPath('build', 'android', 'findbugs_diff.py')] + extra_args)
  plugin_tests = SrcPath('tools', 'android', 'findbugs_plugin', 'test',
                         'run_findbugs_plugin_tests.py')
  RunCmd([plugin_tests] + extra_args)
def ZipBuild(factory_properties, build_properties):
  """Archive the output directory, skipping bulky intermediate files."""
  buildbot_report.PrintNamedStep('Zip build')
  zip_cmd = [os.path.join(SLAVE_SCRIPTS_DIR, 'zip_build.py')]
  zip_cmd += ['--src-dir', constants.DIR_SOURCE_ROOT]
  zip_cmd += ['--build-dir', SrcPath('out')]
  zip_cmd += ['--exclude-files',
              'lib.target,gen,android_webview,jingle_unittests']
  zip_cmd += ['--factory-properties', json.dumps(factory_properties)]
  zip_cmd += ['--build-properties', json.dumps(build_properties)]
  RunCmd(zip_cmd)
def BisectPerfRegression(_):
  """Prepare and run a perf-regression bisect in the checkout's parent dir."""
  buildbot_report.PrintNamedStep('Bisect Perf Regression')
  # Both scripts operate one level above the source root.
  working_dir = os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)
  RunCmd([SrcPath('tools', 'prepare-bisect-perf-regression.py'),
          '-w', working_dir])
  RunCmd([SrcPath('tools', 'run-bisect-perf-regression.py'),
          '-w', working_dir,
          '-p', bb_utils.GOMA_DIR])
def main(argv):
  """Parse instrumentation-test options and dispatch the test run.

  Args:
    argv: command-line arguments.

  Returns:
    The result of DispatchInstrumentationTests (exit status).
  """
  option_parser = optparse.OptionParser()
  test_options_parser.AddInstrumentationOptions(option_parser)
  options, args = option_parser.parse_args(argv)
  test_options_parser.ValidateInstrumentationOptions(option_parser, options,
                                                     args)
  run_tests_helper.SetLogLevel(options.verbose_count)
  buildbot_report.PrintNamedStep(
      'Instrumentation tests: %s - %s' %
      (', '.join(options.annotation), options.test_apk))
  return DispatchInstrumentationTests(options)
def MainTestWrapper(options):
  """Run this bot's device checks, optional install, and selected tests.

  Unlike a full test run, this variant returns early when no test filter
  is set, and only starts the logcat monitor once tests will actually run.

  Args:
    options: options object (install, test_filter, experimental, target).
  """
  # Device check and alert emails
  buildbot_report.PrintNamedStep('device_status_check')
  RunCmd(['build/android/device_status_check.py'], flunk_on_failure=False)

  if options.install:
    test_obj = INSTRUMENTATION_TESTS[options.install]
    InstallApk(options, test_obj, print_step=True)

  # Nothing selected to run; skip logcat monitoring and reporting entirely.
  if not options.test_filter:
    return

  # Spawn logcat monitor
  logcat_dir = os.path.join(CHROME_SRC, 'out/logcat')
  shutil.rmtree(logcat_dir, ignore_errors=True)
  SpawnCmd(['build/android/adb_logcat_monitor.py', logcat_dir])

  # Each test_filter keyword selects an independent group of steps.
  if 'unit' in options.test_filter:
    RunTestSuites(options, gtest_config.STABLE_TEST_SUITES)
  if 'ui' in options.test_filter:
    for test in INSTRUMENTATION_TESTS.itervalues():
      RunInstrumentationSuite(options, test)
  if 'webkit' in options.test_filter:
    RunTestSuites(options, ['webkit_unit_tests', 'TestWebKitAPI'])
    RunWebkitLint(options.target)
  if 'webkit_layout' in options.test_filter:
    RunWebkitLayoutTests(options)

  if options.experimental:
    RunTestSuites(options, gtest_config.EXPERIMENTAL_TEST_SUITES)

  # Print logcat, kill logcat monitor
  buildbot_report.PrintNamedStep('logcat_dump')
  RunCmd(['build/android/adb_logcat_printer.py', logcat_dir])

  # Cat each per-suite log into the build output, then delete it.
  buildbot_report.PrintNamedStep('test_report')
  for report in glob.glob(
      os.path.join(CHROME_SRC, 'out', options.target, 'test_logs', '*.log')):
    subprocess.Popen(['cat', report]).wait()
    os.remove(report)
def RunBrowserTestSuite(options):
  """Manages an invocation of run_browser_tests.py.

  Args:
    options: options object.
  """
  flags = ['--verbose']
  if options.target == 'Release':
    flags += ['--release']
  if options.asan:
    flags += ['--tool=asan']
  buildbot_report.PrintNamedStep(constants.BROWSERTEST_SUITE_NAME)
  RunCmd(['build/android/run_browser_tests.py'] + flags)
def InstallApk(options, test, print_step=False):
  """Install an apk to all phones.

  Args:
    options: options object
    test: An I_TEST namedtuple
    print_step: Print a buildbot step
  """
  if print_step:
    buildbot_report.PrintNamedStep('install_%s' % test.name.lower())
  install_cmd = ['build/android/adb_install_apk.py',
                 '--apk', test.apk,
                 '--apk_package', test.apk_package]
  if options.target == 'Release':
    install_cmd.append('--release')
  # Installation failure makes the rest of the run pointless: halt.
  RunCmd(install_cmd, halt_on_failure=True)
def RunTestSuites(options, suites):
  """Manages an invocation of run_tests.py.

  Args:
    options: options object.
    suites: List of suites to run.
  """
  common_args = ['--verbose']
  if options.target == 'Release':
    common_args.append('--release')
  if options.asan:
    common_args.append('--tool=asan')
  # One buildbot step per suite.
  for suite_name in suites:
    buildbot_report.PrintNamedStep(suite_name)
    RunCmd(['build/android/run_tests.py', '-s', suite_name] + common_args)
def RunInstrumentationSuite(options, test):
  """Manages an invocation of run_instrumentation_tests.py.

  Installs the test's APK, then runs its instrumentation tests.

  Args:
    options: options object (target and asan are read here).
    test: An I_TEST namedtuple (name, test_apk, test_data are read here).
  """
  buildbot_report.PrintNamedStep('%s_instrumentation_tests' %
                                 test.name.lower())
  InstallApk(options, test)
  args = ['--test-apk', test.test_apk, '--test_data', test.test_data,
          '-vvv', '-I']
  if options.target == 'Release':
    args.append('--release')
  if options.asan:
    args.append('--tool=asan')
  RunCmd(['build/android/run_instrumentation_tests.py'] + args)
def RunWebkitLayoutTests(options):
  """Run layout tests on an actual device."""
  buildbot_report.PrintNamedStep('webkit_tests')
  # NOTE(review): '..layout-test-results' looks like it may be missing a
  # path separator ('../layout-test-results') — confirm against the
  # run_webkit_tests.py --results-directory handling before changing it.
  RunCmd(['webkit/tools/layout_tests/run_webkit_tests.py',
          '--no-show-results',
          '--no-new-test-results',
          '--full-results-html',
          '--clobber-old-results',
          '--exit-after-n-failures', '5000',
          '--exit-after-n-crashes-or-timeouts', '100',
          '--debug-rwt-logging',
          '--results-directory', '..layout-test-results',
          '--target', options.target,
          '--builder-name', options.build_properties.get('buildername', ''),
          '--build-number', options.build_properties.get('buildnumber', ''),
          '--master-name', options.build_properties.get('mastername', ''),
          '--build-name', options.build_properties.get('buildername', ''),
          '--platform=chromium-android',
          '--test-results-server',
          options.factory_properties.get('test_results_server', '')])
def RebootDevices():
  """Reboot all attached and online devices."""
  buildbot_report.PrintNamedStep('Reboot devices')
  # Early return here to avoid presubmit dependence on adb,
  # which might not exist in this checkout.
  if TESTING:
    return
  devices = android_commands.GetAttachedDevices()
  print 'Rebooting: %s' % devices
  if devices:
    # Reboot all devices in parallel, one worker per device; the long
    # .get() timeout keeps the call interruptible (a bare .get() is not).
    pool = multiprocessing.Pool(len(devices))
    results = pool.map_async(RebootDeviceSafe, devices).get(99999)
    for device, result in zip(devices, results):
      # A truthy result from RebootDeviceSafe indicates a failure
      # (presumably an error message — confirm against its definition).
      if result:
        print '%s failed to startup.' % device
    if any(results):
      buildbot_report.PrintWarning()
    else:
      print 'Reboots complete.'
def RunTests(exe, device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name, fast_and_loose):
  """Runs the tests.

  Args:
    exe: boolean to state if we are using the exe based test runner
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    rebaseline: Whether or not to run tests in isolation and update the
        filter.
    timeout: Timeout for each test.
    performance_test: Whether or not performance test(s).
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool: Name of the Valgrind tool.
    log_dump_name: Name of log dump file.
    fast_and_loose: if set, skip copying data files.

  Returns:
    A TestResults object.
  """
  results = []
  if test_suite:
    # An explicit suite path was given; validate it before running.
    if not os.path.exists(test_suite):
      logging.critical('Unrecognized test suite %s, supported: %s',
                       test_suite, _TEST_SUITES)
      if test_suite in _TEST_SUITES:
        logging.critical('(Remember to include the path: out/Release/%s)',
                         test_suite)
      test_suite_basename = os.path.basename(test_suite)
      if test_suite_basename in _TEST_SUITES:
        logging.critical('Try "make -j15 %s"', test_suite_basename)
      else:
        logging.critical('Unrecognized test suite, supported: %s',
                         _TEST_SUITES)
      # Report the missing suite as a single failed result.
      return TestResults.FromRun([], [BaseTestResult(test_suite, '')],
                                 False, False)
    fully_qualified_test_suites = [test_suite]
  else:
    fully_qualified_test_suites = FullyQualifiedTestSuites(exe, _TEST_SUITES)
  debug_info_list = []
  print 'Known suites: ' + str(_TEST_SUITES)
  print 'Running these: ' + str(fully_qualified_test_suites)
  for t in fully_qualified_test_suites:
    buildbot_report.PrintNamedStep('Test suite %s' % os.path.basename(t))
    # 'not not' coerces log_dump_name to a bool for SingleTestRunner.
    test = SingleTestRunner(device, t, gtest_filter, test_arguments, timeout,
                            rebaseline, performance_test, cleanup_test_files,
                            tool, 0, not not log_dump_name, fast_and_loose)
    test.Run()
    results += [test.test_results]
    # Collect debug info.
    debug_info_list += [test.dump_debug_info]
    if rebaseline:
      test.UpdateFilter(test.test_results.failed)
    test.test_results.LogFull('Unit test', os.path.basename(t))
  # Zip all debug info outputs into a file named by log_dump_name.
  debug_info.GTestDebugInfo.ZipAndCleanResults(
      os.path.join(constants.CHROME_DIR, 'out', 'Release',
                   'debug_info_dumps'),
      log_dump_name, [d for d in debug_info_list if d])
  # NOTE(review): this annotates only the last suite's results; the merged
  # result is what gets returned below — confirm this is intended.
  PrintAnnotationForTestResults(test.test_results)
  return TestResults.FromTestResults(results)
def _RunATestSuite(options):
  """Run a single test suite.

  Helper for Dispatch() to allow stop/restart of the emulator across
  test bundles.  If using the emulator, we start it on entry and stop
  it on exit.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
  buildbot_report.PrintNamedStep(step_name)
  attached_devices = []
  buildbot_emulators = []
  if options.use_emulator:
    for n in range(options.emulator_count):
      t = time_profile.TimeProfile('Emulator launch %d' % n)
      avd_name = None
      if n > 0:
        # Creates a temporary AVD for the extra emulators.
        avd_name = 'run_tests_avd_%d' % n
      buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose)
      # Only the first launch kills any pre-existing emulators.
      buildbot_emulator.Launch(kill_all_emulators=n == 0)
      t.Stop()
      buildbot_emulators.append(buildbot_emulator)
      attached_devices.append(buildbot_emulator.device)
    # Wait for all emulators to boot completed.
    map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True),
        buildbot_emulators)
  elif options.test_device:
    attached_devices = [options.test_device]
  else:
    attached_devices = android_commands.GetAttachedDevices()
  if not attached_devices:
    logging.critical('A device must be attached and online.')
    buildbot_report.PrintError()
    return 1
  # Reset the test port allocation. It's important to do it before starting
  # to dispatch any tests.
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')
  if options.gtest_filter:
    # A gtest filter forces a single-device run.
    logging.warning('Sharding is not possible with these configurations.')
    attached_devices = [attached_devices[0]]
  sharder = TestSharder(
      attached_devices,
      options.test_suite,
      options.gtest_filter,
      options.test_arguments,
      options.timeout,
      options.cleanup_test_files,
      options.tool,
      options.log_dump,
      options.fast_and_loose,
      options.build_type,
      options.webkit,
      options.flakiness_dashboard_server)
  test_results = sharder.RunShardedTests()
  for buildbot_emulator in buildbot_emulators:
    buildbot_emulator.Shutdown()
  return len(test_results.failed)
def _RunATestSuite(options):
  """Run a single test suite.

  Helper for Dispatch() to allow stop/restart of the emulator across
  test bundles.  If using the emulator, we start it on entry and stop
  it on exit.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
  buildbot_report.PrintNamedStep(step_name)
  attached_devices = []
  buildbot_emulators = []
  if options.use_emulator:
    for n in range(options.emulator_count):
      t = TimeProfile('Emulator launch %d' % n)
      avd_name = None
      if n > 0:
        # Creates a temporary AVD for the extra emulators.
        avd_name = 'run_tests_avd_%d' % n
      buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose)
      # Only the first launch kills any pre-existing emulators.
      buildbot_emulator.Launch(kill_all_emulators=n == 0)
      t.Stop()
      buildbot_emulators.append(buildbot_emulator)
      attached_devices.append(buildbot_emulator.device)
    # Wait for all emulators to boot completed.
    map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True),
        buildbot_emulators)
  elif options.test_device:
    attached_devices = [options.test_device]
  else:
    attached_devices = android_commands.GetAttachedDevices()
  if not attached_devices:
    logging.critical('A device must be attached and online.')
    buildbot_report.PrintError()
    return 1
  # Reset the test port allocation. It's important to do it before starting
  # to dispatch any tests.
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')
  if options.performance_test or options.gtest_filter:
    # These configuration can't be split in multiple devices.
    attached_devices = [attached_devices[0]]
  sharder = TestSharder(attached_devices, options.test_suite,
                        options.gtest_filter, options.test_arguments,
                        options.timeout, options.rebaseline,
                        options.performance_test,
                        options.cleanup_test_files, options.tool,
                        options.log_dump, options.fast_and_loose,
                        options.build_type)
  test_results = sharder.RunShardedTests()
  for buildbot_emulator in buildbot_emulators:
    buildbot_emulator.Shutdown()
  # Another chance if we timed out? At this point It is safe(r) to
  # run fast and loose since we just uploaded all the test data and
  # binary.
  if test_results.timed_out and options.repeat:
    logging.critical('Timed out; repeating in fast_and_loose mode.')
    options.fast_and_loose = True
    options.repeat -= 1
    logging.critical('Repeats left: ' + str(options.repeat))
    # Recursive retry; terminates because options.repeat decreases.
    return _RunATestSuite(options)
  return len(test_results.failed)
def RunHooks():
  """Run gclient hooks as a named buildbot step; failure halts the build."""
  buildbot_report.PrintNamedStep('runhooks')
  RunCmd(['gclient', 'runhooks'], halt_on_failure=True)
def main(argv):
  """Resolve a bot config from the bot id and run its command list.

  Returns:
    0 on success; 1 when no config can be inferred; otherwise the OR of
    the subprocess return codes.
  """
  parser = optparse.OptionParser()

  def ConvertJson(option, _, value, parser):
    # optparse callback: store the option's value parsed as JSON.
    setattr(parser.values, option.dest, json.loads(value))

  parser.add_option('--build-properties',
                    action='callback', callback=ConvertJson, type='string',
                    default={},
                    help='build properties in JSON format')
  parser.add_option('--factory-properties',
                    action='callback', callback=ConvertJson, type='string',
                    default={},
                    help='factory properties in JSON format')
  parser.add_option('--bot-id', help='Specify bot id directly.')
  parser.add_option('--TESTING', action='store_true',
                    help='For testing: print, but do not run commands')
  options, args = parser.parse_args(argv[1:])
  if args:
    parser.error('Unused args: %s' % args)

  bot_id = options.bot_id or options.factory_properties.get('android_bot_id')
  if not bot_id:
    parser.error(
        'A bot id must be specified through option or factory_props.')

  # Get a BotConfig object looking first for an exact bot-id match. If no exact
  # match, look for a bot-id which is a substring of the specified id.
  # This allows similar bots to have unique IDs, but to share config.
  # If multiple substring matches exist, pick the longest one.
  bot_map = GetBotStepMap()
  bot_config = bot_map.get(bot_id)
  if not bot_config:
    substring_matches = filter(lambda x: x in bot_id, bot_map.iterkeys())
    if substring_matches:
      max_id = max(substring_matches, key=len)
      print 'Using config from id="%s" (substring match).' % max_id
      bot_config = bot_map[max_id]
  if not bot_config:
    print 'Error: config for id="%s" cannot be inferred.' % bot_id
    return 1

  print 'Using config:', bot_config

  def CommandToString(command):
    """Returns quoted command that can be run in bash shell."""
    return ' '.join(map(pipes.quote, command))

  command_objs = GetCommands(options, bot_config)
  for command_obj in command_objs:
    print 'Will run:', CommandToString(command_obj.command)

  return_code = 0
  for command_obj in command_objs:
    if command_obj.step_name:
      buildbot_report.PrintNamedStep(command_obj.step_name)
    command = command_obj.command
    print CommandToString(command)
    sys.stdout.flush()
    env = None
    if options.TESTING:
      # The bash command doesn't yet support the testing option.
      if command[0] == 'bash':
        continue
      env = dict(os.environ)
      env['BUILDBOT_TESTING'] = '1'
    # Accumulate failures; any non-zero code makes the result non-zero.
    return_code |= subprocess.call(command, cwd=CHROME_SRC, env=env)
  return return_code
def CheckWebViewLicenses():
  """Scan WebView sources for license issues; failures are warnings only."""
  buildbot_report.PrintNamedStep('Check licenses for WebView')
  scan_cmd = [SrcPath('android_webview', 'tools', 'webview_licenses.py'),
              'scan']
  RunCmd(scan_cmd, warning_code=1)