def main(server_constructor, sleep_fn=None, argv=None, description=None,
         **kwargs):
    host = Host()
    sleep_fn = sleep_fn or (lambda: host.sleep(1))
    parser = optparse.OptionParser(description=description,
                                   formatter=RawTextHelpFormatter())
    parser.add_option('--output-dir',
                      type=str,
                      default=None,
                      help='output directory, for log files etc.')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='print debug logs')
    for opt in configuration_options():
        parser.add_option(opt)
    options, _ = parser.parse_args(argv)

    configure_logging(
        logging_level=logging.DEBUG if options.verbose else logging.INFO,
        include_time=options.verbose)

    port_obj = host.port_factory.get(options=options)
    if not options.output_dir:
        options.output_dir = host.filesystem.join(
            port_obj.default_results_directory(), ARTIFACTS_SUB_DIR)
    # Create the output directory if it doesn't already exist.
    host.filesystem.maybe_make_directory(options.output_dir)

    def handler(signum, _):
        _log.debug('Received signal %d', signum)
        raise SystemExit

    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)

    server = server_constructor(port_obj, options.output_dir, **kwargs)
    server.start()
    print('Press Ctrl-C or `kill {}` to stop the server'.format(os.getpid()))
    try:
        while True:
            sleep_fn()
            if not server.alive():
                raise ServerError('Server is no longer listening')
    except ServerError as e:
        _log.error(e)
    except (SystemExit, KeyboardInterrupt):
        _log.info('Exiting...')
    finally:
        server.stop()
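# A minimal sketch (not from the original sources) of how a test might drive
# the server main() above: StubServer and its methods are hypothetical, and
# alive() returning False makes the loop raise ServerError on its first pass,
# so the injected sleep_fn never needs to block.
def example_drive_server_main():
    class StubServer(object):
        def __init__(self, port_obj, output_dir):
            self.stopped = False

        def start(self):
            pass

        def alive(self):
            return False  # Forces main() to exit via ServerError.

        def stop(self):
            self.stopped = True

    main(StubServer, sleep_fn=lambda: None, argv=[])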
def main():
    host = Host()
    importer = TestImporter(host)
    try:
        host.exit(importer.main())
    except KeyboardInterrupt:
        host.print_("Interrupted, exiting")
        host.exit(exit_codes.INTERRUPTED_EXIT_STATUS)
def _prepare_config(self, options, args, tool):
    results_directory = args[0]
    host = Host()

    print 'Parsing full_results.json...'
    results_json_path = host.filesystem.join(results_directory,
                                             'full_results.json')
    results_json = json_results_generator.load_json(host.filesystem,
                                                    results_json_path)

    port = tool.port_factory.get()
    layout_tests_directory = port.layout_tests_dir()
    platforms = host.filesystem.listdir(
        host.filesystem.join(layout_tests_directory, 'platform'))
    self._test_config = TestConfig(port, layout_tests_directory,
                                   results_directory, platforms, host)

    print 'Gathering current baselines...'
    self._gather_baselines(results_json)

    return {
        'test_config': self._test_config,
        'results_json': results_json,
        'platforms_json': {
            'platforms': platforms,
            'defaultPlatform': port.name(),
        },
    }
def main(argv):
    parser = optparse.OptionParser(usage='%prog [stats.json]')
    parser.description = ('Prints out lists of tests run on each worker '
                          'as per the stats.json file.')
    options, args = parser.parse_args(argv)

    if args and args[0]:
        stats_path = args[0]
    else:
        host = Host()
        stats_path = host.filesystem.join(
            host.port_factory.get().results_directory(), 'stats.json')

    with open(stats_path, 'r') as fp:
        stats_trie = json.load(fp)

    stats = convert_trie_to_flat_paths(stats_trie)
    stats_by_worker = {}
    for test_name, data in stats.items():
        worker = "worker/" + str(data["results"][0])
        if worker not in stats_by_worker:
            stats_by_worker[worker] = []
        test_number = data["results"][1]
        stats_by_worker[worker].append({
            "name": test_name,
            "number": test_number
        })

    for worker in sorted(stats_by_worker.keys()):
        print worker + ':'
        for test in sorted(stats_by_worker[worker],
                           key=lambda test: test["number"]):
            print test["name"]
        print
def main(argv, stderr, host=None):
    parser = optparse.OptionParser(
        option_list=platform_options(use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    parser.add_option(
        '--verbose',
        action='store_true',
        default=False,
        help='log extra details that may be helpful when debugging')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this allows
            # the user to run tests against the test platform interactively,
            # which is useful for debugging test failures.
            from blinkpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    if options.verbose:
        configure_logging(logging_level=logging.DEBUG, stream=stderr)
        # Print full stdout/stderr when a command fails.
        host.executive.error_output_limit = None
    else:
        # PRESUBMIT.py relies on our output, so don't include timestamps.
        configure_logging(logging_level=logging.INFO,
                          stream=stderr,
                          include_time=False)

    try:
        exit_status = run_checks(host, options)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
def main(argv, stdout, stderr):
    options, args = parse_args(argv)
    if options.platform and 'test' in options.platform \
            and 'browser_test' not in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the
        # user to run tests against the test platform interactively, which is
        # useful for debugging test failures.
        from blinkpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    try:
        port = host.port_factory.get(options.platform, options)
    except (NotImplementedError, ValueError) as error:
        # FIXME: is this the best way to handle unsupported port names?
        print >> stderr, str(error)
        return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS

    try:
        return run(port, options, args, stderr, stdout).exit_code
    # We need to still handle KeyboardInterrupt, at least for blinkpy
    # unittest cases.
    except KeyboardInterrupt:
        return exit_codes.INTERRUPTED_EXIT_STATUS
    except test_run_results.TestRunException as error:
        print >> stderr, error.msg
        return error.code
    except BaseException as error:
        if isinstance(error, Exception):
            print >> stderr, '\n%s raised: %s' % (error.__class__.__name__,
                                                  error)
            traceback.print_exc(file=stderr)
        return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
def main(server_constructor, input_fn=None, argv=None, description=None,
         **kwargs):
    input_fn = input_fn or raw_input

    parser = optparse.OptionParser(description=description,
                                   formatter=RawTextHelpFormatter())
    parser.add_option('--output-dir',
                      type=str,
                      default=None,
                      help='output directory, for log files etc.')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='print more information, including port numbers')
    for opt in configuration_options():
        parser.add_option(opt)
    options, _ = parser.parse_args(argv)

    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG if options.verbose else logging.INFO)

    host = Host()
    port_obj = host.port_factory.get(options=options)
    if not options.output_dir:
        options.output_dir = port_obj.default_results_directory()

    # Create the output directory if it doesn't already exist.
    port_obj.host.filesystem.maybe_make_directory(options.output_dir)

    server = server_constructor(port_obj, options.output_dir, **kwargs)
    server.start()
    try:
        _ = input_fn('Hit any key to stop the server and exit.')
    except (KeyboardInterrupt, EOFError):
        pass

    server.stop()
def main(args): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--additional-expectations", action="append", help="Paths to additional expectations files for WPT.") parser.add_argument("--android-product", default=None, choices=PRODUCTS, help="Android product argument for wpt runner.") parser.add_argument( '--ignore-default-expectations', action='store_true', help='Do not use the default set of test expectations files.' ' i.e TestExpectations, NeverFixTests, etc...') known_args, rest_args = parser.parse_known_args(args) options = optparse.Values(vars(known_args)) host = Host() if known_args.android_product: port = AndroidPort(host, product=known_args.android_product, options=options) else: port = host.port_factory.get(options=options) expectations = TestExpectations(port) metadata_builder = WPTMetadataBuilder(expectations, port) sys.exit(metadata_builder.run(rest_args))
def main():
    host = Host()
    exporter = TestExporter(host)
    try:
        success = exporter.main()
        host.exit(0 if success else 1)
    except KeyboardInterrupt:
        host.print_('Interrupted, exiting')
        host.exit(exit_codes.INTERRUPTED_EXIT_STATUS)
def get_updater(host=None, args=None):
    host = host or Host()
    args = args or []
    if ('--update-android-expectations-only' in args
            or any(arg.startswith('--android-product') for arg in args)):
        return AndroidWPTExpectationsUpdater(host, args)
    else:
        return WPTExpectationsUpdater(host, args)
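# Illustrative dispatch for get_updater() (the argument values below are
# hypothetical): either Android-related flag routes to the Android-specific
# updater, and everything else falls through to the default.
updater = get_updater(args=['--android-product=chrome_android'])
# -> AndroidWPTExpectationsUpdater
updater = get_updater(args=[])
# -> WPTExpectationsUpdater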
def test_use_bb_to_get_results(self):
    actual_mp = {'tests': {'test.html': {'actual': 'PASS'}}}
    baseline_mp = copy.deepcopy(actual_mp)
    baseline_mp['tests']['test.html']['actual'] = 'FAIL'
    host = Host()

    def process_cmds(cmd_args):
        if 'token' in cmd_args:
            return '00000'
        elif ('weblayer_shell_wpt on '
              'Ubuntu-16.04 or Ubuntu-18.04') in cmd_args:
            return json.dumps(actual_mp)
        elif ('chrome_public_wpt on '
              'Ubuntu-16.04 or Ubuntu-18.04') in cmd_args:
            raise ScriptError('Test Error')
        elif 'chrome_public_wpt' in cmd_args:
            return json.dumps(baseline_mp)
        else:
            return '{"number": 400, "id":"abcd"}'

    host.executive = MockExecutive(run_command_fn=process_cmds)

    with io.StringIO() as csv_out, \
            _get_product_test_results(host, 'android_weblayer') as test_results, \
            _get_product_test_results(host, 'chrome_android') as baseline_results:
        actual_results_json = json.loads(test_results.read())
        baseline_results_json = json.loads(baseline_results.read())
        tests_to_actual_results = {}
        tests_to_baseline_results = {}
        map_tests_to_results(tests_to_actual_results,
                             actual_results_json['tests'])
        map_tests_to_results(tests_to_baseline_results,
                             baseline_results_json['tests'])

        MockWPTResultsDiffer(tests_to_actual_results,
                             tests_to_baseline_results,
                             csv_out).create_csv()
        csv_out.seek(0)
        content = csv_out.read()

        heading = CSV_HEADING % (TEST_PRODUCT, TEST_BASELINE_PRODUCT)
        self.assertEquals(
            content, heading + ('test.html,PASS,FAIL,DIFFERENT RESULTS,'
                                '"{FAIL, TIMEOUT, PASS}","{FAIL, CRASH}",No\n'))
def main():
    parser = argparse.ArgumentParser(
        description='Get stats on WPT usage in Chromium')
    parser.add_argument('chromium_src',
                        help='Path to the src/ folder of a Chromium checkout')
    parser.add_argument(
        '--csv-file',
        default='wpt-usage.csv',
        help='CSV file for results; also used to load existing results')
    parser.add_argument('--since',
                        default='2019-01',
                        help='Month to start at (inclusive)')
    parser.add_argument('--until',
                        default=datetime.datetime.now().strftime('%Y-%m'),
                        help='Month to end at (exclusive)')
    args = parser.parse_args()

    # We depend on the blinkpy library, so temporarily modify sys.path to
    # bring it in.
    blink_tools = os.path.join(args.chromium_src, 'third_party', 'blink',
                               'tools')
    sys.path.insert(0, blink_tools)
    from blinkpy.common.host import Host
    from blinkpy.w3c.chromium_finder import absolute_chromium_dir
    sys.path.remove(blink_tools)

    since = args.since
    until = args.until
    print('Processing WPT usage from', since, 'until', until)

    # Get existing CSV data, if any.
    usage = ChromiumWPTUsageDB(args.csv_file)
    try:
        usage.read()
        since = get_next_month(usage.values()[-1]['date'])
        print('Found existing CSV file, processing from', since, 'until',
              until)
    except (IOError, AssertionError):
        # Non-fatal error
        pass

    if not date_is_before(since, until):
        print('No data to update, finished!')
        return

    host = Host()
    chromium_dir = absolute_chromium_dir(host)
    while date_is_before(since, until):
        print('Getting stats for', since)
        next_month = get_next_month(since)
        usage.add(get_stats(host, chromium_dir, since, next_month))
        since = next_month
    usage.write()
def __init__(self, host=None):
    super(BaseWptScriptAdapter, self).__init__()
    if not host:
        host = Host()
    self.fs = host.filesystem
    self.port = host.port_factory.get()
    self.wpt_manifest = self.port.wpt_manifest("external/wpt")
    # Path to the output of the test run. Comes from the args passed to the
    # run, parsed after this constructor. Can be overwritten by tests.
    self.wpt_output = None
def main(self):
    args = sys.argv[1:]
    host = Host()
    stderr = self._engage_awesome_stderr_hacks()

    # Checking for the verbose flag before calling
    # check_blink_style_parser() lets us enable verbose logging earlier.
    is_verbose = '-v' in args or '--verbose' in args

    checker.configure_logging(stream=stderr, is_verbose=is_verbose)
    _log.debug('Verbose logging enabled.')

    parser = checker.check_blink_style_parser()
    (paths, options) = parser.parse(args)
    configuration = checker.check_blink_style_configuration(options)

    paths = change_directory(host.filesystem,
                             checkout_root=host.git().checkout_root,
                             paths=paths)

    style_processor = StyleProcessor(configuration)
    file_reader = TextFileReader(host.filesystem, style_processor)

    if paths and not options.diff_files:
        file_reader.process_paths(paths)
    else:
        changed_files = paths if options.diff_files else None
        patch = host.git().create_patch(options.git_commit,
                                        changed_files=changed_files)
        patch_checker = PatchReader(file_reader)
        patch_checker.check(patch)

    error_count = style_processor.error_count
    file_count = file_reader.file_count
    delete_only_file_count = file_reader.delete_only_file_count

    _log.info('Total errors found: %d in %d files', error_count, file_count)
    # We fail when style errors are found.
    return error_count > 0
def main(args): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--additional-expectations", action="append", help="Paths to additional expectations files for WPT.") known_args, rest_args = parser.parse_known_args(args) host = Host() port = host.port_factory.get(options=optparse.Values(vars(known_args))) expectations = TestExpectations(port) output_updater = WPTOutputUpdater(expectations) sys.exit(output_updater.run(rest_args))
def __init__(self, args, actual_results_map, baseline_results_map,
             csv_output):
    self._args = args
    self._host = Host()
    self._actual_results_map = actual_results_map
    self._baseline_results_map = baseline_results_map
    self._csv_output = csv_output
    self._test_flaky_results = self._get_flaky_test_results(
        args.product_to_compare)
    self._baseline_flaky_results = self._get_flaky_test_results(
        args.baseline_product)
def main(args, stderr):
    parser = argparse.ArgumentParser(
        description='Start the WebGPU expectations server, then forward to '
        'run_web_tests.py')
    parser.add_argument('--webgpu-cts-expectations', required=True)
    options, rest_args = parser.parse_known_args(args)

    web_test_expectations_fd, web_test_expectations_file = mkstemp()
    forwarded_args = rest_args + [
        '--ignore-default-expectations', '--additional-expectations',
        web_test_expectations_file
    ]

    run_web_tests_options = run_web_tests.parse_args(forwarded_args)[0]
    # Construct a web tests port using the test arguments forwarded to
    # run_web_tests.py (e.g. --platform=android) in order to discover the
    # tags that the web tests harness will use. This includes the OS, OS
    # version, architecture, etc.
    platform_tags = Host().port_factory.get(
        run_web_tests_options.platform,
        run_web_tests_options).get_platform_tags()

    with open(options.webgpu_cts_expectations) as f:
        split_result = split_cts_expectations_and_web_test_expectations(
            f.read(), platform_tags)

    # Write out the expectations file for web tests.
    with open(web_test_expectations_file, 'w') as expectations_out:
        web_test_exp = split_result['web_test_expectations']
        expectations_out.write('# tags: [ ' +
                               ' '.join(web_test_exp['tag_set']) + ' ]\n')
        expectations_out.write('# results: [ Slow ' +
                               ' '.join(web_test_exp['result_set']) +
                               ' ]\n\n')
        for exp in web_test_exp['expectations']:
            expectations_out.write(exp.to_string() + '\n')

    server = ExpectationsServer(split_result['cts_expectations_js'],
                                ('127.0.0.1', 3000))
    print('Starting expectations server...')
    server.start()
    try:
        run_web_tests.main(forwarded_args, stderr)
    finally:
        print('Stopping expectations server...')
        server.stop()
        os.close(web_test_expectations_fd)
def __init__(self, file_path, handle_style_error, host=None):
    self._file_path = file_path
    self._handle_style_error = handle_style_error
    self._tab_checker = TabChecker(file_path, handle_style_error)

    # FIXME: host should be a required parameter, not an optional one.
    host = host or Host()
    self._port_obj = host.port_factory.get()

    # Suppress error messages of the test_expectations module since they
    # will be reported later.
    log = logging.getLogger(
        'blinkpy.web_tests.layout_package.test_expectations')
    log.setLevel(logging.CRITICAL)
def main(argv, stderr):
    options, args = parse_args(argv)

    if options.platform and 'test' in options.platform \
            and 'browser_test' not in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the
        # user to run tests against the test platform interactively, which is
        # useful for debugging test failures.
        from blinkpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    printer = printing.Printer(host, options, stderr)

    try:
        port = host.port_factory.get(options.platform, options)
    except (NotImplementedError, ValueError) as error:
        _log.error(error)
        printer.cleanup()
        return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS

    # Spawn ends up with pickle errors while creating workers on fuchsia.
    if not six.PY2 and ("fuchsia" not in port.port_name):
        multiprocessing.set_start_method('spawn')

    try:
        return run(port, options, args, printer).exit_code
    # We need to still handle KeyboardInterrupt, at least for blinkpy
    # unittest cases.
    except KeyboardInterrupt:
        return exit_codes.INTERRUPTED_EXIT_STATUS
    except test_run_results.TestRunException as error:
        _log.error(error.msg)
        return error.code
    except BaseException as error:
        if isinstance(error, Exception):
            _log.error('\n%s raised: %s', error.__class__.__name__, error)
            traceback.print_exc(file=stderr)
        return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
    finally:
        printer.cleanup()
def main(args): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--additional-expectations", action="append", help="Paths to additional expectations files for WPT.") parser.add_argument("--android-apk", default=None, help="Path to Android APK that is being tested") known_args, rest_args = parser.parse_known_args(args) options = optparse.Values(vars(known_args)) host = Host() if known_args.android_apk: port = AndroidPort(host, apk=known_args.android_apk, options=options) else: port = host.port_factory.get(options=options) expectations = TestExpectations(port) metadata_builder = WPTMetadataBuilder(expectations, port) sys.exit(metadata_builder.run(rest_args))
def test_generate_repaint_overlay_html(self):
    test_name = 'paint/invalidation/repaint-overlay/layers.html'
    host = Host()
    port = host.port_factory.get()
    layer_tree_file = port.expected_filename(test_name, '.txt')
    if not layer_tree_file or not host.filesystem.exists(layer_tree_file):
        # This can happen if the scripts are not in the standard blink
        # directory.
        return

    layer_tree = str(host.filesystem.read_text_file(layer_tree_file))
    self.assertTrue(
        repaint_overlay.result_contains_repaint_rects(layer_tree))

    overlay_html = (
        '<!-- Generated by third_party/blink/tools/run_blinkpy_tests.py\n' +
        ' test case: TestRepaintOverlay.test_generate_repaint_overlay_html. -->\n'
        + repaint_overlay.generate_repaint_overlay_html(
            test_name, layer_tree, layer_tree))

    results_directory = port.results_directory()
    host.filesystem.maybe_make_directory(results_directory)
    actual_overlay_html_file = host.filesystem.join(results_directory,
                                                    'layers-overlay.html')
    host.filesystem.write_text_file(actual_overlay_html_file, overlay_html)

    overlay_html_file = port.abspath_for_test(
        'paint/invalidation/repaint-overlay/layers-overlay.html')
    expected = host.filesystem.read_text_file(overlay_html_file)

    self.assertEquals(
        expected, overlay_html,
        'This failure is probably caused by changed repaint_overlay.py. '
        'Please examine the diffs:\n  diff %s %s\n'
        'If the diffs are valid, update the file:\n  cp %s %s\n'
        'then update layers-overlay-expected.html in the same directory if '
        'needed, and commit the files together with the changed '
        'repaint_overlay.py.' %
        (overlay_html_file, actual_overlay_html_file,
         actual_overlay_html_file, overlay_html_file))
def run(self):
    if not self.host:
        self.host = Host()
    if not self._running_inline:
        self._set_up_logging()

    worker = self._worker
    _log.debug('%s starting', self.name)
    self._running = True

    try:
        if hasattr(worker, 'start'):
            worker.start()
        while self._running:
            message = self._messages_to_worker.get()
            if message.from_user:
                worker.handle(message.name, message.src, *message.args)
                self._yield_to_manager()
            else:
                assert message.name == 'stop', \
                    'bad message %s' % repr(message)
                break
        _log.debug('%s exiting', self.name)
    except Queue.Empty:
        assert False, '%s: ran out of messages in worker queue.' % self.name
    except KeyboardInterrupt:
        self._raise(sys.exc_info())
    except Exception:
        self._raise(sys.exc_info())
    finally:
        try:
            if hasattr(worker, 'stop'):
                worker.stop()
        finally:
            self._post(name='done', args=(), from_user=False)
            self._close()
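# The loop above relies on just four attributes of each queued message. A
# minimal stand-in (hypothetical, for illustration only; the real message
# class lives elsewhere in blinkpy) could be:
import collections

Message = collections.namedtuple('Message', ('from_user', 'name', 'src', 'args'))

# A control message such as Message(False, 'stop', None, ()) breaks the loop;
# any user-originated message is dispatched to worker.handle(name, src, *args).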
def main():
    host = Host()
    return TryFlag(sys.argv[1:], host, GitCL(host)).run()
def __init__(self):
    super(BaseWptScriptAdapter, self).__init__()
    host = Host()
    self.port = host.port_factory.get()
def test_check_generate_breakpad_symbols_actually_exists(self):
    host = Host()
    dump_reader = DumpReaderMultipart(host, build_dir=None)
    self.assertTrue(
        host.filesystem.exists(
            dump_reader._path_to_generate_breakpad_symbols()))
def main(args):
    host = Host()
    port = host.port_factory.get()
    expectations = TestExpectations(port)
    metadata_builder = WPTMetadataBuilder(expectations)
    sys.exit(metadata_builder.run(args))
options = parser.parse_args()

env = os.environ
total_shards = 1
shard_index = 0
if 'GTEST_TOTAL_SHARDS' in env:
    total_shards = int(env['GTEST_TOTAL_SHARDS'])
if 'GTEST_SHARD_INDEX' in env:
    shard_index = int(env['GTEST_SHARD_INDEX'])
test_shard = TestShard(total_shards, shard_index)

test_results = []
log_level = logging.DEBUG if options.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)

host = Host()
port = host.port_factory.get()
path_finder = PathFinder(host.filesystem)

# Starts WPT Serve to serve the WPT WebDriver test content.
port.start_wptserve()

# WebDriverExpectations stores skipped and failed WebDriver tests.
expectations = parse_webdriver_expectations(host, port)
skipped_tests = preprocess_skipped_tests(test_results, expectations,
                                         path_finder)

options.chromedriver = util.GetAbsolutePathOfUserPath(options.chromedriver)
if (not os.path.exists(options.chromedriver)
        and util.GetPlatformName() == 'win'
        and not options.chromedriver.lower().endswith('.exe')):
    # Assumption: the excerpt is truncated here; the condition implies the
    # missing '.exe' suffix is appended on Windows.
    options.chromedriver += '.exe'
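# TestShard's implementation isn't shown in this excerpt. A minimal sketch of
# the usual GTest-style sharding contract (hypothetical, assuming round-robin
# assignment by test number) would be:
class ExampleTestShard(object):
    def __init__(self, total_shards, shard_index):
        self.total_shards = total_shards
        self.shard_index = shard_index

    def should_run(self, test_number):
        # Test N belongs to shard N % total_shards, so each test runs on
        # exactly one shard.
        return test_number % self.total_shards == self.shard_index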
def main(args):
    host = Host()
    port = host.port_factory.get()
    expectations = TestExpectations(port)
    output_updater = WPTOutputUpdater(expectations)
    sys.exit(output_updater.run(args))
def main(argv):
    host = Host()
    checker = HistoryChecker(host, argv)
    return checker.process()
def main():
    host = Host()
    exporter = PrCleanupTool(host)
    success = exporter.main()
    host.exit(0 if success else 1)