def main(self, argv=None): """Creates PRs for in-flight CLs and merges changes that land on master. Returns: A boolean: True if success, False if there were any patch failures. """ args = self.parse_args(argv) self.dry_run = args.dry_run configure_logging(logging_level=logging.INFO, include_time=True) credentials = read_credentials(self.host, args.credentials_json) if not (credentials['GH_USER'] and credentials['GH_TOKEN']): _log.error('Must provide both user and token for GitHub.') return False self.wpt_github = self.wpt_github or WPTGitHub(self.host, credentials['GH_USER'], credentials['GH_TOKEN']) self.gerrit = self.gerrit or GerritAPI(self.host, credentials['GERRIT_USER'], credentials['GERRIT_TOKEN']) self.local_wpt = self.local_wpt or LocalWPT(self.host, credentials['GH_TOKEN']) self.local_wpt.fetch() open_gerrit_cls = self.gerrit.query_exportable_open_cls() self.process_gerrit_cls(open_gerrit_cls) exportable_commits, errors = self.get_exportable_commits() for error in errors: _log.warn(error) self.process_chromium_commits(exportable_commits) return not bool(errors)
def main(self, argv=None): """Creates PRs for in-flight CLs and merges changes that land on master. Returns: A boolean: True if success, False if there were any patch failures. """ options = self.parse_args(argv) self.dry_run = options.dry_run log_level = logging.DEBUG if options.verbose else logging.INFO configure_logging(logging_level=log_level, include_time=True) if options.verbose: # Print out the full output when executive.run_command fails. self.host.executive.error_output_limit = None credentials = read_credentials(self.host, options.credentials_json) if not (credentials.get('GH_USER') and credentials.get('GH_TOKEN')): _log.error('You must provide your GitHub credentials for this ' 'script to work.') _log.error('See https://chromium.googlesource.com/chromium/src' '/+/master/docs/testing/web_platform_tests.md' '#GitHub-credentials for instructions on how to set ' 'your credentials up.') return False self.wpt_github = self.wpt_github or WPTGitHub( self.host, credentials['GH_USER'], credentials['GH_TOKEN']) self.gerrit = self.gerrit or GerritAPI( self.host, credentials['GERRIT_USER'], credentials['GERRIT_TOKEN']) self.local_wpt = self.local_wpt or LocalWPT(self.host, credentials['GH_TOKEN']) self.local_wpt.fetch() _log.info('Searching for exportable in-flight CLs.') # The Gerrit search API is slow and easy to fail, so we wrap it in a try # statement to continue exporting landed commits when it fails. try: open_gerrit_cls = self.gerrit.query_exportable_open_cls() except GerritError as e: _log.info( 'In-flight CLs cannot be exported due to the following error:') _log.error(str(e)) gerrit_error = True else: self.process_gerrit_cls(open_gerrit_cls) gerrit_error = False _log.info('Searching for exportable Chromium commits.') exportable_commits, git_errors = self.get_exportable_commits() self.process_chromium_commits(exportable_commits) if git_errors: _log.info( 'Attention: The following errors have prevented some commits from being ' 'exported:') for error in git_errors: _log.error(error) return not (gerrit_error or git_errors)
def main():
    # This is a hack to let us enable DEBUG logging as early as possible.
    # Note this can't be ternary as versioning.check_version()
    # hasn't run yet and this python might be older than 2.5.
    if set(["-v", "--verbose"]).intersection(set(sys.argv)):
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    configure_logging(logging_level=logging_level)

    BlinkTool(os.path.abspath(__file__)).main()
def run(self, args=None):
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='More verbose logging.')
    args = parser.parse_args(args)

    log_level = logging.DEBUG if args.verbose else logging.INFO
    configure_logging(logging_level=log_level, include_time=True)

    self.update_expectations()
    return 0
def main(argv, stderr, host=None):
    parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    parser.add_option('--verbose', action='store_true', default=False,
                      help='log extra details that may be helpful when debugging')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this allows the user
            # to run tests against the test platform interactively, which is useful for
            # debugging test failures.
            from webkitpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    if options.verbose:
        configure_logging(logging_level=logging.DEBUG, stream=stderr)
        # Print full stdout/stderr when a command fails.
        host.executive.error_output_limit = None
    else:
        # PRESUBMIT.py relies on our output, so don't include timestamps.
        configure_logging(logging_level=logging.INFO, stream=stderr,
                          include_time=False)

    try:
        # Need to generate MANIFEST.json since some expectations correspond to WPT
        # tests that aren't files and only exist in the manifest.
        _log.debug('Generating MANIFEST.json for web-platform-tests ...')
        WPTManifest.ensure_manifest(host)
        exit_status = run_checks(host, options)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
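Because main() swaps in MockHost when the platform name contains 'test', the checks can be exercised interactively without a real checkout. A hypothetical driver sketch follows; the '--platform' spelling is an assumption based on the options.platform check above.

# Hypothetical driver sketch: run the checks against the mock test platform
# with verbose logging. '--platform test' assumes platform_options() defines
# a --platform flag, as the options.platform check above suggests.
import sys

if __name__ == '__main__':
    sys.exit(main(['--platform', 'test', '--verbose'], sys.stderr))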
def main(self, argv=None): """Creates PRs for in-flight CLs and merges changes that land on master. Returns: A boolean: True if success, False if there were any patch failures. """ options = self.parse_args(argv) self.dry_run = options.dry_run log_level = logging.DEBUG if options.verbose else logging.INFO configure_logging(logging_level=log_level, include_time=True) if options.verbose: # Print out the full output when executive.run_command fails. self.host.executive.error_output_limit = None credentials = read_credentials(self.host, options.credentials_json) if not (credentials['GH_USER'] and credentials['GH_TOKEN']): _log.error('Must provide both user and token for GitHub.') return False self.wpt_github = self.wpt_github or WPTGitHub( self.host, credentials['GH_USER'], credentials['GH_TOKEN']) self.gerrit = self.gerrit or GerritAPI( self.host, credentials['GERRIT_USER'], credentials['GERRIT_TOKEN']) self.local_wpt = self.local_wpt or LocalWPT(self.host, credentials['GH_TOKEN']) self.local_wpt.fetch() # The Gerrit search API is slow and easy to fail, so we wrap it in a try # statement to continue exporting landed commits when it fails. try: open_gerrit_cls = self.gerrit.query_exportable_open_cls() except GerritError as e: _log.error(str(e)) gerrit_error = True else: self.process_gerrit_cls(open_gerrit_cls) gerrit_error = False exportable_commits, git_errors = self.get_exportable_commits() for error in git_errors: _log.error(error) self.process_chromium_commits(exportable_commits) return not (gerrit_error or git_errors)
def setUp(self):
    log_stream = TestLogStream(self)

    # Use a logger other than the root logger or one prefixed with
    # "webkitpy." so as not to conflict with test-webkitpy logging.
    logger = logging.getLogger("unittest")

    # Configure the test logger not to pass messages along to the
    # root logger. This prevents test messages from being
    # propagated to loggers used by test-webkitpy logging (e.g.
    # the root logger).
    logger.propagate = False

    logging_level = self._logging_level()
    self._handlers = configure_logging(logging_level=logging_level,
                                       logger=logger,
                                       stream=log_stream)
    self._log = logger
    self._log_stream = log_stream
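A companion tearDown is the natural counterpart here. This is only a sketch, assuming the handlers returned by configure_logging() (as the assignment to self._handlers suggests) should be detached so later tests start from a clean logger.

# Sketch of a matching tearDown (an assumption, not part of the original):
# detach the handlers that configure_logging() attached so that subsequent
# tests start from a clean "unittest" logger.
def tearDown(self):
    for handler in self._handlers:
        self._log.removeHandler(handler)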
def main(argv):
    parser = argparse.ArgumentParser()
    parser.description = """\
Merges sharded layout test results into a single output directory.
"""
    parser.epilog = """\
If a post merge script is given, it will be run on the resulting merged output
directory. The script will be given the arguments plus
'--results_dir <output_directory>'.
"""

    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Output information about merging progress.')

    parser.add_argument(
        '--results-json-override-value',
        nargs=2, metavar=('KEY', 'VALUE'),
        default=[],
        action='append',
        help='Override a value in the results style JSON file '
             '(--results-json-override-value layout_test_dirs /tmp/output).')
    parser.add_argument(
        '--results-json-allow-unknown-if-matching',
        action='store_true', default=False,
        help='Allow unknown values in the result.json file as long as the '
             'values match on all shards.')

    parser.add_argument(
        '--output-directory',
        help='Directory to create the merged results in.')
    parser.add_argument(
        '--allow-existing-output-directory',
        action='store_true', default=False,
        help='Allow merging results into a directory which already exists.')
    parser.add_argument(
        '--remove-existing-output-directory',
        action='store_true', default=False,
        help='Remove the output directory if it already exists before merging.')
    parser.add_argument(
        '--input-directories', nargs='+',
        help='Directories to merge the results from.')

    # Swarming Isolated Merge Script API
    # script.py \
    #     --build-properties /s/build.json \
    #     --output-json /tmp/output.json \
    #     --task-output-dir /path/to/task/output/dir \
    #     shard0/output.json \
    #     shard1/output.json
    parser.add_argument(
        '-o', '--output-json',
        help='(Swarming Isolated Merge Script API) Output JSON file to create.')
    parser.add_argument(
        '--build-properties',
        help='(Swarming Isolated Merge Script API) Build property JSON file '
             'provided by recipes.')
    parser.add_argument(
        '--task-output-dir',
        help='(Swarming Isolated Merge Script API) Directory containing all '
             'swarming task results.')
    parser.add_argument(
        '--results-json-override-with-build-property',
        nargs=2, metavar=('RESULT_JSON_KEY', 'BUILD_PROPERTY_KEY'),
        default=[],
        action='append',
        help='Override a value in the results style JSON file with a value '
             'taken from the build properties JSON file.')
    parser.add_argument(
        '--summary-json',
        help='(Swarming Isolated Merge Script API) Summary of shard state '
             'running on swarming. (Output of the swarming.py collect '
             '--task-summary-json=XXX command.)')

    # Script to run after merging the directories together. Normally used with
    # archive_layout_test_results.py:
    # scripts/slave/chromium/archive_layout_test_results.py \
    #     --results-dir /b/rr/tmpIcChUS/w/layout-test-results \
    #     --build-dir /b/rr/tmpIcChUS/w/src/out \
    #     --build-number 3665 \
    #     --builder-name 'WebKit Linux - RandomOrder' \
    #     --gs-bucket gs://chromium-layout-test-archives \
    #     --staging-dir /b/c/chrome_staging \
    #     --slave-utils-gsutil-py-path /b/rr/tmpIcChUS/rw/scripts/slave/.recipe_deps/depot_tools/gsutil.py
    # in dir /b/rr/tmpIcChUS/w
    parser.add_argument(
        '--post-merge-script',
        nargs='*',
        help='Script to call after the results have been merged.')

    # The positional arguments depend on whether we are using the isolated
    # merge script API mode or not.
    parser.add_argument(
        'positional', nargs='*',
        help='output.json from shards.')

    args = parser.parse_args(argv)

    if args.verbose:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    configure_logging(logging_level=logging_level)

    # Map the isolate arguments back to our output / input arguments.
    if args.output_json:
        logging.info('Running with isolated arguments')
        assert args.positional

        # TODO(tansell): Once removed everywhere, these lines can be removed.
        # For now we just check nobody is supplying arguments we didn't expect.
        if args.results_json_override_with_build_property:
            for result_key, build_prop_key in args.results_json_override_with_build_property:
                assert (result_key, build_prop_key) in RESULTS_JSON_VALUE_OVERRIDE_WITH_BUILD_PROPERTY, (
                    "%s not in %s" % (result_key, RESULTS_JSON_VALUE_OVERRIDE_WITH_BUILD_PROPERTY))

        if not args.output_directory:
            args.output_directory = os.getcwd()
            args.allow_existing_output_directory = True
            args.remove_existing_output_directory = True

        assert not args.input_directories
        args.input_directories = [os.path.dirname(f) for f in args.positional]
        args.positional = []

    # Allow skipping the --input-directories bit, for example,
    #   merge-layout-test-results -o outputdir shard0 shard1 shard2
    if args.positional and not args.input_directories:
        args.input_directories = args.positional

    if not args.output_directory:
        args.output_directory = tempfile.mkdtemp(suffix='webkit_layout_test_results.')

    assert args.output_directory
    assert args.input_directories

    results_json_value_overrides = {}
    if args.build_properties:
        build_properties = json.loads(args.build_properties)

        for result_key, build_prop_key in RESULTS_JSON_VALUE_OVERRIDE_WITH_BUILD_PROPERTY:
            if build_prop_key not in build_properties:
                logging.warning('Required build property key "%s" was not found!',
                                build_prop_key)
                continue
            results_json_value_overrides[result_key] = build_properties[build_prop_key]
        logging.debug('results_json_value_overrides: %r', results_json_value_overrides)

    merger = LayoutTestDirMerger(
        results_json_value_overrides=results_json_value_overrides,
        results_json_allow_unknown_if_matching=args.results_json_allow_unknown_if_matching)

    ensure_empty_dir(
        FileSystem(),
        args.output_directory,
        allow_existing=args.allow_existing_output_directory,
        remove_existing=args.remove_existing_output_directory)

    merger.merge(args.output_directory, args.input_directories)

    merged_output_json = os.path.join(args.output_directory, 'output.json')
    if os.path.exists(merged_output_json) and args.output_json:
        logging.debug(
            'Copying output.json from %s to %s', merged_output_json, args.output_json)
        shutil.copyfile(merged_output_json, args.output_json)

    if args.post_merge_script:
        logging.debug('Changing directory to %s', args.output_directory)
        os.chdir(args.output_directory)

        # Pass the merged results directory to the post merge script (per the
        # epilog above), then replace this process with it.
        post_script = list(args.post_merge_script)
        post_script.extend(['--results_dir', args.output_directory])
        logging.info('Running post merge script %r', post_script)
        os.execvp(post_script[0], post_script)
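For reference, a sketch of the two ways main() above can be driven; the flag names come from the parser definitions and inline comments, and all paths are placeholders.

# Sketch of the two invocation modes accepted by main() above; flag names
# come from the parser definitions, paths are placeholders.
import sys

if __name__ == '__main__':
    # Direct mode:
    #   merge-layout-test-results -o outputdir shard0 shard1 shard2
    # Swarming Isolated Merge Script API mode:
    #   script.py --build-properties /s/build.json \
    #             --output-json /tmp/output.json \
    #             --task-output-dir /path/to/task/output/dir \
    #             shard0/output.json shard1/output.json
    main(sys.argv[1:])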
def main(self, argv=None):
    # TODO(robertma): Test this method! Split it to make it easier to test
    # if necessary.

    options = self.parse_args(argv)

    self.verbose = options.verbose
    log_level = logging.DEBUG if self.verbose else logging.INFO
    configure_logging(logging_level=log_level, include_time=True)
    if options.verbose:
        # Print out the full output when executive.run_command fails.
        self.host.executive.error_output_limit = None

    if not self.checkout_is_okay():
        return 1

    credentials = read_credentials(self.host, options.credentials_json)
    gh_user = credentials.get('GH_USER')
    gh_token = credentials.get('GH_TOKEN')
    if not gh_user or not gh_token:
        _log.warning('You have not set your GitHub credentials. This '
                     'script may fail with a network error when making '
                     'an API request to GitHub.')
        _log.warning('See https://chromium.googlesource.com/chromium/src'
                     '/+/master/docs/testing/web_platform_tests.md'
                     '#GitHub-credentials for instructions on how to set '
                     'your credentials up.')
    self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user, gh_token)
    self.git_cl = GitCL(
        self.host, auth_refresh_token_json=options.auth_refresh_token_json)

    _log.debug('Noting the current Chromium revision.')
    chromium_revision = self.chromium_git.latest_git_commit()

    # Instantiate Git after local_wpt.fetch() to make sure the path exists.
    local_wpt = LocalWPT(self.host, gh_token=gh_token)
    local_wpt.fetch()
    self.wpt_git = self.host.git(local_wpt.path)

    if options.revision is not None:
        _log.info('Checking out %s', options.revision)
        self.wpt_git.run(['checkout', options.revision])

    _log.debug('Noting the revision we are importing.')
    self.wpt_revision = self.wpt_git.latest_git_commit()
    self.last_wpt_revision = self._get_last_imported_wpt_revision()
    import_commit = 'wpt@%s' % self.wpt_revision

    _log.info('Importing %s to Chromium %s', import_commit, chromium_revision)

    if options.ignore_exportable_commits:
        commit_message = self._commit_message(chromium_revision, import_commit)
    else:
        commits = self.apply_exportable_commits_locally(local_wpt)
        if commits is None:
            _log.error('Could not apply some exportable commits cleanly.')
            _log.error('Aborting import to prevent clobbering commits.')
            return 1
        commit_message = self._commit_message(
            chromium_revision,
            import_commit,
            locally_applied_commits=commits)

    self._clear_out_dest_path()

    _log.info('Copying the tests from the temp repo to the destination.')
    test_copier = TestCopier(self.host, local_wpt.path)
    test_copier.do_import()

    # TODO(robertma): Implement `add --all` in Git (it is different from
    # `commit --all`).
    self.chromium_git.run(['add', '--all', self.dest_path])

    self._generate_manifest()

    # TODO(crbug.com/800570 robertma): Re-enable it once we fix the bug.
    # self._delete_orphaned_baselines()

    # TODO(qyearsley): Consider running the imported tests with
    # `run-webkit-tests --reset-results external/wpt` to get some baselines
    # before the try jobs are started.

    _log.info('Updating TestExpectations for any removed or renamed tests.')
    self.update_all_test_expectations_files(self._list_deleted_tests(),
                                            self._list_renamed_tests())

    if not self.chromium_git.has_working_directory_changes():
        _log.info('Done: no changes to import.')
        return 0

    if self._only_wpt_manifest_changed():
        _log.info('Only WPT_BASE_MANIFEST.json was updated; skipping the import.')
        return 0

    self._commit_changes(commit_message)
    _log.info('Changes imported and committed.')

    if not options.auto_update:
        return 0

    self._upload_cl()
    _log.info('Issue: %s', self.git_cl.run(['issue']).strip())

    if not self.update_expectations_for_cl():
        return 1

    if not self.run_commit_queue_for_cl():
        return 1

    if not self.send_notifications(local_wpt, options.auto_file_bugs,
                                   options.monorail_auth_json):
        return 1

    return 0
def main(self, argv=None):
    options = self.parse_args(argv)

    self.verbose = options.verbose
    log_level = logging.DEBUG if self.verbose else logging.INFO
    configure_logging(logging_level=log_level, include_time=True)

    if not self.checkout_is_okay():
        return 1

    credentials = read_credentials(self.host, options.credentials_json)
    gh_user = credentials.get('GH_USER')
    gh_token = credentials.get('GH_TOKEN')

    self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user, gh_token)
    local_wpt = LocalWPT(self.host, gh_token=gh_token)
    self.git_cl = GitCL(
        self.host, auth_refresh_token_json=options.auth_refresh_token_json)

    _log.debug('Noting the current Chromium commit.')
    # TODO(qyearsley): Use Git (self.host.git) to run git commands.
    _, show_ref_output = self.run(
        ['git', 'show-ref', '--verify', '--head', '--hash', 'HEAD'])
    chromium_commit = show_ref_output.strip()

    local_wpt.fetch()

    if options.revision is not None:
        _log.info('Checking out %s', options.revision)
        self.run(['git', 'checkout', options.revision], cwd=local_wpt.path)

    _log.debug('Noting the revision we are importing.')
    _, show_ref_output = self.run(
        ['git', 'show-ref', 'origin/master'], cwd=local_wpt.path)
    import_commit = 'wpt@%s' % show_ref_output.split()[0]

    _log.info('Importing %s to Chromium %s', import_commit, chromium_commit)

    commit_message = self._commit_message(chromium_commit, import_commit)

    if not options.ignore_exportable_commits:
        commits = self.apply_exportable_commits_locally(local_wpt)
        if commits is None:
            _log.error('Could not apply some exportable commits cleanly.')
            _log.error('Aborting import to prevent clobbering commits.')
            return 1
        commit_message = self._commit_message(
            chromium_commit,
            import_commit,
            locally_applied_commits=commits)

    self._clear_out_dest_path()

    _log.info('Copying the tests from the temp repo to the destination.')
    test_copier = TestCopier(self.host, local_wpt.path)
    test_copier.do_import()

    self.run(['git', 'add', '--all', 'external/wpt'])

    self._generate_manifest()
    self._delete_orphaned_baselines()

    # TODO(qyearsley): Consider running the imported tests with
    # `run-webkit-tests --reset-results external/wpt` to get some baselines
    # before the try jobs are started.

    _log.info('Updating TestExpectations for any removed or renamed tests.')
    self.update_all_test_expectations_files(self._list_deleted_tests(),
                                            self._list_renamed_tests())

    has_changes = self._has_changes()
    if not has_changes:
        _log.info('Done: no changes to import.')
        return 0

    self._commit_changes(commit_message)
    _log.info('Changes imported and committed.')

    if not options.auto_update:
        return 0

    self._upload_cl()
    _log.info('Issue: %s', self.git_cl.run(['issue']).strip())

    if not self.update_expectations_for_cl():
        return 1

    if not self.run_commit_queue_for_cl():
        return 1

    return 0