Example #1
class TestImporter(object):
    def __init__(self, host, wpt_github=None):
        self.host = host
        self.executive = host.executive
        self.fs = host.filesystem
        self.finder = PathFinder(self.fs)
        self.verbose = False
        self.git_cl = None
        self.wpt_github = wpt_github

    def main(self, argv=None):
        options = self.parse_args(argv)

        self.verbose = options.verbose
        log_level = logging.DEBUG if self.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        if not self.checkout_is_okay():
            return 1

        credentials = read_credentials(self.host, options.credentials_json)
        gh_user = credentials.get('GH_USER')
        gh_token = credentials.get('GH_TOKEN')
        self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user,
                                                       gh_token)
        local_wpt = LocalWPT(self.host, gh_token=gh_token)
        self.git_cl = GitCL(
            self.host, auth_refresh_token_json=options.auth_refresh_token_json)

        _log.debug('Noting the current Chromium commit.')
        # TODO(qyearsley): Use Git (self.host.git) to run git commands.
        _, show_ref_output = self.run(['git', 'show-ref', 'HEAD'])
        chromium_commit = show_ref_output.split()[0]

        local_wpt.fetch()

        if options.revision is not None:
            _log.info('Checking out %s', options.revision)
            self.run(['git', 'checkout', options.revision], cwd=local_wpt.path)

        _log.info('Noting the revision we are importing.')
        _, show_ref_output = self.run(['git', 'show-ref', 'origin/master'],
                                      cwd=local_wpt.path)
        import_commit = 'wpt@%s' % show_ref_output.split()[0]

        commit_message = self._commit_message(chromium_commit, import_commit)

        if not options.ignore_exportable_commits:
            commits = self.apply_exportable_commits_locally(local_wpt)
            if commits is None:
                _log.error('Could not apply some exportable commits cleanly.')
                _log.error('Aborting import to prevent clobbering commits.')
                return 1
            commit_message = self._commit_message(
                chromium_commit,
                import_commit,
                locally_applied_commits=commits)

        dest_path = self.finder.path_from_layout_tests('external', 'wpt')

        self._clear_out_dest_path(dest_path)

        _log.info('Copying the tests from the temp repo to the destination.')
        test_copier = TestCopier(self.host, local_wpt.path)
        test_copier.do_import()

        self.run(['git', 'add', '--all', 'external/wpt'])

        self._delete_orphaned_baselines(dest_path)

        # TODO(qyearsley): Consider updating manifest after adding baselines.
        self._generate_manifest(dest_path)

        # TODO(qyearsley): Consider running the imported tests with
        # `run-webkit-tests --reset-results external/wpt` to get some baselines
        # before the try jobs are started.

        _log.info(
            'Updating TestExpectations for any removed or renamed tests.')
        self.update_all_test_expectations_files(self._list_deleted_tests(),
                                                self._list_renamed_tests())

        has_changes = self._has_changes()
        if not has_changes:
            _log.info('Done: no changes to import.')
            return 0

        self._commit_changes(commit_message)
        _log.info('Changes imported and committed.')

        if not options.auto_update:
            return 0

        self._upload_cl()
        _log.info('Issue: %s', self.git_cl.run(['issue']).strip())

        if not self.update_expectations_for_cl():
            return 1

        if not self.run_commit_queue_for_cl():
            return 1

        return 0

    def update_expectations_for_cl(self):
        """Performs the expectation-updating part of an auto-import job.

        This includes triggering try jobs and waiting; then, if applicable,
        writing new baselines and TestExpectation lines, committing, and
        uploading a new patchset.

        This assumes that there is a CL associated with the current branch.

        Returns True if everything is OK to continue, or False on failure.
        """
        _log.info('Triggering try jobs for updating expectations.')
        self.git_cl.trigger_try_jobs()
        try_results = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS,
            timeout_seconds=TIMEOUT_SECONDS)

        if not try_results:
            _log.error('No initial try job results, aborting.')
            self.git_cl.run(['set-close'])
            return False

        if try_results and self.git_cl.some_failed(try_results):
            self.fetch_new_expectations_and_baselines()
            if self.host.git().has_working_directory_changes():
                message = 'Update test expectations and baselines.'
                self.check_run(['git', 'commit', '-a', '-m', message])
                self._upload_patchset(message)
        return True

    def run_commit_queue_for_cl(self):
        """Triggers CQ and either commits or aborts; returns True on success."""
        _log.info('Triggering CQ try jobs.')
        self.git_cl.run(['try'])
        try_results = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS,
            timeout_seconds=TIMEOUT_SECONDS)
        try_results = self.git_cl.filter_latest(try_results)

        if not try_results:
            self.git_cl.run(['set-close'])
            _log.error('No CQ try job results, aborting.')
            return False

        if try_results and self.git_cl.all_success(try_results):
            _log.info('CQ appears to have passed; trying to commit.')
            self.git_cl.run(['upload', '-f',
                             '--send-mail'])  # Turn off WIP mode.
            self.git_cl.run(['set-commit'])
            _log.info('Update completed.')
            return True

        self.git_cl.run(['set-close'])
        _log.error('CQ appears to have failed; aborting.')
        return False

    def parse_args(self, argv):
        parser = argparse.ArgumentParser()
        parser.description = __doc__
        parser.add_argument(
            '-v',
            '--verbose',
            action='store_true',
            help='log extra details that may be helpful when debugging')
        parser.add_argument(
            '--ignore-exportable-commits',
            action='store_true',
            help='do not check for exportable commits that would be clobbered')
        parser.add_argument('-r', '--revision', help='target wpt revision')
        parser.add_argument(
            '--auto-update',
            action='store_true',
            help='upload a CL, update expectations, and trigger CQ')
        parser.add_argument(
            '--auth-refresh-token-json',
            help='authentication refresh token JSON file used for try jobs, '
            'generally not necessary on developer machines')
        parser.add_argument('--credentials-json',
                            help='A JSON file with GitHub credentials, '
                            'generally not necessary on developer machines')

        return parser.parse_args(argv)

    def checkout_is_okay(self):
        git_diff_retcode, _ = self.run(['git', 'diff', '--quiet', 'HEAD'],
                                       exit_on_failure=False)
        if git_diff_retcode:
            _log.warning('Checkout is dirty; aborting.')
            return False
        _, local_commits = self.run(
            ['git', 'log', '--oneline', 'origin/master..HEAD'])
        if local_commits:
            _log.warning('Checkout has local commits before import.')
        return True

    def apply_exportable_commits_locally(self, local_wpt):
        """Applies exportable Chromium changes to the local WPT repo.

        The purpose of this is to avoid clobbering changes that were made in
        Chromium but not yet merged upstream. By applying these changes to the
        local copy of web-platform-tests before copying files over, we make
        it so that the resulting change in Chromium doesn't undo the
        previous Chromium change.

        Args:
            local_wpt: A LocalWPT instance for our local copy of WPT.

        Returns:
            A list of commits applied (could be empty), or None if any
            of the patches could not be applied cleanly.
        """
        commits = self.exportable_but_not_exported_commits(local_wpt)
        for commit in commits:
            _log.info('Applying exportable commit locally:')
            _log.info(commit.url())
            _log.info('Subject: %s', commit.subject().strip())
            # TODO(qyearsley): We probably don't need to know about
            # corresponding PRs at all anymore, although this information
            # could still be useful for reference.
            pull_request = self.wpt_github.pr_for_chromium_commit(commit)
            if pull_request:
                _log.info(
                    'PR: https://github.com/w3c/web-platform-tests/pull/%d',
                    pull_request.number)
            else:
                _log.warning('No pull request found.')
            applied = local_wpt.apply_patch(commit.format_patch())
            if not applied:
                return None
            self.run(['git', 'commit', '--all', '-F', '-'],
                     stdin='Applying patch %s' % commit.sha,
                     cwd=local_wpt.path)
        return commits

    def exportable_but_not_exported_commits(self, local_wpt):
        """Returns a list of commits that would be clobbered by importer."""
        return exportable_commits_over_last_n_commits(self.host, local_wpt,
                                                      self.wpt_github)

    def _generate_manifest(self, dest_path):
        """Generates MANIFEST.json for imported tests.

        Args:
            dest_path: Path to the destination WPT directory.

        Runs the (newly-updated) manifest command if it's found, and then
        stages the generated MANIFEST.json in the git index, ready to commit.
        """
        _log.info('Generating MANIFEST.json')
        WPTManifest.generate_manifest(self.host, dest_path)
        manifest_path = self.fs.join(dest_path, 'MANIFEST.json')
        assert self.fs.exists(manifest_path)
        manifest_base_path = self.fs.normpath(
            self.fs.join(dest_path, '..', 'WPT_BASE_MANIFEST.json'))
        self.copyfile(manifest_path, manifest_base_path)
        self.run(['git', 'add', manifest_base_path])

    def _clear_out_dest_path(self, dest_path):
        _log.info('Cleaning out tests from %s.', dest_path)
        should_remove = lambda fs, dirname, basename: (
            not self.is_baseline(basename) and
            # See http://crbug.com/702283 for context.
            basename != 'OWNERS')
        files_to_delete = self.fs.files_under(dest_path,
                                              file_filter=should_remove)
        for subpath in files_to_delete:
            self.remove(self.finder.path_from_layout_tests(
                'external', subpath))

    def _commit_changes(self, commit_message):
        _log.info('Committing changes.')
        self.run(['git', 'commit', '--all', '-F', '-'], stdin=commit_message)

    def _has_changes(self):
        return_code, _ = self.run(['git', 'diff', '--quiet', 'HEAD'],
                                  exit_on_failure=False)
        return return_code == 1

    def _commit_message(self,
                        chromium_commit_sha,
                        import_commit_sha,
                        locally_applied_commits=None):
        message = 'Import {}\n\nUsing wpt-import in Chromium {}.\n'.format(
            import_commit_sha, chromium_commit_sha)
        if locally_applied_commits is not None:
            message += 'With Chromium commits locally applied on WPT:\n'
            message += '\n'.join(
                str(commit) for commit in locally_applied_commits)
        message += '\nNo-Export: true'
        return message

    def _delete_orphaned_baselines(self, dest_path):
        _log.info('Deleting any orphaned baselines.')
        is_baseline_filter = lambda fs, dirname, basename: self.is_baseline(
            basename)
        previous_baselines = self.fs.files_under(
            dest_path, file_filter=is_baseline_filter)
        for sub_path in previous_baselines:
            full_baseline_path = self.fs.join(dest_path, sub_path)
            if not self._has_corresponding_test(full_baseline_path):
                self.fs.remove(full_baseline_path)

    def _has_corresponding_test(self, full_baseline_path):
        base = full_baseline_path.replace('-expected.txt', '')
        return any(
            self.fs.exists(base + ext)
            for ext in Port.supported_file_extensions)

    @staticmethod
    def is_baseline(basename):
        # TODO(qyearsley): Find a better, centralized place for this.
        # Also, the name for this method should be is_text_baseline.
        return basename.endswith('-expected.txt')

    def run(self, cmd, exit_on_failure=True, cwd=None, stdin=''):
        _log.debug('Running command: %s', ' '.join(cmd))

        cwd = cwd or self.finder.path_from_layout_tests()
        proc = self.executive.popen(cmd,
                                    stdout=self.executive.PIPE,
                                    stderr=self.executive.PIPE,
                                    stdin=self.executive.PIPE,
                                    cwd=cwd)
        out, err = proc.communicate(stdin)
        if proc.returncode or self.verbose:
            _log.info('# ret> %d', proc.returncode)
            if out:
                for line in out.splitlines():
                    _log.info('# out> %s', line)
            if err:
                for line in err.splitlines():
                    _log.info('# err> %s', line)
        if exit_on_failure and proc.returncode:
            self.host.exit(proc.returncode)
        return proc.returncode, out

    def check_run(self, command):
        return_code, out = self.run(command)
        if return_code:
            raise Exception('%s failed with exit code %d.' %
                            (' '.join(command), return_code))
        return out

    def copyfile(self, source, destination):
        _log.debug('cp %s %s', source, destination)
        self.fs.copyfile(source, destination)

    def remove(self, dest):
        _log.debug('rm %s', dest)
        self.fs.remove(dest)

    def _upload_patchset(self, message):
        self.git_cl.run(['upload', '-f', '-t', message, '--gerrit'])

    def _upload_cl(self):
        _log.info('Uploading change list.')
        directory_owners = self.get_directory_owners()
        description = self._cl_description(directory_owners)
        self.git_cl.run([
            'upload',
            '-f',
            '--gerrit',
            '-m',
            description,
            '--tbrs',
            '*****@*****.**',
        ] + self._cc_part(directory_owners))

    @staticmethod
    def _cc_part(directory_owners):
        cc_part = []
        for owner_tuple in sorted(directory_owners):
            cc_part.extend('--cc=' + owner for owner in owner_tuple)
        return cc_part

    def get_directory_owners(self):
        """Returns a mapping of email addresses to owners of changed tests."""
        _log.info('Gathering directory owners emails to CC.')
        changed_files = self.host.git().changed_files()
        extractor = DirectoryOwnersExtractor(self.fs)
        extractor.read_owner_map()
        return extractor.list_owners(changed_files)

    def _cl_description(self, directory_owners):
        """Returns a CL description string.

        Args:
            directory_owners: A dict mapping tuples of owner email addresses to lists of directories.
        """
        description = self.check_run(['git', 'log', '-1', '--format=%B'])
        build_link = current_build_link(self.host)
        if build_link:
            description += 'Build: %s\n\n' % build_link

        description += (
            'Note to sheriffs: This CL imports external tests and adds\n'
            'expectations for those tests; if this CL is large and causes\n'
            'a few new failures, please fix the failures by adding new\n'
            'lines to TestExpectations rather than reverting. See:\n'
            'https://chromium.googlesource.com'
            '/chromium/src/+/master/docs/testing/web_platform_tests.md\n\n')

        if directory_owners:
            description += self._format_directory_owners(
                directory_owners) + '\n\n'

        # Move any No-Export tag to the end of the description.
        description = description.replace('No-Export: true', '')
        description = description.replace('\n\n\n\n', '\n\n')
        description += 'No-Export: true'
        return description

    @staticmethod
    def _format_directory_owners(directory_owners):
        message_lines = ['Directory owners for changes in this CL:']
        for owner_tuple, directories in sorted(directory_owners.items()):
            message_lines.append(', '.join(owner_tuple) + ':')
            message_lines.extend('  ' + d for d in directories)
        return '\n'.join(message_lines)
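
    # Illustrative sketch (hypothetical addresses): for an input of
    #   {('[email protected]', '[email protected]'): ['external/wpt/dom']}
    # this returns:
    #   Directory owners for changes in this CL:
    #   [email protected], [email protected]:
    #     external/wpt/dom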

    def fetch_new_expectations_and_baselines(self):
        """Modifies expectation lines and baselines based on try job results.

        Assuming that there are some try job results available, this
        adds new expectation lines to TestExpectations and downloads new
        baselines based on the try job results.

        This is the same as invoking the `wpt-update-expectations` script.
        """
        _log.info(
            'Adding test expectations lines to LayoutTests/TestExpectations.')
        expectation_updater = WPTExpectationsUpdater(self.host)
        expectation_updater.run(args=[])

    def update_all_test_expectations_files(self, deleted_tests, renamed_tests):
        """Updates all test expectations files for tests that have been deleted or renamed.

        This is only for deleted or renamed tests in the initial import,
        not for tests that have failures in try jobs.
        """
        port = self.host.port_factory.get()
        for path, file_contents in port.all_expectations_dict().iteritems():
            parser = TestExpectationParser(port,
                                           all_tests=None,
                                           is_lint_mode=False)
            expectation_lines = parser.parse(path, file_contents)
            self._update_single_test_expectations_file(path, expectation_lines,
                                                       deleted_tests,
                                                       renamed_tests)

    def _update_single_test_expectations_file(self, path, expectation_lines,
                                              deleted_tests, renamed_tests):
        """Updates a single test expectations file."""
        # FIXME: This won't work for removed or renamed directories with test
        # expectations that are directories rather than individual tests.
        new_lines = []
        changed_lines = []
        for expectation_line in expectation_lines:
            if expectation_line.name in deleted_tests:
                continue
            if expectation_line.name in renamed_tests:
                expectation_line.name = renamed_tests[expectation_line.name]
                # Upon parsing the file, a "path does not exist" warning is expected
                # to be there for tests that have been renamed, and if there are warnings,
                # then the original string is used. If the warnings are reset, then the
                # expectation line is re-serialized when output.
                expectation_line.warnings = []
                changed_lines.append(expectation_line)
            new_lines.append(expectation_line)
        new_file_contents = TestExpectations.list_to_string(
            new_lines, reconstitute_only_these=changed_lines)
        self.host.filesystem.write_text_file(path, new_file_contents)

    def _list_deleted_tests(self):
        """List of layout tests that have been deleted."""
        out = self.check_run([
            'git', 'diff', 'origin/master', '-M100%', '--diff-filter=D',
            '--name-only'
        ])
        deleted_tests = []
        for line in out.splitlines():
            test = self.finder.layout_test_name(line)
            if test:
                deleted_tests.append(test)
        return deleted_tests

    def _list_renamed_tests(self):
        """Lists tests that have been renamed.

        Returns a dict mapping source name to destination name.
        """
        out = self.check_run([
            'git', 'diff', 'origin/master', '-M100%', '--diff-filter=R',
            '--name-status'
        ])
        renamed_tests = {}
        for line in out.splitlines():
            _, source_path, dest_path = line.split()
            source_test = self.finder.layout_test_name(source_path)
            dest_test = self.finder.layout_test_name(dest_path)
            if source_test and dest_test:
                renamed_tests[source_test] = dest_test
        return renamed_tests
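
A minimal invocation sketch for the importer above (hypothetical: it assumes a concrete Host object such as webkitpy.common.host.Host, which supplies the executive, filesystem, and git access the constructor expects):

from webkitpy.common.host import Host

def run_wpt_import(argv=None):
    # Build the importer against a real host and run it with the flags
    # defined in parse_args() above; main() returns a process exit code.
    host = Host()
    importer = TestImporter(host)
    return importer.main(argv or ['--verbose'])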
Example #2
class TestImporter(object):

    def __init__(self, host):
        self.host = host
        self.executive = host.executive
        self.fs = host.filesystem
        self.finder = PathFinder(self.fs)
        self.verbose = False
        self.git_cl = None

    def main(self, argv=None):
        options = self.parse_args(argv)
        self.verbose = options.verbose
        log_level = logging.DEBUG if self.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        if not self.checkout_is_okay(options.allow_local_commits):
            return 1

        self.git_cl = GitCL(self.host, auth_refresh_token_json=options.auth_refresh_token_json)

        _log.debug('Noting the current Chromium commit.')
        _, show_ref_output = self.run(['git', 'show-ref', 'HEAD'])
        chromium_commit = show_ref_output.split()[0]

        dest_dir_name = WPT_DEST_NAME
        repo_url = WPT_REPO_URL

        # TODO(qyearsley): Simplify this to use LocalWPT.fetch when csswg-test
        # is merged into web-platform-tests (crbug.com/706118).
        temp_repo_path = self.finder.path_from_layout_tests(dest_dir_name)
        _log.info('Cloning repo: %s', repo_url)
        _log.info('Local path: %s', temp_repo_path)
        self.run(['git', 'clone', repo_url, temp_repo_path])

        if not options.ignore_exportable_commits:
            commits = self.exportable_but_not_exported_commits(temp_repo_path)
            if commits:
                # If there are exportable commits, then there's no more work
                # to do for now. This isn't really an error case; we expect
                # to hit this case some of the time.
                _log.info('There were exportable but not-yet-exported commits:')
                for commit in commits:
                    _log.info('  https://chromium.googlesource.com/chromium/src/+/%s', commit.sha)
                _log.info('Aborting import to prevent clobbering these commits.')
                self.clean_up_temp_repo(temp_repo_path)
                return 0

        import_commit = self.update(dest_dir_name, temp_repo_path, options.revision)

        self.clean_up_temp_repo(temp_repo_path)

        self._copy_resources()

        has_changes = self._has_changes()
        if not has_changes:
            _log.info('Done: no changes to import.')
            return 0

        commit_message = self._commit_message(chromium_commit, import_commit)
        self._commit_changes(commit_message)
        _log.info('Done: changes imported and committed.')

        if options.auto_update:
            commit_successful = self.do_auto_update()
            if not commit_successful:
                return 1
        return 0

    def parse_args(self, argv):
        parser = argparse.ArgumentParser()
        parser.description = __doc__
        parser.add_argument('-v', '--verbose', action='store_true',
                            help='log what we are doing')
        parser.add_argument('--allow-local-commits', action='store_true',
                            help='allow script to run even if we have local commits')
        parser.add_argument('-r', dest='revision', action='store',
                            help='Target revision.')
        parser.add_argument('--auto-update', action='store_true',
                            help='uploads CL and initiates commit queue.')
        parser.add_argument('--auth-refresh-token-json',
                            help='authentication refresh token JSON file, '
                                 'used for authentication for try jobs, '
                                 'generally not necessary on developer machines')
        parser.add_argument('--ignore-exportable-commits', action='store_true',
                            help='Continue even if there are exportable commits that may be overwritten.')
        # TODO(qyearsley): Change this back to parse_args once this script
        # is no longer being called with the "wpt" argument. See crbug.com/706118.
        args, _ = parser.parse_known_args(argv)
        return args

    def checkout_is_okay(self, allow_local_commits):
        git_diff_retcode, _ = self.run(['git', 'diff', '--quiet', 'HEAD'], exit_on_failure=False)
        if git_diff_retcode:
            _log.warning('Checkout is dirty; aborting.')
            return False

        local_commits = self.run(['git', 'log', '--oneline', 'origin/master..HEAD'])[1]
        if local_commits and not allow_local_commits:
            _log.warning('Checkout has local commits; aborting. Use --allow-local-commits to allow this.')
            return False

        temp_repo_path = self.finder.path_from_layout_tests(WPT_DEST_NAME)
        if self.fs.exists(temp_repo_path):
            _log.warning('%s exists; aborting.', temp_repo_path)
            return False

        return True

    def exportable_but_not_exported_commits(self, wpt_path):
        """Checks for commits that might be overwritten by importing.

        Args:
            wpt_path: The path to a local checkout of web-platform-tests.

        Returns:
            A list of commits in the Chromium repo that are exportable
            but not yet exported to the web-platform-tests repo.
        """
        local_wpt = LocalWPT(self.host, path=wpt_path)
        assert self.host.filesystem.exists(wpt_path)
        _, chromium_commit = local_wpt.most_recent_chromium_commit()
        return exportable_commits_since(chromium_commit.sha, self.host, local_wpt)

    def clean_up_temp_repo(self, temp_repo_path):
        _log.info('Deleting temp repo directory %s.', temp_repo_path)
        self.fs.rmtree(temp_repo_path)

    def _copy_resources(self):
        """Copies resources from wpt to LayoutTests/resources.

        We copy idlharness.js and testharness.js in wpt to LayoutTests/resources
        in order to use them in non-imported tests.

        If this method is changed, the lists of files expected to be identical
        in LayoutTests/PRESUBMIT.py should also be changed.
        """
        resources_to_copy_from_wpt = [
            ('idlharness.js', 'resources'),
            ('testharness.js', 'resources'),
        ]
        for filename, wpt_subdir in resources_to_copy_from_wpt:
            source = self.finder.path_from_layout_tests('external', WPT_DEST_NAME, wpt_subdir, filename)
            destination = self.finder.path_from_layout_tests('resources', filename)
            self.copyfile(source, destination)
            self.run(['git', 'add', destination])

    def _generate_manifest(self, dest_path):
        """Generates MANIFEST.json for imported tests.

        Args:
            dest_path: Path to the destination WPT directory.

        Runs the (newly-updated) manifest command if it's found, and then
        stages the generated MANIFEST.json in the git index, ready to commit.
        """
        _log.info('Generating MANIFEST.json')
        WPTManifest.generate_manifest(self.host, dest_path)
        manifest_path = self.fs.join(dest_path, 'MANIFEST.json')
        assert self.fs.exists(manifest_path)
        manifest_base_path = self.fs.normpath(
            self.fs.join(dest_path, '..', 'WPT_BASE_MANIFEST.json'))
        self.copyfile(manifest_path, manifest_base_path)
        self.run(['git', 'add', manifest_base_path])

    def update(self, dest_dir_name, temp_repo_path, revision):
        """Updates an imported repository.

        Args:
            dest_dir_name: The destination directory name.
            temp_repo_path: Path to local checkout of W3C test repo.
            revision: A W3C test repo commit hash, or None.

        Returns:
            A string for the commit description "<destination>@<commitish>".
        """
        if revision is not None:
            _log.info('Checking out %s', revision)
            self.run(['git', 'checkout', revision], cwd=temp_repo_path)

        self.run(['git', 'submodule', 'update', '--init', '--recursive'], cwd=temp_repo_path)

        _log.info('Noting the revision we are importing.')
        _, show_ref_output = self.run(['git', 'show-ref', 'origin/master'], cwd=temp_repo_path)
        master_commitish = show_ref_output.split()[0]

        dest_path = self.finder.path_from_layout_tests('external', dest_dir_name)
        self._clear_out_dest_path(dest_path)

        _log.info('Importing the tests.')
        test_copier = TestCopier(self.host, temp_repo_path)
        test_copier.do_import()

        self.run(['git', 'add', '--all', 'external/%s' % dest_dir_name])

        self._delete_orphaned_baselines(dest_path)

        self._generate_manifest(dest_path)

        _log.info('Updating TestExpectations for any removed or renamed tests.')
        self.update_all_test_expectations_files(self._list_deleted_tests(), self._list_renamed_tests())

        return '%s@%s' % (dest_dir_name, master_commitish)

    def _clear_out_dest_path(self, dest_path):
        _log.info('Cleaning out tests from %s.', dest_path)
        should_remove = lambda fs, dirname, basename: (
            not self.is_baseline(basename) and
            # See http://crbug.com/702283 for context.
            basename != 'OWNERS')
        files_to_delete = self.fs.files_under(dest_path, file_filter=should_remove)
        for subpath in files_to_delete:
            self.remove(self.finder.path_from_layout_tests('external', subpath))

    def _commit_changes(self, commit_message):
        _log.info('Committing changes.')
        self.run(['git', 'commit', '--all', '-F', '-'], stdin=commit_message)

    def _has_changes(self):
        return_code, _ = self.run(['git', 'diff', '--quiet', 'HEAD'], exit_on_failure=False)
        return return_code == 1

    def _commit_message(self, chromium_commit, import_commit):
        return ('Import %s\n\n'
                'Using wpt-import in Chromium %s.\n\n'
                'NOEXPORT=true' %
                (import_commit, chromium_commit))

    def _delete_orphaned_baselines(self, dest_path):
        _log.info('Deleting any orphaned baselines.')
        is_baseline_filter = lambda fs, dirname, basename: self.is_baseline(basename)
        previous_baselines = self.fs.files_under(dest_path, file_filter=is_baseline_filter)
        for sub_path in previous_baselines:
            full_baseline_path = self.fs.join(dest_path, sub_path)
            if not self._has_corresponding_test(full_baseline_path):
                self.fs.remove(full_baseline_path)

    def _has_corresponding_test(self, full_baseline_path):
        base = full_baseline_path.replace('-expected.txt', '')
        return any(self.fs.exists(base + ext) for ext in Port.supported_file_extensions)

    @staticmethod
    def is_baseline(basename):
        # TODO(qyearsley): Find a better, centralized place for this.
        # Also, the name for this method should be is_text_baseline.
        return basename.endswith('-expected.txt')

    def run(self, cmd, exit_on_failure=True, cwd=None, stdin=''):
        _log.debug('Running command: %s', ' '.join(cmd))

        cwd = cwd or self.finder.path_from_layout_tests()
        proc = self.executive.popen(cmd, stdout=self.executive.PIPE, stderr=self.executive.PIPE, stdin=self.executive.PIPE, cwd=cwd)
        out, err = proc.communicate(stdin)
        if proc.returncode or self.verbose:
            _log.info('# ret> %d', proc.returncode)
            if out:
                for line in out.splitlines():
                    _log.info('# out> %s', line)
            if err:
                for line in err.splitlines():
                    _log.info('# err> %s', line)
        if exit_on_failure and proc.returncode:
            self.host.exit(proc.returncode)
        return proc.returncode, out

    def check_run(self, command):
        return_code, out = self.run(command)
        if return_code:
            raise Exception('%s failed with exit code %d.' % (' '.join(command), return_code))
        return out

    def copyfile(self, source, destination):
        _log.debug('cp %s %s', source, destination)
        self.fs.copyfile(source, destination)

    def remove(self, dest):
        _log.debug('rm %s', dest)
        self.fs.remove(dest)

    def do_auto_update(self):
        """Attempts to upload a CL, make any required adjustments, and commit.

        This function assumes that the imported repo has already been updated,
        and that change has been committed. There may be newly-failing tests,
        so before being able to commit these new changes, we may need to update
        TestExpectations or download new baselines.

        Returns:
            True if successfully committed, False otherwise.
        """
        self._upload_cl()
        _log.info('Issue: %s', self.git_cl.run(['issue']).strip())

        # First, try on Blink try bots in order to get any new baselines.
        # TODO(qyearsley): Make this faster by triggering all try jobs in
        # one invocation.
        _log.info('Triggering try jobs.')
        self.git_cl.trigger_try_jobs()
        try_results = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS, timeout_seconds=TIMEOUT_SECONDS)

        if not try_results:
            self.git_cl.run(['set-close'])
            return False

        if try_results and self.git_cl.has_failing_try_results(try_results):
            self.fetch_new_expectations_and_baselines()
            message = 'Update test expectations and baselines.'
            self.check_run(['git', 'commit', '-a', '-m', message])
            self._upload_patchset(message)

        # Trigger CQ and wait for CQ try jobs to finish.
        self.git_cl.run(['set-commit', '--gerrit'])
        try_results = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS, timeout_seconds=TIMEOUT_SECONDS)

        _log.info('Try results: %s', try_results)

        # If the CQ passed, then the issue will be closed already.
        status = self.git_cl.run(['status', '--field', 'status']).strip()
        _log.info('CL status: "%s"', status)
        if status not in ('lgtm', 'closed'):
            _log.error('CQ appears to have failed; aborting.')
            self.git_cl.run(['set-close'])
            return False

        _log.info('Update completed.')
        return True

    def _upload_cl(self):
        _log.info('Uploading change list.')
        directory_owners = self.get_directory_owners()
        description = self._cl_description(directory_owners)
        self.git_cl.run([
            'upload',
            '-f',
            '--gerrit',
            '-m',
            description,
        ] + self._cc_part(directory_owners))

    def _upload_patchset(self, message):
        self.git_cl.run(['upload', '-f', '-t', message, '--gerrit'])

    @staticmethod
    def _cc_part(directory_owners):
        cc_part = []
        for owner_tuple in sorted(directory_owners):
            cc_part.extend('--cc=' + owner for owner in owner_tuple)
        return cc_part

    def get_directory_owners(self):
        """Returns a mapping of email addresses to owners of changed tests."""
        _log.info('Gathering directory owners emails to CC.')
        changed_files = self.host.git().changed_files()
        extractor = DirectoryOwnersExtractor(self.fs)
        extractor.read_owner_map()
        return extractor.list_owners(changed_files)

    def _cl_description(self, directory_owners):
        """Returns a CL description string.

        Args:
            directory_owners: A dict mapping tuples of owner email addresses to lists of directories.
        """
        description = self.check_run(['git', 'log', '-1', '--format=%B'])
        build_link = current_build_link(self.host)
        if build_link:
            description += 'Build: %s\n\n' % build_link

        description += (
            'Background: https://chromium.googlesource.com'
            '/chromium/src/+/master/docs/testing/web_platform_tests.md\n\n'
            'Note to sheriffs: If this CL causes a small number of new layout\n'
            'test failures, it may be easier to add lines to TestExpectations\n'
            'rather than reverting.\n')

        if directory_owners:
            description += self._format_directory_owners(directory_owners) + '\n\n'
        description += '[email protected]\n'

        # Move any NOEXPORT tag to the end of the description.
        description = description.replace('NOEXPORT=true', '')
        description = description.replace('\n\n\n\n', '\n\n')
        description += 'NOEXPORT=true'
        return description

    @staticmethod
    def _format_directory_owners(directory_owners):
        message_lines = ['Directory owners for changes in this CL:']
        for owner_tuple, directories in sorted(directory_owners.items()):
            message_lines.append(', '.join(owner_tuple) + ':')
            message_lines.extend('  ' + d for d in directories)
        return '\n'.join(message_lines)

    def fetch_new_expectations_and_baselines(self):
        """Adds new expectations and downloads baselines based on try job results, then commits and uploads the change."""
        _log.info('Adding test expectations lines to LayoutTests/TestExpectations.')
        expectation_updater = WPTExpectationsUpdater(self.host)
        expectation_updater.run(args=[])

    def update_all_test_expectations_files(self, deleted_tests, renamed_tests):
        """Updates all test expectations files for tests that have been deleted or renamed."""
        port = self.host.port_factory.get()
        for path, file_contents in port.all_expectations_dict().iteritems():
            parser = TestExpectationParser(port, all_tests=None, is_lint_mode=False)
            expectation_lines = parser.parse(path, file_contents)
            self._update_single_test_expectations_file(path, expectation_lines, deleted_tests, renamed_tests)

    def _update_single_test_expectations_file(self, path, expectation_lines, deleted_tests, renamed_tests):
        """Updates single test expectations file."""
        # FIXME: This won't work for removed or renamed directories with test expectations
        # that are directories rather than individual tests.
        new_lines = []
        changed_lines = []
        for expectation_line in expectation_lines:
            if expectation_line.name in deleted_tests:
                continue
            if expectation_line.name in renamed_tests:
                expectation_line.name = renamed_tests[expectation_line.name]
                # Upon parsing the file, a "path does not exist" warning is expected
                # to be there for tests that have been renamed, and if there are warnings,
                # then the original string is used. If the warnings are reset, then the
                # expectation line is re-serialized when output.
                expectation_line.warnings = []
                changed_lines.append(expectation_line)
            new_lines.append(expectation_line)
        new_file_contents = TestExpectations.list_to_string(new_lines, reconstitute_only_these=changed_lines)
        self.host.filesystem.write_text_file(path, new_file_contents)

    def _list_deleted_tests(self):
        """Returns a list of layout tests that have been deleted."""
        out = self.check_run(['git', 'diff', 'origin/master', '-M100%', '--diff-filter=D', '--name-only'])
        deleted_tests = []
        for line in out.splitlines():
            test = self.finder.layout_test_name(line)
            if test:
                deleted_tests.append(test)
        return deleted_tests

    def _list_renamed_tests(self):
        """Returns a dict mapping source to dest name for layout tests that have been renamed."""
        out = self.check_run(['git', 'diff', 'origin/master', '-M100%', '--diff-filter=R', '--name-status'])
        renamed_tests = {}
        for line in out.splitlines():
            _, source_path, dest_path = line.split()
            source_test = self.finder.layout_test_name(source_path)
            dest_test = self.finder.layout_test_name(dest_path)
            if source_test and dest_test:
                renamed_tests[source_test] = dest_test
        return renamed_tests
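
For reference, a doctest-style sketch of the commit message this variant's _commit_message builds (the SHAs are placeholders, not real commits):

>>> importer._commit_message('1a2b3c', 'wpt@4d5e6f')
'Import wpt@4d5e6f\n\nUsing wpt-import in Chromium 1a2b3c.\n\nNOEXPORT=true'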
Example #3
def test_layout_test_name_not_in_layout_tests_dir(self):
    finder = PathFinder(MockFileSystem())
    self.assertIsNone(finder.layout_test_name('some/other/path/file.html'))

class DirectoryOwnersExtractor(object):
    def __init__(self, filesystem=None):
        self.filesystem = filesystem or FileSystem()
        self.finder = PathFinder(filesystem)
        self.owner_map = None

    def read_owner_map(self):
        """Reads the W3CImportExpectations file and returns a map of directories to owners."""
        input_path = self.finder.path_from_layout_tests(
            'W3CImportExpectations')
        input_contents = self.filesystem.read_text_file(input_path)
        self.owner_map = self.lines_to_owner_map(input_contents.splitlines())

    def lines_to_owner_map(self, lines):
        current_owners = []
        owner_map = {}
        for line in lines:
            owners = self.extract_owners(line)
            if owners:
                current_owners = owners
            directory = self.extract_directory(line)
            if not owners and not directory:
                current_owners = []
            if current_owners and directory:
                owner_map[directory] = current_owners
        return owner_map

    @staticmethod
    def extract_owners(line):
        """Extracts owner email addresses listed on a line."""
        match = re.match(r'##? Owners?: (?P<addresses>.*)', line)
        if not match or not match.group('addresses'):
            return None
        email_part = match.group('addresses')
        addresses = [email_part] if ',' not in email_part else re.split(
            r',\s*', email_part)
        addresses = [s for s in addresses if re.match(r'\S+@\S+', s)]
        return addresses or None

    @staticmethod
    def extract_directory(line):
        match = re.match(r'# ?(?P<directory>\S+) \[ (Pass|Skip) \]', line)
        if match and match.group('directory'):
            return match.group('directory')
        match = re.match(r'(?P<directory>\S+) \[ Pass \]', line)
        if match and match.group('directory'):
            return match.group('directory')
        return None

    def list_owners(self, changed_files):
        """Looks up the owners for the given set of changed files.

        Args:
            changed_files: A list of file paths relative to the repository root.

        Returns:
            A dict mapping tuples of owner email addresses to lists of
            owned directories.
        """
        tests = [self.finder.layout_test_name(path) for path in changed_files]
        tests = [t for t in tests if t is not None]
        email_map = collections.defaultdict(list)
        for directory, owners in self.owner_map.iteritems():
            owned_tests = [t for t in tests if t.startswith(directory)]
            if not owned_tests:
                continue
            email_map[tuple(owners)].append(directory)
        return email_map
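
A minimal worked sketch of how lines_to_owner_map interprets W3CImportExpectations-style lines (the address and directories are illustrative; MockFileSystem is the test double used in the surrounding snippets):

extractor = DirectoryOwnersExtractor(MockFileSystem())
lines = [
    '## Owners: [email protected]',
    '# external/wpt/dom [ Pass ]',
    '',  # a line with neither owners nor a directory resets current_owners
    'external/wpt/html [ Pass ]',  # owners were reset, so this is not mapped
]
owner_map = extractor.lines_to_owner_map(lines)
# owner_map == {'external/wpt/dom': ['[email protected]']}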
Example #5
def test_layout_test_name(self):
    finder = PathFinder(MockFileSystem())
    self.assertEqual(
        finder.layout_test_name(
            'third_party/WebKit/LayoutTests/test/name.html'),
        'test/name.html')